content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MiscFuns.R \docType{data} \name{setFixest_dict} \alias{setFixest_dict} \alias{getFixest_dict} \title{Sets/gets the dictionary relabeling the variables} \format{ An object of class \code{function} of length 1. } \usage{ setFixest_dict(dict) getFixest_dict } \arguments{ \item{dict}{A named character vector. E.g. to change my variable named "a" and "b" to (resp.) "$log(a)$" and "$bonus^3$", then use \code{dict = c(a="$log(a)$", b3="$bonus^3$")}. This dictionary is used in Latex tables or in graphs by the function \code{\link[fixest]{coefplot}}. If you want to separate Latex rendering from rendering in graphs, use an ampersand first to make the variable specific to \code{coefplot}.} } \description{ Sets/gets the default dictionary used in the function \code{\link[fixest]{etable}}, \code{\link[fixest]{did_means}} and \code{\link[fixest]{coefplot}}. The dictionaries are used to relabel variables (usually towards a fancier, more explicit formatting) when exporting them into a Latex table or displaying in graphs. By setting the dictionary with \code{setFixest_dict}, you can avoid providing the argument \code{dict}. } \examples{ data(trade) est = feols(log(Euros) ~ log(dist_km)|Origin+Destination+Product, trade) # we export the result & rename some variables esttex(est, dict = c("log(Euros)"="Euros (ln)", Origin="Country of Origin")) # If you export many tables, it can be more convenient to use setFixest_dict: setFixest_dict(c("log(Euros)"="Euros (ln)", Origin="Country of Origin")) esttex(est) # variables are properly relabeled } \author{ Laurent Berge } \keyword{datasets}
/fixest/man/setFixest_dict.Rd
no_license
akhikolla/InformationHouse
R
false
true
1,710
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MiscFuns.R \docType{data} \name{setFixest_dict} \alias{setFixest_dict} \alias{getFixest_dict} \title{Sets/gets the dictionary relabeling the variables} \format{ An object of class \code{function} of length 1. } \usage{ setFixest_dict(dict) getFixest_dict } \arguments{ \item{dict}{A named character vector. E.g. to change my variable named "a" and "b" to (resp.) "$log(a)$" and "$bonus^3$", then use \code{dict = c(a="$log(a)$", b3="$bonus^3$")}. This dictionary is used in Latex tables or in graphs by the function \code{\link[fixest]{coefplot}}. If you want to separate Latex rendering from rendering in graphs, use an ampersand first to make the variable specific to \code{coefplot}.} } \description{ Sets/gets the default dictionary used in the function \code{\link[fixest]{etable}}, \code{\link[fixest]{did_means}} and \code{\link[fixest]{coefplot}}. The dictionaries are used to relabel variables (usually towards a fancier, more explicit formatting) when exporting them into a Latex table or displaying in graphs. By setting the dictionary with \code{setFixest_dict}, you can avoid providing the argument \code{dict}. } \examples{ data(trade) est = feols(log(Euros) ~ log(dist_km)|Origin+Destination+Product, trade) # we export the result & rename some variables esttex(est, dict = c("log(Euros)"="Euros (ln)", Origin="Country of Origin")) # If you export many tables, it can be more convenient to use setFixest_dict: setFixest_dict(c("log(Euros)"="Euros (ln)", Origin="Country of Origin")) esttex(est) # variables are properly relabeled } \author{ Laurent Berge } \keyword{datasets}
data.set <- read.table("http://statmod.ru/wiki/_media/study:fall2020:dataprog:quakes.txt", header = TRUE, sep = "\t") data.set <- select(data.set, -1) summary(data.set) head(data.set) library(dplyr) df <- data.set df <- mutate(df, depth = depth * 3280.839895) degreesToRadians <- function(degrees){ return (degrees * pi / 180) } distance <- function(lat1, lon1, lat2, lon2){ lat1 <- degreesToRadians(lat1) lon1 <- degreesToRadians(lon1) lat2 <- degreesToRadians(lat2) lon2 <- degreesToRadians(lon2) return (6367.44 * 2 * asin( sqrt( (sin((lat2 - lat1)/2))^2 + cos(lat1) * cos(lat2) * (sin( (lon2 - lon1) / 2 )^2 ) ) )) } SPb.lat <- 59.939095 SPb.long <- 30.315868 df <- mutate(df, direct = distance(lat, long, SPb.lat, SPb.long)) df <- filter(df, mag >= 5) write.table(df, file = "new_data.txt", sep = "\t", row.names = FALSE) print(paste0("Fiji archipelago located in area (", min(data.set$lat), ", ", max(data.set$lat), ") latitude, (", min(data.set$long), ", ", max(data.set$long), ") longitude")) print(paste("The deepest event had magnitude", data.set$mag[data.set$depth == max(data.set$depth)])) df1 <- arrange(data.set, desc(stations)) df1[1:5,]
/Semester-5/R/homework_1.R
no_license
Andrei-Loginov/University
R
false
false
1,180
r
data.set <- read.table("http://statmod.ru/wiki/_media/study:fall2020:dataprog:quakes.txt", header = TRUE, sep = "\t") data.set <- select(data.set, -1) summary(data.set) head(data.set) library(dplyr) df <- data.set df <- mutate(df, depth = depth * 3280.839895) degreesToRadians <- function(degrees){ return (degrees * pi / 180) } distance <- function(lat1, lon1, lat2, lon2){ lat1 <- degreesToRadians(lat1) lon1 <- degreesToRadians(lon1) lat2 <- degreesToRadians(lat2) lon2 <- degreesToRadians(lon2) return (6367.44 * 2 * asin( sqrt( (sin((lat2 - lat1)/2))^2 + cos(lat1) * cos(lat2) * (sin( (lon2 - lon1) / 2 )^2 ) ) )) } SPb.lat <- 59.939095 SPb.long <- 30.315868 df <- mutate(df, direct = distance(lat, long, SPb.lat, SPb.long)) df <- filter(df, mag >= 5) write.table(df, file = "new_data.txt", sep = "\t", row.names = FALSE) print(paste0("Fiji archipelago located in area (", min(data.set$lat), ", ", max(data.set$lat), ") latitude, (", min(data.set$long), ", ", max(data.set$long), ") longitude")) print(paste("The deepest event had magnitude", data.set$mag[data.set$depth == max(data.set$depth)])) df1 <- arrange(data.set, desc(stations)) df1[1:5,]
library(tidyverse) library(lubridate) library(plotly) # Helper function to show p-values signif.num <- function(x) { symnum(x, corr = FALSE, na = FALSE, legend = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) } # Daily data from: https://portal.icuregswe.org/siri/report/vtfstart-corona iva <- c(2,1,2,0,2,0,1,3,6,7,6,3,15,13,23,16,27,43,42,33,29) data <- data.frame(Dag = 1:length(iva), IVAFall = cumsum(iva), Nya = iva) # Assume exponential curve model <- lm(log2(IVAFall) ~ Dag, data); model_summary <- summary(model) data$Modell <- as.integer(2 ^ (data$Dag * model$coefficients[2] + model$coefficients[1])) data$Prognos <- NA data$PrognosNya <- NA # Predict next 7 days for (d in (length(iva)+1):(length(iva)+7)) { data <- add_row( data, Dag=d, Prognos=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])), Modell=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])), PrognosNya=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])) - as.integer(2 ^ ((d-1) * model$coefficients[2] + model$coefficients[1])) ) } p_values <- sapply(model_summary$coefficients[,4],signif.num) data$Dag <- as.Date("2020-03-05") + data$Dag plot <- ggplot(data, aes(x=Dag)) + geom_col(aes(y=Nya, fill="Historik"), show.legend = FALSE) + geom_col(aes(y=PrognosNya, fill="Prognos"), show.legend = FALSE) + geom_point(aes(y=IVAFall, color="Historik")) + geom_line(aes(y=Modell, color="Prognos")) + geom_point(aes(y=Prognos, color="Prognos")) + # geom_ribbon( # aes( # ymin = Model - 2 * 2^model_summary$sigma, # ymax = Model + 2 * 2^model_summary$sigma, # fill="Modell" # ), alpha = 0.3, show.legend = FALSE) + scale_color_discrete(aesthetics = c("color", "fill")) + geom_vline(xintercept = Sys.Date()) + geom_text(aes(y = IVAFall, label = IVAFall), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + geom_text(aes(y = Prognos, label = Prognos), vjust = "inward", hjust = "inward", show.legend = 
FALSE, check_overlap = TRUE) + geom_text(aes(y = Nya, label = Nya), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + geom_text(aes(y = PrognosNya, label = PrognosNya), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + theme_minimal() + ggtitle("Antal nyinskrivna intensivvårdtillfällen med Coronavirus i Sverige", subtitle=paste( "Rådata från: https://portal.icuregswe.org/siri/report/vtfstart-corona", "\nStaplar=nya fall, Linje/Prickar=kumulativa fall" ) ) + ylab("IVA") print(plot) print(plot +scale_y_log10()) model_summary
/sir.R
permissive
joelonsql/sir
R
false
false
2,816
r
library(tidyverse) library(lubridate) library(plotly) # Helper function to show p-values signif.num <- function(x) { symnum(x, corr = FALSE, na = FALSE, legend = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) } # Daily data from: https://portal.icuregswe.org/siri/report/vtfstart-corona iva <- c(2,1,2,0,2,0,1,3,6,7,6,3,15,13,23,16,27,43,42,33,29) data <- data.frame(Dag = 1:length(iva), IVAFall = cumsum(iva), Nya = iva) # Assume exponential curve model <- lm(log2(IVAFall) ~ Dag, data); model_summary <- summary(model) data$Modell <- as.integer(2 ^ (data$Dag * model$coefficients[2] + model$coefficients[1])) data$Prognos <- NA data$PrognosNya <- NA # Predict next 7 days for (d in (length(iva)+1):(length(iva)+7)) { data <- add_row( data, Dag=d, Prognos=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])), Modell=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])), PrognosNya=as.integer(2 ^ (d * model$coefficients[2] + model$coefficients[1])) - as.integer(2 ^ ((d-1) * model$coefficients[2] + model$coefficients[1])) ) } p_values <- sapply(model_summary$coefficients[,4],signif.num) data$Dag <- as.Date("2020-03-05") + data$Dag plot <- ggplot(data, aes(x=Dag)) + geom_col(aes(y=Nya, fill="Historik"), show.legend = FALSE) + geom_col(aes(y=PrognosNya, fill="Prognos"), show.legend = FALSE) + geom_point(aes(y=IVAFall, color="Historik")) + geom_line(aes(y=Modell, color="Prognos")) + geom_point(aes(y=Prognos, color="Prognos")) + # geom_ribbon( # aes( # ymin = Model - 2 * 2^model_summary$sigma, # ymax = Model + 2 * 2^model_summary$sigma, # fill="Modell" # ), alpha = 0.3, show.legend = FALSE) + scale_color_discrete(aesthetics = c("color", "fill")) + geom_vline(xintercept = Sys.Date()) + geom_text(aes(y = IVAFall, label = IVAFall), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + geom_text(aes(y = Prognos, label = Prognos), vjust = "inward", hjust = "inward", show.legend = 
FALSE, check_overlap = TRUE) + geom_text(aes(y = Nya, label = Nya), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + geom_text(aes(y = PrognosNya, label = PrognosNya), vjust = "inward", hjust = "inward", show.legend = FALSE, check_overlap = TRUE) + theme_minimal() + ggtitle("Antal nyinskrivna intensivvårdtillfällen med Coronavirus i Sverige", subtitle=paste( "Rådata från: https://portal.icuregswe.org/siri/report/vtfstart-corona", "\nStaplar=nya fall, Linje/Prickar=kumulativa fall" ) ) + ylab("IVA") print(plot) print(plot +scale_y_log10()) model_summary
### ========================================================================= ### summarizeOverlaps() generic and methods ### ------------------------------------------------------------------------- setGeneric("summarizeOverlaps", signature=c("features", "reads"), function(features, reads, mode=Union, ignore.strand=FALSE, ...) standardGeneric("summarizeOverlaps") ) ### ------------------------------------------------------------------------- ### Methods for GAlignments, GAlignmentsList and GAlignmentPairs objects ### .dispatchOverlaps <- function(features, reads, mode, ignore.strand, inter.feature, preprocess.reads, ...) { if (ignore.strand) { if (class(features) == "GRangesList") { r <- unlist(features) strand(r) <- "*" features@unlistData <- r } else { strand(features) <- "*" } } if (!is.null(preprocess.reads)) reads <- preprocess.reads(reads, ...) mode(features, reads, ignore.strand=ignore.strand, inter.feature=inter.feature) } .summarizeOverlaps <- function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, preprocess.reads=NULL, ...) { if (class(reads) == "GRangesList") { if (all(unlist(strand(reads), use.names=FALSE) == "*")) ignore.strand <- TRUE } else { if (all(strand(reads) == "*")) ignore.strand <- TRUE } mode <- match.fun(mode) counts <- .dispatchOverlaps(features, reads, mode, ignore.strand, inter.feature, preprocess.reads, ...) 
colData <- DataFrame(object=class(reads), records=length(reads), row.names="reads") SummarizedExperiment(assays=SimpleList(counts=as.matrix(counts)), rowRanges=features, colData=colData) } setMethod("summarizeOverlaps", c("GRanges", "GAlignments"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignments"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GAlignmentsList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignmentsList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GAlignmentPairs"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignmentPairs"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GRanges"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GRanges"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GRangesList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GRangesList"), .summarizeOverlaps ) ### ------------------------------------------------------------------------- ### 'mode' functions ### Union <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { ov <- findOverlaps(features, reads, ignore.strand=ignore.strand) if (inter.feature) { ## Remove ambigous reads. reads_to_keep <- which(countSubjectHits(ov) == 1L) ov <- ov[subjectHits(ov) %in% reads_to_keep] } countQueryHits(ov) } IntersectionStrict <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { ov <- findOverlaps(reads, features, type="within", ignore.strand=ignore.strand) if (inter.feature) { ## Remove ambigous reads. 
reads_to_keep <- which(countQueryHits(ov) == 1L) ov <- ov[queryHits(ov) %in% reads_to_keep] } countSubjectHits(ov) } .removeSharedRegions <- function(features, ignore.strand=FALSE) { if (is(features, "GRanges")) { regions <- disjoin(features, ignore.strand=ignore.strand) } else if (is(features, "GRangesList")) { regions <- disjoin(features@unlistData, ignore.strand=ignore.strand) } else { stop("internal error") # should never happen } ov <- findOverlaps(features, regions, ignore.strand=ignore.strand) regions_to_keep <- which(countSubjectHits(ov) == 1L) ov <- ov[subjectHits(ov) %in% regions_to_keep] unlisted_ans <- regions[subjectHits(ov)] ans_partitioning <- as(ov, "PartitioningByEnd") relist(unlisted_ans, ans_partitioning) } IntersectionNotEmpty <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { features <- .removeSharedRegions(features, ignore.strand=ignore.strand) Union(features, reads, ignore.strand=ignore.strand, inter.feature=inter.feature) } ### ------------------------------------------------------------------------- ### Methods for BamFiles and BamViews objects ### .checkArgs <- function(bam, singleEnd, fragments) { if (singleEnd) { if (all(isTRUE(asMates(bam)))) stop("cannot specify both 'singleEnd=TRUE' and 'asMates=TRUE'") if (fragments) stop("when 'fragments=TRUE', 'singleEnd' should be FALSE") ## all paired-end reading now goes through new C algo } else { asMates(bam) <- TRUE } } .getReadFunction <- function(singleEnd, fragments) { if (singleEnd) { FUN <- readGAlignments } else { if (fragments) FUN <- readGAlignmentsList else FUN <- readGAlignmentPairs } FUN } .countWithYieldSize <- function(FUN, features, bf, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) { if (is.na(yieldSize(bf))) { x <- FUN(bf, param=param, ...) .dispatchOverlaps(features, x, mode, ignore.strand, inter.feature, preprocess.reads, ...) 
} else { if (!isOpen(bf)) { open(bf) on.exit(close(bf)) } ct <- integer(length(features)) while (length(x <- FUN(bf, param=param, ...))) { ct <- ct + .dispatchOverlaps(features, x, mode, ignore.strand, inter.feature, preprocess.reads, ...) } ct } } .dispatchBamFiles <- function(features, reads, mode, ignore.strand, count.mapped.reads=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { exist <- sapply(reads, function(bf) file.exists(path(bf))) if (!all(exist)) stop(paste0("file(s): ", paste(path(reads)[!exist], collapse=","), " do not exist")) FUN <- .getReadFunction(singleEnd, fragments) cts <- bplapply(setNames(seq_along(reads), names(reads)), function(i, FUN, reads, features, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) { bf <- reads[[i]] .countWithYieldSize(FUN, features, bf, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) }, FUN, reads, features, mode=match.fun(mode), ignore.strand=ignore.strand, inter.feature=inter.feature, param=param, preprocess.reads=preprocess.reads, ... ) counts <- as.matrix(do.call(cbind, cts)) if (count.mapped.reads) { countBam <- countBam(reads) flag <- scanBamFlag(isUnmappedQuery=FALSE) param <- ScanBamParam(flag=flag, what="seq") colData <- DataFrame(countBam[c("records", "nucleotides")], mapped=countBam(reads, param=param)$records, row.names=colnames(counts)) } else { colData <- DataFrame(row.names=colnames(counts)) } SummarizedExperiment(assays=SimpleList(counts=counts), rowRanges=features, colData=colData) } setMethod("summarizeOverlaps", c("GRanges", "BamFile"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) 
{ .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, BamFileList(reads), mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) }) setMethod("summarizeOverlaps", c("GRangesList", "BamFile"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, BamFileList(reads), mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) }) .summarizeOverlaps_character <- function(features, reads, mode=Union, ignore.strand=FALSE, yieldSize=1000000L, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { if (!all(file.exists(reads))) stop("file(s) do not exist:\n ", paste(reads[!file.exists(reads)], collapse="\n ")) if (is.null(names(reads))) { if (any(duplicated(reads))) stop("duplicate 'reads' paths not allowed; use distinct names()") } else if (any(duplicated(names(reads)))) stop("duplicate 'names(reads)' file paths not allowed") reads <- BamFileList(reads, yieldSize=yieldSize, obeyQname=FALSE, asMates=!singleEnd) summarizeOverlaps(features, reads, mode, ignore.strand=ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) } setMethod("summarizeOverlaps", c("GRanges", "character"), .summarizeOverlaps_character ) setMethod("summarizeOverlaps", c("GRangesList", "character"), .summarizeOverlaps_character ) .summarizeOverlaps_BamFileList <- function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) 
{ if (any(duplicated(names(reads)))) stop("duplicate 'names(reads)' not allowed") .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, reads, mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) } setMethod("summarizeOverlaps", c("GRanges", "BamFileList"), .summarizeOverlaps_BamFileList ) setMethod("summarizeOverlaps", c("GRangesList", "BamFileList"), .summarizeOverlaps_BamFileList ) setMethod("summarizeOverlaps", c("BamViews", "missing"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { se <- callGeneric(bamRanges(features), BamFileList(bamPaths(features)), mode=mode, ignore.strand=ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) colData(se)$bamSamples <- bamSamples(features) colData(se)$bamIndices <- bamIndicies(features) metadata(se)$bamExperiment <- bamExperiment(features) se })
/R/summarizeOverlaps-methods.R
no_license
jmacdon/GenomicAlignments
R
false
false
12,385
r
### ========================================================================= ### summarizeOverlaps() generic and methods ### ------------------------------------------------------------------------- setGeneric("summarizeOverlaps", signature=c("features", "reads"), function(features, reads, mode=Union, ignore.strand=FALSE, ...) standardGeneric("summarizeOverlaps") ) ### ------------------------------------------------------------------------- ### Methods for GAlignments, GAlignmentsList and GAlignmentPairs objects ### .dispatchOverlaps <- function(features, reads, mode, ignore.strand, inter.feature, preprocess.reads, ...) { if (ignore.strand) { if (class(features) == "GRangesList") { r <- unlist(features) strand(r) <- "*" features@unlistData <- r } else { strand(features) <- "*" } } if (!is.null(preprocess.reads)) reads <- preprocess.reads(reads, ...) mode(features, reads, ignore.strand=ignore.strand, inter.feature=inter.feature) } .summarizeOverlaps <- function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, preprocess.reads=NULL, ...) { if (class(reads) == "GRangesList") { if (all(unlist(strand(reads), use.names=FALSE) == "*")) ignore.strand <- TRUE } else { if (all(strand(reads) == "*")) ignore.strand <- TRUE } mode <- match.fun(mode) counts <- .dispatchOverlaps(features, reads, mode, ignore.strand, inter.feature, preprocess.reads, ...) 
colData <- DataFrame(object=class(reads), records=length(reads), row.names="reads") SummarizedExperiment(assays=SimpleList(counts=as.matrix(counts)), rowRanges=features, colData=colData) } setMethod("summarizeOverlaps", c("GRanges", "GAlignments"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignments"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GAlignmentsList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignmentsList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GAlignmentPairs"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GAlignmentPairs"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GRanges"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GRanges"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRanges", "GRangesList"), .summarizeOverlaps ) setMethod("summarizeOverlaps", c("GRangesList", "GRangesList"), .summarizeOverlaps ) ### ------------------------------------------------------------------------- ### 'mode' functions ### Union <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { ov <- findOverlaps(features, reads, ignore.strand=ignore.strand) if (inter.feature) { ## Remove ambigous reads. reads_to_keep <- which(countSubjectHits(ov) == 1L) ov <- ov[subjectHits(ov) %in% reads_to_keep] } countQueryHits(ov) } IntersectionStrict <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { ov <- findOverlaps(reads, features, type="within", ignore.strand=ignore.strand) if (inter.feature) { ## Remove ambigous reads. 
reads_to_keep <- which(countQueryHits(ov) == 1L) ov <- ov[queryHits(ov) %in% reads_to_keep] } countSubjectHits(ov) } .removeSharedRegions <- function(features, ignore.strand=FALSE) { if (is(features, "GRanges")) { regions <- disjoin(features, ignore.strand=ignore.strand) } else if (is(features, "GRangesList")) { regions <- disjoin(features@unlistData, ignore.strand=ignore.strand) } else { stop("internal error") # should never happen } ov <- findOverlaps(features, regions, ignore.strand=ignore.strand) regions_to_keep <- which(countSubjectHits(ov) == 1L) ov <- ov[subjectHits(ov) %in% regions_to_keep] unlisted_ans <- regions[subjectHits(ov)] ans_partitioning <- as(ov, "PartitioningByEnd") relist(unlisted_ans, ans_partitioning) } IntersectionNotEmpty <- function(features, reads, ignore.strand=FALSE, inter.feature=TRUE) { features <- .removeSharedRegions(features, ignore.strand=ignore.strand) Union(features, reads, ignore.strand=ignore.strand, inter.feature=inter.feature) } ### ------------------------------------------------------------------------- ### Methods for BamFiles and BamViews objects ### .checkArgs <- function(bam, singleEnd, fragments) { if (singleEnd) { if (all(isTRUE(asMates(bam)))) stop("cannot specify both 'singleEnd=TRUE' and 'asMates=TRUE'") if (fragments) stop("when 'fragments=TRUE', 'singleEnd' should be FALSE") ## all paired-end reading now goes through new C algo } else { asMates(bam) <- TRUE } } .getReadFunction <- function(singleEnd, fragments) { if (singleEnd) { FUN <- readGAlignments } else { if (fragments) FUN <- readGAlignmentsList else FUN <- readGAlignmentPairs } FUN } .countWithYieldSize <- function(FUN, features, bf, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) { if (is.na(yieldSize(bf))) { x <- FUN(bf, param=param, ...) .dispatchOverlaps(features, x, mode, ignore.strand, inter.feature, preprocess.reads, ...) 
} else { if (!isOpen(bf)) { open(bf) on.exit(close(bf)) } ct <- integer(length(features)) while (length(x <- FUN(bf, param=param, ...))) { ct <- ct + .dispatchOverlaps(features, x, mode, ignore.strand, inter.feature, preprocess.reads, ...) } ct } } .dispatchBamFiles <- function(features, reads, mode, ignore.strand, count.mapped.reads=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { exist <- sapply(reads, function(bf) file.exists(path(bf))) if (!all(exist)) stop(paste0("file(s): ", paste(path(reads)[!exist], collapse=","), " do not exist")) FUN <- .getReadFunction(singleEnd, fragments) cts <- bplapply(setNames(seq_along(reads), names(reads)), function(i, FUN, reads, features, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) { bf <- reads[[i]] .countWithYieldSize(FUN, features, bf, mode, ignore.strand, inter.feature, param, preprocess.reads, ...) }, FUN, reads, features, mode=match.fun(mode), ignore.strand=ignore.strand, inter.feature=inter.feature, param=param, preprocess.reads=preprocess.reads, ... ) counts <- as.matrix(do.call(cbind, cts)) if (count.mapped.reads) { countBam <- countBam(reads) flag <- scanBamFlag(isUnmappedQuery=FALSE) param <- ScanBamParam(flag=flag, what="seq") colData <- DataFrame(countBam[c("records", "nucleotides")], mapped=countBam(reads, param=param)$records, row.names=colnames(counts)) } else { colData <- DataFrame(row.names=colnames(counts)) } SummarizedExperiment(assays=SimpleList(counts=counts), rowRanges=features, colData=colData) } setMethod("summarizeOverlaps", c("GRanges", "BamFile"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) 
{ .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, BamFileList(reads), mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) }) setMethod("summarizeOverlaps", c("GRangesList", "BamFile"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, BamFileList(reads), mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) }) .summarizeOverlaps_character <- function(features, reads, mode=Union, ignore.strand=FALSE, yieldSize=1000000L, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { if (!all(file.exists(reads))) stop("file(s) do not exist:\n ", paste(reads[!file.exists(reads)], collapse="\n ")) if (is.null(names(reads))) { if (any(duplicated(reads))) stop("duplicate 'reads' paths not allowed; use distinct names()") } else if (any(duplicated(names(reads)))) stop("duplicate 'names(reads)' file paths not allowed") reads <- BamFileList(reads, yieldSize=yieldSize, obeyQname=FALSE, asMates=!singleEnd) summarizeOverlaps(features, reads, mode, ignore.strand=ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) } setMethod("summarizeOverlaps", c("GRanges", "character"), .summarizeOverlaps_character ) setMethod("summarizeOverlaps", c("GRangesList", "character"), .summarizeOverlaps_character ) .summarizeOverlaps_BamFileList <- function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) 
{ if (any(duplicated(names(reads)))) stop("duplicate 'names(reads)' not allowed") .checkArgs(reads, singleEnd, fragments) .dispatchBamFiles(features, reads, mode, ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) } setMethod("summarizeOverlaps", c("GRanges", "BamFileList"), .summarizeOverlaps_BamFileList ) setMethod("summarizeOverlaps", c("GRangesList", "BamFileList"), .summarizeOverlaps_BamFileList ) setMethod("summarizeOverlaps", c("BamViews", "missing"), function(features, reads, mode=Union, ignore.strand=FALSE, inter.feature=TRUE, singleEnd=TRUE, fragments=FALSE, param=ScanBamParam(), preprocess.reads=NULL, ...) { se <- callGeneric(bamRanges(features), BamFileList(bamPaths(features)), mode=mode, ignore.strand=ignore.strand, inter.feature=inter.feature, singleEnd=singleEnd, fragments=fragments, param=param, preprocess.reads=preprocess.reads, ...) colData(se)$bamSamples <- bamSamples(features) colData(se)$bamIndices <- bamIndicies(features) metadata(se)$bamExperiment <- bamExperiment(features) se })
testlist <- list(bytes1 = integer(0), pmutation = 4.46014795967618e+43) result <- do.call(mcga:::ByteCodeMutation,testlist) str(result)
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802057-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
135
r
testlist <- list(bytes1 = integer(0), pmutation = 4.46014795967618e+43) result <- do.call(mcga:::ByteCodeMutation,testlist) str(result)
library(methods) library(dplyr) library(tidyr) library(dembase) portugal_exposure <- read.table("data-raw/portugal_exposure/PRT.Exposures_1x1.txt", skip = 2, header = TRUE, na.string = ".") %>% select(year = Year, age = Age, Female, Male) %>% gather(key = sex, value = count, Female, Male) %>% filter(year %in% 1950:2015) %>% xtabs(count ~ age + sex + year, data = .) %>% Counts(dimscales = c(year = "Intervals")) %>% collapseIntervals(dimension = "age", breaks = c(0, 1, seq(5, 100, 5))) %>% as("array") save(portugal_exposure, file = "data/portugal_exposure.rda", compress = "bzip2")
/data-raw/portugal_exposure/portugal_exposure.R
no_license
johnrbryant/bdefdata
R
false
false
725
r
# Build the Portugal exposure-to-risk array (age x sex x year, 1950-2015)
# from the Human Mortality Database 1x1 exposures file, collapse single-year
# ages into abridged age groups (0, 1-4, 5-9, ..., 100+), and save the result
# as package data.
library(methods)
library(dplyr)
library(tidyr)
library(dembase)

portugal_exposure <-
    read.table("data-raw/portugal_exposure/PRT.Exposures_1x1.txt",
               skip = 2,
               header = TRUE,
               # fix: spell the argument out in full; the original used
               # 'na.string', which only worked via partial matching
               na.strings = ".") %>%
    select(year = Year, age = Age, Female, Male) %>%
    # reshape to long format: one row per (year, age, sex)
    gather(key = sex, value = count, Female, Male) %>%
    filter(year %in% 1950:2015) %>%
    # cross-tabulate into an age x sex x year array of exposure counts
    xtabs(count ~ age + sex + year, data = .) %>%
    Counts(dimscales = c(year = "Intervals")) %>%
    collapseIntervals(dimension = "age",
                      breaks = c(0, 1, seq(5, 100, 5))) %>%
    as("array")

save(portugal_exposure,
     file = "data/portugal_exposure.rda",
     compress = "bzip2")
# Group best-subset selection for logistic regression (internal workhorse).
#
# Iteratively selects an active set of predictors of total size `s`
# (respecting the group structure `Gi`) via the primal-dual active-set
# helper gget_A(), refits an unpenalized logistic model on the active set
# at each step, and stops once the active set stabilizes.
#
# Arguments:
#   x         design matrix (n x p)
#   y         binary response; exactly two unique values required
#   Gi        group label for each column of x
#   beta0     initial coefficient vector (defaults to all zeros)
#   intercept initial intercept value
#   s         target number of selected predictors (must not exceed p)
#   max.steps maximum number of active-set iterations
#   glm.max   maximum iterations passed to glmnet
#   weights   observation weights (rescaled internally to mean 1)
#   normalize standardize the columns of x before fitting?
#
# Returns a list with the fitted model, coefficients, deviances and
# AIC/BIC/EBIC information criteria.
gbess.glm = function(x, y, Gi, beta0, intercept=0, s, max.steps = 10,
                     glm.max=1e6, weights=rep(1,nrow(x)), normalize=FALSE) {
  if(length(unique(y))!=2) stop("Please input binary variable!")
  if(missing(beta0)) beta0=rep(0,ncol(x))
  if(s>length(beta0)) {stop("s is too large")}
  # initial
  n = dim(x)[1]
  p = dim(x)[2]
  vn = dimnames(x)[[2]]
  one = rep(1,n)
  names(beta0) = vn
  xs = x                            # keep an unpermuted, unscaled copy
  weights = weights/mean(weights)
  # reorder columns so that members of each group are contiguous
  orderGi = order(Gi)
  x = x[,orderGi]
  Gi = Gi[orderGi]
  gi = unique(Gi)
  gi_index = match(gi, Gi)          # first column index of each group
  N = length(gi)
  if(normalize) {
    # weighted centering and scaling; guard zero-variance columns so we
    # never divide by (numerical) zero
    meanx = drop(weights %*% x)/n
    x = scale(x, meanx, FALSE)
    normx = sqrt(drop(weights %*% (x^2)))
    nosignal = normx/sqrt(n) < .Machine$double.eps
    if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
    names(normx) = NULL
    x = sqrt(n)*scale(x, FALSE, normx)
  }
  beta = beta0
  coef0 = intercept
  A0 = NULL
  B = rep(0,p+1)
  for(k in 1:max.steps){
    # primal-dual active-set update (compiled helper); returned indices
    # are 0-based, hence the +1 below
    setA = gget_A(x, y, Gi, gi_index, s, beta, coef0, n, p, N, weights, B)
    A = setA$A+1
    B = setA$B+1
    beta = rep(0,p)
    gr_size = setA$gr_size
    if(length(B)>=2) {
      logit=glmnet(x[,B],y,family="binomial",lambda = 0,maxit=glm.max,
                   weights = weights)
      beta[B]=logit$beta
      coef0=logit$a0
    }else{
      logit=glm(y~x[,B],family="binomial", weights = weights)
      beta[B]=logit$coefficients[-1]
      coef0=logit$coefficients[1]
    }
    # converged once the active set stops changing
    # (fix: dropped the redundant '== TRUE' comparison on setequal())
    if(setequal(A, A0)){
      break
    }
    A0 <- A
  }
  if(normalize) {
    # map coefficients back to the original scale of x
    beta=sqrt(n)*beta/normx
    coef0=coef0-sum(beta*meanx)
  }
  beta[orderGi] = beta              # undo the group permutation
  names(beta) = vn
  A = orderGi[A]
  B = orderGi[B]
  s=length(B)
  # NOTE(review): eta is computed from the permuted (and possibly scaled)
  # x with the back-permuted beta, and omits coef0 -- verify this is the
  # intended linear predictor for the deviance below.
  eta = x%*%beta
  pr = exp(eta)/(1+exp(eta))
  xbest=xs[,which(beta!=0)]
  bestmodel=glm(y~xbest, family="binomial", weights = weights)
  # deviance restricted to numerically safe probabilities
  dev=-2*sum((weights*((y*log(pr) + (1-y)*log(1-pr))))[which(pr>1e-20&pr<1-1e-20)])
  nulldev=-2*sum(weights*(y*log(0.5) + (1-y)*log(0.5)))
  aic=dev+2*s
  bic=dev+log(n)*s
  ebic=dev+(log(n)+2*log(p))*s
  return(list(family="bess_binomial",beta=beta,coef0=coef0,nsample=n,
              bestmodel=bestmodel,
              deviance=dev,nulldeviance=nulldev,
              lambda=setA$max_T^2/2,p=p,AIC=aic,BIC=bic,EBIC=ebic,
              max.steps=max.steps,
              gr_size=gr_size))
}
/fuzzedpackages/BeSS/R/gbess.glm.R
no_license
akhikolla/testpackages
R
false
false
2,390
r
# Group best-subset selection for logistic regression (internal workhorse).
#
# Iteratively selects an active set of predictors of total size `s`
# (respecting the group structure `Gi`) via the primal-dual active-set
# helper gget_A(), refits an unpenalized logistic model on the active set
# at each step, and stops once the active set stabilizes.
#
# Arguments:
#   x         design matrix (n x p)
#   y         binary response; exactly two unique values required
#   Gi        group label for each column of x
#   beta0     initial coefficient vector (defaults to all zeros)
#   intercept initial intercept value
#   s         target number of selected predictors (must not exceed p)
#   max.steps maximum number of active-set iterations
#   glm.max   maximum iterations passed to glmnet
#   weights   observation weights (rescaled internally to mean 1)
#   normalize standardize the columns of x before fitting?
#
# Returns a list with the fitted model, coefficients, deviances and
# AIC/BIC/EBIC information criteria.
gbess.glm = function(x, y, Gi, beta0, intercept=0, s, max.steps = 10,
                     glm.max=1e6, weights=rep(1,nrow(x)), normalize=FALSE) {
  if(length(unique(y))!=2) stop("Please input binary variable!")
  if(missing(beta0)) beta0=rep(0,ncol(x))
  if(s>length(beta0)) {stop("s is too large")}
  # initial
  n = dim(x)[1]
  p = dim(x)[2]
  vn = dimnames(x)[[2]]
  one = rep(1,n)
  names(beta0) = vn
  xs = x                            # keep an unpermuted, unscaled copy
  weights = weights/mean(weights)
  # reorder columns so that members of each group are contiguous
  orderGi = order(Gi)
  x = x[,orderGi]
  Gi = Gi[orderGi]
  gi = unique(Gi)
  gi_index = match(gi, Gi)          # first column index of each group
  N = length(gi)
  if(normalize) {
    # weighted centering and scaling; guard zero-variance columns so we
    # never divide by (numerical) zero
    meanx = drop(weights %*% x)/n
    x = scale(x, meanx, FALSE)
    normx = sqrt(drop(weights %*% (x^2)))
    nosignal = normx/sqrt(n) < .Machine$double.eps
    if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
    names(normx) = NULL
    x = sqrt(n)*scale(x, FALSE, normx)
  }
  beta = beta0
  coef0 = intercept
  A0 = NULL
  B = rep(0,p+1)
  for(k in 1:max.steps){
    # primal-dual active-set update (compiled helper); returned indices
    # are 0-based, hence the +1 below
    setA = gget_A(x, y, Gi, gi_index, s, beta, coef0, n, p, N, weights, B)
    A = setA$A+1
    B = setA$B+1
    beta = rep(0,p)
    gr_size = setA$gr_size
    if(length(B)>=2) {
      logit=glmnet(x[,B],y,family="binomial",lambda = 0,maxit=glm.max,
                   weights = weights)
      beta[B]=logit$beta
      coef0=logit$a0
    }else{
      logit=glm(y~x[,B],family="binomial", weights = weights)
      beta[B]=logit$coefficients[-1]
      coef0=logit$coefficients[1]
    }
    # converged once the active set stops changing
    # (fix: dropped the redundant '== TRUE' comparison on setequal())
    if(setequal(A, A0)){
      break
    }
    A0 <- A
  }
  if(normalize) {
    # map coefficients back to the original scale of x
    beta=sqrt(n)*beta/normx
    coef0=coef0-sum(beta*meanx)
  }
  beta[orderGi] = beta              # undo the group permutation
  names(beta) = vn
  A = orderGi[A]
  B = orderGi[B]
  s=length(B)
  # NOTE(review): eta is computed from the permuted (and possibly scaled)
  # x with the back-permuted beta, and omits coef0 -- verify this is the
  # intended linear predictor for the deviance below.
  eta = x%*%beta
  pr = exp(eta)/(1+exp(eta))
  xbest=xs[,which(beta!=0)]
  bestmodel=glm(y~xbest, family="binomial", weights = weights)
  # deviance restricted to numerically safe probabilities
  dev=-2*sum((weights*((y*log(pr) + (1-y)*log(1-pr))))[which(pr>1e-20&pr<1-1e-20)])
  nulldev=-2*sum(weights*(y*log(0.5) + (1-y)*log(0.5)))
  aic=dev+2*s
  bic=dev+log(n)*s
  ebic=dev+(log(n)+2*log(p))*s
  return(list(family="bess_binomial",beta=beta,coef0=coef0,nsample=n,
              bestmodel=bestmodel,
              deviance=dev,nulldeviance=nulldev,
              lambda=setA$max_T^2/2,p=p,AIC=aic,BIC=bic,EBIC=ebic,
              max.steps=max.steps,
              gr_size=gr_size))
}
#' Kill all jobs submitted to the computing platform, for one or multiple flows
#'
#' @description
#'
#' NOTE:
#'
#' \strong{This requires files which are created at the end of the
#' \link{submit_flow} command}.
#'
#' Even if you want to kill the flow, it's best to let submit_flow do its job;
#' when done simply use \code{kill(flow_wd)}.
#' If submit_flow is interrupted, files like flow_details.rds etc. are not
#' created, thus flowr loses the association of jobs with a flow instance and
#' cannot monitor, kill or re-run the flow.
#'
#' @param x either path to flow wd or object of class \link{flow}
#' @param jobid_col Advanced use. The column name in 'flow_details.txt' file
#'   used to fetch jobids to kill
#' @param kill_cmd The command used to kill. flowr tries to guess this command,
#'   as defined in detect_kill_cmd(). Supply it here for custom platforms.
#' @param force You need to set force=TRUE, to kill multiple flows. This makes
#'   sure multiple flows are NOT killed by accident.
#' @param ... not used
#' @inheritParams to_flow
#'
#' @export
#' @examples
#' \dontrun{
#' ## example for terminal
#' ## flowr kill_flow x=path_to_flow_directory
#' ## In case path matches multiple folders, flowr asks before killing
#' kill(x='fastq_haplotyper*')
#' # Flowr: streamlining workflows
#' # found multiple wds:
#' # /fastq_haplotyper-MS132-20150825-16-24-04-0Lv1PbpI
#' # /fastq_haplotyper-MS132-20150825-17-47-52-5vFIkrMD
#' # Really kill all of them ? kill again with force=TRUE
#'
#' ## submitting again with force=TRUE will kill them:
#' kill(x='fastq_haplotyper*', force = TRUE)
#' }
kill <- function(x, ...) {
	UseMethod("kill")
}

#' @rdname kill
#' @importFrom params kable
#' @export
kill.character <- function(x, force = FALSE, ...){
	x = get_wds(x)
	# refuse to kill several flows at once unless the caller opts in
	if(length(x) > 1 && !force){
		message("found multiple wds:\n", paste(x, collapse = "\n"),
						"\nIf you want to kill all of them, kill again with force=TRUE")
		return("multi wds")
	}
	for(i in seq_along(x)){
		fobj = read_fobj(x[i])
		if(!is.flow(fobj)){
			stop("\nmissing flow_details at this location\n",
					 "flowr can only kill flows, where the jobs ids are available.\n",
					 "Please check and confirm that the path supplied is correct, ",
					 "and that it has a flow_details.rds file. \n ls -l ", x[i])
		}
		kill.flow(fobj, ...)
	}
}

#' @rdname kill
#' @importFrom utils txtProgressBar
#' @export
kill.flow <- function(x,
											kill_cmd,
											verbose = get_opts("verbose"),
											jobid_col = "job_sub_id", ...){

	if(missing(kill_cmd)){
		kill_cmd = detect_kill_cmd(x)
	}

	#flow_details = read_flow_detail_fl(wd)
	check_args()

	flow_det = to_flowdet(x)
	wd = x@flow_path
	log = file.path(wd, "kill_jobs.out")

	# one kill command per submitted job id, output appended to the log
	cmds <- sprintf("%s %s >> %s", kill_cmd, flow_det[,jobid_col], log)

	## redirect STDERR as well if silent
	if(verbose < 2)
		cmds = paste0(cmds, " 2>&1")

	message("killing ", length(cmds), " jobs, please wait... \n",
					"See kill_jobs.out in the wd for more details.")

	# fix: txtProgressBar() errors when min == max, so only draw a
	# progress bar when there is more than one job to kill
	pb <- if(length(cmds) > 1)
		txtProgressBar(style = 3, min = 1, max = length(cmds))

	tmp <- lapply(seq_along(cmds), function(i){
		if(verbose > 2)
			message(cmds[i], "\n")
		try(system(cmds[i], intern = TRUE))
		if(length(cmds) > 1)
			pb$up(i)
	})
	if(!is.null(pb))
		close(pb)

	invisible(tmp)
}

# Guess the platform's job-status command from the last job of the flow.
#' @importFrom utils tail
detect_stat_cmd <- function(fobj){
	## --- at times the first jobs might be local, so fetch from the last
	plat = tail(fobj@jobs, 1)[[1]]@platform
	switch(plat,
				 moab = "qstat",
				 lsf = "bjobs",
				 torque = "qstat",
				 sge = "qstat",
				 # fix: 'sbatch' submits jobs; the slurm status command is 'squeue'
				 slurm = "squeue")
}

# Guess the platform's job-kill command from the last job of the flow.
#' @importFrom utils tail
detect_kill_cmd <- function(fobj){
	## --- at times the first jobs might be local, so fetch from the last
	plat = tail(fobj@jobs, 1)[[1]]@platform
	switch(plat,
				 moab = "qdel",
				 lsf = "bkill",
				 torque = "qdel",
				 sge = "qdel",
				 slurm = "scancel")
}

## --------------------- d e p r e c a t e d  f u n c t i o n s ------------ ##

kill_flow <- function(...){
	.Deprecated("kill")
	kill(...)
}
/R/kill-flow.R
permissive
KillEdision/flowr
R
false
false
4,233
r
#' Kill all jobs submitted to the computing platform, for one or multiple flows
#'
#' @description
#'
#' NOTE:
#'
#' \strong{This requires files which are created at the end of the
#' \link{submit_flow} command}.
#'
#' Even if you want to kill the flow, it's best to let submit_flow do its job;
#' when done simply use \code{kill(flow_wd)}.
#' If submit_flow is interrupted, files like flow_details.rds etc. are not
#' created, thus flowr loses the association of jobs with a flow instance and
#' cannot monitor, kill or re-run the flow.
#'
#' @param x either path to flow wd or object of class \link{flow}
#' @param jobid_col Advanced use. The column name in 'flow_details.txt' file
#'   used to fetch jobids to kill
#' @param kill_cmd The command used to kill. flowr tries to guess this command,
#'   as defined in detect_kill_cmd(). Supply it here for custom platforms.
#' @param force You need to set force=TRUE, to kill multiple flows. This makes
#'   sure multiple flows are NOT killed by accident.
#' @param ... not used
#' @inheritParams to_flow
#'
#' @export
#' @examples
#' \dontrun{
#' ## example for terminal
#' ## flowr kill_flow x=path_to_flow_directory
#' ## In case path matches multiple folders, flowr asks before killing
#' kill(x='fastq_haplotyper*')
#' # Flowr: streamlining workflows
#' # found multiple wds:
#' # /fastq_haplotyper-MS132-20150825-16-24-04-0Lv1PbpI
#' # /fastq_haplotyper-MS132-20150825-17-47-52-5vFIkrMD
#' # Really kill all of them ? kill again with force=TRUE
#'
#' ## submitting again with force=TRUE will kill them:
#' kill(x='fastq_haplotyper*', force = TRUE)
#' }
kill <- function(x, ...) {
	UseMethod("kill")
}

#' @rdname kill
#' @importFrom params kable
#' @export
kill.character <- function(x, force = FALSE, ...){
	x = get_wds(x)
	# refuse to kill several flows at once unless the caller opts in
	if(length(x) > 1 && !force){
		message("found multiple wds:\n", paste(x, collapse = "\n"),
						"\nIf you want to kill all of them, kill again with force=TRUE")
		return("multi wds")
	}
	for(i in seq_along(x)){
		fobj = read_fobj(x[i])
		if(!is.flow(fobj)){
			stop("\nmissing flow_details at this location\n",
					 "flowr can only kill flows, where the jobs ids are available.\n",
					 "Please check and confirm that the path supplied is correct, ",
					 "and that it has a flow_details.rds file. \n ls -l ", x[i])
		}
		kill.flow(fobj, ...)
	}
}

#' @rdname kill
#' @importFrom utils txtProgressBar
#' @export
kill.flow <- function(x,
											kill_cmd,
											verbose = get_opts("verbose"),
											jobid_col = "job_sub_id", ...){

	if(missing(kill_cmd)){
		kill_cmd = detect_kill_cmd(x)
	}

	#flow_details = read_flow_detail_fl(wd)
	check_args()

	flow_det = to_flowdet(x)
	wd = x@flow_path
	log = file.path(wd, "kill_jobs.out")

	# one kill command per submitted job id, output appended to the log
	cmds <- sprintf("%s %s >> %s", kill_cmd, flow_det[,jobid_col], log)

	## redirect STDERR as well if silent
	if(verbose < 2)
		cmds = paste0(cmds, " 2>&1")

	message("killing ", length(cmds), " jobs, please wait... \n",
					"See kill_jobs.out in the wd for more details.")

	# fix: txtProgressBar() errors when min == max, so only draw a
	# progress bar when there is more than one job to kill
	pb <- if(length(cmds) > 1)
		txtProgressBar(style = 3, min = 1, max = length(cmds))

	tmp <- lapply(seq_along(cmds), function(i){
		if(verbose > 2)
			message(cmds[i], "\n")
		try(system(cmds[i], intern = TRUE))
		if(length(cmds) > 1)
			pb$up(i)
	})
	if(!is.null(pb))
		close(pb)

	invisible(tmp)
}

# Guess the platform's job-status command from the last job of the flow.
#' @importFrom utils tail
detect_stat_cmd <- function(fobj){
	## --- at times the first jobs might be local, so fetch from the last
	plat = tail(fobj@jobs, 1)[[1]]@platform
	switch(plat,
				 moab = "qstat",
				 lsf = "bjobs",
				 torque = "qstat",
				 sge = "qstat",
				 # fix: 'sbatch' submits jobs; the slurm status command is 'squeue'
				 slurm = "squeue")
}

# Guess the platform's job-kill command from the last job of the flow.
#' @importFrom utils tail
detect_kill_cmd <- function(fobj){
	## --- at times the first jobs might be local, so fetch from the last
	plat = tail(fobj@jobs, 1)[[1]]@platform
	switch(plat,
				 moab = "qdel",
				 lsf = "bkill",
				 torque = "qdel",
				 sge = "qdel",
				 slurm = "scancel")
}

## --------------------- d e p r e c a t e d  f u n c t i o n s ------------ ##

kill_flow <- function(...){
	.Deprecated("kill")
	kill(...)
}
# plot3.R -- Energy sub-metering for 1-2 Feb 2007, saved as plot3.png.

data_file <- "D:/R_files/Assignments/household_power_consumption.txt"

# fix: the original called readLines() inside read.table(), scanning the file
# twice, and left a stray trailing comma in the readLines() call.
# grep() returns EVERY matching line ("11/2/2007" etc. also match the
# pattern), but read.table's 'skip' needs a single integer, so take the
# first match -- the same value the original effectively used.
skip_rows <- grep("1/2/2007", readLines(data_file))[1]

df <- read.table(data_file,
                 skip = skip_rows,
                 nrows = 2878,
                 sep = ";",
                 header = TRUE,
                 stringsAsFactors = FALSE)
colnames(df) <- c("Date", "Time", "Global_active_power",
                  "Global_reactive_power", "Voltage", "Global_intensity",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# converting '?' to 'NA'
df[df == "?"] <- NA

# changing the date format; use Date rather than the POSIXlt that
# strptime() returns (POSIXlt columns in data frames are fragile).
# paste() below yields the same "YYYY-MM-DD HH:MM:SS" strings as before.
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
datetime <- paste(df$Date, df$Time)
df$Datetime <- as.POSIXct(datetime)

# plot: the three sub-metering series on one set of axes
plot(df$Datetime, df$Sub_metering_1, type = "l",
     ylab = "Energy Sub-metering", xlab = "", col = "black")
lines(df$Datetime, df$Sub_metering_2, type = "l", col = "red")
lines(df$Datetime, df$Sub_metering_3, type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.6)

# copy the screen device to a 480x480 png
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
/plot3.R
no_license
raghavendra99/ExData_Plotting1
R
false
false
1,013
r
# plot3.R -- Energy sub-metering for 1-2 Feb 2007, saved as plot3.png.

data_file <- "D:/R_files/Assignments/household_power_consumption.txt"

# fix: the original called readLines() inside read.table(), scanning the file
# twice, and left a stray trailing comma in the readLines() call.
# grep() returns EVERY matching line ("11/2/2007" etc. also match the
# pattern), but read.table's 'skip' needs a single integer, so take the
# first match -- the same value the original effectively used.
skip_rows <- grep("1/2/2007", readLines(data_file))[1]

df <- read.table(data_file,
                 skip = skip_rows,
                 nrows = 2878,
                 sep = ";",
                 header = TRUE,
                 stringsAsFactors = FALSE)
colnames(df) <- c("Date", "Time", "Global_active_power",
                  "Global_reactive_power", "Voltage", "Global_intensity",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# converting '?' to 'NA'
df[df == "?"] <- NA

# changing the date format; use Date rather than the POSIXlt that
# strptime() returns (POSIXlt columns in data frames are fragile).
# paste() below yields the same "YYYY-MM-DD HH:MM:SS" strings as before.
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
datetime <- paste(df$Date, df$Time)
df$Datetime <- as.POSIXct(datetime)

# plot: the three sub-metering series on one set of axes
plot(df$Datetime, df$Sub_metering_1, type = "l",
     ylab = "Energy Sub-metering", xlab = "", col = "black")
lines(df$Datetime, df$Sub_metering_2, type = "l", col = "red")
lines(df$Datetime, df$Sub_metering_3, type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.6)

# copy the screen device to a 480x480 png
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
# Barplot of US murder rates by state abbreviation, saved to figs/barplot.png.
library(tidyverse)

load("rda/murders.rda")
view(murders)

# Order the abbreviations by murder rate so the flipped bars appear sorted.
plot_data <- mutate(murders, abb = reorder(abb, rate))

rate_barplot <- ggplot(plot_data, aes(abb, rate)) +
  geom_bar(width = 0.5, stat = "identity", color = "black") +
  coord_flip()
print(rate_barplot)

ggsave("figs/barplot.png")  # saving plot
/analysis.R
no_license
Juanc-lab/murders
R
false
false
269
r
# Barplot of US murder rates by state abbreviation, saved to figs/barplot.png.
library(tidyverse)

load("rda/murders.rda")
view(murders)

# Order the abbreviations by murder rate so the flipped bars appear sorted.
plot_data <- mutate(murders, abb = reorder(abb, rate))

rate_barplot <- ggplot(plot_data, aes(abb, rate)) +
  geom_bar(width = 0.5, stat = "identity", color = "black") +
  coord_flip()
print(rate_barplot)

ggsave("figs/barplot.png")  # saving plot
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_populations.R \name{autocorr_sim} \alias{autocorr_sim} \title{Simulate Temporally Autocorrelated Populations for Every Combination of Parameters} \usage{ autocorr_sim( timesteps, start, survPhi, fecundPhi, survMean, survSd, fecundMean, fecundSd, replicates ) } \arguments{ \item{timesteps}{The number of timesteps you want to simulate. Individuals are added and killed off every timestep according to the survival and fertility rates. Can be a scalar or a vector of values to loop over.} \item{start}{The starting population size. Can be a scalar or vector.} \item{survPhi}{The temporal autocorrelation of survival. 0 is white noise (uncorrelated), positive values are red noise (directly correlated) and negative values are blue noise (inversely correlated). Can be a scalar or a vector.} \item{fecundPhi}{The temporal autocorrelation of fecundity. As above.} \item{survMean}{The mean survival from timestep to timestep. Must be a value between 0 (all individuals die) and 1 (all individuals live). Can be a scalar or a vector.} \item{survSd}{The standard deviation of the survival from timestep to timestep. Must be a value between 0 and 1. Can be a scalar or a vector.} \item{fecundMean}{The mean fertility: mean offspring produced by each individual per timestep. Can be a scalar or a vector.} \item{fecundSd}{The standard deviation of the fertility. 
Can be a scalar or a vector of values.} \item{replicates}{How many replicates you would like of each possible combination of parameters.} } \value{ A list of data frames, each with fourteen variables: timestep, newborns (new individuals added this timestep), survivors (individuals alive last year who survived this timestep), population (total individuals alive), growth (the increase or decrease in population size from last year), estimated survival in the timestep, estimated fecundity in the timestep, and the seven parameters used to generate the simulation. } \description{ Essentially a loop of \code{\link{unstructured_pop}}, this function simulates a population with temporally autocorrelated vital rates for every combination of parameters you specify, with as many replicates as desired. It also estimates the sample mean survival and fertility for each simulated population. Please be advised that this function can be very computationally intensive if you provide many possible parameter values and/or ask for many replicates. } \examples{ survival_range <- autocorr_sim(timesteps = 30, start = 200, survPhi = 0.3, fecundPhi = 0.1, survMean = c(0.2, 0.3, 0.4, 0.5, 0.6), survSd = 0.5, fecundMean = 1.1, fecundSd = 0.5, replicates = 50) head(survival_range[[1]]) }
/man/autocorr_sim.Rd
no_license
japilo/colorednoise
R
false
true
2,818
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_populations.R \name{autocorr_sim} \alias{autocorr_sim} \title{Simulate Temporally Autocorrelated Populations for Every Combination of Parameters} \usage{ autocorr_sim( timesteps, start, survPhi, fecundPhi, survMean, survSd, fecundMean, fecundSd, replicates ) } \arguments{ \item{timesteps}{The number of timesteps you want to simulate. Individuals are added and killed off every timestep according to the survival and fertility rates. Can be a scalar or a vector of values to loop over.} \item{start}{The starting population size. Can be a scalar or vector.} \item{survPhi}{The temporal autocorrelation of survival. 0 is white noise (uncorrelated), positive values are red noise (directly correlated) and negative values are blue noise (inversely correlated). Can be a scalar or a vector.} \item{fecundPhi}{The temporal autocorrelation of fecundity. As above.} \item{survMean}{The mean survival from timestep to timestep. Must be a value between 0 (all individuals die) and 1 (all individuals live). Can be a scalar or a vector.} \item{survSd}{The standard deviation of the survival from timestep to timestep. Must be a value between 0 and 1. Can be a scalar or a vector.} \item{fecundMean}{The mean fertility: mean offspring produced by each individual per timestep. Can be a scalar or a vector.} \item{fecundSd}{The standard deviation of the fertility. 
Can be a scalar or a vector of values.} \item{replicates}{How many replicates you would like of each possible combination of parameters.} } \value{ A list of data frames, each with fourteen variables: timestep, newborns (new individuals added this timestep), survivors (individuals alive last year who survived this timestep), population (total individuals alive), growth (the increase or decrease in population size from last year), estimated survival in the timestep, estimated fecundity in the timestep, and the seven parameters used to generate the simulation. } \description{ Essentially a loop of \code{\link{unstructured_pop}}, this function simulates a population with temporally autocorrelated vital rates for every combination of parameters you specify, with as many replicates as desired. It also estimates the sample mean survival and fertility for each simulated population. Please be advised that this function can be very computationally intensive if you provide many possible parameter values and/or ask for many replicates. } \examples{ survival_range <- autocorr_sim(timesteps = 30, start = 200, survPhi = 0.3, fecundPhi = 0.1, survMean = c(0.2, 0.3, 0.4, 0.5, 0.6), survSd = 0.5, fecundMean = 1.1, fecundSd = 0.5, replicates = 50) head(survival_range[[1]]) }
# File & Folder References --------------------------------------------------

#' boxr S3 Classes
#'
#' @description {
#' boxr has a few very simple S3 classes for the data returned by the API.
#' While \code{\link{httr}} returns objects in its own system of classes,
#' generally boxr extracts information, and converts the JSON response to an R
#' list with httr::\code{\link{content}} (perhaps with some information judged
#' to be extraneous removed). If you'd rather get to the list itself, you can
#' do this with \code{unclass(x)}.
#'
#' The following classes are used:
#'
#' \describe{
#'   \item{\bold{boxr_file_reference}}{
#'     Returned by \code{\link{box_ul}}, and similar functions (e.g.
#'     \code{\link{box_save}}). A description of a file remotely hosted on
#'     box.com. Available methods: \code{print}.
#'   }
#'   \item{\bold{boxr_folder_reference}}{
#'     As above, but for folders/directories. Available methods: \code{print}
#'   }
#'   \item{\bold{boxr_object_list}}{
#'     Returned by \code{\link{box_search}}, and related functions. A list,
#'     with each entry being a reference to a file or folder hosted on box.com
#'     . Available methods: \code{print}, for a summary of the first few
#'     results, and \code{as.data.frame}, to coerce some of the API response's
#'     information to a \code{\link{data.frame}}.
#'   }
#'   \item{\bold{boxr_dir_comparison}}{
#'     Returned by the internal function \code{\link{box_dir_diff}}. Available
#'     methods: \code{print}, \code{summary}.
#'   }
#'   \item{\bold{boxr_dir_wide_operation_result}}{
#'     Returned by \code{\link{box_fetch}} and \code{\link{box_push}}.
#'     Available methods: \code{print}, \code{summary}
#'   }
#' }
#' }
#'
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#'
#' @name boxr_S3_classes
NULL

# Tag an API response list as a single-file reference.
#' @keywords internal
add_file_ref_class <- function(x) {
  class(x) <- "boxr_file_reference"
  x
}

# Tag an API response list as a single-folder reference.
#' @keywords internal
add_folder_ref_class <- function(x) {
  class(x) <- "boxr_folder_reference"
  x
}

#' @export
print.boxr_file_reference <- function(x, ...) {
  # x <- object$entries[[1]]
  cat("\nbox.com remote file reference\n\n")
  cat(" name :", x$name, "\n")
  if(x$description != "")
    cat(" description :", x$description, "\n")
  cat(" file id :", x$id, "\n")
  cat(" version :", paste0("V", as.numeric(x$etag) + 1), "\n")
  cat(" size :", format_bytes(x$size), "\n")
  cat(" modified at :",
      as.character(as.POSIXct(gsub("T", " ", x$modified_at))), "\n"
  )
  # fix: the "created at" line previously printed x$modified_at.
  # NOTE(review): assumes the API response carries created_at (Box file
  # objects do) -- confirm against a live response.
  cat(" created at :",
      as.character(as.POSIXct(gsub("T", " ", x$created_at))), "\n"
  )
  cat(" uploaded by :", x$modified_by$login, "\n")
  cat(" owned by :", x$owned_by$login, "\n")
  shared_link <- x$shared_link
  if (is.null(shared_link))
    shared_link <- "None"
  cat(" shared link :", shared_link, "\n\n")
  cat(" parent folder name : ", x$parent$name, "\n")
  cat(" parent folder id : ", x$parent$id, "\n")
  invisible(x)
}

#' @export
print.boxr_folder_reference <- function(x, ...) {
  # x <- object$entries[[1]]
  cat("\nbox.com remote folder reference\n\n")
  cat(" name :", x$name, "\n")
  cat(" dir id :", x$id, "\n")
  cat(" size :", format_bytes(x$size), "\n")
  cat(" modified at :",
      as.character(as.POSIXct(gsub("T", " ", x$modified_at))), "\n"
  )
  # fix: the "created at" line previously printed x$modified_at.
  cat(" created at :",
      as.character(as.POSIXct(gsub("T", " ", x$created_at))), "\n"
  )
  cat(" uploaded by :", x$modified_by$login, "\n")
  cat(" owned by :", x$owned_by$login, "\n")
  shared_link <- x$shared_link
  if (is.null(shared_link))
    shared_link <- "None"
  cat(" shared link :", shared_link, "\n\n")
  cat(" parent folder name : ", x$parent$name, "\n")
  cat(" parent folder id : ", x$parent$id, "\n")
  invisible(x)
}

# Object Lists ------------------------------------------------------------

# Flatten a boxr_object_list into one data.frame row per remote object.
#' @export
as.data.frame.boxr_object_list <- function(x, ...) {
  summarise_row <- function(x) {
    # reconstruct the human-readable path from the path_collection entries
    path <- paste0(unlist(
      lapply(x$path_collection$entries, function(x) x$name)
    ), collapse = "/")

    data.frame(
      name                = x$name,
      type                = x$type,
      id                  = x$id,
      size                = x$size,
      description         = x$description,
      owner               = x$owned_by$login,
      path                = path,
      modified_at         = box_datetime(x$modified_at),
      content_modified_at = box_datetime(x$content_modified_at),
      sha1                = ifelse(is.null(x$sha1), NA, x$sha1),
      version             = as.numeric(x$etag) + 1,
      stringsAsFactors    = FALSE
    )
  }

  out <- data.frame(dplyr::bind_rows(lapply(x, summarise_row)))
  return(out)
}

#' @export
print.boxr_object_list <- function(x, ...) {
  # Convert to data.frame
  df <- as.data.frame.boxr_object_list(x)

  # If it's empty, just cat a short message
  if (nrow(df) < 1) {
    cat("\nbox.com remote object list: Empty (no objects returned)")
    return(df)
  }

  # For the first 10 objects, show the first 5 cols of the df
  df <- df[1:min(nrow(df), 10),]

  # If there's nothing in the description field, kill it off
  if (all(df$description == ""))
    df$description <- NULL

  # Lower the width of it a bit
  if(!is.null(df$description))
    df$description <- trunc_end(df$description)

  df$path <- trunc_start(df$path)
  df$size <- format_bytes(df$size)

  cat(paste0("\nbox.com remote object list (", length(x), " objects)\n\n"))
  cat(paste0(" Summary of first ", nrow(df), ":\n\n"))
  print(df[, 1:5])
  cat("\n\nUse as.data.frame() to extract full results.\n")

  invisible(x)
}

# Directory-Wide Operations -----------------------------------------------

# A better version of this would keep the whole httr call, in addition
# to the boxr expression called (e.g. upload call : box_ul(blah))
#' @export
print.boxr_dir_wide_operation_result <- function(x, ...) {
  # format a time difference as e.g. "took 3.2 secs"
  boxr_timediff <- function(x)
    paste0("took ", format(unclass(x), digits = 3), " ", attr(x, "units"))

  f <- x$file_list
  tdif <- boxr_timediff(x$end - x$start)

  cat("\nboxr", x$operation, "operation\n\n")

  # General blurb on the op
  cat(paste0(
    " User : ", getOption("boxr.username"), "\n",
    " Local dir : ", x$local_tld, "\n",
    " box.com folder : ", x$box_tld_id, "\n",
    " started at : ", x$start , " (", tdif, ")", "\n",
    "\n"
  ))

  # Produce a summary of the changes
  summarise_ops(x$file_list, x$msg_list)

  cat("Use summary() to see individual file operations.")
  invisible(x)
}

# This will only really be shown for uploaded files. I can't think of a great
# reason to explicitly 'map' this to local versions of a file at the moment.
#
# A better version of this would keep the whole httr call, in addition
# to the boxr expression called (e.g. upload call : box_ul(blah))
#' @export
summary.boxr_dir_wide_operation_result <- function(object, ...) {
  boxr_timediff <- function(x)
    paste0("took ", format(unclass(x), digits = 3), " ", attr(x, "units"))

  f <- object$file_list
  tdif <- boxr_timediff(object$end - object$start)

  cat("\nboxr", object$operation, "operation\n\n")

  # General blurb on the op
  cat(paste0(
    " User : ", getOption("boxr.username"), "\n",
    " Local dir : ", object$local_tld, "\n",
    " box.com folder : ", object$box_tld_id, "\n",
    " started at : ", object$start , " (", tdif, ")", "\n",
    "\n"
  ))

  # This just justifies the box.com id's
  if (!is.null(object$file_list[[17]]) && nrow(object$file_list[[17]]) > 0)
    object$file_list[[17]][,1] <- dir_id_tidy(object$file_list[[17]][,1])

  # Print out a summary of each of the file lists
  print_df_summary(object$file_list, object$msg_list)

  invisible(object)
}

# Directory Comparison ----------------------------------------------------

#' @export
print.boxr_dir_comparison <- function(x, ...) {
  cat("\nboxr remote:local directory comparison\n\n")

  origin <- if (x$call_info$load == "up") "Local directory" else "box.com"
  destin <- if (x$call_info$load != "up") "Local directory" else "box.com"

  # General blurb on the op
  cat(paste0(
    " User : ", getOption("boxr.username"), "\n",
    " Local dir : ", x$call_info$local_dir, "\n",
    " box.com folder : ", x$call_info$dir_id, "\n",
    " Direction : ", x$call_info$load, "load", "\n",
    " Origin : ", origin, "\n",
    " Destination : ", destin, "\n",
    "\n"
  ))

  object_list <- x[names(x) != "call_info"]

  # Produce a summary of the differences
  summarise_ops(object_list, x$call_info$msg)

  cat("Use summary() to see individual files.")
  invisible(x)
}

#' @export
summary.boxr_dir_comparison <- function(object, ...) {
  cat("\nboxr remote:local directory comparison\n\n")

  origin <- if (object$call_info$load == "up") "Local directory" else "box.com"
  destin <- if (object$call_info$load != "up") "Local directory" else "box.com"

  # General blurb on the op
  cat(paste0(
    " User : ", getOption("boxr.username"), "\n",
    " Local dir : ", object$call_info$local_dir, "\n",
    " box.com folder : ", object$call_info$dir_id, "\n",
    " Direction : ", object$call_info$load, "load", "\n",
    " Origin : ", origin, "\n",
    " Destination : ", destin, "\n",
    "\n"
  ))

  # This just justifies the box.com id's
  if (!is.null(object$file_list[[17]]) && nrow(object$file_list[[17]]) > 0)
    object$file_list[[17]][,1] <- dir_id_tidy(object$file_list[[17]][,1])

  object_list <- object[names(object) != "call_info"]

  # Print out a summary of each of the file lists
  print_df_summary(object_list, object$call_info$msg)

  invisible(object)
}

# Internal Helper Functions -----------------------------------------------

# A function to make msg_list grammatically sensible where you have only one
# file, e.g. "1 files were" -> "1 file was"
grep_tense <- function(msg, n) {
  # Non-vectorized version
  grepTense <- function(msg, n) {
    if(is.na(n) || is.null(n) || n > 1) {
      return(msg)
    }
    msg <- gsub("files", "file", msg)
    msg <- gsub("directories", "directory", msg)
    msg <- gsub("were", "was", msg)
    return(msg)
  }

  # Apply grepTense along msg and n, return result as vector
  mapply(grepTense, msg, n)
}

# Combine the file list and the message list, to print out a summary of the
# operations performed
summarise_ops <- function(file_list, msg_list) {
  # Construct the messages by combining the number of rows of the data.frame
  # with the message for the operation, e.g. "X " + "files downloaded"
  op_summaries <- unlist(mapply(
    function(x, msg) {
      if (nrow(x) > 0L)
        paste(nrow(x), grep_tense(msg, nrow(x)))
    },
    file_list, msg_list
  ))

  # Print the messages out
  # NOTE(review): !is.null() on a vector is a scalar TRUE, so this index is
  # a no-op; unlist() above already drops NULL entries.
  cat(paste0(
    paste(op_summaries[!is.null(op_summaries)], collapse = ", "),
    ".\n\n"
  ))
}

# For a file_list and a msg_list, run through the two, printing out the name
# of the operation, and the individual files affected by it
print_df_summary <- function(file_list, msg_list) {
  # Non-vectorized print function. Oh, how you wish you'd imported magrittr!
  print_df <- function(x, msg) {
    if (nrow(x) > 0) {
      cat(nrow(x), msg, ":\n")
      print(
        format(
          data.frame(
            " " = x[,grepl("full_path",colnames(x))],
            check.names = FALSE
          ),
          justify = "left"
        ),
        row.names = FALSE
      )
      cat("\n\n")
    }
  }

  # Run through the file df's in file_list, print out messages for them.
  # dummy_var absorbs the result; only the side-effect (the printing to
  # console/terminal) is desired
  dummy_var <- mapply(print_df, file_list, msg_list)
}
/R/boxr_s3_classes.R
permissive
jgrew/boxr
R
false
false
12,123
r
# File & Folder References --------------------------------------------------

#' boxr S3 Classes
#'
#' @description {
#' boxr has a few very simple S3 classes for the data returned by the API.
#' While \code{\link{httr}} returns objects in its own system of classes,
#' generally boxr extracts information, and converts the JSON response to an R
#' list with httr::\code{\link{content}} (perhaps with some information judged
#' to be extraneous removed). If you'd rather get to the list itself, you can
#' do this with \code{unclass(x)}.
#'
#' The following classes are used:
#'
#' \describe{
#'   \item{\bold{boxr_file_reference}}{
#'     Returned by \code{\link{box_ul}}, and similar functions (e.g.
#'     \code{\link{box_save}}). A description of a file remotely hosted on
#'     box.com. Available methods: \code{print}.
#'   }
#'   \item{\bold{boxr_folder_reference}}{
#'     As above, but for folders/directories. Available methods: \code{print}
#'   }
#'   \item{\bold{boxr_object_list}}{
#'     Returned by \code{\link{box_search}}, and related functions. A list,
#'     with each entry being a reference to a file or folder hosted on
#'     box.com. Available methods: \code{print}, for a summary of the first
#'     few results, and \code{as.data.frame}, to coerce some of the API
#'     response's information to a \code{\link{data.frame}}.
#'   }
#'   \item{\bold{boxr_dir_comparison}}{
#'     Returned by the internal function \code{\link{box_dir_diff}}. Available
#'     methods: \code{print}, \code{summary}.
#'   }
#'   \item{\bold{boxr_dir_wide_operation_result}}{
#'     Returned by \code{\link{box_fetch}} and \code{\link{box_push}}.
#'     Available methods: \code{print}, \code{summary}
#'   }
#' }
#' }
#'
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#'
#' @name boxr_S3_classes
NULL

# Tag x with the S3 class used for single-file API responses.
#' @keywords internal
add_file_ref_class <- function(x) {
  class(x) <- "boxr_file_reference"
  x
}

# Tag x with the S3 class used for single-folder API responses.
#' @keywords internal
add_folder_ref_class <- function(x) {
  class(x) <- "boxr_folder_reference"
  x
}

#' @export
print.boxr_file_reference <- function(x, ...) {
  cat("\nbox.com remote file reference\n\n")
  cat(" name        :", x$name, "\n")
  if (x$description != "")
    cat(" description :", x$description, "\n")
  cat(" file id     :", x$id, "\n")
  # Box's etag is zero-based; present it as a 1-based version number
  cat(" version     :", paste0("V", as.numeric(x$etag) + 1), "\n")
  cat(" size        :", format_bytes(x$size), "\n")
  cat(" modified at :",
      as.character(as.POSIXct(gsub("T", " ", x$modified_at))), "\n")
  # BUG FIX: this line previously printed x$modified_at under "created at"
  cat(" created at  :",
      as.character(as.POSIXct(gsub("T", " ", x$created_at))), "\n")
  cat(" uploaded by :", x$modified_by$login, "\n")
  cat(" owned by    :", x$owned_by$login, "\n")
  shared_link <- x$shared_link
  if (is.null(shared_link))
    shared_link <- "None"
  cat(" shared link :", shared_link, "\n\n")
  cat(" parent folder name : ", x$parent$name, "\n")
  cat(" parent folder id   : ", x$parent$id, "\n")
  invisible(x)
}

#' @export
print.boxr_folder_reference <- function(x, ...) {
  cat("\nbox.com remote folder reference\n\n")
  cat(" name        :", x$name, "\n")
  cat(" dir id      :", x$id, "\n")
  cat(" size        :", format_bytes(x$size), "\n")
  cat(" modified at :",
      as.character(as.POSIXct(gsub("T", " ", x$modified_at))), "\n")
  # BUG FIX: this line previously printed x$modified_at under "created at"
  cat(" created at  :",
      as.character(as.POSIXct(gsub("T", " ", x$created_at))), "\n")
  cat(" uploaded by :", x$modified_by$login, "\n")
  cat(" owned by    :", x$owned_by$login, "\n")
  shared_link <- x$shared_link
  if (is.null(shared_link))
    shared_link <- "None"
  cat(" shared link :", shared_link, "\n\n")
  cat(" parent folder name : ", x$parent$name, "\n")
  cat(" parent folder id   : ", x$parent$id, "\n")
  invisible(x)
}


# Object Lists ------------------------------------------------------------

#' @export
as.data.frame.boxr_object_list <- function(x, ...) {
  # Flatten one API entry to a single-row data.frame
  summarise_row <- function(x) {
    # Reconstruct the remote path from the path_collection entries
    path <- paste0(
      unlist(lapply(x$path_collection$entries, function(x) x$name)),
      collapse = "/"
    )

    data.frame(
      name                = x$name,
      type                = x$type,
      id                  = x$id,
      size                = x$size,
      description         = x$description,
      owner               = x$owned_by$login,
      path                = path,
      modified_at         = box_datetime(x$modified_at),
      content_modified_at = box_datetime(x$content_modified_at),
      sha1                = ifelse(is.null(x$sha1), NA, x$sha1),
      version             = as.numeric(x$etag) + 1,
      stringsAsFactors    = FALSE
    )
  }

  out <- data.frame(dplyr::bind_rows(lapply(x, summarise_row)))
  return(out)
}

#' @export
print.boxr_object_list <- function(x, ...) {
  # Convert to data.frame
  df <- as.data.frame.boxr_object_list(x)

  # If it's empty, just cat a short message
  if (nrow(df) < 1) {
    cat("\nbox.com remote object list: Empty (no objects returned)")
    return(df)
  }

  # For the first 10 objects, show the first 5 cols of the df
  df <- df[1:min(nrow(df), 10), ]

  # If there's nothing in the description field, kill it off
  if (all(df$description == ""))
    df$description <- NULL

  # Lower the width of it a bit
  if (!is.null(df$description))
    df$description <- trunc_end(df$description)

  df$path <- trunc_start(df$path)
  df$size <- format_bytes(df$size)

  cat(paste0("\nbox.com remote object list (", length(x), " objects)\n\n"))
  cat(paste0(" Summary of first ", nrow(df), ":\n\n"))
  print(df[, 1:5])
  cat("\n\nUse as.data.frame() to extract full results.\n")

  invisible(x)
}


# Directory-Wide Operations -----------------------------------------------

# A better version of this would keep the whole httr call, in addition
# to the boxr expression called (e.g. upload call : box_ul(blah))
#' @export
print.boxr_dir_wide_operation_result <- function(x, ...) {
  # Format a difftime as e.g. "took 1.2 secs"
  boxr_timediff <- function(x)
    paste0("took ", format(unclass(x), digits = 3), " ", attr(x, "units"))

  tdif <- boxr_timediff(x$end - x$start)

  cat("\nboxr", x$operation, "operation\n\n")

  # General blurb on the op
  cat(paste0(
    " User           : ", getOption("boxr.username"), "\n",
    " Local dir      : ", x$local_tld, "\n",
    " box.com folder : ", x$box_tld_id, "\n",
    " started at     : ", x$start, " (", tdif, ")", "\n",
    "\n"
  ))

  # Produce a summary of the changes
  summarise_ops(x$file_list, x$msg_list)

  cat("Use summary() to see individual file operations.")
  invisible(x)
}

# This will only really be shown for uploaded files. I can't think of a great
# reason to explicitly 'map' this to local versions of a file at the moment.
#
# A better version of this would keep the whole httr call, in addition
# to the boxr expression called (e.g. upload call : box_ul(blah))
#' @export
summary.boxr_dir_wide_operation_result <- function(object, ...) {
  boxr_timediff <- function(x)
    paste0("took ", format(unclass(x), digits = 3), " ", attr(x, "units"))

  tdif <- boxr_timediff(object$end - object$start)

  cat("\nboxr", object$operation, "operation\n\n")

  # General blurb on the op
  cat(paste0(
    " User           : ", getOption("boxr.username"), "\n",
    " Local dir      : ", object$local_tld, "\n",
    " box.com folder : ", object$box_tld_id, "\n",
    " started at     : ", object$start, " (", tdif, ")", "\n",
    "\n"
  ))

  # This just justifies the box.com id's.
  # NOTE(review): 17 looks like a magic index into file_list -- confirm which
  # operation it corresponds to. Guarded so a short/missing file_list cannot
  # error on the subscript.
  if (length(object$file_list) >= 17 &&
      !is.null(object$file_list[[17]]) &&
      nrow(object$file_list[[17]]) > 0)
    object$file_list[[17]][, 1] <- dir_id_tidy(object$file_list[[17]][, 1])

  # Print out a summary of each of the file lists
  print_df_summary(object$file_list, object$msg_list)

  invisible(object)
}


# Directory Comparison ----------------------------------------------------

#' @export
print.boxr_dir_comparison <- function(x, ...) {
  cat("\nboxr remote:local directory comparison\n\n")

  origin <- if (x$call_info$load == "up") "Local directory" else "box.com"
  destin <- if (x$call_info$load != "up") "Local directory" else "box.com"

  # General blurb on the op
  cat(paste0(
    " User           : ", getOption("boxr.username"), "\n",
    " Local dir      : ", x$call_info$local_dir, "\n",
    " box.com folder : ", x$call_info$dir_id, "\n",
    " Direction      : ", x$call_info$load, "load", "\n",
    " Origin         : ", origin, "\n",
    " Destination    : ", destin, "\n",
    "\n"
  ))

  object_list <- x[names(x) != "call_info"]

  # Produce a summary of the differences
  summarise_ops(object_list, x$call_info$msg)

  cat("Use summary() to see individual files.")
  invisible(x)
}

#' @export
summary.boxr_dir_comparison <- function(object, ...) {
  cat("\nboxr remote:local directory comparison\n\n")

  origin <- if (object$call_info$load == "up") "Local directory" else "box.com"
  destin <- if (object$call_info$load != "up") "Local directory" else "box.com"

  # General blurb on the op
  cat(paste0(
    " User           : ", getOption("boxr.username"), "\n",
    " Local dir      : ", object$call_info$local_dir, "\n",
    " box.com folder : ", object$call_info$dir_id, "\n",
    " Direction      : ", object$call_info$load, "load", "\n",
    " Origin         : ", origin, "\n",
    " Destination    : ", destin, "\n",
    "\n"
  ))

  # This just justifies the box.com id's.
  # NOTE(review): copy-pasted from the dir-wide summary; comparison objects may
  # not carry a $file_list at all, so the guard below also protects against
  # NULL and short lists.
  if (length(object$file_list) >= 17 &&
      !is.null(object$file_list[[17]]) &&
      nrow(object$file_list[[17]]) > 0)
    object$file_list[[17]][, 1] <- dir_id_tidy(object$file_list[[17]][, 1])

  object_list <- object[names(object) != "call_info"]

  # Print out a summary of each of the file lists
  print_df_summary(object_list, object$call_info$msg)

  invisible(object)
}


# Internal Helper Functions -----------------------------------------------

# A function to make msg_list grammatically sensible where you have only one
# file, e.g. "1 files were" -> "1 file was"
grep_tense <- function(msg, n) {
  # Non-vectorized version
  grepTense <- function(msg, n) {
    # BUG FIX: is.null() must run before is.na(); is.na(NULL) is logical(0),
    # which errors inside `||` in modern R
    if (is.null(n) || is.na(n) || n > 1) {
      return(msg)
    }
    msg <- gsub("files", "file", msg)
    msg <- gsub("directories", "directory", msg)
    msg <- gsub("were", "was", msg)
    return(msg)
  }

  # Apply grepTense along msg and n, return result as vector
  mapply(grepTense, msg, n)
}

# Combine the file list and the message list, to print out a summary of the
# operations performed
summarise_ops <- function(file_list, msg_list) {
  # Construct the messages by combining the number of rows of the data.frame
  # with the message for the operation, e.g. "X " + "files downloaded".
  # unlist() silently drops the NULLs produced for empty data frames.
  op_summaries <- unlist(mapply(
    function(x, msg) {
      if (nrow(x) > 0L) paste(nrow(x), grep_tense(msg, nrow(x)))
    },
    file_list, msg_list
  ))

  # Print the messages out.
  # BUG FIX: the original subset op_summaries[!is.null(op_summaries)];
  # is.null() on a vector is always FALSE, so the filter was a no-op.
  cat(paste0(paste(op_summaries, collapse = ", "), ".\n\n"))
}

# For a file_list and a msg_list, run through the two, printing out the name
# of the operation, and the individual files affected by it
print_df_summary <- function(file_list, msg_list) {
  # Non-vectorized print function. Oh, how you wish you'd imported magrittr!
  print_df <- function(x, msg) {
    if (nrow(x) > 0) {
      cat(nrow(x), msg, ":\n")
      print(
        format(
          data.frame(
            " " = x[, grepl("full_path", colnames(x))],
            check.names = FALSE
          ),
          justify = "left"
        ),
        row.names = FALSE
      )
      cat("\n\n")
    }
  }

  # Run through the file df's in file_list, print out messages for them
  # dummy_var absorbs the result (only the side-effect -- the printing to
  # console/terminal) is desired
  dummy_var <- mapply(print_df, file_list, msg_list)
}
# COURSERA: EXPLORATORY DATA ANALYSIS, week 1 Course Project 1
#---------------------------------------------------------------

# Create directory (if non-existent), download the zip file, and unzip it in
# the desired directory
if (!file.exists("ExploratoryDataAnalysis")) {
  dir.create("ExploratoryDataAnalysis")
}

if (!file.exists("./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip")) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileURL,
                destfile = "./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip")
}

unzip("./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip",
      exdir = "./ExploratoryDataAnalysis")

#-----------------------------------------------------------------------------------

# Extract rows related to dates 01/Feb/2007 - 02/Feb/2007 (i.e. 2880 records);
# the skip/nrows offsets assume the fixed row layout of this dataset
HouseholdPowerConsumption <- read.csv(
  "./ExploratoryDataAnalysis/household_power_consumption.txt",
  sep = ";", na.strings = "?", header = FALSE, skip = 66637, nrows = 2880
)

# Extract only column names from datafile (i.e. 1st row only), rename columns
ColumnNames <- read.csv(
  "./ExploratoryDataAnalysis/household_power_consumption.txt",
  sep = ";", nrows = 1, as.is = TRUE, header = FALSE, colClasses = "character"
)
# BUG FIX: ColumnNames is a 1-row data.frame; flatten it to a character vector
# before using it as column names
colnames(HouseholdPowerConsumption) <- unlist(ColumnNames, use.names = FALSE)

# Change the $Date and $Time (char) variables into a single date-time variable:
# paste 'Date' and 'Time' columns together
HouseholdPowerConsumption$Date <- paste(HouseholdPowerConsumption$Date,
                                        HouseholdPowerConsumption$Time,
                                        sep = ", ")

# Base-R rename instead of loading dplyr for a single rename() call
names(HouseholdPowerConsumption)[names(HouseholdPowerConsumption) == "Date"] <- "DateTime"

# as.POSIXct rather than strptime: POSIXlt columns are discouraged inside
# data frames (strptime returns POSIXlt)
HouseholdPowerConsumption$DateTime <- as.POSIXct(HouseholdPowerConsumption$DateTime,
                                                 format = "%d/%m/%Y, %H:%M:%S")

# Dropping the 'Time' variable, as it is now incorporated into 'DateTime'
HouseholdPowerConsumption$Time <- NULL

#--------------------------------------------------------------
#--------------------------------------------------------------

# Plot1 creation: open a png graphics device in the current working directory,
# draw the histogram, and close the device
png(filename = "plot1.png", width = 480, height = 480)
hist(HouseholdPowerConsumption$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
dev.off()

#--------------------------------------------------------------
# SCRIPT END
#--------------------------------------------------------------
/Course Project 1/Plot1.R
no_license
Guille495/ExData_Plotting1
R
false
false
2,612
r
# COURSERA: EXPLORATORY DATA ANALYSIS, week 1 Course Project 1 #--------------------------------------------------------------- # Create directory (if non-existent) # Download zip file # Unzip file in desired directory if(!file.exists("ExploratoryDataAnalysis")) { dir.create("ExploratoryDataAnalysis") } if(!file.exists("./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip")) { fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(fileURL, destfile = "./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip") } unzip("./ExploratoryDataAnalysis/HouseholdPowerConsumption.zip", exdir = "./ExploratoryDataAnalysis") #----------------------------------------------------------------------------------- # Extract rows related to dates 01/Feb/2007 - 02/Feb/2007 (i.e. 2880 records): HouseholdPowerConsumption <- read.csv("./ExploratoryDataAnalysis/household_power_consumption.txt", sep = ";", na.strings = "?", header = F, skip = 66637, nrows = 2880) # Extract only column names from datafile (i.e. 1st row only), rename columns: ColumnNames <- read.csv("./ExploratoryDataAnalysis/household_power_consumption.txt", sep = ";", nrows = 1, as.is = T, header = F, colClasses = "character") colnames(HouseholdPowerConsumption) <- ColumnNames # Change the $Date and $Time (char) variables into a single POSIXlt class DateTime variable: HouseholdPowerConsumption$Date <- paste(HouseholdPowerConsumption$Date, HouseholdPowerConsumption$Time, sep=", ") # paste 'Date' and 'Time' columns together library(dplyr) # Loading dplyr for easier manipulation (i.e. 
function 'rename()') HouseholdPowerConsumption <- rename(HouseholdPowerConsumption, DateTime = Date) HouseholdPowerConsumption$DateTime <- strptime(HouseholdPowerConsumption$DateTime, format = "%d/%m/%Y, %H:%M:%S") # Changing 'DateTime' from char to POSIXlt HouseholdPowerConsumption$Time <- NULL # Dropping the 'Time' variable, as it is now incorporated into 'DateTime' #-------------------------------------------------------------- #-------------------------------------------------------------- # Plot1 creation: png(filename = "plot1.png", width = 480, height = 480) # Call graphics device to create a png file in my current working directory plot1 <- hist(HouseholdPowerConsumption$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red") dev.off() #-------------------------------------------------------------- # SCRIPT END #--------------------------------------------------------------
## Caching the Inverse of a Matrix

## makeCacheMatrix creates a special "matrix" object, which is really a list
## containing functions to:
##   - set the value of the matrix
##   - get the value of the matrix
##   - set the value of the inverse
##   - get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y
    i <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  # BUG FIX: these closures were named setmean/getmean but referenced below
  # (and by cacheSolve) as setinverse/getinverse
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  # BUG FIX: the closures must be returned in a list(), not matrix(); the
  # stray extra closing brace has also been removed
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve calculates the inverse of the matrix created with the above
## function. However, it first checks to see if the inverse has already been
## calculated. If so, it gets the inverse from the cache and skips the
## computation. Otherwise, it calculates the inverse of the data and sets the
## value of the inverse in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  # BUG FIX: the original cached and returned the stale local `i` (NULL)
  # instead of the freshly computed inverse
  x$setinverse(i)
  i
}
/cachematrix.R
no_license
dibyajyoti681/ProgrammingAssignment2-master
R
false
false
1,140
r
## Caching the Inverse of a Matrix

## makeCacheMatrix creates a special "matrix" object, which is really a list
## containing functions to:
##   - set the value of the matrix
##   - get the value of the matrix
##   - set the value of the inverse
##   - get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y
    i <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  # BUG FIX: these closures were named setmean/getmean but referenced below
  # (and by cacheSolve) as setinverse/getinverse
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  # BUG FIX: the closures must be returned in a list(), not matrix(); the
  # stray extra closing brace has also been removed
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve calculates the inverse of the matrix created with the above
## function. However, it first checks to see if the inverse has already been
## calculated. If so, it gets the inverse from the cache and skips the
## computation. Otherwise, it calculates the inverse of the data and sets the
## value of the inverse in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  # BUG FIX: the original cached and returned the stale local `i` (NULL)
  # instead of the freshly computed inverse
  x$setinverse(i)
  i
}
#!/usr/bin/env Rscript
# BUG FIX above: the shebang was missing its leading slash (#!usr/bin/env)
###################################################################
## Author: Melanie van den Bosch
## Changing OG alignments to clan alignments
## Correcting names and subselection for clans
###################################################################

#------------------------------------------#
##_____ load libraries and functions _____##
#------------------------------------------#

# BUG FIX: the package is spelled "RCurl" (case-sensitive), not "Rcurl".
# Also install only when missing, instead of reinstalling on every run.
if (!requireNamespace("RCurl", quietly = TRUE))
  install.packages("RCurl", repos = "http://cran.rstudio.com/")
if (!requireNamespace("seqinr", quietly = TRUE))
  install.packages("seqinr", repos = "http://cran.rstudio.com/")
library("seqinr")
library("RCurl")

# loadRData in variable function, sourced from GitHub
eval(parse(text = getURL(paste("https://raw.githubusercontent.com/",
                               "meeldurb/phylogenetics/master/loadRData.R",
                               sep = ""),
                         ssl.verifypeer = FALSE)))

#-----------------------#
##_____ Load data _____##
#-----------------------#

# get clans to obtain the clan structure of the trees
OG_clans_dupl <- loadRData(paste('C:/Users/meeldurb/Dropbox/Melanie/',
                                 '/Beast_dating_salmonids/RData/',
                                 'Clans_2analyze_inBeast_withduplicates_aa.RData',
                                 sep = ''))

# get file path with all alignments
alignment.files <- dir(paste('C:/Users/meeldurb/Google Drive/',
                             'Master internship phylogenetics salmonids/',
                             'Salmonid_genomics_resources/Orthologs_homeologs/',
                             'orthogroups.03.06.2017/Alignments/', sep = ''),
                       full.names = TRUE)

# table with Omyk gene and protein names
Omyk.prot2gene <- read.table(paste('C:/users/meeldurb/Google Drive/',
                                   'Master internship phylogenetics salmonids/',
                                   'Salmonid_genomics_resources/Orthologs_homeologs/',
                                   'orthogroups.03.06.2017/Omyk.gene2protein.table.txt',
                                   sep = ''),
                             header = TRUE)

#--------------------------------------#
##_____ Correct alignment files _____##
#--------------------------------------#

# (debug leftovers removed: single-clan test assignments such as
#  clan <- "OG0008581_1.")

# creating the folder to save the data in
outfolder <- "Alignments_aa_corrected/"
if (!file.exists(outfolder)) {
  dir.create(outfolder)
}

# loop to change the alignment files
for (clan in names(OG_clans_dupl)) {
  cat(clan, '\n')
  # give the corrected alignment file a name and location to write to
  fileout <- paste(outfolder, clan, "_corr.fa", sep = "")
  if (!file.exists(fileout)) {
    # get original alignment ID
    ali.id <- gsub(".*(OG\\d*)_\\d*\\.", '\\1', clan)
    # position where the alignment file is found
    ali.file.pos <- match(ali.id, gsub(".*(OG\\d*).fa", '\\1', alignment.files))
    # ROBUSTNESS FIX: a missing match made file.size(NA) return NA and crash
    # the if() below; skip such clans explicitly
    if (is.na(ali.file.pos)) {
      print("no alignment file found for this clan")
      next
    }
    ali.file <- alignment.files[ali.file.pos]
    # checking if file is not empty
    if (!file.size(ali.file) == 0) {
      # only isolate sequences that are also in the tip.labels of the clans
      clan.select <- OG_clans_dupl[[clan]]
      clan.genes <- clan.select$tip.label
      seqs <- read.fasta(ali.file)
      # fixing names in seqs to resemble names in clans tip.labels:
      # remove double species name
      names(seqs) <- lapply(names(seqs), function(i) {
        gsub("\\w*_(\\w*\\|.*)", "\\1", i)
      })
      # fix Omyk names from proteinID to geneID, taken from Omyk.prot2gene
      names(seqs) <- lapply(names(seqs), function(i) {
        select.row <- match(sub('Omyk\\|', '', i), Omyk.prot2gene$protein)
        if (!is.na(select.row)) {
          paste('Omyk|', Omyk.prot2gene$gene_id[select.row], sep = '')
        } else {
          i
        }
      })
      # BUG FIX: all.equal() returns a character vector when the objects
      # differ, which made `if (all.equal(...))` error out; wrap in isTRUE().
      # BUG FIX: when lengths matched but labels differed, the original wrote
      # nothing at all; now that case falls through to the subselection branch.
      if (length(names(seqs)) == length(clan.genes) &&
          isTRUE(all.equal(sort(names(seqs)), sort(clan.genes)))) {
        print("tip.labels are the same")
        # change the alignment file for only the names
        write.fasta(seqs, names(seqs), fileout)
      } else {
        print("tip labels are not the same, making subselection of seqs")
        # select only the sequences that resemble the tip.labels
        selected.seqs <- seqs[clan.genes]
        write.fasta(selected.seqs, names(selected.seqs), fileout)
      }
    } else {
      print("file is empty")
    }
  } else {
    print("corrected .fa file already was written")
  }
}
/correct_select_alignments.R
no_license
meeldurb/phylogenetics
R
false
false
4,455
r
#!usr/bin/env Rscript ################################################################### ##Author: Melanie van den Bosch ##Changing OG alignments to clan alignments ##Correcting names and subselection for clans ################################################################### #------------------------------------------# ##_____ load libraries and functions _____## #------------------------------------------# install.packages("Rcurl", repos = "http://cran.rstudio.com/") install.packages("seqinr", repos = "http://cran.rstudio.com/") require("seqinr") library("RCurl") # loadRData in variable function eval(parse(text = getURL(paste("https://raw.githubusercontent.com/", "meeldurb/phylogenetics/master/loadRData.R", sep = ""), ssl.verifypeer = FALSE))) #-----------------------# ##_____ Load data _____## #-----------------------# # get clans to obtain the clan structure of the trees OG_clans_dupl <- loadRData(paste('C:/Users/meeldurb/Dropbox/Melanie/', '/Beast_dating_salmonids/RData/', 'Clans_2analyze_inBeast_withduplicates_aa.RData', sep = '')) # get file path with all alignments alignment.files <- dir(paste('C:/Users/meeldurb/Google Drive/', 'Master internship phylogenetics salmonids/', 'Salmonid_genomics_resources/Orthologs_homeologs/', 'orthogroups.03.06.2017/Alignments/', sep = ''), full.names = T) # table with Omyk gene and protein names Omyk.prot2gene = read.table(paste('C:/users/meeldurb/Google Drive/', 'Master internship phylogenetics salmonids/', 'Salmonid_genomics_resources/Orthologs_homeologs/', 'orthogroups.03.06.2017/Omyk.gene2protein.table.txt', sep = ''), header = T) #--------------------------------------# ##_____ Correct alignment files _____## #--------------------------------------# clan <- "OG0008581_1." clan <- "OG0008390_1." clan <- "OG0008707_1." 
# count <- 0 # creating the folder to save the data in outfolder <- "Alignments_aa_corrected/" if (!file.exists(outfolder)){ dir.create(outfolder) } # loop to change the alignment files for(clan in names(OG_clans_dupl)){ cat(clan, '\n') # give the corrected alignment file a name and location it needs to be written to fileout <- paste(outfolder, clan, "_corr.fa", sep="") if (!file.exists(fileout)) { # get original alignment ID ali.id <- gsub(".*(OG\\d*)_\\d*\\.", '\\1', clan) # position where the alignment file is found ali.file.pos <- match(ali.id, gsub(".*(OG\\d*).fa", '\\1', alignment.files)) ali.file <- alignment.files[ali.file.pos] # checking if file is not empty if (!file.size(ali.file) == 0){ # only isolate sequences that are also in the tip.labels of the clans clan.select <- OG_clans_dupl[[clan]] clan.genes <- clan.select$tip.label seqs <- read.fasta(ali.file) # fixing names in seqs to resemble names in clans tip.labels # remove double species name names(seqs) <- lapply(names(seqs), function(i){ gsub("\\w*_(\\w*\\|.*)", "\\1", i) }) # fix Omyk names from proteinID to geneID # taken from Omyk.prot2gene names(seqs) <- lapply(names(seqs), function(i){ select.row <- match(sub('Omyk\\|', '', i), Omyk.prot2gene$protein) if (!is.na(select.row)) { i <- paste('Omyk|', Omyk.prot2gene$gene_id[select.row], sep = '') } else { i } }) # first check length, then check if they contain the same elements if (length(names(seqs)) == length(clan.genes)){ if (all.equal(sort(names(seqs)), sort(clan.genes))) { print ("tip.labels are the same") # change the alignment file for only the names write.fasta(seqs, names(seqs), fileout) } } else { print ("tip labels are not the same, making subselection of seqs") # select only the sequences that resemble the tip.labels selected.seqs <- seqs[clan.genes] write.fasta(selected.seqs, names(selected.seqs), fileout) } } else { print ("file is empty") } } else { print("corrected .fa file already was written") } }
#' Extracts statistical features for a numeric vector.
#'
#' @param data A numeric vector of length greater than 2.
#' @return A one-row data.frame of statistical features for \code{data}:
#'   descriptives from \code{psych::describe}, IQR, zero/unique counts,
#'   Tukey outlier bounds and selected quantiles, with column names
#'   prefixed by "TS_".
#' @examples
#' data = rnorm(50)
#' featureCreationTS(data)
#' @export

# extract time series features
featureCreationTS = function(data){
  # Hoist the repeated quantile/IQR computations out of the data.frame call.
  # BUG FIX: IQR() previously ran without na.rm while the quantile calls used
  # na.rm = TRUE, so any NA made iqr/lowerBound/upperBound NA.
  iqr <- stats::IQR(data, na.rm = TRUE)
  q1  <- stats::quantile(data, 0.25, na.rm = TRUE)
  q3  <- stats::quantile(data, 0.75, na.rm = TRUE)

  # describe data: columns 3:13 of psych::describe plus derived features
  tsData = data.frame(psych::describe(data)[, 3:13]
                      , iqr = iqr
                      , nZero = length(which(data == 0))
                      , nUnique = length(unique(data))
                      , lowerBound = q1 - (1.5 * iqr)   # Tukey lower fence
                      , upperBound = q3 + (1.5 * iqr)   # Tukey upper fence
                      , data.frame(t(stats::quantile(data,
                                                     c(.01, .05, .25, .5, .75, .95, .99),
                                                     na.rm = TRUE)))
  )

  # change column names for your data
  colnames(tsData) = paste("TS", colnames(tsData), sep = "_")

  # return statistical features
  return(tsData)
}
/R/featureCreationTS.R
no_license
nagdevAmruthnath/EnsembleML
R
false
false
982
r
#' Extracts statistical features for a numeric vector.
#'
#' @param data A numeric vector of length greater than 2.
#' @return A one-row data.frame of statistical features for \code{data}:
#'   descriptives from \code{psych::describe}, IQR, zero/unique counts,
#'   Tukey outlier bounds and selected quantiles, with column names
#'   prefixed by "TS_".
#' @examples
#' data = rnorm(50)
#' featureCreationTS(data)
#' @export

# extract time series features
featureCreationTS = function(data){
  # Hoist the repeated quantile/IQR computations out of the data.frame call.
  # BUG FIX: IQR() previously ran without na.rm while the quantile calls used
  # na.rm = TRUE, so any NA made iqr/lowerBound/upperBound NA.
  iqr <- stats::IQR(data, na.rm = TRUE)
  q1  <- stats::quantile(data, 0.25, na.rm = TRUE)
  q3  <- stats::quantile(data, 0.75, na.rm = TRUE)

  # describe data: columns 3:13 of psych::describe plus derived features
  tsData = data.frame(psych::describe(data)[, 3:13]
                      , iqr = iqr
                      , nZero = length(which(data == 0))
                      , nUnique = length(unique(data))
                      , lowerBound = q1 - (1.5 * iqr)   # Tukey lower fence
                      , upperBound = q3 + (1.5 * iqr)   # Tukey upper fence
                      , data.frame(t(stats::quantile(data,
                                                     c(.01, .05, .25, .5, .75, .95, .99),
                                                     na.rm = TRUE)))
  )

  # change column names for your data
  colnames(tsData) = paste("TS", colnames(tsData), sep = "_")

  # return statistical features
  return(tsData)
}
# Build a BEAST posterior-predictive simulation XML from a template, a BEAST
# log file and a tree file, using the posterior coalescent-exponential
# parameters (ePopSize, growthRate) as the simulation prior.
library(ape)

args = commandArgs(trailingOnly = T)

print('to use: Rscript make_cc_simulation_distro.R xml_template log_file tree_file')

# ENABLE ARGS AND TEST
xml_file <- args[1]
log_file <- args[2]
#xml_file <- 'ce_sim_template.xml'
#log_file <- 'ce_veronika_tree_simulated_ce_24.log'

xml_simulation_template <- readLines(xml_file)
posterior_params_file <- read.table(log_file, head = T)
input_tree <- read.tree(args[3])

# Make taxon_seqs: one <sequence> XML element per tip label (paste0 is
# vectorized over tip.label, then collapsed into one newline-joined string)
taxon_seqs <- paste0("<sequence id=\"seq_", input_tree$tip.label, "\" taxon=\"",input_tree$tip.label, "\" totalcount=\"4\" value=\"gc\"/>", collapse = '\n')

# Build "tip_label=date" pairs; the date is assumed to follow the last
# underscore of each tip label -- TODO confirm against the tree naming scheme
taxon_dates <- vector()
for(ta in 1:length(input_tree$tip.label)){
  date <- gsub('.+_', '', input_tree$tip.label[ta])
  taxon_dates[ta] <- paste0(input_tree$tip.label[ta], '=', date)
}
taxon_dates <- paste0(taxon_dates, collapse = ',\n')

# Substitute the placeholder tokens in the template
xml_temp <- gsub('TAXON_DATES', taxon_dates, gsub('TAXON_SEQS', taxon_seqs, xml_simulation_template))
# Output tree file name: log file name with ".log" replaced by "_pps"
xml_temp <- gsub('CE_SIM_TREE_FILE', gsub('[.]log', '_pps', log_file), xml_temp)

#Get posterior params to set as simulation prior: ePopSize, growthRate.
# Each is summarised as c(mean, sd), rounded to 2 decimal places.
epopsize <- round(c(mean(posterior_params_file$ePopSize), sd(posterior_params_file$ePopSize)), 2)
growthrate <- round(c(mean(posterior_params_file$growthRate.), sd(posterior_params_file$growthRate.)), 2)

xml_temp <- gsub('E_POP_SIZE_MEAN', epopsize[1], xml_temp)
xml_temp <- gsub('E_POP_SIZE_SD', epopsize[2], xml_temp)
xml_temp <- gsub('GROWTH_RATE_MEAN', growthrate[1], xml_temp)
xml_temp <- gsub('GROWTH_RATE_SD', growthrate[2], xml_temp)

# Write the populated XML next to the log file, with a _pps.xml suffix
cat(xml_temp, file = gsub('[.]log', '_pps.xml', log_file), sep = '\n')
/coal_bd_sims1/make_ce_simulation_distro.R
no_license
sebastianduchene/phylodynamics_adequacy
R
false
false
1,629
r
library(ape) args = commandArgs(trailingOnly = T) print('to use: Rscript make_cc_simulation_distro.R xml_template log_file tree_file') # ENABLE ARGS AND TEST xml_file <- args[1] log_file <- args[2] #xml_file <- 'ce_sim_template.xml' #log_file <- 'ce_veronika_tree_simulated_ce_24.log' xml_simulation_template <- readLines(xml_file) posterior_params_file <- read.table(log_file, head = T) input_tree <- read.tree(args[3]) # Make taxon_seqs: taxon_seqs <- paste0("<sequence id=\"seq_", input_tree$tip.label, "\" taxon=\"",input_tree$tip.label, "\" totalcount=\"4\" value=\"gc\"/>", collapse = '\n') taxon_dates <- vector() for(ta in 1:length(input_tree$tip.label)){ date <- gsub('.+_', '', input_tree$tip.label[ta]) taxon_dates[ta] <- paste0(input_tree$tip.label[ta], '=', date) } taxon_dates <- paste0(taxon_dates, collapse = ',\n') xml_temp <- gsub('TAXON_DATES', taxon_dates, gsub('TAXON_SEQS', taxon_seqs, xml_simulation_template)) xml_temp <- gsub('CE_SIM_TREE_FILE', gsub('[.]log', '_pps', log_file), xml_temp) #Get posterior params to set as simulation prior: ePopSize, growthRate. epopsize <- round(c(mean(posterior_params_file$ePopSize), sd(posterior_params_file$ePopSize)), 2) growthrate <- round(c(mean(posterior_params_file$growthRate.), sd(posterior_params_file$growthRate.)), 2) xml_temp <- gsub('E_POP_SIZE_MEAN', epopsize[1], xml_temp) xml_temp <- gsub('E_POP_SIZE_SD', epopsize[2], xml_temp) xml_temp <- gsub('GROWTH_RATE_MEAN', growthrate[1], xml_temp) xml_temp <- gsub('GROWTH_RATE_SD', growthrate[2], xml_temp) cat(xml_temp, file = gsub('[.]log', '_pps.xml', log_file), sep = '\n')
#PAGE=137
# Schaum's Statistics, problem 6.24 (circular arrangements).
x=7
# (x-1)! -- presumably the number of ways to seat x people around a circle;
# TODO confirm against the textbook problem statement
a=factorial(x-1)
a
y=2
# (x-y)! * y! -- arrangements counted for the restricted case with a group
# of y together (assumption; verify against the book)
b=factorial(x-y)*factorial(y)
# subtract the restricted count from the total
b=a-b
b
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH6/EX6.6.24/Ex6_6_24.R
permissive
FOSSEE/R_TBC_Uploads
R
false
false
85
r
#PAGE=137
# Schaum's Statistics, problem 6.24 (circular arrangements).
x=7
# (x-1)! -- presumably the number of ways to seat x people around a circle;
# TODO confirm against the textbook problem statement
a=factorial(x-1)
a
y=2
# (x-y)! * y! -- arrangements counted for the restricted case with a group
# of y together (assumption; verify against the book)
b=factorial(x-y)*factorial(y)
# subtract the restricted count from the total
b=a-b
b
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggparcoord.R \name{skewness} \alias{skewness} \title{Sample skewness} \usage{ skewness(x) } \arguments{ \item{x}{numeric vector} } \value{ sample skewness of \code{x} } \description{ Calculate the sample skewness of a vector while ignoring missing values. } \author{ Jason Crowley \email{crowley.jason.s@gmail.com} }
/man/skewness.Rd
no_license
bbolker/ggally
R
false
true
395
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggparcoord.R \name{skewness} \alias{skewness} \title{Sample skewness} \usage{ skewness(x) } \arguments{ \item{x}{numeric vector} } \value{ sample skewness of \code{x} } \description{ Calculate the sample skewness of a vector while ignoring missing values. } \author{ Jason Crowley \email{crowley.jason.s@gmail.com} }
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 48764 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 48763 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 48763 c c Input Parameter (command line, file): c input filename QBFLIB/Sauer-Reimer/ITC99/b22_PR_8_50.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 16791 c no.of clauses 48764 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 48763 c c QBFLIB/Sauer-Reimer/ITC99/b22_PR_8_50.qdimacs 16791 48764 E1 [1] 0 332 16401 48763 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b22_PR_8_50/b22_PR_8_50.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
719
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 48764 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 48763 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 48763 c c Input Parameter (command line, file): c input filename QBFLIB/Sauer-Reimer/ITC99/b22_PR_8_50.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 16791 c no.of clauses 48764 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 48763 c c QBFLIB/Sauer-Reimer/ITC99/b22_PR_8_50.qdimacs 16791 48764 E1 [1] 0 332 16401 48763 RED
# Emit a friendly greeting as a message (printed to stderr).
hello <- function() {
  message("Hello world!")
}
/R/hello.R
no_license
HenrikBengtsson/teeny
R
false
false
45
r
# Emit a friendly greeting as a message (printed to stderr).
hello <- function() {
  message("Hello world!")
}
#read the test, training, subject, and activity data into R features <- read.table("~/Data Science/UCI HAR Dataset/features.txt", quote="\"", comment.char="") subject_train <- read.table("~/Data Science/UCI HAR Dataset/train/subject_train.txt", quote="\"", comment.char="") y_train <- read.table("~/Data Science/UCI HAR Dataset/train/y_train.txt", quote="\"", comment.char="") X_train <- read.table("~/Data Science/UCI HAR Dataset/train/X_train.txt", quote="\"", comment.char="") activity_labels <- read.table("~/Data Science/UCI HAR Dataset/activity_labels.txt", quote="\"", comment.char="") subject_test <- read.table("~/Data Science/UCI HAR Dataset/test/subject_test.txt", quote="\"", comment.char="") y_test <- read.table("~/Data Science/UCI HAR Dataset/test/y_test.txt", quote="\"", comment.char="") X_test <- read.table("~/Data Science/UCI HAR Dataset/test/X_test.txt", quote="\"", comment.char="") #rename the columns of data colnames(X_train)<-features$V2 colnames(X_test)<-features$V2 names(subject_test)[names(subject_test)=="V1"]<-"subject" names(subject_train)[names(subject_train)=="V1"]<-"subject" names(y_test)[names(y_test)=="V1"]<-"activity" names(y_train)[names(y_train)=="V1"]<-"activity" #merge subject, activity, and measurement data test<-cbind(subject_test,X_test,y_test) train<-cbind(subject_train,X_train,y_train) #append training data to test data whole<-rbind(test,train) #label activities in complete dataset whole$activity<-factor(whole$activity,levels=activity_labels$V1, labels=activity_labels$V2) #keep all the variables with "mean" and "std" in the name sub<-c("mean()","sd()","activity","subject") subset<-whole[,grepl(paste(sub, collapse='|'), colnames(whole))] #take averages over activity and subject product<-aggregate(subset[, 1:46], list(subset$subject, subset$activity), mean) #create dataset for submission write.table(product, row.names = F, file = "assignment.txt") product
/getclean/run_analysis.R
no_license
edavidaja/Data-Science
R
false
false
1,927
r
#read the test, training, subject, and activity data into R features <- read.table("~/Data Science/UCI HAR Dataset/features.txt", quote="\"", comment.char="") subject_train <- read.table("~/Data Science/UCI HAR Dataset/train/subject_train.txt", quote="\"", comment.char="") y_train <- read.table("~/Data Science/UCI HAR Dataset/train/y_train.txt", quote="\"", comment.char="") X_train <- read.table("~/Data Science/UCI HAR Dataset/train/X_train.txt", quote="\"", comment.char="") activity_labels <- read.table("~/Data Science/UCI HAR Dataset/activity_labels.txt", quote="\"", comment.char="") subject_test <- read.table("~/Data Science/UCI HAR Dataset/test/subject_test.txt", quote="\"", comment.char="") y_test <- read.table("~/Data Science/UCI HAR Dataset/test/y_test.txt", quote="\"", comment.char="") X_test <- read.table("~/Data Science/UCI HAR Dataset/test/X_test.txt", quote="\"", comment.char="") #rename the columns of data colnames(X_train)<-features$V2 colnames(X_test)<-features$V2 names(subject_test)[names(subject_test)=="V1"]<-"subject" names(subject_train)[names(subject_train)=="V1"]<-"subject" names(y_test)[names(y_test)=="V1"]<-"activity" names(y_train)[names(y_train)=="V1"]<-"activity" #merge subject, activity, and measurement data test<-cbind(subject_test,X_test,y_test) train<-cbind(subject_train,X_train,y_train) #append training data to test data whole<-rbind(test,train) #label activities in complete dataset whole$activity<-factor(whole$activity,levels=activity_labels$V1, labels=activity_labels$V2) #keep all the variables with "mean" and "std" in the name sub<-c("mean()","sd()","activity","subject") subset<-whole[,grepl(paste(sub, collapse='|'), colnames(whole))] #take averages over activity and subject product<-aggregate(subset[, 1:46], list(subset$subject, subset$activity), mean) #create dataset for submission write.table(product, row.names = F, file = "assignment.txt") product
# this file collects the csv files containing the Bundesbank realtime data base. # it loads the dictionary of keys of bundesbank # it creates an overview of all variables and connects it to the keys and labels # in "variable" # it selects the variables employed in "should we trust in leading indicators..." by # drechsel and scheufele 2010 in var.used # based on this, it enumerates all the vintages that have been available until the # "cutoffday" of a month for each month. using vintage.survey (function) resulting in # "vintage.employed. # this is used to construct sets of vintages of the variables used at a given point # in time. They are named after their cutoffday, month and year. wd="h:/git/zeit-2" library(zoo) setwd(wd) dir.rt=paste(wd,'/data',sep='') keys=read.csv(paste(dir.rt,'/bundesbank_keys.csv',sep=''),stringsAsFactors=F) variable.file=list.files(paste(wd,'/data/bundesbankrealtime',sep='')) nvar=length(variable.file) variable=gsub('.csv','',variable.file) variable=data.frame(variable,stringsAsFactors=F) # loading data for (i in 1:nvar){ var=read.csv(paste(wd,'/data/bundesbankrealtime/',variable.file[i],sep=''),row.names=1) nobsx=nrow(var) # dropping meta data var=var[5:nobsx,] # transforming to numbers var=write.csv(var,paste(wd,'/deleteme.csv',sep='')) var=read.csv(paste(wd,'/deleteme.csv',sep=''),row.names=1) eval(parse(text=paste(variable[i,1],'=var',sep=''))) } variable$fst_obs=NA variable$lst_obs=NA variable$fst_vint=NA variable$lst_vint=NA variable[,paste('key',1:8)]=NA # columnnumber of keys keycol=grep('key',colnames(variable)) nvar=nrow(variable) for (i in 1:nvar){ var=eval(parse(text=variable[i,'variable'])) variable$fst_obs[i]=row.names(var)[1] variable$lst_obs[i]=row.names(var)[nrow(var)] variable$fst_vint[i]=colnames(var)[1] variable$lst_vint[i]=colnames(var)[ncol(var)] detail=unlist(strsplit(variable[i,1],'\\.')) variable[i,keycol]=detail } # setting labels variable[,paste('label',1:8,sep='')]=NA labelcol=grep('label',colnames(variable)) for 
(i in 1:8){ keylist=keys[keys$position==i,c(1,2)] for (j in 1:nvar){ # what is the key key=variable[j,keycol[i]] # look up what it means pos=grep(key,keylist[,2]) label=keylist[pos,1] variable[j,labelcol[i]]=label } } tu=strsplit(variable$fst_vint,'\\.') tu=sapply(tu,function(x) x) tu=gsub('X','',tu) tu=matrix(as.integer(tu),nrow=3) for (i in 1:nrow(variable)){ if (tu[1,i]<1000){ variable$fstvint.year[i]=as.integer(t(tu[3,i])) variable$fstvint.month[i]=as.integer(t(tu[2,i])) } if (tu[1,i]>1000){ variable$fstvint.year[i]=as.integer(t(tu[1,i])) variable$fstvint.month[i]=as.integer(t(tu[2,i])) } } # dropping those variables that have vintages that start later than 2005 vint.late=variable$fstvint.year<=2005 variable.sel=variable[vint.late,] # downsizing of data variable.sel=variable.sel[variable.sel$'key 1'=='M',] variable.sel=variable.sel[-grep('current prices',variable.sel$'label7'),] variable.sel=variable.sel[-grep('neither seasonally nor calendar adjusted',variable.sel$'label3'),] variable.sel=variable.sel[-grep('domestic|abroad',variable.sel$'label5'),] ## Real economy # orders sel=grep('order',variable.sel$'label5') var.order=variable.sel[sel,] var.order=var.order[grep('manufacturing|consumer|capital',var.order$'label6'),] var.order=var.order[-which(var.order$variable=='M.DE.Y.I.IO1.ABA20.C.I'),] var.used=var.order # prices sel=grep('price',variable.sel$'label4') var.cpi=variable.sel[sel,] var.cpi=var.cpi[grep('all categories|total, excluding energy',var.cpi$'label6'),] var.used=rbind(var.used,var.cpi) # hours sel=grep('hours worked by employed persons',variable.sel$'label5') var.hours=variable.sel[sel,] var.hours=var.hours[grep('in absolute terms',var.hours$'label8'),] var.used=rbind(var.used,var.hours) # employed persons sel=grep('employed persons',variable.sel$'label5') var.employ=variable.sel[sel,] var.employ=var.employ[grep('overall economy',var.employ$'label6'),] var.used=rbind(var.used,var.employ) # wage var.wage=variable[grep('wage',variable$'label5'),] 
var.wage=var.wage[grep('negotiated wages and salaries',var.wage$'label5'),] var.wage=var.wage[grep('overall',var.wage$'label6'),] var.wage=var.wage[grep('negotiated wages and salaries per hour',var.wage$'label5'),] var.used=rbind(var.used,var.wage) # intermediate goods var.interm=variable.sel[grep('intermediate goods',variable.sel$'label6'),] var.interm=var.interm[grep('production',var.interm$'label5'),] var.used=rbind(var.used,var.interm) # industrial production var.ip=variable.sel[-sel,] var.ip=var.ip[grep('including construction|excluding construction',var.ip$'label6'),] var.used=rbind(var.used,var.ip) var.used$label=c('ORD-I','ORD-C','CPI-EX','CPI','WHOUR','EW','TARIF','IP-VORL','IP-CONST','IP') var.used$transformation=c('Dln','Dln','D ln, DD ln','D ln, DD ln','L, D','D ln','D ln, DD ln','Dln','Dln','Dln') # variables used nvar=nrow(var.used) # # settings for vintage publication survey dates=data.frame(year=rep(1995:2015,each=12)) dates$month=rep(1:12,length(1995:2015)) dates$number=NA dates$vintage=NA dates$before=NA # latest before cut-off "day" dates$last=NA # last obs of month dates[,paste(1:31)]=NA cutoffday=31 vint.survey=function(variable.name,cutoffday){ # variable.name=var.used$variable[11] # day: if 15 all the closest publication date including 15 will be returned # creates a dataframe containing year and month and each publication each month text=paste('var=',variable.name,sep='') eval(parse(text=text)) t1=as.Date(colnames(var),'X%Y.%m.%d') tt=data.frame(date=t1) tt$month=as.numeric(format(t1,'%m')) tt$day=as.numeric(format(t1,'%d')) tt$year=as.numeric(format(t1,'%Y')) ntt=nrow(tt) for (i in 1:ntt){ dates[tt$year[i]==dates$year&tt$month[i]==dates$month,paste(tt$day[i])]=as.character(tt$date[i]) } ndates=nrow(dates) for (i in 1:ndates){ dates$number[i]=sum(is.na(dates[i,paste(1:31)])==F) pubdays=which(is.na(dates[i,paste(1:31)])==F) if (length(pubdays)>0){ before=pubdays[(pubdays-cutoffday)<=0] last=max(pubdays) if (length(before)>=1){ 
dates$before[i]=dates[i,paste(min(before))] dates$vintage[i]=dates$before[i] }else{ dates$before[i]=NA } dates$last[i]=dates[i,paste(last)] } if (i>1){ if(is.na(dates$before[i])==T&is.na(dates$last[i-1])==F){ dates$vintage[i]=dates$last[i-1] dates$last[i]=dates$last[i-1] } } } dates$vintage=paste('X',dates$vintage,sep='') dates$vintage=gsub('-','\\.',dates$vintage) return(dates) } tt=sapply(var.used$variable,function(x) vint.survey(x,cutoffday)$vintage) # create a list of vintages per variable used vintage.employ=cbind(dates[,c(1,2)],tt) start=vintage.employ$year==2005&vintage.employ$month==12 start=which(start) end=vintage.employ$year==2015&vintage.employ$month==2 end=which(end) vintage.employ=vintage.employ[start:end,] nvint=nrow(vintage.employ) nvar=ncol(vintage.employ)-2 dates=data.frame(year=rep(1990:2015,each=12)) mth=c(paste(0,1:9,sep=''),paste(10:12,sep='')) m=rep(mth,length(1990:2015)) y=as.character(rep(1990:2015,each=12)) ym=paste(y,m,sep='-') specimen=data.frame(matrix(NA,nrow=length(ym),ncol=nvar)) row.names(specimen)=ym colnames(specimen)=colnames(vintage.employ)[3:(nvar+2)] specimen_s=specimen # making sets sets=list() for (vint in 1:nvint){ for (var.i in colnames(specimen_s)){ vintage=as.character(vintage.employ[vint,var.i]) text=paste('var=',var.i,sep='') eval(parse(text=text)) rnames=row.names(var) specimen[rnames,var.i]=var[,vintage] } # cutoff is the date <= the observations are included cutoff=paste(vintage.employ$year[vint],vintage.employ$month[vint],cutoffday,sep='-') sets[[cutoff]]=specimen specimen=specimen_s } var.used[var.used$label=='WHOUR','L']=1 var.used[var.used$label=='WHOUR','D']=1 var.used[var.used$label%in%c('ORD-C','ORD-I','CPI','CPI-EX','IP','IP-VORL','IP-CONST','TARIF'),'Dln']=1 var.used[var.used$label%in%c('CPI','CPI-EX','TARIF'),'D2ln']=1 var.used$Source='Buba RTDB' colnames(var.used)[which(colnames(var.used)=='variable')]='code' var.used$name[which(var.used$label=='ORD-C')]='Manufacturing orders - consumer goods' 
var.used$name[which(var.used$label=='ORD-I')]='Manufacturing orders - capital goods' var.used$name[which(var.used$label=='CPI')]='CPI' var.used$name[which(var.used$label=='CPI-EX')]='Core CPI' var.used$name[which(var.used$label=='TARIF')]='Negotiated wage and salary level' var.used$name[which(var.used$label=='IP')]='Industrial production' var.used$name[which(var.used$label=='IP-CONST')]='Industrial production excluding construction' var.used$name[which(var.used$label=='IP-VORL')]='Intermediate goods production' var.used$name[which(var.used$label=='WHOUR')]='Hours worked' var.used$id[which(var.used$label=='ORD-C')]=84 var.used$id[which(var.used$label=='ORD-I')]=85 var.used$id[which(var.used$label=='CPI')]=78 var.used$id[which(var.used$label=='CPI-EX')]=79 var.used$id[which(var.used$label=='TARIF')]=80 var.used$id[which(var.used$label=='IP')]=0 var.used$id[which(var.used$label=='IP-CONST')]=-1 var.used$id[which(var.used$label=='IP-VORL')]=82 var.used$id[which(var.used$label=='WHOUR')]=90 var.used$lag=NA var.used$lag[which(var.used$label=='ORD-C')]=1 var.used$lag[which(var.used$label=='ORD-I')]=1 var.used$lag[which(var.used$label=='CPI')]=0 var.used$lag[which(var.used$label=='CPI-EX')]=1 var.used$lag[which(var.used$label=='TARIF')]=1 var.used$lag[which(var.used$label=='IP')]=1 var.used$lag[which(var.used$label=='IP-CONST')]=1 var.used$lag[which(var.used$label=='IP-VORL')]=1 var.used$lag[which(var.used$label=='WHOUR')]=1 var.used.s=var.used c("name","id","L","D","Dln","D2ln",'lag',"code")%in%colnames(var.used) var.used=var.used[,c("name","id","L","D","Dln","D2ln",'lag',"code")] row.names(var.used)=var.used.s$label var.used$Source='Buba RTDB' var.used['EW','Dln']=1 write.csv(var.used,'h:/Git/zeit-2/data/bubaRTDmeta.csv') save.image("C:/Users/dulbricht/Desktop/t.RData") save(sets,var.used, file = paste(wd,'/data/realtime_sets_cutoffday_',cutoffday,".RData",sep=''))
/code/preparing data/BUNDESBANK code for getting and preparing data/alt/making sets of bundesbank realtime data.R
no_license
dullibri/zeit-2
R
false
false
11,119
r
# this file collects the csv files containing the Bundesbank realtime data base. # it loads the dictionary of keys of bundesbank # it creates an overview of all variables and connects it to the keys and labels # in "variable" # it selects the variables employed in "should we trust in leading indicators..." by # drechsel and scheufele 2010 in var.used # based on this, it enumerates all the vintages that have been available until the # "cutoffday" of a month for each month. using vintage.survey (function) resulting in # "vintage.employed. # this is used to construct sets of vintages of the variables used at a given point # in time. They are named after their cutoffday, month and year. wd="h:/git/zeit-2" library(zoo) setwd(wd) dir.rt=paste(wd,'/data',sep='') keys=read.csv(paste(dir.rt,'/bundesbank_keys.csv',sep=''),stringsAsFactors=F) variable.file=list.files(paste(wd,'/data/bundesbankrealtime',sep='')) nvar=length(variable.file) variable=gsub('.csv','',variable.file) variable=data.frame(variable,stringsAsFactors=F) # loading data for (i in 1:nvar){ var=read.csv(paste(wd,'/data/bundesbankrealtime/',variable.file[i],sep=''),row.names=1) nobsx=nrow(var) # dropping meta data var=var[5:nobsx,] # transforming to numbers var=write.csv(var,paste(wd,'/deleteme.csv',sep='')) var=read.csv(paste(wd,'/deleteme.csv',sep=''),row.names=1) eval(parse(text=paste(variable[i,1],'=var',sep=''))) } variable$fst_obs=NA variable$lst_obs=NA variable$fst_vint=NA variable$lst_vint=NA variable[,paste('key',1:8)]=NA # columnnumber of keys keycol=grep('key',colnames(variable)) nvar=nrow(variable) for (i in 1:nvar){ var=eval(parse(text=variable[i,'variable'])) variable$fst_obs[i]=row.names(var)[1] variable$lst_obs[i]=row.names(var)[nrow(var)] variable$fst_vint[i]=colnames(var)[1] variable$lst_vint[i]=colnames(var)[ncol(var)] detail=unlist(strsplit(variable[i,1],'\\.')) variable[i,keycol]=detail } # setting labels variable[,paste('label',1:8,sep='')]=NA labelcol=grep('label',colnames(variable)) for 
(i in 1:8){ keylist=keys[keys$position==i,c(1,2)] for (j in 1:nvar){ # what is the key key=variable[j,keycol[i]] # look up what it means pos=grep(key,keylist[,2]) label=keylist[pos,1] variable[j,labelcol[i]]=label } } tu=strsplit(variable$fst_vint,'\\.') tu=sapply(tu,function(x) x) tu=gsub('X','',tu) tu=matrix(as.integer(tu),nrow=3) for (i in 1:nrow(variable)){ if (tu[1,i]<1000){ variable$fstvint.year[i]=as.integer(t(tu[3,i])) variable$fstvint.month[i]=as.integer(t(tu[2,i])) } if (tu[1,i]>1000){ variable$fstvint.year[i]=as.integer(t(tu[1,i])) variable$fstvint.month[i]=as.integer(t(tu[2,i])) } } # dropping those variables that have vintages that start later than 2005 vint.late=variable$fstvint.year<=2005 variable.sel=variable[vint.late,] # downsizing of data variable.sel=variable.sel[variable.sel$'key 1'=='M',] variable.sel=variable.sel[-grep('current prices',variable.sel$'label7'),] variable.sel=variable.sel[-grep('neither seasonally nor calendar adjusted',variable.sel$'label3'),] variable.sel=variable.sel[-grep('domestic|abroad',variable.sel$'label5'),] ## Real economy # orders sel=grep('order',variable.sel$'label5') var.order=variable.sel[sel,] var.order=var.order[grep('manufacturing|consumer|capital',var.order$'label6'),] var.order=var.order[-which(var.order$variable=='M.DE.Y.I.IO1.ABA20.C.I'),] var.used=var.order # prices sel=grep('price',variable.sel$'label4') var.cpi=variable.sel[sel,] var.cpi=var.cpi[grep('all categories|total, excluding energy',var.cpi$'label6'),] var.used=rbind(var.used,var.cpi) # hours sel=grep('hours worked by employed persons',variable.sel$'label5') var.hours=variable.sel[sel,] var.hours=var.hours[grep('in absolute terms',var.hours$'label8'),] var.used=rbind(var.used,var.hours) # employed persons sel=grep('employed persons',variable.sel$'label5') var.employ=variable.sel[sel,] var.employ=var.employ[grep('overall economy',var.employ$'label6'),] var.used=rbind(var.used,var.employ) # wage var.wage=variable[grep('wage',variable$'label5'),] 
var.wage=var.wage[grep('negotiated wages and salaries',var.wage$'label5'),] var.wage=var.wage[grep('overall',var.wage$'label6'),] var.wage=var.wage[grep('negotiated wages and salaries per hour',var.wage$'label5'),] var.used=rbind(var.used,var.wage) # intermediate goods var.interm=variable.sel[grep('intermediate goods',variable.sel$'label6'),] var.interm=var.interm[grep('production',var.interm$'label5'),] var.used=rbind(var.used,var.interm) # industrial production var.ip=variable.sel[-sel,] var.ip=var.ip[grep('including construction|excluding construction',var.ip$'label6'),] var.used=rbind(var.used,var.ip) var.used$label=c('ORD-I','ORD-C','CPI-EX','CPI','WHOUR','EW','TARIF','IP-VORL','IP-CONST','IP') var.used$transformation=c('Dln','Dln','D ln, DD ln','D ln, DD ln','L, D','D ln','D ln, DD ln','Dln','Dln','Dln') # variables used nvar=nrow(var.used) # # settings for vintage publication survey dates=data.frame(year=rep(1995:2015,each=12)) dates$month=rep(1:12,length(1995:2015)) dates$number=NA dates$vintage=NA dates$before=NA # latest before cut-off "day" dates$last=NA # last obs of month dates[,paste(1:31)]=NA cutoffday=31 vint.survey=function(variable.name,cutoffday){ # variable.name=var.used$variable[11] # day: if 15 all the closest publication date including 15 will be returned # creates a dataframe containing year and month and each publication each month text=paste('var=',variable.name,sep='') eval(parse(text=text)) t1=as.Date(colnames(var),'X%Y.%m.%d') tt=data.frame(date=t1) tt$month=as.numeric(format(t1,'%m')) tt$day=as.numeric(format(t1,'%d')) tt$year=as.numeric(format(t1,'%Y')) ntt=nrow(tt) for (i in 1:ntt){ dates[tt$year[i]==dates$year&tt$month[i]==dates$month,paste(tt$day[i])]=as.character(tt$date[i]) } ndates=nrow(dates) for (i in 1:ndates){ dates$number[i]=sum(is.na(dates[i,paste(1:31)])==F) pubdays=which(is.na(dates[i,paste(1:31)])==F) if (length(pubdays)>0){ before=pubdays[(pubdays-cutoffday)<=0] last=max(pubdays) if (length(before)>=1){ 
dates$before[i]=dates[i,paste(min(before))] dates$vintage[i]=dates$before[i] }else{ dates$before[i]=NA } dates$last[i]=dates[i,paste(last)] } if (i>1){ if(is.na(dates$before[i])==T&is.na(dates$last[i-1])==F){ dates$vintage[i]=dates$last[i-1] dates$last[i]=dates$last[i-1] } } } dates$vintage=paste('X',dates$vintage,sep='') dates$vintage=gsub('-','\\.',dates$vintage) return(dates) } tt=sapply(var.used$variable,function(x) vint.survey(x,cutoffday)$vintage) # create a list of vintages per variable used vintage.employ=cbind(dates[,c(1,2)],tt) start=vintage.employ$year==2005&vintage.employ$month==12 start=which(start) end=vintage.employ$year==2015&vintage.employ$month==2 end=which(end) vintage.employ=vintage.employ[start:end,] nvint=nrow(vintage.employ) nvar=ncol(vintage.employ)-2 dates=data.frame(year=rep(1990:2015,each=12)) mth=c(paste(0,1:9,sep=''),paste(10:12,sep='')) m=rep(mth,length(1990:2015)) y=as.character(rep(1990:2015,each=12)) ym=paste(y,m,sep='-') specimen=data.frame(matrix(NA,nrow=length(ym),ncol=nvar)) row.names(specimen)=ym colnames(specimen)=colnames(vintage.employ)[3:(nvar+2)] specimen_s=specimen # making sets sets=list() for (vint in 1:nvint){ for (var.i in colnames(specimen_s)){ vintage=as.character(vintage.employ[vint,var.i]) text=paste('var=',var.i,sep='') eval(parse(text=text)) rnames=row.names(var) specimen[rnames,var.i]=var[,vintage] } # cutoff is the date <= the observations are included cutoff=paste(vintage.employ$year[vint],vintage.employ$month[vint],cutoffday,sep='-') sets[[cutoff]]=specimen specimen=specimen_s } var.used[var.used$label=='WHOUR','L']=1 var.used[var.used$label=='WHOUR','D']=1 var.used[var.used$label%in%c('ORD-C','ORD-I','CPI','CPI-EX','IP','IP-VORL','IP-CONST','TARIF'),'Dln']=1 var.used[var.used$label%in%c('CPI','CPI-EX','TARIF'),'D2ln']=1 var.used$Source='Buba RTDB' colnames(var.used)[which(colnames(var.used)=='variable')]='code' var.used$name[which(var.used$label=='ORD-C')]='Manufacturing orders - consumer goods' 
var.used$name[which(var.used$label=='ORD-I')]='Manufacturing orders - capital goods' var.used$name[which(var.used$label=='CPI')]='CPI' var.used$name[which(var.used$label=='CPI-EX')]='Core CPI' var.used$name[which(var.used$label=='TARIF')]='Negotiated wage and salary level' var.used$name[which(var.used$label=='IP')]='Industrial production' var.used$name[which(var.used$label=='IP-CONST')]='Industrial production excluding construction' var.used$name[which(var.used$label=='IP-VORL')]='Intermediate goods production' var.used$name[which(var.used$label=='WHOUR')]='Hours worked' var.used$id[which(var.used$label=='ORD-C')]=84 var.used$id[which(var.used$label=='ORD-I')]=85 var.used$id[which(var.used$label=='CPI')]=78 var.used$id[which(var.used$label=='CPI-EX')]=79 var.used$id[which(var.used$label=='TARIF')]=80 var.used$id[which(var.used$label=='IP')]=0 var.used$id[which(var.used$label=='IP-CONST')]=-1 var.used$id[which(var.used$label=='IP-VORL')]=82 var.used$id[which(var.used$label=='WHOUR')]=90 var.used$lag=NA var.used$lag[which(var.used$label=='ORD-C')]=1 var.used$lag[which(var.used$label=='ORD-I')]=1 var.used$lag[which(var.used$label=='CPI')]=0 var.used$lag[which(var.used$label=='CPI-EX')]=1 var.used$lag[which(var.used$label=='TARIF')]=1 var.used$lag[which(var.used$label=='IP')]=1 var.used$lag[which(var.used$label=='IP-CONST')]=1 var.used$lag[which(var.used$label=='IP-VORL')]=1 var.used$lag[which(var.used$label=='WHOUR')]=1 var.used.s=var.used c("name","id","L","D","Dln","D2ln",'lag',"code")%in%colnames(var.used) var.used=var.used[,c("name","id","L","D","Dln","D2ln",'lag',"code")] row.names(var.used)=var.used.s$label var.used$Source='Buba RTDB' var.used['EW','Dln']=1 write.csv(var.used,'h:/Git/zeit-2/data/bubaRTDmeta.csv') save.image("C:/Users/dulbricht/Desktop/t.RData") save(sets,var.used, file = paste(wd,'/data/realtime_sets_cutoffday_',cutoffday,".RData",sep=''))
# server.R # server logic for predictive text app # 11 July 2019 # Load software package library(shiny) # Define server function shinyServer(function(input, output) { # Insert a short pause after user input Sys.sleep(0.1) # Clean input text cleanText <- reactive({ prepareText(input$impText, 3) }) output$clnText <- renderText({ paste("...", cleanText()) }) # Predict next word # nextWord <- reactive({ # predictWord(cleanText()) # }) # output$nxtWord <- renderPrint( # nextWord() # ) # } # ) output$prediction <- renderPrint({ Predict(cleanText()) }) # output$text1 <- renderText({ # paste("...", cleanText()); } )
/server.R
no_license
stevescicluna/Capstone
R
false
false
872
r
# server.R # server logic for predictive text app # 11 July 2019 # Load software package library(shiny) # Define server function shinyServer(function(input, output) { # Insert a short pause after user input Sys.sleep(0.1) # Clean input text cleanText <- reactive({ prepareText(input$impText, 3) }) output$clnText <- renderText({ paste("...", cleanText()) }) # Predict next word # nextWord <- reactive({ # predictWord(cleanText()) # }) # output$nxtWord <- renderPrint( # nextWord() # ) # } # ) output$prediction <- renderPrint({ Predict(cleanText()) }) # output$text1 <- renderText({ # paste("...", cleanText()); } )
# draw a 3D cross (+) symbol in an rgl view cross3d <- function(centre=rep(0,3), scale=rep(1,3), ...) { axes <- matrix( c(-1, 0, 0, 1, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, -1, 0, 0, 1), 6, 3, byrow=TRUE) if (!missing(scale)) { if (length(scale) != 3) scale <- rep(scale, length.out=3) axes <- rgl::scale3d(axes, scale[1], scale[2], scale[3]) } if (!missing(centre)) { if (length(centre) != 3) scale <- rep(centre, length.out=3) axes <- rgl::translate3d(axes, centre[1], centre[2], centre[3]) } rgl::segments3d(axes, ...) invisible(axes) }
/heplots/R/cross3d.R
no_license
ingted/R-Examples
R
false
false
658
r
# draw a 3D cross (+) symbol in an rgl view cross3d <- function(centre=rep(0,3), scale=rep(1,3), ...) { axes <- matrix( c(-1, 0, 0, 1, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, -1, 0, 0, 1), 6, 3, byrow=TRUE) if (!missing(scale)) { if (length(scale) != 3) scale <- rep(scale, length.out=3) axes <- rgl::scale3d(axes, scale[1], scale[2], scale[3]) } if (!missing(centre)) { if (length(centre) != 3) scale <- rep(centre, length.out=3) axes <- rgl::translate3d(axes, centre[1], centre[2], centre[3]) } rgl::segments3d(axes, ...) invisible(axes) }
source("K_calcs_Johnson_OS.R") TSSolPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,KH(compound,T_LOTS,max(S_list)),type="n",xlab="Temperature / Celcius", ylab="KH / dimensionless (gas/liquid)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,KH(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topleft",legendlist,lty=seq(from=1,to=length(S_list),by=1)) } TSSolPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,KH_Molar_per_atmosphere(compound,T_LOTS,min(S_list)),type="n",xlab="Temperature / Celcius", ylab="KH / dimensionless (gas/liquid)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,KH_Molar_per_atmosphere(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topright",legendlist,lty=seq(from=1,to=length(S_list),by=1)) } TSSchmidtPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,schmidt(compound,T_LOTS,max(S_list)),type="n",xlab="Temperature / Celcius", ylab="Schmidt number (dimensionless)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,schmidt(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topleft",legendlist,lty=seq(from=1,to=length(S_list),by=1)) }
/default_compound_plots.R
no_license
martwine/Transfer-velocity-core-scheme
R
false
false
1,371
r
source("K_calcs_Johnson_OS.R") TSSolPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,KH(compound,T_LOTS,max(S_list)),type="n",xlab="Temperature / Celcius", ylab="KH / dimensionless (gas/liquid)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,KH(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topleft",legendlist,lty=seq(from=1,to=length(S_list),by=1)) } TSSolPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,KH_Molar_per_atmosphere(compound,T_LOTS,min(S_list)),type="n",xlab="Temperature / Celcius", ylab="KH / dimensionless (gas/liquid)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,KH_Molar_per_atmosphere(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topright",legendlist,lty=seq(from=1,to=length(S_list),by=1)) } TSSchmidtPlot<-function(compound){ S_list<-c(0,17.5,32,33,34,35,36) plot(T_LOTS,schmidt(compound,T_LOTS,max(S_list)),type="n",xlab="Temperature / Celcius", ylab="Schmidt number (dimensionless)") linetype<-1 legendlist<-NULL for(S in S_list){ lines(T_LOTS,schmidt(compound,T_LOTS,S),lty=linetype) linetype<-linetype+1 legendlist<-c(legendlist,paste("S=",S,sep="")) } legend("topleft",legendlist,lty=seq(from=1,to=length(S_list),by=1)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ds.exposures_pData.R \name{ds.exposures_pData} \alias{ds.exposures_pData} \title{Extract exposure or phenotype data and save them to a data frame} \usage{ ds.exposures_pData( set, type = "", name = NULL, exposures_type = NULL, rownames2col = FALSE, datasources = NULL ) } \arguments{ \item{set}{\code{character} Name of the Exposome Set object on the server side} \item{type}{\code{character} To specify target of output table, \code{all} to include exposures and phenotypes, \code{exposures} to include only the exposures and \code{phenotypes} to include only the phenotypes. Default \code{all}} \item{name}{\code{character} (default \code{NULL}) Name of the new Exposome Set, if null the name will be \code{"set_table"} where \code{"set"} is the inputted argument} \item{exposures_type}{\code{character} (default \code{NULL}) Type of the exposures to be kept, usually \code{"numeric"} or \code{"character"/"factor"}. If \code{NULL} all the exposures will be kept.} \item{datasources}{a list of \code{\link{DSConnection-class}} (default \code{NULL}) objects obtained after login} } \value{ This function does not have an output. It creates an data frame object on the study server. } \description{ Extracts exposures, phenotypes or combined data and saves it to a data frame on the server side, if no \code{name} argument is provided, the new Exposome Set object will be named \code{"set_table"} where \code{"set"} is the inputted argument }
/man/ds.exposures_pData.Rd
permissive
isglobal-brge/dsExposomeClient
R
false
true
1,540
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ds.exposures_pData.R \name{ds.exposures_pData} \alias{ds.exposures_pData} \title{Extract exposure or phenotype data and save them to a data frame} \usage{ ds.exposures_pData( set, type = "", name = NULL, exposures_type = NULL, rownames2col = FALSE, datasources = NULL ) } \arguments{ \item{set}{\code{character} Name of the Exposome Set object on the server side} \item{type}{\code{character} To specify target of output table, \code{all} to include exposures and phenotypes, \code{exposures} to include only the exposures and \code{phenotypes} to include only the phenotypes. Default \code{all}} \item{name}{\code{character} (default \code{NULL}) Name of the new Exposome Set, if null the name will be \code{"set_table"} where \code{"set"} is the inputted argument} \item{exposures_type}{\code{character} (default \code{NULL}) Type of the exposures to be kept, usually \code{"numeric"} or \code{"character"/"factor"}. If \code{NULL} all the exposures will be kept.} \item{datasources}{a list of \code{\link{DSConnection-class}} (default \code{NULL}) objects obtained after login} } \value{ This function does not have an output. It creates an data frame object on the study server. } \description{ Extracts exposures, phenotypes or combined data and saves it to a data frame on the server side, if no \code{name} argument is provided, the new Exposome Set object will be named \code{"set_table"} where \code{"set"} is the inputted argument }
\name{ellipseCA} \alias{ellipseCA} \title{Draw confidence ellipses in CA} \description{ Draw confidence ellipses in CA around rows and/or columns. } \usage{ ellipseCA (x, ellipse=c("col","row"), method="multinomial", nbsample=100, axes=c(1,2), xlim=NULL, ylim=NULL, col.row="blue", col.col="red", col.row.ell=col.row, col.col.ell=col.col, graph.type = c("ggplot","classic"), ggoptions = NULL, \dots) } \arguments{ \item{x}{an object of class CA} \item{ellipse}{a vector of character that defines which ellipses are drawn} \item{method}{the method to construct ellipses (see details below)} \item{nbsample}{number of samples drawn to evaluate the stability of the points} \item{axes}{a length 2 vector specifying the components to plot} \item{xlim}{range for the plotted 'x' values, defaulting to the range of the finite values of 'x'} \item{ylim}{range for the plotted 'y' values, defaulting to the range of the finite values of 'y'} \item{col.row}{a color for the rows points} \item{col.col}{a color for columns points} \item{col.row.ell}{a color for the ellipses of rows points (the color "transparent" can be used if an ellipse should not be drawn)} \item{col.col.ell}{a color for the ellipses of columns points (the color "transparent" can be used if an ellipse should not be drawn)} \item{graph.type}{a character that gives the type of graph used: "ggplot" or "classic"} \item{ggoptions}{a list that gives the graph options when grah.type="ggplot" is used. See the optines and the default values in the details section} \item{\dots}{further arguments passed to or from the plot.CA function, such as title, invisible, ...} } \value{ Returns the factor map with the joint plot of CA with ellipses around some elements. } \details{ With \code{method="multinomial"}, the table X with the active elements is taken as a reference. 
Then new data tables are drawn in the following way: N (the sum of X) values are drawn from a multinomial distribution with theoretical frequencies equals to the values in the cells divided by N.\cr With \code{method="boot"}, the values are bootstrapped row by row: Ni (the sum of row i in the X table) values are taken in a vector with Nij equals to column j (with j varying from 1 to J). Thus \code{nbsample} new datasets are drawn and projected as supplementary rows and/or supplementary columns. Then confidence ellipses are drawn for each elements thanks to the \code{nbsample} supplementary points. } \references{ Lebart, L., Morineau, A. and Piron, M. (1995) Statistique exploratoire multidimensionnelle, \emph{Dunod}. } \author{Francois Husson \email{Francois.Husson@agrocampus-ouest.fr}} \seealso{ \code{\link{plot.CA}}, \code{\link{CA}}} \examples{ data(children) res.ca <- CA (children, col.sup = 6:8, row.sup = 15:18) ## Ellipses for all the active elements ellipseCA(res.ca) ## Ellipses around some columns only ellipseCA(res.ca,ellipse="col",col.col.ell=c(rep("red",2),rep("transparent",3)), invisible=c("row.sup","col.sup")) } \keyword{multivariate}
/man/ellipseCA.Rd
no_license
husson/FactoMineR
R
false
false
3,050
rd
\name{ellipseCA} \alias{ellipseCA} \title{Draw confidence ellipses in CA} \description{ Draw confidence ellipses in CA around rows and/or columns. } \usage{ ellipseCA (x, ellipse=c("col","row"), method="multinomial", nbsample=100, axes=c(1,2), xlim=NULL, ylim=NULL, col.row="blue", col.col="red", col.row.ell=col.row, col.col.ell=col.col, graph.type = c("ggplot","classic"), ggoptions = NULL, \dots) } \arguments{ \item{x}{an object of class CA} \item{ellipse}{a vector of character that defines which ellipses are drawn} \item{method}{the method to construct ellipses (see details below)} \item{nbsample}{number of samples drawn to evaluate the stability of the points} \item{axes}{a length 2 vector specifying the components to plot} \item{xlim}{range for the plotted 'x' values, defaulting to the range of the finite values of 'x'} \item{ylim}{range for the plotted 'y' values, defaulting to the range of the finite values of 'y'} \item{col.row}{a color for the rows points} \item{col.col}{a color for columns points} \item{col.row.ell}{a color for the ellipses of rows points (the color "transparent" can be used if an ellipse should not be drawn)} \item{col.col.ell}{a color for the ellipses of columns points (the color "transparent" can be used if an ellipse should not be drawn)} \item{graph.type}{a character that gives the type of graph used: "ggplot" or "classic"} \item{ggoptions}{a list that gives the graph options when grah.type="ggplot" is used. See the optines and the default values in the details section} \item{\dots}{further arguments passed to or from the plot.CA function, such as title, invisible, ...} } \value{ Returns the factor map with the joint plot of CA with ellipses around some elements. } \details{ With \code{method="multinomial"}, the table X with the active elements is taken as a reference. 
Then new data tables are drawn in the following way: N (the sum of X) values are drawn from a multinomial distribution with theoretical frequencies equals to the values in the cells divided by N.\cr With \code{method="boot"}, the values are bootstrapped row by row: Ni (the sum of row i in the X table) values are taken in a vector with Nij equals to column j (with j varying from 1 to J). Thus \code{nbsample} new datasets are drawn and projected as supplementary rows and/or supplementary columns. Then confidence ellipses are drawn for each elements thanks to the \code{nbsample} supplementary points. } \references{ Lebart, L., Morineau, A. and Piron, M. (1995) Statistique exploratoire multidimensionnelle, \emph{Dunod}. } \author{Francois Husson \email{Francois.Husson@agrocampus-ouest.fr}} \seealso{ \code{\link{plot.CA}}, \code{\link{CA}}} \examples{ data(children) res.ca <- CA (children, col.sup = 6:8, row.sup = 15:18) ## Ellipses for all the active elements ellipseCA(res.ca) ## Ellipses around some columns only ellipseCA(res.ca,ellipse="col",col.col.ell=c(rep("red",2),rep("transparent",3)), invisible=c("row.sup","col.sup")) } \keyword{multivariate}
readTree <- function(file,restabs){ library(ape) t <- read.tree(file) #Remove the "_species" names t$tip.label <- gsub("_species$","",t$tip.label) #Remove any node labels and brach lengths t$node.label <- NULL t$edge.length <- NULL common <- intersect(colnames(restabs$pfammat),t$tip.label) cat("readTree: From ",length(t$tip.label),"species in tree and",length(colnames(restabs$pfammat)),"species in results found",length(common),"common species.\n") #print(common) #Remove extra species from tree toremove <- setdiff(t$tip.label,common) t <- drop.tip(t,toremove) #plot(t) #Remove extra species from restabs toremove <- setdiff(colnames(restabs$pfammat),common) if (length(toremove) > 0) { restabs$pfam <- restabs$pfam[,-toremove] restabs$pfammat <- restabs$pfammat[,-toremove] restabs$clus <- restabs$clus[,-toremove] restabs$clusmat <- restabs$clusmat[,-toremove] } #Order restabs by tree restabs$pfam <- restabs$pfam[,c(colnames(restabs$pfam)[1:3],t$tip.label,grep("_IDs",colnames(restabs$pfam),value=T))] restabs$pfammat <- restabs$pfammat[,t$tip.label] restabs$clus <- restabs$clus[,c(colnames(restabs$clus)[1:2],t$tip.label,grep("_IDs",colnames(restabs$clus),value=T))] restabs$clusmat <- restabs$clusmat[,t$tip.label] out <- list(tree=t,restabs=restabs) invisible(out) }
/bin/f_readTree.R
no_license
fahad-syed/ProSol
R
false
false
1,407
r
readTree <- function(file,restabs){ library(ape) t <- read.tree(file) #Remove the "_species" names t$tip.label <- gsub("_species$","",t$tip.label) #Remove any node labels and brach lengths t$node.label <- NULL t$edge.length <- NULL common <- intersect(colnames(restabs$pfammat),t$tip.label) cat("readTree: From ",length(t$tip.label),"species in tree and",length(colnames(restabs$pfammat)),"species in results found",length(common),"common species.\n") #print(common) #Remove extra species from tree toremove <- setdiff(t$tip.label,common) t <- drop.tip(t,toremove) #plot(t) #Remove extra species from restabs toremove <- setdiff(colnames(restabs$pfammat),common) if (length(toremove) > 0) { restabs$pfam <- restabs$pfam[,-toremove] restabs$pfammat <- restabs$pfammat[,-toremove] restabs$clus <- restabs$clus[,-toremove] restabs$clusmat <- restabs$clusmat[,-toremove] } #Order restabs by tree restabs$pfam <- restabs$pfam[,c(colnames(restabs$pfam)[1:3],t$tip.label,grep("_IDs",colnames(restabs$pfam),value=T))] restabs$pfammat <- restabs$pfammat[,t$tip.label] restabs$clus <- restabs$clus[,c(colnames(restabs$clus)[1:2],t$tip.label,grep("_IDs",colnames(restabs$clus),value=T))] restabs$clusmat <- restabs$clusmat[,t$tip.label] out <- list(tree=t,restabs=restabs) invisible(out) }
\name{me-package} \alias{me-package} \alias{me} \docType{package} \title{ What the package does (short line) ~~ package title ~~ } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab me\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2015-05-21\cr License: \tab What license is it under?\cr } ~~ An overview of how to use the package, including the most important ~~ ~~ functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> ~~ The author and/or maintainer of the package ~~ } \references{ ~~ Literature or other references for background information ~~ } ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~ ~~ the R documentation directory ~~ \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ ~~ simple examples of the most important functions ~~ }
/me/man/me-package.Rd
no_license
Noexecuse/package
R
false
false
1,019
rd
\name{me-package} \alias{me-package} \alias{me} \docType{package} \title{ What the package does (short line) ~~ package title ~~ } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab me\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2015-05-21\cr License: \tab What license is it under?\cr } ~~ An overview of how to use the package, including the most important ~~ ~~ functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> ~~ The author and/or maintainer of the package ~~ } \references{ ~~ Literature or other references for background information ~~ } ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~ ~~ the R documentation directory ~~ \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ ~~ simple examples of the most important functions ~~ }
## Assignment ## This function creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(x = matrix()) { theMatrix <- NULL set <- function(y){ x <<- y theMatrix <<- NULL } get <- function() x setInvmatrix <- function(solve) theMatrix <<- solve getInvmatrix <- function() theMatrix list(set = set, get = get, setInvmatrix = setInvmatrix, getInvmatrix = getInvmatrix) } ## This function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. If the inverse has already been ## calculated (and the matrix has not changed), then cacheSolve ## should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inverted <- x$getInvmatrix() if(!is.null(inverted)){ ## then we have cached data print("cached") return(inverted) } else { ## not cached tempMatrix <- x$get() ##get the matrix ## print("debug...") ## print(tempMatrix) inverted <- solve(tempMatrix, ...) ##invert matrix x$setInvmatrix(inverted) ##store in cache print("not cahced") inverted } }
/cachematrix.R
no_license
oddaolse/ProgrammingAssignment2
R
false
false
1,283
r
## Assignment ## This function creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(x = matrix()) { theMatrix <- NULL set <- function(y){ x <<- y theMatrix <<- NULL } get <- function() x setInvmatrix <- function(solve) theMatrix <<- solve getInvmatrix <- function() theMatrix list(set = set, get = get, setInvmatrix = setInvmatrix, getInvmatrix = getInvmatrix) } ## This function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. If the inverse has already been ## calculated (and the matrix has not changed), then cacheSolve ## should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inverted <- x$getInvmatrix() if(!is.null(inverted)){ ## then we have cached data print("cached") return(inverted) } else { ## not cached tempMatrix <- x$get() ##get the matrix ## print("debug...") ## print(tempMatrix) inverted <- solve(tempMatrix, ...) ##invert matrix x$setInvmatrix(inverted) ##store in cache print("not cahced") inverted } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/base-functions.R \name{o_source} \alias{o_source} \title{Sourcing Octave/Matlab Files} \usage{ o_source(file = "", text = NULL, sep = ";\\n", ...) } \arguments{ \item{file}{the path to the Octave/Matlab source file -- typically with extension ".m".} \item{text}{a character vector containing \emph{Octave} statements, that are concatenated in a temporary file, which is then sourced. This argument typically enables the evaluation of multiple statements, as opposed to single statement evaluation performed by \code{\link{o_eval}}.} \item{sep}{single character string added as suffix to each element of \code{text}. The concatenation of all suffixed element should form a valid \emph{Octave} block.} \item{...}{other arguments passed to \code{\link{.CallOctave}}.} } \value{ None } \description{ This function sources an Octave file within the current Octave session. The loaded functions are accessible by subsequent calls of \code{\link{.CallOctave}}. } \section{Octave Documentation for \emph{source}}{ \Sexpr[results=rd,stage=render]{if( .Platform$OS.type != 'windows' || .Platform$r_arch != 'x64' ) RcppOctave::o_help(source, format='rd')} \emph{[Generated from Octave-\Sexpr{RcppOctave::o_version()} on \Sexpr{Sys.time()}]} } \examples{ \dontshow{ options(R_CHECK_RUNNING_EXAMPLES_=TRUE) ## roxygen generated flag } \dontshow{ o_clear() } # source file mfile <- system.file("scripts/ex_source.m", package='RcppOctave') o_source(mfile) # pass multiple statements o_source(text="a=1;b=3;c=randn(1,5);") o_get('a','b','c') # also works with a character vector of statements o_source(text=c("a=10;b=30;", "c=randn(1,5)", "d=4")) o_get('a','b','c', 'd') } \seealso{ Other Octave_files: \code{\link{o_addpath}} }
/man/o_source.Rd
no_license
renozao/RcppOctave
R
false
true
1,809
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/base-functions.R \name{o_source} \alias{o_source} \title{Sourcing Octave/Matlab Files} \usage{ o_source(file = "", text = NULL, sep = ";\\n", ...) } \arguments{ \item{file}{the path to the Octave/Matlab source file -- typically with extension ".m".} \item{text}{a character vector containing \emph{Octave} statements, that are concatenated in a temporary file, which is then sourced. This argument typically enables the evaluation of multiple statements, as opposed to single statement evaluation performed by \code{\link{o_eval}}.} \item{sep}{single character string added as suffix to each element of \code{text}. The concatenation of all suffixed element should form a valid \emph{Octave} block.} \item{...}{other arguments passed to \code{\link{.CallOctave}}.} } \value{ None } \description{ This function sources an Octave file within the current Octave session. The loaded functions are accessible by subsequent calls of \code{\link{.CallOctave}}. } \section{Octave Documentation for \emph{source}}{ \Sexpr[results=rd,stage=render]{if( .Platform$OS.type != 'windows' || .Platform$r_arch != 'x64' ) RcppOctave::o_help(source, format='rd')} \emph{[Generated from Octave-\Sexpr{RcppOctave::o_version()} on \Sexpr{Sys.time()}]} } \examples{ \dontshow{ options(R_CHECK_RUNNING_EXAMPLES_=TRUE) ## roxygen generated flag } \dontshow{ o_clear() } # source file mfile <- system.file("scripts/ex_source.m", package='RcppOctave') o_source(mfile) # pass multiple statements o_source(text="a=1;b=3;c=randn(1,5);") o_get('a','b','c') # also works with a character vector of statements o_source(text=c("a=10;b=30;", "c=randn(1,5)", "d=4")) o_get('a','b','c', 'd') } \seealso{ Other Octave_files: \code{\link{o_addpath}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spider.R \name{spider} \alias{spider} \title{Generate spider data} \usage{ spider(n = 3000) } \arguments{ \item{n}{The number of points to generate} } \description{ Generates data for plotting a spider and stores it in a data frame with (x, y) coordinates and radius r } \examples{ spider() } \keyword{spider}
/man/spider.Rd
no_license
Shornone/mathart
R
false
true
388
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spider.R \name{spider} \alias{spider} \title{Generate spider data} \usage{ spider(n = 3000) } \arguments{ \item{n}{The number of points to generate} } \description{ Generates data for plotting a spider and stores it in a data frame with (x, y) coordinates and radius r } \examples{ spider() } \keyword{spider}
\name{as.stlpp} \alias{as.stlpp} \title{Convert data to a space-time point pattern on a linear network} \usage{ as.stlpp(x,y,t,L) } \description{Convert data to a space-time point pattern on a linear network} \arguments{ \item{x,y,t}{vectors of cartesian coordinates and time occurance. Alternatively, x can be of classes \code{\link{data.frame}}, \code{\link{ppp}} and \code{\link{lpp}}.} \item{L}{linear network (object of class \code{\link{linnet}}) on which the points lie.} } \details{ This function converts data to an object of class stlpp. Data can be of formats: \itemize{ \item x is of class class \code{\link{data.frame}} with three columns. Then columns are considered as cartesian coordinates (i.e. x,y,t) and they will be converted to a spatio-temporal point pattern on the linear network L. \item x is a planar point pattern (class \code{\link{ppp}}). Then x will be converted to a spatio-temporal point pattern on the linear network L and with coresponding time vector t. \item x is a linear point pattern (class \code{\link{lpp}}). Then x will be converted to a spatio-temporal point pattern on the linear network L and with coresponding time vector t. \item x,y,t are vectors of same length where x,y are living on the corresponding network L. } } \value{ A spatio-temporal point pattern on a linear network. An object of class \code{\link{stlpp}}. } \author{ Mehdi Moradi <m2.moradi@yahoo.com> } \seealso{ \code{\link{as.lpp.stlpp}}, \code{\link{runifpointOnLines}}, \code{\link{as.lpp}} } \examples{ data(easynet) x <- runifpointOnLines(40, easynet) t1 <- sample(1:10,40,replace=TRUE) Y <- as.stlpp(x,t=t1,L=easynet) Z <- as.lpp.stlpp(Y) t2 <- sample(1:10,40,replace=TRUE) W <- as.stlpp(Z,t=t2) }
/man/as.stlpp.Rd
no_license
Moradii/stlnpp
R
false
false
1,731
rd
\name{as.stlpp} \alias{as.stlpp} \title{Convert data to a space-time point pattern on a linear network} \usage{ as.stlpp(x,y,t,L) } \description{Convert data to a space-time point pattern on a linear network} \arguments{ \item{x,y,t}{vectors of cartesian coordinates and time occurance. Alternatively, x can be of classes \code{\link{data.frame}}, \code{\link{ppp}} and \code{\link{lpp}}.} \item{L}{linear network (object of class \code{\link{linnet}}) on which the points lie.} } \details{ This function converts data to an object of class stlpp. Data can be of formats: \itemize{ \item x is of class class \code{\link{data.frame}} with three columns. Then columns are considered as cartesian coordinates (i.e. x,y,t) and they will be converted to a spatio-temporal point pattern on the linear network L. \item x is a planar point pattern (class \code{\link{ppp}}). Then x will be converted to a spatio-temporal point pattern on the linear network L and with coresponding time vector t. \item x is a linear point pattern (class \code{\link{lpp}}). Then x will be converted to a spatio-temporal point pattern on the linear network L and with coresponding time vector t. \item x,y,t are vectors of same length where x,y are living on the corresponding network L. } } \value{ A spatio-temporal point pattern on a linear network. An object of class \code{\link{stlpp}}. } \author{ Mehdi Moradi <m2.moradi@yahoo.com> } \seealso{ \code{\link{as.lpp.stlpp}}, \code{\link{runifpointOnLines}}, \code{\link{as.lpp}} } \examples{ data(easynet) x <- runifpointOnLines(40, easynet) t1 <- sample(1:10,40,replace=TRUE) Y <- as.stlpp(x,t=t1,L=easynet) Z <- as.lpp.stlpp(Y) t2 <- sample(1:10,40,replace=TRUE) W <- as.stlpp(Z,t=t2) }
#R script for plot 3 library(dplyr) power <- read.csv( "household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE, na.strings = "?" ) power_t <- tbl_df(power) power_feb <- filter(power, Date %in% c("1/2/2007", "2/2/2007")) power_feb$DateTime <- strptime(paste(power_feb$Date, power_feb$Time), "%d/%m/%Y %H:%M:%S") png("plot3.png", width = 480, height = 480) plot( power_feb$DateTime, power_feb$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = NA, col = "black" ) lines(power_feb$DateTime, power_feb$Sub_metering_2, col = "red") lines(power_feb$DateTime, power_feb$Sub_metering_3, col = "blue") legend( "topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") ) dev.off()
/plot3.R
no_license
jesyfax/ExData_Plotting1
R
false
false
858
r
#R script for plot 3 library(dplyr) power <- read.csv( "household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE, na.strings = "?" ) power_t <- tbl_df(power) power_feb <- filter(power, Date %in% c("1/2/2007", "2/2/2007")) power_feb$DateTime <- strptime(paste(power_feb$Date, power_feb$Time), "%d/%m/%Y %H:%M:%S") png("plot3.png", width = 480, height = 480) plot( power_feb$DateTime, power_feb$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = NA, col = "black" ) lines(power_feb$DateTime, power_feb$Sub_metering_2, col = "red") lines(power_feb$DateTime, power_feb$Sub_metering_3, col = "blue") legend( "topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") ) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runfile.R \name{runfiles} \alias{runfiles} \title{Retrieve all the `run` files from a particular workflow.} \usage{ runfiles(workflow_id, target, con = bety()) } \arguments{ \item{workflow_id}{ID of PEcAn workflow (from BETY `workflows` table)} \item{con}{DBI database connection object for BETY database. Default is output of [bety()].} } \value{ Character vector of URLs for reading or downloading files. } \description{ Retrieve all the `run` files from a particular workflow. } \author{ Alexey Shiklomanov }
/man/runfiles.Rd
permissive
femeunier/fortebaseline
R
false
true
591
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runfile.R \name{runfiles} \alias{runfiles} \title{Retrieve all the `run` files from a particular workflow.} \usage{ runfiles(workflow_id, target, con = bety()) } \arguments{ \item{workflow_id}{ID of PEcAn workflow (from BETY `workflows` table)} \item{con}{DBI database connection object for BETY database. Default is output of [bety()].} } \value{ Character vector of URLs for reading or downloading files. } \description{ Retrieve all the `run` files from a particular workflow. } \author{ Alexey Shiklomanov }
cpp("queue_sim.cpp") KS0_intensity = KS_train() save(KS0_intensity, file = "KS0_intensity.RData") # load("KS0_intensity.RData") x = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.0) cut_KS = quantile(x[,1], .90) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) sum(y[,1]<cut_KS)/length(y[,1]) result = simulate() intensity = (result$N)/(result$Y) save(intensity, file = "Beta0.RData") # load("Beta0.RData") x = test_stat(Beta0 = intensity) cut = quantile(x[,1], .90) y = test_stat(Beta0 = intensity, slow = 1.0); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.1); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.2); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.3); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.5); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 2.0); sum(y[,1]<cut)/length(y[,1]) # ========================================================================== # RESULTS # ========================================================================== # > x = test_stat(Beta0 = intensity) # > # > cut = quantile(x[,1], .90) # > y = test_stat(Beta0 = intensity, slow = 1.0); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.90161 # > y = test_stat(Beta0 = intensity, slow = 1.1); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.86198 # > y = test_stat(Beta0 = intensity, slow = 1.2); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.81568 # > y = test_stat(Beta0 = intensity, slow = 1.3); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.76586 # > y = test_stat(Beta0 = intensity, slow = 1.5); # > 
sum(y[,1]<cut)/length(y[,1]) # [1] 0.65191 # > y = test_stat(Beta0 = intensity, slow = 2.0); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.39273 # > load("KS0_intensity.RData") # > x = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.0) # > cut_KS = quantile(x[,1], .90) # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.89262 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.88392 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.87169 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.84644 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.76957 # # CORRECTED RESUTS # # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.89155 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.88304 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.87085 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.8442 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.767
/queue_monitor.R
no_license
devashishdasUSF/CTMC_realdata
R
false
false
3,432
r
cpp("queue_sim.cpp") KS0_intensity = KS_train() save(KS0_intensity, file = "KS0_intensity.RData") # load("KS0_intensity.RData") x = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.0) cut_KS = quantile(x[,1], .90) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) sum(y[,1]<cut_KS)/length(y[,1]) y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) sum(y[,1]<cut_KS)/length(y[,1]) result = simulate() intensity = (result$N)/(result$Y) save(intensity, file = "Beta0.RData") # load("Beta0.RData") x = test_stat(Beta0 = intensity) cut = quantile(x[,1], .90) y = test_stat(Beta0 = intensity, slow = 1.0); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.1); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.2); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.3); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 1.5); sum(y[,1]<cut)/length(y[,1]) y = test_stat(Beta0 = intensity, slow = 2.0); sum(y[,1]<cut)/length(y[,1]) # ========================================================================== # RESULTS # ========================================================================== # > x = test_stat(Beta0 = intensity) # > # > cut = quantile(x[,1], .90) # > y = test_stat(Beta0 = intensity, slow = 1.0); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.90161 # > y = test_stat(Beta0 = intensity, slow = 1.1); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.86198 # > y = test_stat(Beta0 = intensity, slow = 1.2); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.81568 # > y = test_stat(Beta0 = intensity, slow = 1.3); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.76586 # > y = test_stat(Beta0 = intensity, slow = 1.5); # > 
sum(y[,1]<cut)/length(y[,1]) # [1] 0.65191 # > y = test_stat(Beta0 = intensity, slow = 2.0); # > sum(y[,1]<cut)/length(y[,1]) # [1] 0.39273 # > load("KS0_intensity.RData") # > x = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.0) # > cut_KS = quantile(x[,1], .90) # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.89262 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.88392 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.87169 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.84644 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.76957 # # CORRECTED RESUTS # # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.1) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.89155 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.2) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.88304 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.3) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.87085 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 1.5) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.8442 # > y = KS_test_stat(KS0_intensity = KS0_intensity, slow = 2.0) # > sum(y[,1]<cut_KS)/length(y[,1]) # [1] 0.767
#' Sample dates (vector) #' #' #' @docType data #' #' @format A vector containg 13 dates data "sample_date"
/R/sample_date-data.R
no_license
kun-ecology/SER
R
false
false
110
r
#' Sample dates (vector) #' #' #' @docType data #' #' @format A vector containg 13 dates data "sample_date"
################# ### plot.bigo ### ################# #' @export plot.bigo <- function(x, y, ...) { df <- x[["runtimes"]] if (ncol(df) == 2) { var_name <- setdiff(names(df), "elapsed") p <- ggplot2::ggplot(df, ggplot2::aes_string(x = var_name, y = "elapsed")) + ggplot2::geom_line() + ggplot2::labs(title = paste("Complexity of", x[["function_name"]]), x = var_name, y = "Runtime (s)") return(p) } else if (ncol(df) == 3) { var_names <- setdiff(names(df), "elapsed") p <- ggplot2::ggplot(df, ggplot2::aes_string(x = var_names[[1]], y = var_names[[2]], z = "elapsed")) + ggplot2::geom_contour() + ggplot2::labs(title = paste("Complexity of", x[["function_name"]]), x = var_names[[1]], y = var_names[[2]], z = "Runtime (s)") return(p) } else { stop("Cannot plot complexity when there are more than two variables.", call. = FALSE) } } ###
/R/plot.R
permissive
bfgray3/bigo
R
false
false
1,064
r
################# ### plot.bigo ### ################# #' @export plot.bigo <- function(x, y, ...) { df <- x[["runtimes"]] if (ncol(df) == 2) { var_name <- setdiff(names(df), "elapsed") p <- ggplot2::ggplot(df, ggplot2::aes_string(x = var_name, y = "elapsed")) + ggplot2::geom_line() + ggplot2::labs(title = paste("Complexity of", x[["function_name"]]), x = var_name, y = "Runtime (s)") return(p) } else if (ncol(df) == 3) { var_names <- setdiff(names(df), "elapsed") p <- ggplot2::ggplot(df, ggplot2::aes_string(x = var_names[[1]], y = var_names[[2]], z = "elapsed")) + ggplot2::geom_contour() + ggplot2::labs(title = paste("Complexity of", x[["function_name"]]), x = var_names[[1]], y = var_names[[2]], z = "Runtime (s)") return(p) } else { stop("Cannot plot complexity when there are more than two variables.", call. = FALSE) } } ###
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/05_Transformation_normalization.R \name{medianNormFactor} \alias{medianNormFactor} \title{medianNormFactor()} \usage{ medianNormFactor(romics_object, main_factor = "factor") } \arguments{ \item{romics_object}{has to be an romics_object created using romicsCreateObject() that has not been previously log-transformed using log10transform()} } \value{ This function returns the transformed romics_object with updated data layer. } \description{ Normalizes the samples by their median within a given factor. The median of the median within this factor will be used as factor-specific median center. } \details{ median normalize the samples within a given factor the median of the median of this factor will be used as new factor median. } \author{ Geremy Clair }
/man/medianNormFactor.Rd
permissive
PNNL-Comp-Mass-Spec/RomicsProcessor
R
false
true
838
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/05_Transformation_normalization.R \name{medianNormFactor} \alias{medianNormFactor} \title{medianNormFactor()} \usage{ medianNormFactor(romics_object, main_factor = "factor") } \arguments{ \item{romics_object}{has to be an romics_object created using romicsCreateObject() that has not been previously log-transformed using log10transform()} } \value{ This function returns the transformed romics_object with updated data layer. } \description{ Normalizes the samples by their median within a given factor. The median of the median within this factor will be used as factor-specific median center. } \details{ median normalize the samples within a given factor the median of the median of this factor will be used as new factor median. } \author{ Geremy Clair }
#'Compute negative log likelihood of HMM using all parameters #' #'This function computes the negative log likelihood of the hidden Markov model. #'using all parameters, untransformed. It is used to get the covariance matrix #'of the fitted model. #' #'@param parvect The vector of parameters to be estimated #'@param obs A n x ndist matrix of data. If ndist=1, obs must be a n x 1 matrix. #'@param PDFs A list of PDFs for the ndist distributions. #'@param skeleton A list with the original parameter structure used to reassemble #'parvect #'@param nstates Number of hidden states #'@param useRcpp Logical indicating whether or not to use Rcpp. #'@return The negative log likelihood of the hidden markov model. #'@export ## function that computes the negative log-likelihood move.HMM.mllk.full <- function(parvect,obs,PDFs,skeleton,nstates,useRcpp=FALSE){ n=nrow(obs) if(nstates==1){ parvect=c(1,parvect) } params=relist(parvect,skeleton) params[[1]]=t(params[[1]]) delta=solve(t(diag(nrow(params[[1]]))-params[[1]]+1),rep(1,nrow(params[[1]]))) allprobs <- matrix(rep(1,nstates*n),nrow=n)#f(y_t|s_t=k) if(nstates>1){ nparam=unlist(lapply(params,ncol))[-1] }else{ nparam=unlist(lapply(params,ncol)) } ndists=length(PDFs) #make index for NAs use=!is.na(obs)*1 for(i in 1:ndists){ if(nparam[i]==2){ #for 2 parameter distributions for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j,1],params[[i+1]][j,2]) } }else if(nparam[i]==1){ #for 1 parameter distributions. 
for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j]) } }else if(nparam[i]==3){ #for 3 parameter distributions for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j,1],params[[i+1]][j,2],params[[i+1]][j,3]) } } } foo <- delta Gamma = params[[1]] if(class(useRcpp)=="CFunc"){ foo=matrix(foo,ncol=nstates) mllk=useRcpp(Gamma,allprobs,foo) }else{ lscale <- 0 for (i in 1:n){ foo <- foo%*%Gamma*allprobs[i,] sumfoo <- sum(foo) lscale <- lscale+log(sumfoo) foo <- foo/sumfoo } mllk <- -lscale } mllk }
/R/move.HMM.mllk.full.R
no_license
benaug/move.HMM
R
false
false
2,325
r
#'Compute negative log likelihood of HMM using all parameters #' #'This function computes the negative log likelihood of the hidden Markov model. #'using all parameters, untransformed. It is used to get the covariance matrix #'of the fitted model. #' #'@param parvect The vector of parameters to be estimated #'@param obs A n x ndist matrix of data. If ndist=1, obs must be a n x 1 matrix. #'@param PDFs A list of PDFs for the ndist distributions. #'@param skeleton A list with the original parameter structure used to reassemble #'parvect #'@param nstates Number of hidden states #'@param useRcpp Logical indicating whether or not to use Rcpp. #'@return The negative log likelihood of the hidden markov model. #'@export ## function that computes the negative log-likelihood move.HMM.mllk.full <- function(parvect,obs,PDFs,skeleton,nstates,useRcpp=FALSE){ n=nrow(obs) if(nstates==1){ parvect=c(1,parvect) } params=relist(parvect,skeleton) params[[1]]=t(params[[1]]) delta=solve(t(diag(nrow(params[[1]]))-params[[1]]+1),rep(1,nrow(params[[1]]))) allprobs <- matrix(rep(1,nstates*n),nrow=n)#f(y_t|s_t=k) if(nstates>1){ nparam=unlist(lapply(params,ncol))[-1] }else{ nparam=unlist(lapply(params,ncol)) } ndists=length(PDFs) #make index for NAs use=!is.na(obs)*1 for(i in 1:ndists){ if(nparam[i]==2){ #for 2 parameter distributions for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j,1],params[[i+1]][j,2]) } }else if(nparam[i]==1){ #for 1 parameter distributions. 
for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j]) } }else if(nparam[i]==3){ #for 3 parameter distributions for (j in 1:nstates){ allprobs[use[,i],j] <- allprobs[use[,i],j]*PDFs[[i]](obs[use[,i],i],params[[i+1]][j,1],params[[i+1]][j,2],params[[i+1]][j,3]) } } } foo <- delta Gamma = params[[1]] if(class(useRcpp)=="CFunc"){ foo=matrix(foo,ncol=nstates) mllk=useRcpp(Gamma,allprobs,foo) }else{ lscale <- 0 for (i in 1:n){ foo <- foo%*%Gamma*allprobs[i,] sumfoo <- sum(foo) lscale <- lscale+log(sumfoo) foo <- foo/sumfoo } mllk <- -lscale } mllk }
library(ElemStatLearn) library(randomForest) library(caret) data(vowel.train) data(vowel.test) vowel.train$y <- factor(vowel.train$y) vowel.test$y <- factor(vowel.test$y) set.seed(33833) modFitRF <- train(y ~., data = vowel.train, method = "rf") modFitG <- train(y ~., data = vowel.train, method = "gbm") predictRF <- confusionMatrix(vowel.test$y, predict(modFitRF,vowel.test)) #.5909 predictG <- confusionMatrix(vowel.test$y, predict(modFitG,vowel.test)) #.53 agreement <- predict(modFitRF,vowel.test) == predict(modFitG, vowel.test) vowel.test$prediction <- predict(modFitRF,vowel.test) vowel.test.agree <- vowel.test[agreement,] sum(vowel.test.agree$y == vowel.test.agree$prediction)/nrow(vowel.test.agree) #.62 ##Q2 library(caret) library(gbm) set.seed(3433) library(AppliedPredictiveModeling) data(AlzheimerDisease) adData = data.frame(diagnosis,predictors) inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]] training = adData[ inTrain,] testing = adData[-inTrain,] set.seed(62433) modFitRF <- train(diagnosis ~., data = training, method = "rf") modFitG <- train(diagnosis ~., data = training, method = "gbm") modFitL <- train(diagnosis ~., data = training, method = "lda") conMRF <- confusionMatrix(testing$diagnosis, predict(modFitRF,testing)) #.79 conMG <- confusionMatrix(testing$diagnosis, predict(modFitG,testing)) #.78 conML <- confusionMatrix(testing$diagnosis, predict(modFitL,testing)) #.7683 pr1 <- predict(modFitRF,testing) pr2 <-predict(modFitG,testing) pr3 <- predict(modFitL,testing) preDF <- data.frame(pr1,pr2,pr3, diagnosis = testing$diagnosis) comModFit <- train(diagnosis ~., method = "rf", data = predDF) combPred <- predict(comModFit, predDF) sum(combPred == testing$diagnosis)/nrow(testing) #.804878, better than RF and lda, same boosing ##Q3 set.seed(3523) library(AppliedPredictiveModeling) library(elasticnet) library(caret) data(concrete) inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]] training = concrete[ inTrain,] testing = 
concrete[-inTrain,] fit<- train(CompressiveStrength~., data=training, method = "lasso") plot.enet(fit$finalModel) ##NOT COURSE AGGREGATE, MUST BE FINE AGGREGATE? plot(fit$finalModel, xvar="penalty", use.color=T) #CEMENENT! #Q4 library(lubridate) library(forecast) setwd("~/Dropbox/machinelearning") list.files() dat <- read.csv("gaData.csv", stringsAsFactors = F) training = dat[year(dat$date) < 2012,] testing = dat[(year(dat$date)) > 2011,] tstrain = ts(training$visitsTumblr) tstest <- ts(testing$visitsTumblr) fit <- bats(tstrain) ftest <- forecast(tstest, model = fit, h = 235) lower <- as.data.frame(ftest$lower) colnames(lower)[2] <- "lower" upper <- as.data.frame(ftest$upper) colnames(upper)[2] <- "upper" results <- data.frame(cbind(lower = lower$lower, upper = upper$upper, testing = testing$visitsTumblr)) results$ininterval <- (results$testing > results$lower & results$testing < results$upper) sum(results$ininterval)/nrow((results)) # 0.9574468 #Q5 set.seed(3523) library(AppliedPredictiveModeling) library(e1071) data(concrete) inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]] training = concrete[ inTrain,] testing = concrete[-inTrain,] set.seed(352) model <- svm(CompressiveStrength ~ ., data = training) pred <- predict(model, testing) sqrt(mean((testing$CompressiveStrength - pred)^2)) #6.715009
/quizzesAndOther/quiz4.R
no_license
michaelrahija/PracticalMachineLearning
R
false
false
3,411
r
library(ElemStatLearn) library(randomForest) library(caret) data(vowel.train) data(vowel.test) vowel.train$y <- factor(vowel.train$y) vowel.test$y <- factor(vowel.test$y) set.seed(33833) modFitRF <- train(y ~., data = vowel.train, method = "rf") modFitG <- train(y ~., data = vowel.train, method = "gbm") predictRF <- confusionMatrix(vowel.test$y, predict(modFitRF,vowel.test)) #.5909 predictG <- confusionMatrix(vowel.test$y, predict(modFitG,vowel.test)) #.53 agreement <- predict(modFitRF,vowel.test) == predict(modFitG, vowel.test) vowel.test$prediction <- predict(modFitRF,vowel.test) vowel.test.agree <- vowel.test[agreement,] sum(vowel.test.agree$y == vowel.test.agree$prediction)/nrow(vowel.test.agree) #.62 ##Q2 library(caret) library(gbm) set.seed(3433) library(AppliedPredictiveModeling) data(AlzheimerDisease) adData = data.frame(diagnosis,predictors) inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]] training = adData[ inTrain,] testing = adData[-inTrain,] set.seed(62433) modFitRF <- train(diagnosis ~., data = training, method = "rf") modFitG <- train(diagnosis ~., data = training, method = "gbm") modFitL <- train(diagnosis ~., data = training, method = "lda") conMRF <- confusionMatrix(testing$diagnosis, predict(modFitRF,testing)) #.79 conMG <- confusionMatrix(testing$diagnosis, predict(modFitG,testing)) #.78 conML <- confusionMatrix(testing$diagnosis, predict(modFitL,testing)) #.7683 pr1 <- predict(modFitRF,testing) pr2 <-predict(modFitG,testing) pr3 <- predict(modFitL,testing) preDF <- data.frame(pr1,pr2,pr3, diagnosis = testing$diagnosis) comModFit <- train(diagnosis ~., method = "rf", data = predDF) combPred <- predict(comModFit, predDF) sum(combPred == testing$diagnosis)/nrow(testing) #.804878, better than RF and lda, same boosing ##Q3 set.seed(3523) library(AppliedPredictiveModeling) library(elasticnet) library(caret) data(concrete) inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]] training = concrete[ inTrain,] testing = 
concrete[-inTrain,] fit<- train(CompressiveStrength~., data=training, method = "lasso") plot.enet(fit$finalModel) ##NOT COURSE AGGREGATE, MUST BE FINE AGGREGATE? plot(fit$finalModel, xvar="penalty", use.color=T) #CEMENENT! #Q4 library(lubridate) library(forecast) setwd("~/Dropbox/machinelearning") list.files() dat <- read.csv("gaData.csv", stringsAsFactors = F) training = dat[year(dat$date) < 2012,] testing = dat[(year(dat$date)) > 2011,] tstrain = ts(training$visitsTumblr) tstest <- ts(testing$visitsTumblr) fit <- bats(tstrain) ftest <- forecast(tstest, model = fit, h = 235) lower <- as.data.frame(ftest$lower) colnames(lower)[2] <- "lower" upper <- as.data.frame(ftest$upper) colnames(upper)[2] <- "upper" results <- data.frame(cbind(lower = lower$lower, upper = upper$upper, testing = testing$visitsTumblr)) results$ininterval <- (results$testing > results$lower & results$testing < results$upper) sum(results$ininterval)/nrow((results)) # 0.9574468 #Q5 set.seed(3523) library(AppliedPredictiveModeling) library(e1071) data(concrete) inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]] training = concrete[ inTrain,] testing = concrete[-inTrain,] set.seed(352) model <- svm(CompressiveStrength ~ ., data = training) pred <- predict(model, testing) sqrt(mean((testing$CompressiveStrength - pred)^2)) #6.715009
## code to prepare `cancer` dataset goes here library(haven) cancer2 <- haven::read_dta(file = "http://www.stata-press.com/data/r16/cancer.dta") cancer2 <- haven::zap_formats(cancer2) cancer2 <- haven::zap_label(cancer2) cancer2 <- haven::zap_labels(cancer2) cancer2 <- haven::zap_missing(cancer2) cancer2 <- haven::zap_widths(cancer2) usethis::use_data(cancer2, overwrite = TRUE)
/data-raw/cancer2.R
permissive
ClinicoPath/KMunicate-package
R
false
false
381
r
## code to prepare `cancer` dataset goes here library(haven) cancer2 <- haven::read_dta(file = "http://www.stata-press.com/data/r16/cancer.dta") cancer2 <- haven::zap_formats(cancer2) cancer2 <- haven::zap_label(cancer2) cancer2 <- haven::zap_labels(cancer2) cancer2 <- haven::zap_missing(cancer2) cancer2 <- haven::zap_widths(cancer2) usethis::use_data(cancer2, overwrite = TRUE)
####Gaussian Quadrature Part Integration Innerfunc=function(zz,rstat,model, ww2=0.8049141,ww1=0.08131284,ww3=0.8049141,ww4=0.08131284, xx1=-1.65068,xx2=-0.5246476,xx3= 0.5246476,xx4= 1.65068){ if(zz==1){ ff1=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx1-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff2=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx2-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff3=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx3-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff4=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx4-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) outvalue=exp(lgamma(model$parameter$alpha1+0.5)-lgamma(model$parameter$alpha1))/sqrt(pi*model$parameter$beta1) }else{ ff1=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx1-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff2=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx2-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff3=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx3-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff4=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx4-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) outvalue=exp(lgamma(model$parameter$alpha0+0.5)-lgamma(model$parameter$alpha0))/sqrt(pi*model$parameter$beta0) } ff<-c(ff1,ff2,ff3,ff4) integralvalue=outvalue*(c(ww1,ww2,ww3,ww4)%*%ff) return(integralvalue) }
/R/Innerfunc.R
no_license
cran/BANFF
R
false
false
1,697
r
####Gaussian Quadrature Part Integration Innerfunc=function(zz,rstat,model, ww2=0.8049141,ww1=0.08131284,ww3=0.8049141,ww4=0.08131284, xx1=-1.65068,xx2=-0.5246476,xx3= 0.5246476,xx4= 1.65068){ if(zz==1){ ff1=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx1-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff2=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx2-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff3=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx3-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) ff4=(1+0.5*((rstat-sqrt(2)*model$parameter$eta1*xx4-model$parameter$gamma1)^2)/model$parameter$beta1)^(-model$parameter$alpha1-0.5) outvalue=exp(lgamma(model$parameter$alpha1+0.5)-lgamma(model$parameter$alpha1))/sqrt(pi*model$parameter$beta1) }else{ ff1=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx1-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff2=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx2-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff3=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx3-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) ff4=(1+0.5*((rstat-sqrt(2)*model$parameter$eta0*xx4-model$parameter$gamma0)^2)/model$parameter$beta0)^(-model$parameter$alpha0-0.5) outvalue=exp(lgamma(model$parameter$alpha0+0.5)-lgamma(model$parameter$alpha0))/sqrt(pi*model$parameter$beta0) } ff<-c(ff1,ff2,ff3,ff4) integralvalue=outvalue*(c(ww1,ww2,ww3,ww4)%*%ff) return(integralvalue) }
# All_Tumor_clinical_analyses.R # Author: Greg Poore # Date: Oct 8, 2018 # Purpose: To explore clinically-oriented differential expression and machine learning analyses # Load dependencies require(ggplot2) require(ggsci) require(limma) require(Glimma) require(edgeR) require(maftools) require(dplyr) require(TCGAmutations) require(doMC) numCores <- detectCores() registerDoMC(cores=numCores) #------------------------------------------------------ # Load data load("tcgaVbDataAndMetadataAndSNM.RData") load("alphaDiversityMetrics.RData") load("snmDataSampleTypeWithExpStrategyFINAL.RData") #------------------------------------------------------ # require(sevenbridges) # a <- Auth(token = "e5664ac8582f46b5b68c08a88381fbea", # platform = "cgc") # a$api(path = "projects", method = "GET") # a$project() # # p <- a$project(id = "jkanbar/tcga-kraken") # p$file() # tmp <- p$file(fields="_all", complete = TRUE) # # # cols2Keep <- c( # "reference_genome", # "case_id", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "aliquot_id", # "data_subtype", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "data_format", # "data_type", # "sample_type", # "primary_site", # "sample_id" # ) # tmp3 <- list() # for(ii in 1:length(tmp)){ # tmp3[[ii]] <- data.frame(filename = tmp[[ii]]$name, as.data.frame(tmp[[ii]]$`.->metadata`)[,cols2Keep]) # } # # tmp4 <- do.call("rbind", tmp3) # cgcMetadataKrakenProj <- tmp4 # save(cgcMetadataKrakenProj, file = "cgcMetadataKrakenProj.RData") #------------------------------------------------------ load("cgcMetadataKrakenProj.RData") load("cgcAPIMetadataJoined.RData") # metadataSamplesAllCGC <- left_join(metadataSamplesAll, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # 
rownames(metadataSamplesAllCGC) <- rownames(metadataSamplesAll) # head(metadataSamplesAllCGC,2) # # metadataSamplesAllQCCGC <- left_join(metadataSamplesAllQC, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # rownames(metadataSamplesAllQCCGC) <- rownames(metadataSamplesAllQC) # head(metadataSamplesAllQCCGC,2) # # metadataSamplesAllQCSurvivalCGC <- left_join(metadataSamplesAllQCSurvival, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # rownames(metadataSamplesAllQCSurvivalCGC) <- rownames(metadataSamplesAllQCSurvival) # head(metadataSamplesAllQCSurvivalCGC,2) # # save(metadataSamplesAllCGC, metadataSamplesAllQCCGC, metadataSamplesAllQCSurvivalCGC, # file = "cgcAPIMetadataJoined.RData") #------------------------------------------------------ # Load clinical metadata from TCGAmutations ## Load available TCGA datasets and extract radiation information availableStudies <- TCGAmutations::tcga_available() for(tcgaStudy in head(availableStudies$Study_Abbreviation,-1)){ tcga_load(study = tcgaStudy) } accClinicalMetadata <- as.data.frame(getClinicalData(tcga_acc_mc3)) blcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_blca_mc3)) # brcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_brca_mc3)) # cescClinicalMetadata <- as.data.frame(getClinicalData(tcga_cesc_mc3)) cholClinicalMetadata <- as.data.frame(getClinicalData(tcga_chol_mc3)) coadClinicalMetadata <- as.data.frame(getClinicalData(tcga_coad_mc3)) # dlbcClinicalMetadata <- as.data.frame(getClinicalData(tcga_dlbc_mc3)) 
escaClinicalMetadata <- as.data.frame(getClinicalData(tcga_esca_mc3)) gbmClinicalMetadata <- as.data.frame(getClinicalData(tcga_gbm_mc3)) hnscClinicalMetadata <- as.data.frame(getClinicalData(tcga_hnsc_mc3)) # kichClinicalMetadata <- as.data.frame(getClinicalData(tcga_kich_mc3)) kircClinicalMetadata <- as.data.frame(getClinicalData(tcga_kirc_mc3)) # kirpClinicalMetadata <- as.data.frame(getClinicalData(tcga_kirp_mc3)) lamlClinicalMetadata <- as.data.frame(getClinicalData(tcga_laml_mc3)) lggClinicalMetadata <- as.data.frame(getClinicalData(tcga_lgg_mc3)) # lihcClinicalMetadata <- as.data.frame(getClinicalData(tcga_lihc_mc3)) # luadClinicalMetadata <- as.data.frame(getClinicalData(tcga_luad_mc3)) # luscClinicalMetadata <- as.data.frame(getClinicalData(tcga_lusc_mc3)) mesoClinicalMetadata <- as.data.frame(getClinicalData(tcga_meso_mc3)) ovClinicalMetadata <- as.data.frame(getClinicalData(tcga_ov_mc3)) # paadClinicalMetadata <- as.data.frame(getClinicalData(tcga_paad_mc3)) pcpgClinicalMetadata <- as.data.frame(getClinicalData(tcga_pcpg_mc3)) pradClinicalMetadata <- as.data.frame(getClinicalData(tcga_prad_mc3)) # readClinicalMetadata <- as.data.frame(getClinicalData(tcga_read_mc3)) sarcClinicalMetadata <- as.data.frame(getClinicalData(tcga_sarc_mc3)) skcmClinicalMetadata <- as.data.frame(getClinicalData(tcga_skcm_mc3)) # stadClinicalMetadata <- as.data.frame(getClinicalData(tcga_stad_mc3)) # tgctClinicalMetadata <- as.data.frame(getClinicalData(tcga_tgct_mc3)) thcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_thca_mc3)) # thymClinicalMetadata <- as.data.frame(getClinicalData(tcga_thym_mc3)) ucecClinicalMetadata <- as.data.frame(getClinicalData(tcga_ucec_mc3)) # ucsClinicalMetadata <- as.data.frame(getClinicalData(tcga_ucs_mc3)) uvmClinicalMetadata <- as.data.frame(getClinicalData(tcga_uvm_mc3)) # cols2Keep <- c( # "Tumor_Sample_Barcode", # "patient_id", # "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file # 
"postoperative_rx_tx", # "radiation_therapy" # ) # ind <- 1 # listClinData <- list() # for(studyClinData in grep("ClinicalMetadata$", ls(), value = TRUE)){ # dfClinData <- get(studyClinData) # dfClinData$study <- factor(studyClinData) # dfClinData$bcr_patient_uuid <- toupper(dfClinData$bcr_patient_uuid) # if(any(grepl("^radiation_therapy$",colnames(dfClinData), ignore.case = TRUE))){ # print(studyClinData) # listClinData[[ind]] <- dfClinData[,cols2Keep] # ind <- ind + 1 # } # else{next} # } # # mergedClinicalDataDF <- do.call("rbind", listClinData) # dim(mergedClinicalDataDF) # sum(duplicated(mergedClinicalDataDF)) # Shows lots of duplicated rows, so make unique # uniqueMergedClinicalDataDF <- unique(mergedClinicalDataDF) # table(uniqueMergedClinicalDataDF$radiation_therapy) # # dim(uniqueMergedClinicalDataDF) #------------- COAD: MSI vs. MSS -------------# # Data alignment coadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "microsatellite_instability", "kras_mutation_found", "colon_polyps_present", "loss_expression_of_mismatch_repair_proteins_by_ihc" ) coadMetadataCGC <- metadataSamplesAllCGC[metadataSamplesAllCGC$disease_type == "Colon Adenocarcinoma",] coadMetadataCGC$case_uuid <- toupper(coadMetadataCGC$case_uuid) coadClinicalMetadata$bcr_patient_uuid <- toupper(coadClinicalMetadata$bcr_patient_uuid) coadMetadataCGCClinical <- left_join(coadMetadataCGC, coadClinicalMetadata[,coadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(coadMetadataCGCClinical) <- rownames(coadMetadataCGC) # Subset data coadMetadataCGCClinical_microsatellite <- coadMetadataCGCClinical[!is.na(coadMetadataCGCClinical$microsatellite_instability),] coadMetadataCGCClinical_microsatellitePT <- droplevels(coadMetadataCGCClinical_microsatellite[coadMetadataCGCClinical_microsatellite$sample_type == "Primary Tumor",]) #--------------------------- voomMetadata <- 
coadMetadataCGCClinical_microsatellitePT voomCountData <- t(vbDataBarnDFReconciled[rownames(voomMetadata),]) # Differential abundance analysis limmaFormula <- formula(~0 + microsatellite_instability + data_submitting_center_label + platform) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(microsatelliteinstabilityYES - microsatelliteinstabilityNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - COAD Primary Tumors - Microsatellite Instability (Y|N = 18|100)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = coadMetadataCGCClinical_microsatellitePT$microsatellite_instability, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge) # predMSSvsMSI.R coadMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Colon Adenocarcinoma",] coadMetadataQCCGC$case_uuid <- toupper(coadMetadataQCCGC$case_uuid) coadClinicalMetadata$bcr_patient_uuid <- toupper(coadClinicalMetadata$bcr_patient_uuid) coadMetadataQCCGCClinical <- left_join(coadMetadataQCCGC, coadClinicalMetadata[,coadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(coadMetadataQCCGCClinical) <- 
rownames(coadMetadataQCCGC) # Subset data coadMetadataQCCGCClinical_microsatellite <- coadMetadataQCCGCClinical[!is.na(coadMetadataQCCGCClinical$microsatellite_instability),] coadMetadataQCCGCClinical_microsatellitePT <- droplevels(coadMetadataQCCGCClinical_microsatellite[coadMetadataQCCGCClinical_microsatellite$sample_type == "Primary Tumor",]) #--------------------------- source("predMSSvsMSI.R") customGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, n.minobsinnode = 1) tmp <- predMSSvsMSI(qcMLMetadata = coadMetadataQCCGCClinical_microsatellitePT, qcMLDataSNM = snmDataSampleType[rownames(coadMetadataQCCGCClinical_microsatellitePT),], cancerTypeString = "Colon Adenocarcinoma", sampleTypeComparison = "Primary Tumor", caretModel = "gbm", numResampleIter = 3, numKFold = 4, trainSetProp = 0.5, caretTuneGrid = customGBMGrid, ggPath = "./roc-ggplots-clinical") #-----------------------------------------# #------------- Sanity checks -------------# #-----------------------------------------# require(dplyr) require(ggplot2) require(ggpubr) require(bigrquery) #------------- HPV status across all cancers -------------# clinical_table = "[isb-cgc:tcga_201607_beta.Clinical_data]" cloud_project_workshop = "hybrid-coyote-219120" sqlQuery = paste("SELECT ParticipantBarcode, Study, hpv_calls, hpv_status ", "FROM ", clinical_table,sep="") sqlQuery hpv_table = query_exec(sqlQuery,project = cloud_project_workshop) isbcgcHPV <- hpv_table hpvPancancerMeta <- left_join(metadataSamplesAllQCCGC, isbcgcHPV, by = c("case_id" = "ParticipantBarcode") ) hpvPancancerData <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(metadataSamplesAllQCCGC),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hpvPancancerCombined <- droplevels(cbind(hpvPancancerMeta, hpvPancancerData)) interactVec <- as.character(interaction(hpvPancancerCombined$investigation, hpvPancancerCombined$hpv_status, sep = " ")) 
interactVec[which(is.na(interactVec))] <- as.character(hpvPancancerCombined$investigation[which(is.na(interactVec))]) interactVec <- factor(interactVec) hpvPancancerCombined$hpvInteract <- interactVec # hpvPancancerCombined %>% # filter((sample_type %in% c("Primary Tumor")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> ptGrandMeans # # hpvPancancerCombined %>% # filter((sample_type %in% c("Solid Tissue Normal")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> stnGrandMeans # # hpvPancancerCombined %>% # filter((sample_type %in% c("Blood Derived Normal")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> bdnGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Primary Tumor")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> ptGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Solid Tissue Normal")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> stnGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> bdnGrandMeans allGrandMeans <- data.frame(sample_type = c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"), means = rbind(ptGrandMeans, stnGrandMeans, bdnGrandMeans)) hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) & !(hpv_status %in% c("Indeterminate"))) %>% ggboxplot(x = "hpvInteract", y = "HPV", color = "sample_type", # add = "median_iqr", # facet.by = "sample_type", # palette = "Blues", xlab = "Disease Type", ylab = "SNM Normalized Abundance", title = "Pancancer Comparison of Alphapapillomavirus Genus Abundance", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + geom_hline(data = allGrandMeans, 
aes(yintercept = mean), linetype = 2) + theme(plot.title = element_text(hjust = 0.5)) + rotate_x_text(angle = 30) -> p facet(p, facet.by = "sample_type", nrow = 3, ncol = 1) hpvCervicalCancerComparisons <- list( c("TCGA-CESC Positive", "TCGA-CESC Negative")) hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Primary Tumor")) & !(hpv_status %in% c("Indeterminate")) & (disease_type == "Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma")) %>% ggboxplot(x = "hpvInteract", y = "HPV", # color = "sample_type", add = "jitter", facet.by = "sample_type", # palette = pal_nejm(), xlab = "Clinical HPV Status", ylab = "SNM Normalized Abundance", ylim = c(-11, 20), title = "Pancancer Comparison of Alphapapillomavirus Genus Abundance in Cervical Cancer", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("TCGA-CESC Negative" = "Negative", "TCGA-CESC Positive" = "Positive")) + scale_color_nejm() + rotate_x_text(angle = 30) + stat_compare_means(comparisons = hpvCervicalCancerComparisons, label = "p.signif", method.args = list(alternative = "greater")) -> p# Add pairwise comparisons p-value ggsave(p, filename = "HPV in CESC.png", path = "./Clinical Validation Plots", dpi = "retina", units = "in", height = 5, width = 4) #------------- LCV status in stomach cancers -------------# stadMasterPatientTable <- read.csv(file = "STAD_Master_Patient_Table_20140207.csv", stringsAsFactors = FALSE) stadPancancerMeta <- left_join(metadataSamplesAllQCCGC, stadMasterPatientTable, by = c("case_id" = "TCGA.barcode") ) rownames(stadPancancerMeta) <- rownames(metadataSamplesAllQCCGC) lcvPancancerData <- data.frame(LCV = snmDataSampleTypeWithExpStrategy[rownames(stadPancancerMeta),"k__Viruses.o__Herpesvirales.f__Herpesviridae.g__Lymphocryptovirus"], HPylori = 
snmDataSampleTypeWithExpStrategy[rownames(stadPancancerMeta),"k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter"]) lcvPancancerCombined <- droplevels(cbind(stadPancancerMeta, lcvPancancerData)) lcvComparisons <- list( c("EBV", "CIN"), c("EBV", "GS"), c("EBV","MSI") ) lcvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) & !(is.na(Molecular.Subtype)) & (disease_type == "Stomach Adenocarcinoma")) %>% ggboxplot(x = "Molecular.Subtype", y = "LCV", # color = "sample_type", add = "jitter", facet.by = "sample_type", # palette = "lancet", ylim = c(-5, 22), xlab = "STAD Molecular subtype (The Cancer Genome Atlas Research Network, 2014. Nature)", ylab = "SNM Normalized Abundance", title = "Pancancer Comparison of Lymphocryptovirus Genus Abundance in Stomach Adenocarcinoma", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + scale_color_nejm() + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = lcvComparisons, label = "p.signif", method = "wilcox.test") -> p # Add pairwise comparisons p-value ggsave(p, filename = "EBV in STAD.png", path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 7) hpyloriComparisons <- list( c("Primary Tumor", "Solid Tissue Normal")) lcvPancancerCombined %>% filter((sample_type %in% c("Solid Tissue Normal", "Primary Tumor")) & (disease_type == "Stomach Adenocarcinoma")) %>% # filter(experimental_strategy == "WGS") %>% ggboxplot(x = "sample_type", y = "HPylori", # color = "sample_type", add = "jitter", line.color = "gray", # facet.by = "sample_type", palette = "lancet", # ylim = c(-5, 25), xlab = "Sample Type", ylab = "SNM Normalized Abundance", title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", # legend = "right", # order = c("Solid Tissue Normal", "Primary 
Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + # rotate_x_text(angle = 15) + stat_compare_means(comparisons = hpyloriComparisons, method = "wilcox.test", # ref.group = "Solid Tissue Normal", # comparisons = hpyloriComparisons, method.args = list(alternative = "less")) -> p# Add pairwise comparisons p-value ggsave(p, filename = "H Pylori in STAD.png", path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 4) # hpyloriComparisons <- list( c("Primary Tumor", "Solid Tissue Normal")) # lcvPancancerCombined %>% # filter((sample_type %in% c("Solid Tissue Normal", "Primary Tumor")) & # (disease_type == "Stomach Adenocarcinoma")) %>% # group_by(sample_id) %>% # filter(n() >= 2) %>% # & experimental_strategy == "RNA-Seq" # ggpaired(x = "sample_type", y = "HPylori", # color = "sample_type", # id = "case_uuid", # add = "jitter", # # facet.by = "sample_type", # palette = "lancet", # # ylim = c(-5, 25), # xlab = "Sample Type", ylab = "SNM Normalized Abundance", # title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", # # legend = "right", # # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), # legend.title = "Sample Type") + # theme(plot.title = element_text(hjust = 0.5)) + # stat_compare_means(comparisons = hpyloriComparisons, method = "t.test") # Add pairwise comparisons p-value #------------- HPV status in cervical cancer -------------# cervicalHPVMeta <- droplevels(metadataSamplesAllQC[metadataSamplesAllQC$disease_type == "Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma",]) cervicalHPVData <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(cervicalHPVMeta),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) cervicalHPVCombined <- cbind(cervicalHPVMeta, cervicalHPVData) cervicalHPVcomparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid 
Tissue Normal", "Blood Derived Normal")) cervicalHPVCombined %>% filter(sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) %>% ggboxplot(x = "sample_type", y = "HPV", color = "sample_type", add = "jitter", palette = "lancet", xlab = "Sample Type", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in Cervical Cancer", legend = "right", order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = cervicalHPVcomparisons, label = "p.signif") + # Add pairwise comparisons p-value stat_compare_means(label.y = -10) # Add global p-value #------------- HPV status in HNSC -------------# # Data alignment hnscClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "hpv_status_by_p16_testing", "hpv_status_by_ish_testing" ) hnscMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Head and Neck Squamous Cell Carcinoma",] hnscMetadataQCCGC$case_uuid <- toupper(hnscMetadataQCCGC$case_uuid) hnscClinicalMetadata$bcr_patient_uuid <- toupper(hnscClinicalMetadata$bcr_patient_uuid) hnscMetadataQCCGCClinical <- left_join(hnscMetadataQCCGC, hnscClinicalMetadata[,hnscClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(hnscMetadataQCCGCClinical) <- rownames(hnscMetadataQCCGC) # Subset data hnscMetadataQCCGCClinical_HPVp16 <- droplevels(hnscMetadataQCCGCClinical[!is.na(hnscMetadataQCCGCClinical$hpv_status_by_p16_testing),]) hnscMetadataQCCGCClinical_HPVish <- droplevels(hnscMetadataQCCGCClinical[!is.na(hnscMetadataQCCGCClinical$hpv_status_by_ish_testing),]) hnscHPVp16Data <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(hnscMetadataQCCGCClinical_HPVp16),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hnscHPVishData <- data.frame(HPV = 
snmDataSampleTypeWithExpStrategy[rownames(hnscMetadataQCCGCClinical_HPVish),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hnscHPVp16Combined <- cbind(hnscMetadataQCCGCClinical_HPVp16, hnscHPVp16Data) hnscHPVishCombined <- cbind(hnscMetadataQCCGCClinical_HPVish, hnscHPVishData) testType <- factor(c(rep("p16 Testing",dim(hnscHPVp16Combined)[1]), rep("ISH Testing", dim(hnscHPVishCombined)[1]))) testValue <- factor(c(as.character(hnscHPVp16Combined$hpv_status_by_p16_testing), as.character(hnscHPVishCombined$hpv_status_by_ish_testing))) hnscHPVbothCombined <- cbind(rbind(hnscHPVp16Combined,hnscHPVishCombined),testType, testValue) hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVbothCombined %>% filter(sample_type == "Primary Tumor") %>% filter(!is.na(hpv_status_by_p16_testing)) %>% filter(!is.na(hpv_status_by_ish_testing)) %>% ggboxplot(x = "testValue", y = "HPV", # color = "testValue", facet.by = "testType", add = "jitter", # palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", ylim = c(-3, 18), title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "Clinical HPV Status", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + scale_color_nejm() + rotate_x_text(angle = 30) + stat_compare_means(comparisons = hnscHPVcomparisons, label = "p.signif", method.args = list(alternative = "less"), method = "t.test") -> p # Add pairwise comparisons p-value ggsave(p, filename = "HPV in HNSCC.png", path = "./Clinical Validation Plots", dpi = "retina", units = "in", height = 4, width = 3) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVp16Combined %>% filter(sample_type == "Primary Tumor") %>% ggboxplot(x = 
"hpv_status_by_p16_testing", y = "HPV", color = "hpv_status_by_p16_testing", add = "jitter", palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "HPV Status by\nP16 Testing", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = hnscHPVcomparisons, method.args = list(alternative = "less"), method = "t.test") # Add pairwise comparisons p-value hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVishCombined %>% filter(sample_type == "Primary Tumor") %>% ggboxplot(x = "hpv_status_by_ish_testing", y = "HPV", color = "hpv_status_by_ish_testing", add = "jitter", palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "HPV Status by\nISH Testing", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = hnscHPVcomparisons, method.args = list(alternative = "less"), method = "t.test") # Add pairwise comparisons p-value #------------- HBV/HCV status in LIHC -------------# # Data alignment lihcClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "history_hepato_carcinoma_risk_factors" ) lihcMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Liver Hepatocellular Carcinoma",] lihcMetadataQCCGC$case_uuid <- toupper(lihcMetadataQCCGC$case_uuid) lihcClinicalMetadata$bcr_patient_uuid <- toupper(lihcClinicalMetadata$bcr_patient_uuid) lihcMetadataQCCGCClinical <- left_join(lihcMetadataQCCGC, lihcClinicalMetadata[,lihcClinicalCols2Keep], by = c("case_uuid" = 
"bcr_patient_uuid")) rownames(lihcMetadataQCCGCClinical) <- rownames(lihcMetadataQCCGC) # Subset data lihcMetadataQCCGCClinical_Riskfactors <- droplevels(lihcMetadataQCCGCClinical[!is.na(lihcMetadataQCCGCClinical$history_hepato_carcinoma_risk_factors),]) hbvHcvGenera <- c("k__Viruses.f__Flaviviridae.g__Hepacivirus", "k__Viruses.f__Hepadnaviridae.g__Orthohepadnavirus") lihcHepData <- snmDataSampleTypeWithExpStrategy[rownames(lihcMetadataQCCGCClinical_Riskfactors),hbvHcvGenera] lihcHepDataCombined <- droplevels(cbind(lihcMetadataQCCGCClinical_Riskfactors, lihcHepData)) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) lihcHepDataCombined %>% filter((history_hepato_carcinoma_risk_factors %in% c("Hepatitis_C", "Hepatitis_B","Alcohol_consumption")) & (sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"))) %>% ggboxplot(x = "history_hepato_carcinoma_risk_factors", y = "k__Viruses.f__Hepadnaviridae.g__Orthohepadnavirus", # color = "sample_type", facet.by = "sample_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Clinically Assessed Patient History Risk Factors for Hepatocellular Carcinoma", ylab = "SNM Normalized Abundance", title = "Comparison of Orthohepadnavirus Genus Abundance in Liver Hepatocellular Carcinoma", legend = "right", legend.title = "Sample Type", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", "Hepatitis_B" = "Hep B", "Hepatitis_C" = "Hep C")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = lihcHepComparisons, label = "p.signif") -> p # Add pairwise comparisons p-value ggsave(p, filename = "HBV in LIHC.png", 
path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 7) # # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) # lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) # lihcHepDataCombined %>% # filter((history_hepato_carcinoma_risk_factors %in% c("Hepatitis_C", "Hepatitis_B","Alcohol_consumption")) & # (sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"))) %>% # filter(experimental_strategy == "WGS") %>% # ggboxplot(x = "history_hepato_carcinoma_risk_factors", y = "k__Viruses.f__Flaviviridae.g__Hepacivirus", # color = "sample_type", # facet.by = "sample_type", # palette = "lancet", # add = c("jitter"), # add = "jitter", # # shape = "sample_type", # xlab = "Clinically Assessed Patient History Risk Factors for Hepatocellular Carcinoma", ylab = "SNM Normalized Abundance", # title = "Comparison of Hepacivirus Genus Abundance in Liver Hepatocellular Carcinoma", # legend = "right", # legend.title = "Sample Type", # font.label = list(size = 14, face = "bold")) + # theme(plot.title = element_text(hjust = 0.5)) + # scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", # "Hepatitis_B" = "Hep B", # "Hepatitis_C" = "Hep C")) + # # rotate_x_text(angle = 30) + # stat_compare_means(comparisons = lihcHepComparisons, label = "p.signif") # Add pairwise comparisons p-value #------------- H pylori in STAD -------------# # Data alignment stadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "h_pylori_infection" ) stadMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Stomach Adenocarcinoma",] stadMetadataQCCGC$case_uuid <- toupper(stadMetadataQCCGC$case_uuid) stadClinicalMetadata$bcr_patient_uuid <- 
toupper(stadClinicalMetadata$bcr_patient_uuid) stadMetadataQCCGCClinical <- left_join(stadMetadataQCCGC, stadClinicalMetadata[,stadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(stadMetadataQCCGCClinical) <- rownames(stadMetadataQCCGC) # Subset data stadMetadataQCCGCClinical_HPylori <- droplevels(stadMetadataQCCGCClinical[!is.na(stadMetadataQCCGCClinical$h_pylori_infection),]) hpyloriGenus <- "k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter" stadHPyloriData <- data.frame(HPylori = snmDataSampleTypeWithExpStrategy[rownames(stadMetadataQCCGCClinical_HPylori), "k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter"]) stadHPyloriDataCombined <- droplevels(cbind(stadMetadataQCCGCClinical_HPylori, stadHPyloriData)) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) # lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) stadComparisons <- list( c( "Yes","No")) stadHPyloriDataCombined %>% filter(sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal")) %>% ggboxplot(x = "h_pylori_infection", y = "HPylori", color = "sample_type", facet.by = "sample_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Clinical H Pylori Testing Result", ylab = "SNM Normalized Abundance", title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", legend = "right", legend.title = "Sample Type", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + # scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", # "Hepatitis_B" = "Hep B", # "Hepatitis_C" = "Hep C")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = 
stadComparisons, label = "p.signif") # Add pairwise comparisons p-value #------------- Gender differences -------------# genderBug <- "k__Bacteria.p__Actinobacteria.c__Actinobacteria.o__Micrococcales.f__Intrasporangiaceae.g__Tetrasphaera" genderBugData <- data.frame(Tetrasphaera = snmDataSampleTypeWithExpStrategy[,genderBug]) genderDataCombined <- droplevels(cbind(metadataSamplesAllQC, genderBugData)) genderComparisons <- list( c( "MALE","FEMALE")) genderCancers <- c("Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma", "Breast Invasive Carcinoma", "Ovarian Serous Cystadenocarcinoma", "Prostate Adenocarcinoma", "Testicular Germ Cell Tumors", "Uterine Carcinosarcoma", "Uterine Corpus Endometrial Carcinoma") genderDataCombined %>% filter((sample_type %in% c("Solid Tissue Normal")) & !(disease_type %in% genderCancers)) %>% ggboxplot(x = "gender", y = "Tetrasphaera", color = "gender", facet.by = "disease_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Gender", ylab = "SNM Normalized Abundance", ylim = c(10,18), title = "Comparison of Tetrasphaera Genus Abundance Across Genders in Solid Tissue Normals", # legend = "right", legend.title = "Gender") + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("FEMALE" = "Female", "MALE" = "Male")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = genderComparisons, label.y = 17, label = "p.signif") # Add pairwise comparisons p-value #------------- COAD: KRAS Mutation -------------# # Subset data coadMetadataCGCClinical_kras <- coadMetadataCGCClinical[!is.na(coadMetadataCGCClinical$kras_mutation_found),] coadMetadataCGCClinical_krasPT <- droplevels(coadMetadataCGCClinical_kras[coadMetadataCGCClinical_kras$sample_type == "Primary Tumor",]) voomMetadata <- coadMetadataCGCClinical_krasPT voomCountData <- counts <- t(vbDataBarnDFReconciled[rownames(coadMetadataCGCClinical_krasPT),]) # Differential abundance analysis limmaFormula <- 
formula(~0 + kras_mutation_found + data_submitting_center_label + platform) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(krasmutationfoundYES - krasmutationfoundNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - COAD Primary Tumors - KRAS Mutation (Y|N = 28|30)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = voomMetadata$kras_mutation_found, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge) #------------- LUAD: Smoking vs nonsmoking -------------# # Checking which columns to keep table(luadClinicalMetadata$kras_mutation_found) # Y|N 23|39 table(luadClinicalMetadata$tobacco_smoking_history) # Data alignment luadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "tobacco_smoking_history", # See meaning of values here: https://groups.google.com/forum/#!topic/cbioportal/irEXZRj9Who "number_pack_years_smoked", "kras_mutation_found" ) luadMetadataCGC <- metadataSamplesAllCGC[metadataSamplesAllCGC$disease_type == "Lung Adenocarcinoma",] luadMetadataCGC$case_uuid <- toupper(luadMetadataCGC$case_uuid) 
luadClinicalMetadata$bcr_patient_uuid <- toupper(luadClinicalMetadata$bcr_patient_uuid) luadMetadataCGCClinical <- left_join(luadMetadataCGC, luadClinicalMetadata[,luadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(luadMetadataCGCClinical) <- rownames(luadMetadataCGC) # Subset data luadMetadataCGCClinical_smoking <- luadMetadataCGCClinical[!is.na(luadMetadataCGCClinical$tobacco_smoking_history),] luadMetadataCGCClinical_kras <- luadMetadataCGCClinical[!is.na(luadMetadataCGCClinical$kras_mutation_found),] table(luadMetadataCGCClinical_smoking$sample_type) # luadMetadataCGCClinical_smoking <- luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$tobacco_smoking_history %in% c("1","3"),] luadMetadataCGCClinical_smoking$smokingHistory <- ordered(ifelse(luadMetadataCGCClinical_smoking$tobacco_smoking_history == 1, yes = "Nonsmoker", no = "Smoker"), levels = c("Nonsmoker","Smoker")) luadMetadataCGCClinical_smokingPT <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Primary Tumor",]) luadMetadataCGCClinical_smokingSTN <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Solid Tissue Normal",]) luadMetadataCGCClinical_smokingBDN <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Blood Derived Normal",]) luadMetadataCGCClinical_krasPT <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Primary Tumor",]) luadMetadataCGCClinical_krasSTN <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Solid Tissue Normal",]) luadMetadataCGCClinical_krasBDN <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Blood Derived Normal",]) voomMetadata <- luadMetadataCGCClinical_krasPT voomCountData <- t(vbDataBarnDFReconciled[rownames(voomMetadata),]) table(voomMetadata$smokingHistory) table(voomMetadata$kras_mutation_found) # 
Differential abundance analysis limmaFormula <- formula(~0 + kras_mutation_found + data_submitting_center_label) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(krasmutationfoundYES - krasmutationfoundNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - LUAD Primary Tumor - KRAS Mutation (Yes|No = 26|55)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = voomMetadata$kras_mutation_found, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge)
/r_scripts/All_Tumor_clinical_analysesFA.R
no_license
pratyaysengupta/tcga
R
false
false
45,606
r
# All_Tumor_clinical_analyses.R # Author: Greg Poore # Date: Oct 8, 2018 # Purpose: To explore clinically-oriented differential expression and machine learning analyses # Load dependencies require(ggplot2) require(ggsci) require(limma) require(Glimma) require(edgeR) require(maftools) require(dplyr) require(TCGAmutations) require(doMC) numCores <- detectCores() registerDoMC(cores=numCores) #------------------------------------------------------ # Load data load("tcgaVbDataAndMetadataAndSNM.RData") load("alphaDiversityMetrics.RData") load("snmDataSampleTypeWithExpStrategyFINAL.RData") #------------------------------------------------------ # require(sevenbridges) # a <- Auth(token = "e5664ac8582f46b5b68c08a88381fbea", # platform = "cgc") # a$api(path = "projects", method = "GET") # a$project() # # p <- a$project(id = "jkanbar/tcga-kraken") # p$file() # tmp <- p$file(fields="_all", complete = TRUE) # # # cols2Keep <- c( # "reference_genome", # "case_id", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "aliquot_id", # "data_subtype", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "data_format", # "data_type", # "sample_type", # "primary_site", # "sample_id" # ) # tmp3 <- list() # for(ii in 1:length(tmp)){ # tmp3[[ii]] <- data.frame(filename = tmp[[ii]]$name, as.data.frame(tmp[[ii]]$`.->metadata`)[,cols2Keep]) # } # # tmp4 <- do.call("rbind", tmp3) # cgcMetadataKrakenProj <- tmp4 # save(cgcMetadataKrakenProj, file = "cgcMetadataKrakenProj.RData") #------------------------------------------------------ load("cgcMetadataKrakenProj.RData") load("cgcAPIMetadataJoined.RData") # metadataSamplesAllCGC <- left_join(metadataSamplesAll, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # 
rownames(metadataSamplesAllCGC) <- rownames(metadataSamplesAll) # head(metadataSamplesAllCGC,2) # # metadataSamplesAllQCCGC <- left_join(metadataSamplesAllQC, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # rownames(metadataSamplesAllQCCGC) <- rownames(metadataSamplesAllQC) # head(metadataSamplesAllQCCGC,2) # # metadataSamplesAllQCSurvivalCGC <- left_join(metadataSamplesAllQCSurvival, # cgcMetadataKrakenProj[, -which(names(cgcMetadataKrakenProj) %in% # c("reference_genome", # "experimental_strategy", # "disease_type", # "aliquot_uuid", # "gender", # "sample_uuid", # "platform", # "investigation", # "case_uuid", # "sample_type", # "primary_site"))], # by = "filename") # rownames(metadataSamplesAllQCSurvivalCGC) <- rownames(metadataSamplesAllQCSurvival) # head(metadataSamplesAllQCSurvivalCGC,2) # # save(metadataSamplesAllCGC, metadataSamplesAllQCCGC, metadataSamplesAllQCSurvivalCGC, # file = "cgcAPIMetadataJoined.RData") #------------------------------------------------------ # Load clinical metadata from TCGAmutations ## Load available TCGA datasets and extract radiation information availableStudies <- TCGAmutations::tcga_available() for(tcgaStudy in head(availableStudies$Study_Abbreviation,-1)){ tcga_load(study = tcgaStudy) } accClinicalMetadata <- as.data.frame(getClinicalData(tcga_acc_mc3)) blcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_blca_mc3)) # brcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_brca_mc3)) # cescClinicalMetadata <- as.data.frame(getClinicalData(tcga_cesc_mc3)) cholClinicalMetadata <- as.data.frame(getClinicalData(tcga_chol_mc3)) coadClinicalMetadata <- as.data.frame(getClinicalData(tcga_coad_mc3)) # dlbcClinicalMetadata <- as.data.frame(getClinicalData(tcga_dlbc_mc3)) 
escaClinicalMetadata <- as.data.frame(getClinicalData(tcga_esca_mc3)) gbmClinicalMetadata <- as.data.frame(getClinicalData(tcga_gbm_mc3)) hnscClinicalMetadata <- as.data.frame(getClinicalData(tcga_hnsc_mc3)) # kichClinicalMetadata <- as.data.frame(getClinicalData(tcga_kich_mc3)) kircClinicalMetadata <- as.data.frame(getClinicalData(tcga_kirc_mc3)) # kirpClinicalMetadata <- as.data.frame(getClinicalData(tcga_kirp_mc3)) lamlClinicalMetadata <- as.data.frame(getClinicalData(tcga_laml_mc3)) lggClinicalMetadata <- as.data.frame(getClinicalData(tcga_lgg_mc3)) # lihcClinicalMetadata <- as.data.frame(getClinicalData(tcga_lihc_mc3)) # luadClinicalMetadata <- as.data.frame(getClinicalData(tcga_luad_mc3)) # luscClinicalMetadata <- as.data.frame(getClinicalData(tcga_lusc_mc3)) mesoClinicalMetadata <- as.data.frame(getClinicalData(tcga_meso_mc3)) ovClinicalMetadata <- as.data.frame(getClinicalData(tcga_ov_mc3)) # paadClinicalMetadata <- as.data.frame(getClinicalData(tcga_paad_mc3)) pcpgClinicalMetadata <- as.data.frame(getClinicalData(tcga_pcpg_mc3)) pradClinicalMetadata <- as.data.frame(getClinicalData(tcga_prad_mc3)) # readClinicalMetadata <- as.data.frame(getClinicalData(tcga_read_mc3)) sarcClinicalMetadata <- as.data.frame(getClinicalData(tcga_sarc_mc3)) skcmClinicalMetadata <- as.data.frame(getClinicalData(tcga_skcm_mc3)) # stadClinicalMetadata <- as.data.frame(getClinicalData(tcga_stad_mc3)) # tgctClinicalMetadata <- as.data.frame(getClinicalData(tcga_tgct_mc3)) thcaClinicalMetadata <- as.data.frame(getClinicalData(tcga_thca_mc3)) # thymClinicalMetadata <- as.data.frame(getClinicalData(tcga_thym_mc3)) ucecClinicalMetadata <- as.data.frame(getClinicalData(tcga_ucec_mc3)) # ucsClinicalMetadata <- as.data.frame(getClinicalData(tcga_ucs_mc3)) uvmClinicalMetadata <- as.data.frame(getClinicalData(tcga_uvm_mc3)) # cols2Keep <- c( # "Tumor_Sample_Barcode", # "patient_id", # "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file # 
"postoperative_rx_tx", # "radiation_therapy" # ) # ind <- 1 # listClinData <- list() # for(studyClinData in grep("ClinicalMetadata$", ls(), value = TRUE)){ # dfClinData <- get(studyClinData) # dfClinData$study <- factor(studyClinData) # dfClinData$bcr_patient_uuid <- toupper(dfClinData$bcr_patient_uuid) # if(any(grepl("^radiation_therapy$",colnames(dfClinData), ignore.case = TRUE))){ # print(studyClinData) # listClinData[[ind]] <- dfClinData[,cols2Keep] # ind <- ind + 1 # } # else{next} # } # # mergedClinicalDataDF <- do.call("rbind", listClinData) # dim(mergedClinicalDataDF) # sum(duplicated(mergedClinicalDataDF)) # Shows lots of duplicated rows, so make unique # uniqueMergedClinicalDataDF <- unique(mergedClinicalDataDF) # table(uniqueMergedClinicalDataDF$radiation_therapy) # # dim(uniqueMergedClinicalDataDF) #------------- COAD: MSI vs. MSS -------------# # Data alignment coadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "microsatellite_instability", "kras_mutation_found", "colon_polyps_present", "loss_expression_of_mismatch_repair_proteins_by_ihc" ) coadMetadataCGC <- metadataSamplesAllCGC[metadataSamplesAllCGC$disease_type == "Colon Adenocarcinoma",] coadMetadataCGC$case_uuid <- toupper(coadMetadataCGC$case_uuid) coadClinicalMetadata$bcr_patient_uuid <- toupper(coadClinicalMetadata$bcr_patient_uuid) coadMetadataCGCClinical <- left_join(coadMetadataCGC, coadClinicalMetadata[,coadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(coadMetadataCGCClinical) <- rownames(coadMetadataCGC) # Subset data coadMetadataCGCClinical_microsatellite <- coadMetadataCGCClinical[!is.na(coadMetadataCGCClinical$microsatellite_instability),] coadMetadataCGCClinical_microsatellitePT <- droplevels(coadMetadataCGCClinical_microsatellite[coadMetadataCGCClinical_microsatellite$sample_type == "Primary Tumor",]) #--------------------------- voomMetadata <- 
coadMetadataCGCClinical_microsatellitePT voomCountData <- t(vbDataBarnDFReconciled[rownames(voomMetadata),]) # Differential abundance analysis limmaFormula <- formula(~0 + microsatellite_instability + data_submitting_center_label + platform) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(microsatelliteinstabilityYES - microsatelliteinstabilityNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - COAD Primary Tumors - Microsatellite Instability (Y|N = 18|100)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = coadMetadataCGCClinical_microsatellitePT$microsatellite_instability, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge) # predMSSvsMSI.R coadMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Colon Adenocarcinoma",] coadMetadataQCCGC$case_uuid <- toupper(coadMetadataQCCGC$case_uuid) coadClinicalMetadata$bcr_patient_uuid <- toupper(coadClinicalMetadata$bcr_patient_uuid) coadMetadataQCCGCClinical <- left_join(coadMetadataQCCGC, coadClinicalMetadata[,coadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(coadMetadataQCCGCClinical) <- 
rownames(coadMetadataQCCGC) # Subset data coadMetadataQCCGCClinical_microsatellite <- coadMetadataQCCGCClinical[!is.na(coadMetadataQCCGCClinical$microsatellite_instability),] coadMetadataQCCGCClinical_microsatellitePT <- droplevels(coadMetadataQCCGCClinical_microsatellite[coadMetadataQCCGCClinical_microsatellite$sample_type == "Primary Tumor",]) #--------------------------- source("predMSSvsMSI.R") customGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, n.minobsinnode = 1) tmp <- predMSSvsMSI(qcMLMetadata = coadMetadataQCCGCClinical_microsatellitePT, qcMLDataSNM = snmDataSampleType[rownames(coadMetadataQCCGCClinical_microsatellitePT),], cancerTypeString = "Colon Adenocarcinoma", sampleTypeComparison = "Primary Tumor", caretModel = "gbm", numResampleIter = 3, numKFold = 4, trainSetProp = 0.5, caretTuneGrid = customGBMGrid, ggPath = "./roc-ggplots-clinical") #-----------------------------------------# #------------- Sanity checks -------------# #-----------------------------------------# require(dplyr) require(ggplot2) require(ggpubr) require(bigrquery) #------------- HPV status across all cancers -------------# clinical_table = "[isb-cgc:tcga_201607_beta.Clinical_data]" cloud_project_workshop = "hybrid-coyote-219120" sqlQuery = paste("SELECT ParticipantBarcode, Study, hpv_calls, hpv_status ", "FROM ", clinical_table,sep="") sqlQuery hpv_table = query_exec(sqlQuery,project = cloud_project_workshop) isbcgcHPV <- hpv_table hpvPancancerMeta <- left_join(metadataSamplesAllQCCGC, isbcgcHPV, by = c("case_id" = "ParticipantBarcode") ) hpvPancancerData <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(metadataSamplesAllQCCGC),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hpvPancancerCombined <- droplevels(cbind(hpvPancancerMeta, hpvPancancerData)) interactVec <- as.character(interaction(hpvPancancerCombined$investigation, hpvPancancerCombined$hpv_status, sep = " ")) 
interactVec[which(is.na(interactVec))] <- as.character(hpvPancancerCombined$investigation[which(is.na(interactVec))]) interactVec <- factor(interactVec) hpvPancancerCombined$hpvInteract <- interactVec # hpvPancancerCombined %>% # filter((sample_type %in% c("Primary Tumor")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> ptGrandMeans # # hpvPancancerCombined %>% # filter((sample_type %in% c("Solid Tissue Normal")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> stnGrandMeans # # hpvPancancerCombined %>% # filter((sample_type %in% c("Blood Derived Normal")) & # !(hpv_status %in% c("Indeterminate", "Positive"))) %>% # summarise(mean = mean(HPV)) -> bdnGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Primary Tumor")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> ptGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Solid Tissue Normal")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> stnGrandMeans hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal")) & (hpv_status %in% c("Negative"))) %>% summarise(mean = mean(HPV)) -> bdnGrandMeans allGrandMeans <- data.frame(sample_type = c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"), means = rbind(ptGrandMeans, stnGrandMeans, bdnGrandMeans)) hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) & !(hpv_status %in% c("Indeterminate"))) %>% ggboxplot(x = "hpvInteract", y = "HPV", color = "sample_type", # add = "median_iqr", # facet.by = "sample_type", # palette = "Blues", xlab = "Disease Type", ylab = "SNM Normalized Abundance", title = "Pancancer Comparison of Alphapapillomavirus Genus Abundance", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + geom_hline(data = allGrandMeans, 
aes(yintercept = mean), linetype = 2) + theme(plot.title = element_text(hjust = 0.5)) + rotate_x_text(angle = 30) -> p facet(p, facet.by = "sample_type", nrow = 3, ncol = 1) hpvCervicalCancerComparisons <- list( c("TCGA-CESC Positive", "TCGA-CESC Negative")) hpvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Primary Tumor")) & !(hpv_status %in% c("Indeterminate")) & (disease_type == "Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma")) %>% ggboxplot(x = "hpvInteract", y = "HPV", # color = "sample_type", add = "jitter", facet.by = "sample_type", # palette = pal_nejm(), xlab = "Clinical HPV Status", ylab = "SNM Normalized Abundance", ylim = c(-11, 20), title = "Pancancer Comparison of Alphapapillomavirus Genus Abundance in Cervical Cancer", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("TCGA-CESC Negative" = "Negative", "TCGA-CESC Positive" = "Positive")) + scale_color_nejm() + rotate_x_text(angle = 30) + stat_compare_means(comparisons = hpvCervicalCancerComparisons, label = "p.signif", method.args = list(alternative = "greater")) -> p# Add pairwise comparisons p-value ggsave(p, filename = "HPV in CESC.png", path = "./Clinical Validation Plots", dpi = "retina", units = "in", height = 5, width = 4) #------------- LCV status in stomach cancers -------------# stadMasterPatientTable <- read.csv(file = "STAD_Master_Patient_Table_20140207.csv", stringsAsFactors = FALSE) stadPancancerMeta <- left_join(metadataSamplesAllQCCGC, stadMasterPatientTable, by = c("case_id" = "TCGA.barcode") ) rownames(stadPancancerMeta) <- rownames(metadataSamplesAllQCCGC) lcvPancancerData <- data.frame(LCV = snmDataSampleTypeWithExpStrategy[rownames(stadPancancerMeta),"k__Viruses.o__Herpesvirales.f__Herpesviridae.g__Lymphocryptovirus"], HPylori = 
snmDataSampleTypeWithExpStrategy[rownames(stadPancancerMeta),"k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter"]) lcvPancancerCombined <- droplevels(cbind(stadPancancerMeta, lcvPancancerData)) lcvComparisons <- list( c("EBV", "CIN"), c("EBV", "GS"), c("EBV","MSI") ) lcvPancancerCombined %>% filter((sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) & !(is.na(Molecular.Subtype)) & (disease_type == "Stomach Adenocarcinoma")) %>% ggboxplot(x = "Molecular.Subtype", y = "LCV", # color = "sample_type", add = "jitter", facet.by = "sample_type", # palette = "lancet", ylim = c(-5, 22), xlab = "STAD Molecular subtype (The Cancer Genome Atlas Research Network, 2014. Nature)", ylab = "SNM Normalized Abundance", title = "Pancancer Comparison of Lymphocryptovirus Genus Abundance in Stomach Adenocarcinoma", # legend = "right", # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + scale_color_nejm() + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = lcvComparisons, label = "p.signif", method = "wilcox.test") -> p # Add pairwise comparisons p-value ggsave(p, filename = "EBV in STAD.png", path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 7) hpyloriComparisons <- list( c("Primary Tumor", "Solid Tissue Normal")) lcvPancancerCombined %>% filter((sample_type %in% c("Solid Tissue Normal", "Primary Tumor")) & (disease_type == "Stomach Adenocarcinoma")) %>% # filter(experimental_strategy == "WGS") %>% ggboxplot(x = "sample_type", y = "HPylori", # color = "sample_type", add = "jitter", line.color = "gray", # facet.by = "sample_type", palette = "lancet", # ylim = c(-5, 25), xlab = "Sample Type", ylab = "SNM Normalized Abundance", title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", # legend = "right", # order = c("Solid Tissue Normal", "Primary 
Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + # rotate_x_text(angle = 15) + stat_compare_means(comparisons = hpyloriComparisons, method = "wilcox.test", # ref.group = "Solid Tissue Normal", # comparisons = hpyloriComparisons, method.args = list(alternative = "less")) -> p# Add pairwise comparisons p-value ggsave(p, filename = "H Pylori in STAD.png", path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 4) # hpyloriComparisons <- list( c("Primary Tumor", "Solid Tissue Normal")) # lcvPancancerCombined %>% # filter((sample_type %in% c("Solid Tissue Normal", "Primary Tumor")) & # (disease_type == "Stomach Adenocarcinoma")) %>% # group_by(sample_id) %>% # filter(n() >= 2) %>% # & experimental_strategy == "RNA-Seq" # ggpaired(x = "sample_type", y = "HPylori", # color = "sample_type", # id = "case_uuid", # add = "jitter", # # facet.by = "sample_type", # palette = "lancet", # # ylim = c(-5, 25), # xlab = "Sample Type", ylab = "SNM Normalized Abundance", # title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", # # legend = "right", # # order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), # legend.title = "Sample Type") + # theme(plot.title = element_text(hjust = 0.5)) + # stat_compare_means(comparisons = hpyloriComparisons, method = "t.test") # Add pairwise comparisons p-value #------------- HPV status in cervical cancer -------------# cervicalHPVMeta <- droplevels(metadataSamplesAllQC[metadataSamplesAllQC$disease_type == "Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma",]) cervicalHPVData <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(cervicalHPVMeta),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) cervicalHPVCombined <- cbind(cervicalHPVMeta, cervicalHPVData) cervicalHPVcomparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid 
Tissue Normal", "Blood Derived Normal")) cervicalHPVCombined %>% filter(sample_type %in% c("Blood Derived Normal", "Solid Tissue Normal", "Primary Tumor")) %>% ggboxplot(x = "sample_type", y = "HPV", color = "sample_type", add = "jitter", palette = "lancet", xlab = "Sample Type", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in Cervical Cancer", legend = "right", order = c("Solid Tissue Normal", "Primary Tumor", "Blood Derived Normal"), legend.title = "Sample Type") + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = cervicalHPVcomparisons, label = "p.signif") + # Add pairwise comparisons p-value stat_compare_means(label.y = -10) # Add global p-value #------------- HPV status in HNSC -------------# # Data alignment hnscClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "hpv_status_by_p16_testing", "hpv_status_by_ish_testing" ) hnscMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Head and Neck Squamous Cell Carcinoma",] hnscMetadataQCCGC$case_uuid <- toupper(hnscMetadataQCCGC$case_uuid) hnscClinicalMetadata$bcr_patient_uuid <- toupper(hnscClinicalMetadata$bcr_patient_uuid) hnscMetadataQCCGCClinical <- left_join(hnscMetadataQCCGC, hnscClinicalMetadata[,hnscClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(hnscMetadataQCCGCClinical) <- rownames(hnscMetadataQCCGC) # Subset data hnscMetadataQCCGCClinical_HPVp16 <- droplevels(hnscMetadataQCCGCClinical[!is.na(hnscMetadataQCCGCClinical$hpv_status_by_p16_testing),]) hnscMetadataQCCGCClinical_HPVish <- droplevels(hnscMetadataQCCGCClinical[!is.na(hnscMetadataQCCGCClinical$hpv_status_by_ish_testing),]) hnscHPVp16Data <- data.frame(HPV = snmDataSampleTypeWithExpStrategy[rownames(hnscMetadataQCCGCClinical_HPVp16),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hnscHPVishData <- data.frame(HPV = 
snmDataSampleTypeWithExpStrategy[rownames(hnscMetadataQCCGCClinical_HPVish),"k__Viruses.f__Papillomaviridae.g__Alphapapillomavirus"]) hnscHPVp16Combined <- cbind(hnscMetadataQCCGCClinical_HPVp16, hnscHPVp16Data) hnscHPVishCombined <- cbind(hnscMetadataQCCGCClinical_HPVish, hnscHPVishData) testType <- factor(c(rep("p16 Testing",dim(hnscHPVp16Combined)[1]), rep("ISH Testing", dim(hnscHPVishCombined)[1]))) testValue <- factor(c(as.character(hnscHPVp16Combined$hpv_status_by_p16_testing), as.character(hnscHPVishCombined$hpv_status_by_ish_testing))) hnscHPVbothCombined <- cbind(rbind(hnscHPVp16Combined,hnscHPVishCombined),testType, testValue) hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVbothCombined %>% filter(sample_type == "Primary Tumor") %>% filter(!is.na(hpv_status_by_p16_testing)) %>% filter(!is.na(hpv_status_by_ish_testing)) %>% ggboxplot(x = "testValue", y = "HPV", # color = "testValue", facet.by = "testType", add = "jitter", # palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", ylim = c(-3, 18), title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "Clinical HPV Status", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + scale_color_nejm() + rotate_x_text(angle = 30) + stat_compare_means(comparisons = hnscHPVcomparisons, label = "p.signif", method.args = list(alternative = "less"), method = "t.test") -> p # Add pairwise comparisons p-value ggsave(p, filename = "HPV in HNSCC.png", path = "./Clinical Validation Plots", dpi = "retina", units = "in", height = 4, width = 3) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVp16Combined %>% filter(sample_type == "Primary Tumor") %>% ggboxplot(x = 
"hpv_status_by_p16_testing", y = "HPV", color = "hpv_status_by_p16_testing", add = "jitter", palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "HPV Status by\nP16 Testing", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = hnscHPVcomparisons, method.args = list(alternative = "less"), method = "t.test") # Add pairwise comparisons p-value hnscHPVcomparisons <- list( c("Negative", "Positive")) hnscHPVishCombined %>% filter(sample_type == "Primary Tumor") %>% ggboxplot(x = "hpv_status_by_ish_testing", y = "HPV", color = "hpv_status_by_ish_testing", add = "jitter", palette = "lancet", xlab = "Clinical Testing for HPV", ylab = "SNM Normalized Abundance", title = "Comparison of Alphapapillomavirus Genus Abundance in\nHead and Neck Squamous Cell Carcinoma Primary Tumors", legend = "right", legend.title = "HPV Status by\nISH Testing", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = hnscHPVcomparisons, method.args = list(alternative = "less"), method = "t.test") # Add pairwise comparisons p-value #------------- HBV/HCV status in LIHC -------------# # Data alignment lihcClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "history_hepato_carcinoma_risk_factors" ) lihcMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Liver Hepatocellular Carcinoma",] lihcMetadataQCCGC$case_uuid <- toupper(lihcMetadataQCCGC$case_uuid) lihcClinicalMetadata$bcr_patient_uuid <- toupper(lihcClinicalMetadata$bcr_patient_uuid) lihcMetadataQCCGCClinical <- left_join(lihcMetadataQCCGC, lihcClinicalMetadata[,lihcClinicalCols2Keep], by = c("case_uuid" = 
"bcr_patient_uuid")) rownames(lihcMetadataQCCGCClinical) <- rownames(lihcMetadataQCCGC) # Subset data lihcMetadataQCCGCClinical_Riskfactors <- droplevels(lihcMetadataQCCGCClinical[!is.na(lihcMetadataQCCGCClinical$history_hepato_carcinoma_risk_factors),]) hbvHcvGenera <- c("k__Viruses.f__Flaviviridae.g__Hepacivirus", "k__Viruses.f__Hepadnaviridae.g__Orthohepadnavirus") lihcHepData <- snmDataSampleTypeWithExpStrategy[rownames(lihcMetadataQCCGCClinical_Riskfactors),hbvHcvGenera] lihcHepDataCombined <- droplevels(cbind(lihcMetadataQCCGCClinical_Riskfactors, lihcHepData)) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) lihcHepDataCombined %>% filter((history_hepato_carcinoma_risk_factors %in% c("Hepatitis_C", "Hepatitis_B","Alcohol_consumption")) & (sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"))) %>% ggboxplot(x = "history_hepato_carcinoma_risk_factors", y = "k__Viruses.f__Hepadnaviridae.g__Orthohepadnavirus", # color = "sample_type", facet.by = "sample_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Clinically Assessed Patient History Risk Factors for Hepatocellular Carcinoma", ylab = "SNM Normalized Abundance", title = "Comparison of Orthohepadnavirus Genus Abundance in Liver Hepatocellular Carcinoma", legend = "right", legend.title = "Sample Type", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", "Hepatitis_B" = "Hep B", "Hepatitis_C" = "Hep C")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = lihcHepComparisons, label = "p.signif") -> p # Add pairwise comparisons p-value ggsave(p, filename = "HBV in LIHC.png", 
path = "./Clinical Validation Plots",dpi = "retina", units = "in", height = 4, width = 7) # # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) # lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) # lihcHepDataCombined %>% # filter((history_hepato_carcinoma_risk_factors %in% c("Hepatitis_C", "Hepatitis_B","Alcohol_consumption")) & # (sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal"))) %>% # filter(experimental_strategy == "WGS") %>% # ggboxplot(x = "history_hepato_carcinoma_risk_factors", y = "k__Viruses.f__Flaviviridae.g__Hepacivirus", # color = "sample_type", # facet.by = "sample_type", # palette = "lancet", # add = c("jitter"), # add = "jitter", # # shape = "sample_type", # xlab = "Clinically Assessed Patient History Risk Factors for Hepatocellular Carcinoma", ylab = "SNM Normalized Abundance", # title = "Comparison of Hepacivirus Genus Abundance in Liver Hepatocellular Carcinoma", # legend = "right", # legend.title = "Sample Type", # font.label = list(size = 14, face = "bold")) + # theme(plot.title = element_text(hjust = 0.5)) + # scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", # "Hepatitis_B" = "Hep B", # "Hepatitis_C" = "Hep C")) + # # rotate_x_text(angle = 30) + # stat_compare_means(comparisons = lihcHepComparisons, label = "p.signif") # Add pairwise comparisons p-value #------------- H pylori in STAD -------------# # Data alignment stadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "h_pylori_infection" ) stadMetadataQCCGC <- metadataSamplesAllQCCGC[metadataSamplesAllQCCGC$disease_type == "Stomach Adenocarcinoma",] stadMetadataQCCGC$case_uuid <- toupper(stadMetadataQCCGC$case_uuid) stadClinicalMetadata$bcr_patient_uuid <- 
toupper(stadClinicalMetadata$bcr_patient_uuid) stadMetadataQCCGCClinical <- left_join(stadMetadataQCCGC, stadClinicalMetadata[,stadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(stadMetadataQCCGCClinical) <- rownames(stadMetadataQCCGC) # Subset data stadMetadataQCCGCClinical_HPylori <- droplevels(stadMetadataQCCGCClinical[!is.na(stadMetadataQCCGCClinical$h_pylori_infection),]) hpyloriGenus <- "k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter" stadHPyloriData <- data.frame(HPylori = snmDataSampleTypeWithExpStrategy[rownames(stadMetadataQCCGCClinical_HPylori), "k__Bacteria.p__Proteobacteria.c__Epsilonproteobacteria.o__Campylobacterales.f__Helicobacteraceae.g__Helicobacter"]) stadHPyloriDataCombined <- droplevels(cbind(stadMetadataQCCGCClinical_HPylori, stadHPyloriData)) # my_comparisons <- list( c("Primary Tumor", "Solid Tissue Normal"), c("Primary Tumor", "Blood Derived Normal"), c("Solid Tissue Normal", "Blood Derived Normal")) # lihcHepComparisons <- list( c("Hepatitis_B", "Hepatitis_C"), c("Hepatitis_B","Alcohol_consumption"), c("Hepatitis_C","Alcohol_consumption")) stadComparisons <- list( c( "Yes","No")) stadHPyloriDataCombined %>% filter(sample_type %in% c("Primary Tumor", "Solid Tissue Normal", "Blood Derived Normal")) %>% ggboxplot(x = "h_pylori_infection", y = "HPylori", color = "sample_type", facet.by = "sample_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Clinical H Pylori Testing Result", ylab = "SNM Normalized Abundance", title = "Comparison of Helicobacter Genus Abundance in Stomach Adenocarcinoma", legend = "right", legend.title = "Sample Type", font.label = list(size = 14, face = "bold")) + theme(plot.title = element_text(hjust = 0.5)) + # scale_x_discrete(labels=c("Alcohol_consumption" = "EtOH", # "Hepatitis_B" = "Hep B", # "Hepatitis_C" = "Hep C")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = 
stadComparisons, label = "p.signif") # Add pairwise comparisons p-value #------------- Gender differences -------------# genderBug <- "k__Bacteria.p__Actinobacteria.c__Actinobacteria.o__Micrococcales.f__Intrasporangiaceae.g__Tetrasphaera" genderBugData <- data.frame(Tetrasphaera = snmDataSampleTypeWithExpStrategy[,genderBug]) genderDataCombined <- droplevels(cbind(metadataSamplesAllQC, genderBugData)) genderComparisons <- list( c( "MALE","FEMALE")) genderCancers <- c("Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma", "Breast Invasive Carcinoma", "Ovarian Serous Cystadenocarcinoma", "Prostate Adenocarcinoma", "Testicular Germ Cell Tumors", "Uterine Carcinosarcoma", "Uterine Corpus Endometrial Carcinoma") genderDataCombined %>% filter((sample_type %in% c("Solid Tissue Normal")) & !(disease_type %in% genderCancers)) %>% ggboxplot(x = "gender", y = "Tetrasphaera", color = "gender", facet.by = "disease_type", palette = "lancet", add = c("jitter"), # add = "jitter", # shape = "sample_type", xlab = "Gender", ylab = "SNM Normalized Abundance", ylim = c(10,18), title = "Comparison of Tetrasphaera Genus Abundance Across Genders in Solid Tissue Normals", # legend = "right", legend.title = "Gender") + theme(plot.title = element_text(hjust = 0.5)) + scale_x_discrete(labels=c("FEMALE" = "Female", "MALE" = "Male")) + # rotate_x_text(angle = 30) + stat_compare_means(comparisons = genderComparisons, label.y = 17, label = "p.signif") # Add pairwise comparisons p-value #------------- COAD: KRAS Mutation -------------# # Subset data coadMetadataCGCClinical_kras <- coadMetadataCGCClinical[!is.na(coadMetadataCGCClinical$kras_mutation_found),] coadMetadataCGCClinical_krasPT <- droplevels(coadMetadataCGCClinical_kras[coadMetadataCGCClinical_kras$sample_type == "Primary Tumor",]) voomMetadata <- coadMetadataCGCClinical_krasPT voomCountData <- counts <- t(vbDataBarnDFReconciled[rownames(coadMetadataCGCClinical_krasPT),]) # Differential abundance analysis limmaFormula <- 
formula(~0 + kras_mutation_found + data_submitting_center_label + platform) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(krasmutationfoundYES - krasmutationfoundNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - COAD Primary Tumors - KRAS Mutation (Y|N = 28|30)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = voomMetadata$kras_mutation_found, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge) #------------- LUAD: Smoking vs nonsmoking -------------# # Checking which columns to keep table(luadClinicalMetadata$kras_mutation_found) # Y|N 23|39 table(luadClinicalMetadata$tobacco_smoking_history) # Data alignment luadClinicalCols2Keep <- c( "Tumor_Sample_Barcode", "bcr_patient_uuid", ## NB: This aligns with the case_uuid in the QIIME mapping file "tobacco_smoking_history", # See meaning of values here: https://groups.google.com/forum/#!topic/cbioportal/irEXZRj9Who "number_pack_years_smoked", "kras_mutation_found" ) luadMetadataCGC <- metadataSamplesAllCGC[metadataSamplesAllCGC$disease_type == "Lung Adenocarcinoma",] luadMetadataCGC$case_uuid <- toupper(luadMetadataCGC$case_uuid) 
luadClinicalMetadata$bcr_patient_uuid <- toupper(luadClinicalMetadata$bcr_patient_uuid) luadMetadataCGCClinical <- left_join(luadMetadataCGC, luadClinicalMetadata[,luadClinicalCols2Keep], by = c("case_uuid" = "bcr_patient_uuid")) rownames(luadMetadataCGCClinical) <- rownames(luadMetadataCGC) # Subset data luadMetadataCGCClinical_smoking <- luadMetadataCGCClinical[!is.na(luadMetadataCGCClinical$tobacco_smoking_history),] luadMetadataCGCClinical_kras <- luadMetadataCGCClinical[!is.na(luadMetadataCGCClinical$kras_mutation_found),] table(luadMetadataCGCClinical_smoking$sample_type) # luadMetadataCGCClinical_smoking <- luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$tobacco_smoking_history %in% c("1","3"),] luadMetadataCGCClinical_smoking$smokingHistory <- ordered(ifelse(luadMetadataCGCClinical_smoking$tobacco_smoking_history == 1, yes = "Nonsmoker", no = "Smoker"), levels = c("Nonsmoker","Smoker")) luadMetadataCGCClinical_smokingPT <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Primary Tumor",]) luadMetadataCGCClinical_smokingSTN <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Solid Tissue Normal",]) luadMetadataCGCClinical_smokingBDN <- droplevels(luadMetadataCGCClinical_smoking[luadMetadataCGCClinical_smoking$sample_type == "Blood Derived Normal",]) luadMetadataCGCClinical_krasPT <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Primary Tumor",]) luadMetadataCGCClinical_krasSTN <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Solid Tissue Normal",]) luadMetadataCGCClinical_krasBDN <- droplevels(luadMetadataCGCClinical_kras[luadMetadataCGCClinical_kras$sample_type == "Blood Derived Normal",]) voomMetadata <- luadMetadataCGCClinical_krasPT voomCountData <- t(vbDataBarnDFReconciled[rownames(voomMetadata),]) table(voomMetadata$smokingHistory) table(voomMetadata$kras_mutation_found) # 
Differential abundance analysis limmaFormula <- formula(~0 + kras_mutation_found + data_submitting_center_label) covDesignShort <- model.matrix(limmaFormula, data = voomMetadata) colnames(covDesignShort) <- gsub('([[:punct:]])|\\s+','',colnames(covDesignShort)) colnames(covDesignShort) dge <- DGEList(counts = voomCountData) keep <- filterByExpr(dge, covDesignShort) dge <- dge[keep,,keep.lib.sizes=FALSE] dge <- calcNormFactors(dge, method = "TMM") vdge <- voom(dge, design = covDesignShort, plot = TRUE, save.plot = TRUE, normalize.method="none") vdgeFit <- lmFit(vdge, covDesignShort) vdgeFit <- eBayes(vdgeFit) contrast.matrix <- makeContrasts(krasmutationfoundYES - krasmutationfoundNO, levels = covDesignShort) vdgeFit2 <- contrasts.fit(vdgeFit, contrasts = contrast.matrix) vdgeFit2 <- eBayes(vdgeFit2) vdgeFitDT <- decideTests(vdgeFit2) # results <- list(dge = dge, vdge = vdge, vdgeFit2 = vdgeFit2, vdgeFitDT = vdgeFitDT, limmaFormula = limmaFormula) titleXY <- "Volcano Plot - LUAD Primary Tumor - KRAS Mutation (Yes|No = 26|55)" glXYPlot(x = vdgeFit2$coef, y = vdgeFit2$lod, # coef = 1, xlab="logFC", ylab="log-odds", status = vdgeFitDT, groups = voomMetadata$kras_mutation_found, main = titleXY, side.ylab = "Voom Normalized Abundance", html = titleXY, folder = "Glimma-plots-clinical-Volcano", counts = vdge)
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # H2OMOJOSettings.default <- function() { H2OMOJOSettings() } #' @export H2OMOJOSettings H2OMOJOSettings <- setRefClass("H2OMOJOSettings", fields = list(predictionCol = "character", detailedPredictionCol = "character", convertUnknownCategoricalLevelsToNa = "logical", convertInvalidNumbersToNa = "logical", namedMojoOutputColumns = "logical", withContributions = "logical", withLeafNodeAssignments = "logical", withStageResults = "logical"), methods = list( initialize = function(predictionCol = "prediction", detailedPredictionCol = "detailed_prediction", convertUnknownCategoricalLevelsToNa = FALSE, convertInvalidNumbersToNa = FALSE, namedMojoOutputColumns = TRUE, withContributions = FALSE, withLeafNodeAssignments = FALSE, withStageResults = FALSE) { .self$predictionCol <- predictionCol .self$detailedPredictionCol <- detailedPredictionCol .self$convertUnknownCategoricalLevelsToNa <- convertUnknownCategoricalLevelsToNa .self$convertInvalidNumbersToNa <- convertInvalidNumbersToNa .self$namedMojoOutputColumns <- namedMojoOutputColumns .self$withContributions <- withContributions .self$withLeafNodeAssignments <- withLeafNodeAssignments .self$withStageResults <- withStageResults }, toJavaObject = function() { sc 
<- spark_connection_find()[[1]] invoke_new(sc, "ai.h2o.sparkling.ml.models.H2OMOJOSettings", .self$predictionCol, .self$detailedPredictionCol, .self$convertUnknownCategoricalLevelsToNa, .self$convertInvalidNumbersToNa, .self$namedMojoOutputColumns, .self$withContributions, .self$withLeafNodeAssignments, .self$withStageResults) } ))
/r/src/R/ai/h2o/sparkling/ml/models/H2OMOJOSettings.R
permissive
alexander-manley/sparkling-water
R
false
false
3,985
r
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # H2OMOJOSettings.default <- function() { H2OMOJOSettings() } #' @export H2OMOJOSettings H2OMOJOSettings <- setRefClass("H2OMOJOSettings", fields = list(predictionCol = "character", detailedPredictionCol = "character", convertUnknownCategoricalLevelsToNa = "logical", convertInvalidNumbersToNa = "logical", namedMojoOutputColumns = "logical", withContributions = "logical", withLeafNodeAssignments = "logical", withStageResults = "logical"), methods = list( initialize = function(predictionCol = "prediction", detailedPredictionCol = "detailed_prediction", convertUnknownCategoricalLevelsToNa = FALSE, convertInvalidNumbersToNa = FALSE, namedMojoOutputColumns = TRUE, withContributions = FALSE, withLeafNodeAssignments = FALSE, withStageResults = FALSE) { .self$predictionCol <- predictionCol .self$detailedPredictionCol <- detailedPredictionCol .self$convertUnknownCategoricalLevelsToNa <- convertUnknownCategoricalLevelsToNa .self$convertInvalidNumbersToNa <- convertInvalidNumbersToNa .self$namedMojoOutputColumns <- namedMojoOutputColumns .self$withContributions <- withContributions .self$withLeafNodeAssignments <- withLeafNodeAssignments .self$withStageResults <- withStageResults }, toJavaObject = function() { sc 
<- spark_connection_find()[[1]] invoke_new(sc, "ai.h2o.sparkling.ml.models.H2OMOJOSettings", .self$predictionCol, .self$detailedPredictionCol, .self$convertUnknownCategoricalLevelsToNa, .self$convertInvalidNumbersToNa, .self$namedMojoOutputColumns, .self$withContributions, .self$withLeafNodeAssignments, .self$withStageResults) } ))
\name{midas_r.fit} \alias{midas_r.fit} \title{Fit restricted MIDAS regression} \usage{ \method{midas_r}{fit}(x) } \arguments{ \item{x}{\code{midas_r} object} } \value{ \code{\link{midas_r}} object } \description{ Workhorse function for fitting restricted MIDAS regression } \author{ Vaidotas Zemlys }
/man/midas_r.fit.Rd
no_license
snowdj/midasr
R
false
false
304
rd
\name{midas_r.fit} \alias{midas_r.fit} \title{Fit restricted MIDAS regression} \usage{ \method{midas_r}{fit}(x) } \arguments{ \item{x}{\code{midas_r} object} } \value{ \code{\link{midas_r}} object } \description{ Workhorse function for fitting restricted MIDAS regression } \author{ Vaidotas Zemlys }
#' Semi-annual and annual hydrological data #' #' Downloading hydrological data for the semi-annual and annual period #' available in the danepubliczne.imgw.pl collection #' #' @param year vector of years (e.g., 1966:2000) #' @param coords add coordinates of the stations (logical value TRUE or FALSE) #' @param value type of data (can be: state - "H" (default), flow - "Q", or temperature - "T") #' @param station name or ID of hydrological station(s). #' It accepts names (characters in CAPITAL LETTERS) or stations' IDs (numeric) #' @param col_names three types of column names possible: #' "short" - default, values with shorten names, #' "full" - full English description, #' "polish" - original names in the dataset #' @param allow_failure logical - whether to proceed or stop on failure. By default set to TRUE (i.e. don't stop on error). For debugging purposes change to FALSE #' @param ... other parameters that may be passed to the 'shortening' function that shortens column names #' @importFrom XML readHTMLTable #' @importFrom utils download.file unzip read.csv #' @importFrom data.table fread #' @export #' @examples #' \donttest{ #' hydro_yearly = hydro_imgw_annual(year = 2000, value = "H", station = "ANNOPOL") #' } hydro_imgw_annual = function(year, coords = FALSE, value = "H", station = NULL, col_names = "short", allow_failure = TRUE, ...) { if (allow_failure) { tryCatch(hydro_imgw_annual_bp(year, coords, value, station, col_names, ...), error = function(e){ message(paste("Problems with downloading data.", "Run function with argument allow_failure = FALSE", "to see more details"))}) } else { hydro_imgw_annual_bp(year, coords, value, station, col_names, ...) } } #' @keywords internal #' @noRd hydro_imgw_annual_bp = function(year = year, coords = coords, value = value, station = station, col_names = col_names, ...) 
{ translit = check_locale() base_url = "https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/" interval = "semiannual_and_annual" interval_pl = "polroczne_i_roczne" temp = tempfile() test_url(link = paste0(base_url, interval_pl, "/"), output = temp) a = readLines(temp, warn = FALSE) ind = grep(readHTMLTable(a)[[1]]$Name, pattern = "/") catalogs = as.character(readHTMLTable(a)[[1]]$Name[ind]) catalogs = gsub(x = catalogs, pattern = "/", replacement = "") # less files to read: catalogs = catalogs[catalogs %in% as.character(year)] if (length(catalogs) == 0) { stop("Selected year(s) is/are not available in the database.", call. = FALSE) } meta = hydro_metadata_imgw(interval) all_data = vector("list", length = length(catalogs)) for (i in seq_along(catalogs)) { # i = 1 catalog = catalogs[i] #print(i) address = paste0(base_url, interval_pl, "/", catalog, "/polr_", value, "_", catalog, ".zip") temp = tempfile() temp2 = tempfile() test_url(address, temp) #download.file(address, temp) unzip(zipfile = temp, exdir = temp2) file1 = paste(temp2, dir(temp2), sep = "/")[1] if (translit) { data1 = as.data.frame(data.table::fread(cmd = paste("iconv -f CP1250 -t ASCII//TRANSLIT", file1))) } else { data1 = read.csv(file1, header = FALSE, stringsAsFactors = FALSE, fileEncoding = "CP1250") } colnames(data1) = meta[[value]]$parameters all_data[[i]] = data1 } all_data = do.call(rbind, all_data) # ten sam warunek braku danych lub obserwacji dla wszytkich wartosci all_data[all_data == 99999.999] = NA all_data = all_data[, !duplicated(colnames(all_data))] # coords if (coords) { all_data = merge(climate::imgw_hydro_stations, all_data, by.x = "id", by.y = "Kod stacji", all.y = TRUE) } #station selection if (!is.null(station)) { if (is.character(station)) { all_data = all_data[substr(all_data$`Nazwa stacji`, 1, nchar(station)) == station, ] if (nrow(all_data) == 0) { stop("Selected station(s) is not available in the database.", call. 
= FALSE) } } else if (is.numeric(station)) { all_data = all_data[all_data$`Kod stacji` %in% station, ] if (nrow(all_data) == 0) { stop("Selected station(s) is not available in the database.", call. = FALSE) } } else { stop("Selected station(s) are not in the proper format.", call. = FALSE) } } all_data = all_data[order(all_data$`Nazwa stacji`, all_data$`Rok hydrologiczny`), ] # adding option for shortening column names and removing duplicates all_data = hydro_shortening_imgw(all_data, col_names = col_names, ...) return(all_data) }
/R/hydro_imgw_annual.R
permissive
bczernecki/climate
R
false
false
5,314
r
#' Semi-annual and annual hydrological data #' #' Downloading hydrological data for the semi-annual and annual period #' available in the danepubliczne.imgw.pl collection #' #' @param year vector of years (e.g., 1966:2000) #' @param coords add coordinates of the stations (logical value TRUE or FALSE) #' @param value type of data (can be: state - "H" (default), flow - "Q", or temperature - "T") #' @param station name or ID of hydrological station(s). #' It accepts names (characters in CAPITAL LETTERS) or stations' IDs (numeric) #' @param col_names three types of column names possible: #' "short" - default, values with shorten names, #' "full" - full English description, #' "polish" - original names in the dataset #' @param allow_failure logical - whether to proceed or stop on failure. By default set to TRUE (i.e. don't stop on error). For debugging purposes change to FALSE #' @param ... other parameters that may be passed to the 'shortening' function that shortens column names #' @importFrom XML readHTMLTable #' @importFrom utils download.file unzip read.csv #' @importFrom data.table fread #' @export #' @examples #' \donttest{ #' hydro_yearly = hydro_imgw_annual(year = 2000, value = "H", station = "ANNOPOL") #' } hydro_imgw_annual = function(year, coords = FALSE, value = "H", station = NULL, col_names = "short", allow_failure = TRUE, ...) { if (allow_failure) { tryCatch(hydro_imgw_annual_bp(year, coords, value, station, col_names, ...), error = function(e){ message(paste("Problems with downloading data.", "Run function with argument allow_failure = FALSE", "to see more details"))}) } else { hydro_imgw_annual_bp(year, coords, value, station, col_names, ...) } } #' @keywords internal #' @noRd hydro_imgw_annual_bp = function(year = year, coords = coords, value = value, station = station, col_names = col_names, ...) 
{ translit = check_locale() base_url = "https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/" interval = "semiannual_and_annual" interval_pl = "polroczne_i_roczne" temp = tempfile() test_url(link = paste0(base_url, interval_pl, "/"), output = temp) a = readLines(temp, warn = FALSE) ind = grep(readHTMLTable(a)[[1]]$Name, pattern = "/") catalogs = as.character(readHTMLTable(a)[[1]]$Name[ind]) catalogs = gsub(x = catalogs, pattern = "/", replacement = "") # less files to read: catalogs = catalogs[catalogs %in% as.character(year)] if (length(catalogs) == 0) { stop("Selected year(s) is/are not available in the database.", call. = FALSE) } meta = hydro_metadata_imgw(interval) all_data = vector("list", length = length(catalogs)) for (i in seq_along(catalogs)) { # i = 1 catalog = catalogs[i] #print(i) address = paste0(base_url, interval_pl, "/", catalog, "/polr_", value, "_", catalog, ".zip") temp = tempfile() temp2 = tempfile() test_url(address, temp) #download.file(address, temp) unzip(zipfile = temp, exdir = temp2) file1 = paste(temp2, dir(temp2), sep = "/")[1] if (translit) { data1 = as.data.frame(data.table::fread(cmd = paste("iconv -f CP1250 -t ASCII//TRANSLIT", file1))) } else { data1 = read.csv(file1, header = FALSE, stringsAsFactors = FALSE, fileEncoding = "CP1250") } colnames(data1) = meta[[value]]$parameters all_data[[i]] = data1 } all_data = do.call(rbind, all_data) # ten sam warunek braku danych lub obserwacji dla wszytkich wartosci all_data[all_data == 99999.999] = NA all_data = all_data[, !duplicated(colnames(all_data))] # coords if (coords) { all_data = merge(climate::imgw_hydro_stations, all_data, by.x = "id", by.y = "Kod stacji", all.y = TRUE) } #station selection if (!is.null(station)) { if (is.character(station)) { all_data = all_data[substr(all_data$`Nazwa stacji`, 1, nchar(station)) == station, ] if (nrow(all_data) == 0) { stop("Selected station(s) is not available in the database.", call. 
= FALSE) } } else if (is.numeric(station)) { all_data = all_data[all_data$`Kod stacji` %in% station, ] if (nrow(all_data) == 0) { stop("Selected station(s) is not available in the database.", call. = FALSE) } } else { stop("Selected station(s) are not in the proper format.", call. = FALSE) } } all_data = all_data[order(all_data$`Nazwa stacji`, all_data$`Rok hydrologiczny`), ] # adding option for shortening column names and removing duplicates all_data = hydro_shortening_imgw(all_data, col_names = col_names, ...) return(all_data) }
library(shiny) library(tidyverse) library(ggplot2) ################### NYFedData <- read.csv("debt_balance.csv") i <- 1 while (i < length(NYFedData[,1])) { NYFedData[i,1]<-sub(":", " \\1", NYFedData[i,1]) #substitute ":" with space i = i+1 } ################### ui <- fluidPage( titlePanel("Interactive Graph and Table App"), sidebarLayout( sidebarPanel( p("The graph is a visualization of the historical total debt data (between the first quarter of 2003 and the first quarter of 2021) of the private sector in the United States segmented by the debt type."), h3("Graph:"), p("Please use the ", strong("drop down menu")," (below) to select which debt type to plot on the Y axis. Quarters are plotted on the X axis."), h3("Table:"), p("The same data are shown in the",strong("table")," (below the graph). The data can be filtered using the radio buttons (below the drop down menu) and searched using the search menu."), p(span("The controls of the table and the graph are decoupled.", style = "color:green")), br(), em("Data Source:"), em("Total Debt Balance and its Composition in the Quarterly Report on Household Debt and Credit by the Federal Reserve Bank of New York. 
Released in May 2021.", "Data can be accessed ", a("HERE", href = "https://www.newyorkfed.org/microeconomics/hhdc")), br(), br(), selectInput("plot_var", label = "Drop down menu: Select a variable to plot on the Y axis:", choices = c("Mortgage", "HE.Revolving", "Auto.Loan", "Credit.Card", "Student.Loan", "Other", "Total" ), selected = "Mortgage"), br(), br(), conditionalPanel( 'input.dataset === "NYFedData"', checkboxGroupInput("show_vars", "Radio buttons: Select the columns to show in the table:", names(NYFedData), selected = names(NYFedData)) )), ################### mainPanel( plotOutput("Plot1"), br(), br(), tabsetPanel( id = 'dataset', tabPanel("NYFedData", DT::dataTableOutput("mytable1")) ) ) ) ) ################### server <- function(input, output) { output$Plot1 <- renderPlot({ plot <- ggplot(NYFedData, aes(Quarter, NYFedData[,input$plot_var])) plot <- plot + geom_bar(stat = "identity", fill = "darkolivegreen4") plot <- plot + ggtitle("Total Debt Balance by its Composition Type. Trillions of $") plot <- plot + xlab("Quarters") + theme(axis.text.x = element_text(angle = 90, vjust = 1, hjust = 1)) plot <- plot + ylab(input$plot_var) print(plot) }) output$mytable1 <- DT::renderDataTable({ DT::datatable(NYFedData[, input$show_vars, drop = FALSE], options = list( pageLength = 73) ) }) } shinyApp(ui, server)
/app.R
no_license
sophiej-s/RShiny_proj1
R
false
false
3,095
r
library(shiny) library(tidyverse) library(ggplot2) ################### NYFedData <- read.csv("debt_balance.csv") i <- 1 while (i < length(NYFedData[,1])) { NYFedData[i,1]<-sub(":", " \\1", NYFedData[i,1]) #substitute ":" with space i = i+1 } ################### ui <- fluidPage( titlePanel("Interactive Graph and Table App"), sidebarLayout( sidebarPanel( p("The graph is a visualization of the historical total debt data (between the first quarter of 2003 and the first quarter of 2021) of the private sector in the United States segmented by the debt type."), h3("Graph:"), p("Please use the ", strong("drop down menu")," (below) to select which debt type to plot on the Y axis. Quarters are plotted on the X axis."), h3("Table:"), p("The same data are shown in the",strong("table")," (below the graph). The data can be filtered using the radio buttons (below the drop down menu) and searched using the search menu."), p(span("The controls of the table and the graph are decoupled.", style = "color:green")), br(), em("Data Source:"), em("Total Debt Balance and its Composition in the Quarterly Report on Household Debt and Credit by the Federal Reserve Bank of New York. 
Released in May 2021.", "Data can be accessed ", a("HERE", href = "https://www.newyorkfed.org/microeconomics/hhdc")), br(), br(), selectInput("plot_var", label = "Drop down menu: Select a variable to plot on the Y axis:", choices = c("Mortgage", "HE.Revolving", "Auto.Loan", "Credit.Card", "Student.Loan", "Other", "Total" ), selected = "Mortgage"), br(), br(), conditionalPanel( 'input.dataset === "NYFedData"', checkboxGroupInput("show_vars", "Radio buttons: Select the columns to show in the table:", names(NYFedData), selected = names(NYFedData)) )), ################### mainPanel( plotOutput("Plot1"), br(), br(), tabsetPanel( id = 'dataset', tabPanel("NYFedData", DT::dataTableOutput("mytable1")) ) ) ) ) ################### server <- function(input, output) { output$Plot1 <- renderPlot({ plot <- ggplot(NYFedData, aes(Quarter, NYFedData[,input$plot_var])) plot <- plot + geom_bar(stat = "identity", fill = "darkolivegreen4") plot <- plot + ggtitle("Total Debt Balance by its Composition Type. Trillions of $") plot <- plot + xlab("Quarters") + theme(axis.text.x = element_text(angle = 90, vjust = 1, hjust = 1)) plot <- plot + ylab(input$plot_var) print(plot) }) output$mytable1 <- DT::renderDataTable({ DT::datatable(NYFedData[, input$show_vars, drop = FALSE], options = list( pageLength = 73) ) }) } shinyApp(ui, server)
#' Estimation of p values #' @param X list of matrices, where the first is the one obtained with real data calc_p <- function(X){ p <- matrix(0, nrow=nrow(X[[1]]), ncol=ncol(X[[1]]), dimnames = list(rownames(X[[1]]), colnames(X[[1]]))) for(i in 1:length(X)){ idx <- X[[i]] >= X[[1]] p[idx] <- p[idx] + 1 } p <- p / length(X) return(p) }
/R/calc_p.R
no_license
emosca-cnr/dmfind002
R
false
false
361
r
#' Estimation of p values #' @param X list of matrices, where the first is the one obtained with real data calc_p <- function(X){ p <- matrix(0, nrow=nrow(X[[1]]), ncol=ncol(X[[1]]), dimnames = list(rownames(X[[1]]), colnames(X[[1]]))) for(i in 1:length(X)){ idx <- X[[i]] >= X[[1]] p[idx] <- p[idx] + 1 } p <- p / length(X) return(p) }
library(foreign) setwd("J:/temp/bootCamp/stataWorkshop/data") gbd <- read.dta ("GBD+WorldBank.dta") gbd <- gbd[with(gbd, order(country, year)),] gbd$firstRec <- !duplicated(gbd$country) ?unique n_ <- function (vec){ temp <- c(); final <-c() for (v in vec){ temp <- c(temp,v); final <- c(final, sum(v==temp)) } final }
/code/rC/day3GBD.R
no_license
nmmarquez/IHME
R
false
false
343
r
library(foreign) setwd("J:/temp/bootCamp/stataWorkshop/data") gbd <- read.dta ("GBD+WorldBank.dta") gbd <- gbd[with(gbd, order(country, year)),] gbd$firstRec <- !duplicated(gbd$country) ?unique n_ <- function (vec){ temp <- c(); final <-c() for (v in vec){ temp <- c(temp,v); final <- c(final, sum(v==temp)) } final }
#' API base URL #' @return API base URL (character) #' @noRd base_url <- function() { "https://api.hubapi.com" } #' @param path An API endpoint path #' @return The URL to that API endpoint (character) #' @noRd get_path_url <- function(path) { httr::modify_url(base_url(), path = path ) } #' @param path API endpoint path (character) #' @param apikey API key (character) #' @param token_path Path to cached token (character) #' @param query Query parameters (named list) #' @return A list #' @noRd .get_results <- function(path, apikey, token_path, query = NULL) { auth <- hubspot_auth( token_path = token_path, apikey = apikey ) # remove NULL elements from the query query <- purrr::discard(query, is.null) # auth if (auth$auth == "key") { query$hapikey <- auth$value res <- httr::GET(get_path_url(path), query = query, httr::user_agent("hubspot R package by Locke Data") ) } else { token <- readRDS(auth$value) token <- check_token(token, file = auth$value) res <- httr::GET(get_path_url(path), query = query, httr::config(httr::user_agent("hubspot R package by Locke Data"), token = token ) ) } httr::warn_for_status(res) res %>% httr::content() } get_results <- ratelimitr::limit_rate( .get_results, ratelimitr::rate(100, 10) ) #' @param path API endpoint path (character) #' @param apikey API key (character) #' @param query Query parameters (named list) #' @param max_iter Maximal number of iterations (integer) #' @param element Element to retrieve from API raw results (character) #' @param hasmore_name Name of the has-more parameter for the API #' endpoint (character) #' @param offset_name_in Name of the offset parameter to send to the API #' @param offset_name_out Name of the offset parameter returned #' @return A list #' @noRd get_results_paged <- function(path, token_path, apikey, query = NULL, max_iter = max_iter, element, hasmore_name, offset_name_in = "offset", offset_name_out = "offset") { results <- list() n <- 0 do <- TRUE offset <- 0 while (do & n < max_iter) { 
query[[offset_name_in]] <- offset res_content <- get_results( path = path, token_path = token_path, apikey = apikey, query = query ) n <- n + 1 results[n] <- list(res_content[[element]]) do <- res_content[[hasmore_name]] offset <- res_content[[offset_name_out]] } results <- purrr::flatten(results) return(results) } check_token <- function(token, file) { info <- httr::GET(get_path_url( glue::glue("/oauth/v1/access-tokens/{ token$credentials$access_token}") )) %>% httr::content() if ("message" %in% names(info)) { if (grepl("expired", info$message)) { token$refresh() saveRDS(token, file) } } if ("expires_in" %in% names(info)) { if (info$expires_in < 60) { token$refresh() saveRDS(token, file) } } token }
/R/utils.R
permissive
MrJoan/hubspot
R
false
false
3,136
r
#' API base URL #' @return API base URL (character) #' @noRd base_url <- function() { "https://api.hubapi.com" } #' @param path An API endpoint path #' @return The URL to that API endpoint (character) #' @noRd get_path_url <- function(path) { httr::modify_url(base_url(), path = path ) } #' @param path API endpoint path (character) #' @param apikey API key (character) #' @param token_path Path to cached token (character) #' @param query Query parameters (named list) #' @return A list #' @noRd .get_results <- function(path, apikey, token_path, query = NULL) { auth <- hubspot_auth( token_path = token_path, apikey = apikey ) # remove NULL elements from the query query <- purrr::discard(query, is.null) # auth if (auth$auth == "key") { query$hapikey <- auth$value res <- httr::GET(get_path_url(path), query = query, httr::user_agent("hubspot R package by Locke Data") ) } else { token <- readRDS(auth$value) token <- check_token(token, file = auth$value) res <- httr::GET(get_path_url(path), query = query, httr::config(httr::user_agent("hubspot R package by Locke Data"), token = token ) ) } httr::warn_for_status(res) res %>% httr::content() } get_results <- ratelimitr::limit_rate( .get_results, ratelimitr::rate(100, 10) ) #' @param path API endpoint path (character) #' @param apikey API key (character) #' @param query Query parameters (named list) #' @param max_iter Maximal number of iterations (integer) #' @param element Element to retrieve from API raw results (character) #' @param hasmore_name Name of the has-more parameter for the API #' endpoint (character) #' @param offset_name_in Name of the offset parameter to send to the API #' @param offset_name_out Name of the offset parameter returned #' @return A list #' @noRd get_results_paged <- function(path, token_path, apikey, query = NULL, max_iter = max_iter, element, hasmore_name, offset_name_in = "offset", offset_name_out = "offset") { results <- list() n <- 0 do <- TRUE offset <- 0 while (do & n < max_iter) { 
query[[offset_name_in]] <- offset res_content <- get_results( path = path, token_path = token_path, apikey = apikey, query = query ) n <- n + 1 results[n] <- list(res_content[[element]]) do <- res_content[[hasmore_name]] offset <- res_content[[offset_name_out]] } results <- purrr::flatten(results) return(results) } check_token <- function(token, file) { info <- httr::GET(get_path_url( glue::glue("/oauth/v1/access-tokens/{ token$credentials$access_token}") )) %>% httr::content() if ("message" %in% names(info)) { if (grepl("expired", info$message)) { token$refresh() saveRDS(token, file) } } if ("expires_in" %in% names(info)) { if (info$expires_in < 60) { token$refresh() saveRDS(token, file) } } token }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Calc_Vol_Emerge.R \name{Calc_Vol_Emerge} \alias{Calc_Vol_Emerge} \title{Calcul un volume à partir des tarifs Emerge en estimant la hauteur de décrochement} \usage{ Calc_Vol_Emerge(Id_arbre, ess, d13, htot, X = NULL, Y = NULL, crs = NA) } \arguments{ \item{Id_arbre}{identifiant unique de l'arbre (numerique ou chaine de caractère)} \item{ess}{code de l'essence selon code ONF (voir colonne Cod_ess de la base de données emergeIFNVol::Code_ess_Emerge)} \item{d13}{diamètre à hauteur de poitrine de l'arbre} \item{htot}{hauteur total de l'arbre} \item{X}{coordonnée X de l'arbre (permet de lui affecter une sylvo-éco-région d'appartenance, ATTENTION au système de coordonnée de référence = crs)} \item{Y}{coordonnée Y de l'arbre (permet de lui affecter une sylvo-éco-région d'appartenance, ATTENTION au système de coordonnée de référence = crs)} \item{crs}{système de coordonnée de référence rattaché aux coordonnées X et Y renseigné précédement (par défaut : Lambert 93 code EPSG = 2154)} } \description{ Cette fonction permet de calculer un volume en utilisant les tarifs Emerge mais en ne rensignant "que" l'essence, le diamètre à hauteur de poitrine et la hauteur d'un ou plusieurs arbres. La hauteur de décrochement est prédite en utilisant un modèle de prédiction établit à partir des données des placettes de l'inventaire foretier national }
/man/Calc_Vol_Emerge.Rd
no_license
antoine25C/emergeIFNVol
R
false
true
1,503
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Calc_Vol_Emerge.R \name{Calc_Vol_Emerge} \alias{Calc_Vol_Emerge} \title{Calcul un volume à partir des tarifs Emerge en estimant la hauteur de décrochement} \usage{ Calc_Vol_Emerge(Id_arbre, ess, d13, htot, X = NULL, Y = NULL, crs = NA) } \arguments{ \item{Id_arbre}{identifiant unique de l'arbre (numerique ou chaine de caractère)} \item{ess}{code de l'essence selon code ONF (voir colonne Cod_ess de la base de données emergeIFNVol::Code_ess_Emerge)} \item{d13}{diamètre à hauteur de poitrine de l'arbre} \item{htot}{hauteur total de l'arbre} \item{X}{coordonnée X de l'arbre (permet de lui affecter une sylvo-éco-région d'appartenance, ATTENTION au système de coordonnée de référence = crs)} \item{Y}{coordonnée Y de l'arbre (permet de lui affecter une sylvo-éco-région d'appartenance, ATTENTION au système de coordonnée de référence = crs)} \item{crs}{système de coordonnée de référence rattaché aux coordonnées X et Y renseigné précédement (par défaut : Lambert 93 code EPSG = 2154)} } \description{ Cette fonction permet de calculer un volume en utilisant les tarifs Emerge mais en ne rensignant "que" l'essence, le diamètre à hauteur de poitrine et la hauteur d'un ou plusieurs arbres. La hauteur de décrochement est prédite en utilisant un modèle de prédiction établit à partir des données des placettes de l'inventaire foretier national }
#PLOT 2 source('dataloader.R') ## Create Plot 2 plot(subdata$Global_active_power~subdata$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") ## Saving to file dev.copy(png, file="plot2.png", height=480, width=480) dev.off()
/Plot2.R
no_license
Invictus-McQ/GettingAndCleaningDataFinal
R
false
false
242
r
#PLOT 2 source('dataloader.R') ## Create Plot 2 plot(subdata$Global_active_power~subdata$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") ## Saving to file dev.copy(png, file="plot2.png", height=480, width=480) dev.off()
library(tidyverse) library(foreign) todofolders <- c('exam', 'question', 'lab') # do diet later ### stitch together NHANES from an .xpt folder demo <- read.xport('./xpt/demo/demo_i.xpt') big_data <- list() big_data[[1]] <- demo for (ii in seq_along(todofolders)) { xptdirectory <- sprintf('./xpt/%s', todofolders[ii]) outdataname <- sprintf('./xpt/%s.rds', todofolders[ii]) ### xptfiles <- list.files(xptdirectory) xptpaths <- file.path(xptdirectory, xptfiles) frames_list <- xptpaths %>% map(~read.xport(.)) big_data[[ii+1]] <- frames_list %>% reduce(full_join, by='SEQN') } big_data_merged <- big_data %>% reduce(left_join, by='SEQN') ## find redundant cols and remove/rename big_data_merged <- big_data_merged %>% select(-ends_with('.y')) colnames(big_data_merged) <- sub('\\.x', '', colnames(big_data_merged)) saveRDS(big_data_merged, file='./xpt/merged.RDS')
/cbind_xpt_2018.R
no_license
chiragjp/nhanes_scraper
R
false
false
884
r
library(tidyverse) library(foreign) todofolders <- c('exam', 'question', 'lab') # do diet later ### stitch together NHANES from an .xpt folder demo <- read.xport('./xpt/demo/demo_i.xpt') big_data <- list() big_data[[1]] <- demo for (ii in seq_along(todofolders)) { xptdirectory <- sprintf('./xpt/%s', todofolders[ii]) outdataname <- sprintf('./xpt/%s.rds', todofolders[ii]) ### xptfiles <- list.files(xptdirectory) xptpaths <- file.path(xptdirectory, xptfiles) frames_list <- xptpaths %>% map(~read.xport(.)) big_data[[ii+1]] <- frames_list %>% reduce(full_join, by='SEQN') } big_data_merged <- big_data %>% reduce(left_join, by='SEQN') ## find redundant cols and remove/rename big_data_merged <- big_data_merged %>% select(-ends_with('.y')) colnames(big_data_merged) <- sub('\\.x', '', colnames(big_data_merged)) saveRDS(big_data_merged, file='./xpt/merged.RDS')
#The following two functions can be used to create a matrix and calculate its #inverse with the ability to cache the inverse matrix if the original matrix remains #unchanged. #This function provides the following functionality #Creates a list with 4 functions as follows: #1. get() #2. set() #3. setinverse() #4. getinverse() makeCacheMatrix <- function(x = matrix()) { i <- NULL #set() function enables the user to enter a valid matrix set <- function(y) { if(class(y)!="matrix") { message("Input is not a matrix! Please enter a valid matrix") } else { x <<- y i <<- NULL } } #get() function enables the user to retrieve the value of the matrix get <- function() x #setinverse() function is used by the cachesolve() function to cache the value of the inverse matrix setinverse <- function(inverse) i <<- inverse #getinverse() function is used by the cachesolve() function to retrieve the value of the cached inverse matrix getinverse <- function() i list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } #The cacheSolve() function is used to calculate the inverse of the matrix created using the makeCacheMatrix() function. #This function returns a cached value of the inverse matrix if the original matrix remains unchanged. cacheSolve <- function(x, ...) { #Inverse of the matrix stored using makeCacheMatrix() is calculated i <- x$getinverse() #condition to check whether the original matrix has been modified if(!is.null(i)) { message("getting cached data") #CONDITION IS TRUE: the cached value of the inverse matrix is returned return(i) } data <- x$get() #CONDITION IS FALSE: inverse of the given matrix is calulated anew i <- solve(data, ...) x$setinverse(i) #the inverse matrix is returned i }
/cachematrix.R
no_license
abhayj1987/ProgrammingAssignment2
R
false
false
2,275
r
#The following two functions can be used to create a matrix and calculate its #inverse with the ability to cache the inverse matrix if the original matrix remains #unchanged. #This function provides the following functionality #Creates a list with 4 functions as follows: #1. get() #2. set() #3. setinverse() #4. getinverse() makeCacheMatrix <- function(x = matrix()) { i <- NULL #set() function enables the user to enter a valid matrix set <- function(y) { if(class(y)!="matrix") { message("Input is not a matrix! Please enter a valid matrix") } else { x <<- y i <<- NULL } } #get() function enables the user to retrieve the value of the matrix get <- function() x #setinverse() function is used by the cachesolve() function to cache the value of the inverse matrix setinverse <- function(inverse) i <<- inverse #getinverse() function is used by the cachesolve() function to retrieve the value of the cached inverse matrix getinverse <- function() i list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } #The cacheSolve() function is used to calculate the inverse of the matrix created using the makeCacheMatrix() function. #This function returns a cached value of the inverse matrix if the original matrix remains unchanged. cacheSolve <- function(x, ...) { #Inverse of the matrix stored using makeCacheMatrix() is calculated i <- x$getinverse() #condition to check whether the original matrix has been modified if(!is.null(i)) { message("getting cached data") #CONDITION IS TRUE: the cached value of the inverse matrix is returned return(i) } data <- x$get() #CONDITION IS FALSE: inverse of the given matrix is calulated anew i <- solve(data, ...) x$setinverse(i) #the inverse matrix is returned i }
# Load the required libraries
library(ggplot2)
library(scales)

# Read in the required csv file
results <- read.csv(file = 'data.csv', sep = ',', header = TRUE)

# Scale the amounts down so the axis labels stay readable
results$Amount <- results$Amount / 10000

# Break the results up into import and export for easy usage
import <- results[which(results$Type == 'Import'), ]
export <- results[which(results$Type == 'Export'), ]

# Create a new png device
png("importyear.png", width = 6.75 * 300, height = 6 * 300, res = 300)
# Amount spent on imports over the years.
# FIX: print() is required around each ggplot -- ggplot objects are only
# auto-printed at the interactive prompt, so without it the PNG files are
# blank when this script is run via source() or Rscript.
print(
  ggplot(data = import, aes(y = Amount, x = Year, group = Item, colour = Item)) +
    geom_line() +
    geom_point() +
    xlab("Years") +
    scale_y_continuous(label = comma) +
    ylab("Amount Spent in the 0000's") +
    ggtitle("Money Spent \nBy the JA Government on Import's")
)
# Close the device
dev.off()

# Amount earned on exports over the years
png("exportyear.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = export, aes(y = Amount, x = Year, group = Item, colour = Item)) +
    geom_line() +
    geom_point() +
    xlab("Years") +
    scale_y_continuous(label = comma) +
    ylab("Amount Earned in the 0000's") +
    ggtitle("Money Earned \nBy the JA Government on Export's")
)
dev.off()

# Total amount spent on each imported item
png("importtotal.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = import, aes(x = Item, y = Amount, fill = Item)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(breaks = NULL) +
    scale_y_continuous(label = comma) +
    xlab("Imported Items") +
    ylab("Amount Spent in the 0000's") +
    ggtitle("Total Money Spent \nBy the JA Government on Each Import")
)
dev.off()

# Total amount earned on each exported item
png("exporttotal.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = export, aes(x = Item, y = Amount, fill = Item)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(breaks = NULL, label = comma) +
    scale_y_continuous(label = comma) +
    xlab("Exported Items") +
    ylab("Amount Earned in the 0000's") +
    ggtitle("Total Money \nEarned By the JA Government \non Each Export")
)
dev.off()

# Exports vs imports overall
png("exportvimport.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = results, aes(x = Type, y = Amount, fill = Type)) +
    geom_bar(stat = "identity") +
    xlab("Export Or Import") +
    ylab("Amount in the 0000's") +
    scale_y_continuous(label = comma) +
    ggtitle("Export vs Import between 2008 - 2013")
)
dev.off()
/stats.R
no_license
Deano24/JAStats
R
false
false
2,589
r
# Load the required libraries
library(ggplot2)
library(scales)

# Read in the required csv file
results <- read.csv(file = 'data.csv', sep = ',', header = TRUE)

# Scale the amounts down so the axis labels stay readable
results$Amount <- results$Amount / 10000

# Break the results up into import and export for easy usage
import <- results[which(results$Type == 'Import'), ]
export <- results[which(results$Type == 'Export'), ]

# Create a new png device
png("importyear.png", width = 6.75 * 300, height = 6 * 300, res = 300)
# Amount spent on imports over the years.
# FIX: print() is required around each ggplot -- ggplot objects are only
# auto-printed at the interactive prompt, so without it the PNG files are
# blank when this script is run via source() or Rscript.
print(
  ggplot(data = import, aes(y = Amount, x = Year, group = Item, colour = Item)) +
    geom_line() +
    geom_point() +
    xlab("Years") +
    scale_y_continuous(label = comma) +
    ylab("Amount Spent in the 0000's") +
    ggtitle("Money Spent \nBy the JA Government on Import's")
)
# Close the device
dev.off()

# Amount earned on exports over the years
png("exportyear.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = export, aes(y = Amount, x = Year, group = Item, colour = Item)) +
    geom_line() +
    geom_point() +
    xlab("Years") +
    scale_y_continuous(label = comma) +
    ylab("Amount Earned in the 0000's") +
    ggtitle("Money Earned \nBy the JA Government on Export's")
)
dev.off()

# Total amount spent on each imported item
png("importtotal.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = import, aes(x = Item, y = Amount, fill = Item)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(breaks = NULL) +
    scale_y_continuous(label = comma) +
    xlab("Imported Items") +
    ylab("Amount Spent in the 0000's") +
    ggtitle("Total Money Spent \nBy the JA Government on Each Import")
)
dev.off()

# Total amount earned on each exported item
png("exporttotal.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = export, aes(x = Item, y = Amount, fill = Item)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(breaks = NULL, label = comma) +
    scale_y_continuous(label = comma) +
    xlab("Exported Items") +
    ylab("Amount Earned in the 0000's") +
    ggtitle("Total Money \nEarned By the JA Government \non Each Export")
)
dev.off()

# Exports vs imports overall
png("exportvimport.png", width = 6.75 * 300, height = 6 * 300, res = 300)
print(
  ggplot(data = results, aes(x = Type, y = Amount, fill = Type)) +
    geom_bar(stat = "identity") +
    xlab("Export Or Import") +
    ylab("Amount in the 0000's") +
    scale_y_continuous(label = comma) +
    ggtitle("Export vs Import between 2008 - 2013")
)
dev.off()
## Memoized matrix inversion: avoid recomputing a (potentially expensive)
## inverse by caching it alongside the matrix itself.

## makeCacheMatrix() builds a closure-based object around matrix `x`.
## The closure holds two pieces of state -- the matrix and its cached
## inverse (NULL until computed) -- exposed through four accessors:
## set/get for the matrix, setInverted/getInverted for the inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL                       # inverse not computed yet

  set <- function(y) {
    x <<- y                                # replace the stored matrix
    cached_inv <<- NULL                    # stale inverse must be dropped
  }
  get <- function() x
  setInverted <- function(Inv) cached_inv <<- Inv
  getInverted <- function() cached_inv

  list(set = set,
       get = get,
       setInverted = setInverted,
       getInverted = getInverted)
}

## cacheSolve() returns the inverse of the matrix held by a makeCacheMatrix
## object. On a cache hit it announces "getting cached data" and returns the
## stored inverse; on a miss it computes the inverse with solve(), stores it
## via setInverted() for next time, and returns it.
cacheSolve <- function(x, ...) {
  result <- x$getInverted()
  if (is.null(result)) {
    # Not cached yet: invert now and remember the answer.
    result <- solve(x$get(), ...)
    x$setInverted(result)
  } else {
    message("getting cached data")
  }
  result
}
/cachematrix.R
no_license
MT15Coursera/ProgrammingAssignment2
R
false
false
2,290
r
## Memoized matrix inversion: avoid recomputing a (potentially expensive)
## inverse by caching it alongside the matrix itself.

## makeCacheMatrix() builds a closure-based object around matrix `x`.
## The closure holds two pieces of state -- the matrix and its cached
## inverse (NULL until computed) -- exposed through four accessors:
## set/get for the matrix, setInverted/getInverted for the inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL                       # inverse not computed yet

  set <- function(y) {
    x <<- y                                # replace the stored matrix
    cached_inv <<- NULL                    # stale inverse must be dropped
  }
  get <- function() x
  setInverted <- function(Inv) cached_inv <<- Inv
  getInverted <- function() cached_inv

  list(set = set,
       get = get,
       setInverted = setInverted,
       getInverted = getInverted)
}

## cacheSolve() returns the inverse of the matrix held by a makeCacheMatrix
## object. On a cache hit it announces "getting cached data" and returns the
## stored inverse; on a miss it computes the inverse with solve(), stores it
## via setInverted() for next time, and returns it.
cacheSolve <- function(x, ...) {
  result <- x$getInverted()
  if (is.null(result)) {
    # Not cached yet: invert now and remember the answer.
    result <- solve(x$get(), ...)
    x$setInverted(result)
  } else {
    message("getting cached data")
  }
  result
}
library(clusternor)

### Name: FuzzyCMeans
### Title: Perform Fuzzy C-means clustering on a data matrix. A soft
###   variant of the kmeans algorithm where each data point are assigned a
###   contribution weight to each cluster
### Aliases: FuzzyCMeans

### ** Examples

# Feature matrix: the four numeric measurements of the iris data set.
iris.mat <- as.matrix(iris[, 1:4])
# Number of clusters = number of distinct class labels in the last column.
k <- length(unique(iris[[ncol(iris)]]))
fcm <- FuzzyCMeans(iris.mat, k, iter.max = 5)
/data/genthat_extracted_code/clusternor/examples/FuzzyCMeans.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
424
r
library(clusternor)

### Name: FuzzyCMeans
### Title: Perform Fuzzy C-means clustering on a data matrix. A soft
###   variant of the kmeans algorithm where each data point are assigned a
###   contribution weight to each cluster
### Aliases: FuzzyCMeans

### ** Examples

# Feature matrix: the four numeric measurements of the iris data set.
iris.mat <- as.matrix(iris[, 1:4])
# Number of clusters = number of distinct class labels in the last column.
k <- length(unique(iris[[ncol(iris)]]))
fcm <- FuzzyCMeans(iris.mat, k, iter.max = 5)
#' Maybe set max_area_height/width parameters if invalid.
#'
#' If either `width` or `height` is NULL, a default divisor pair of the
#' feature length (features_shape[[2]]) is chosen and announced via print().
#' Errors when the supplied pair does not evenly divide that length.
#' Depends on calculate_divisor_pairs(), defined elsewhere in this package.
#' Returns an integer vector c(width, height).
validate_area_parameters <- function(width, height, features_shape) {
  pairs <- calculate_divisor_pairs(features_shape[[2]])

  # Default: pick the "middle" divisor pair when no dimensions are given.
  if (is.null(width) || is.null(height)) {
    n <- length(pairs) %/% 2
    print(sprintf(
      "Setting (width, height) to (%s, %s)",
      pairs[[n]][[1]], pairs[[n]][[2]]
    ))
    height <- pairs[[n]][[1]]
    width <- pairs[[n]][[2]]
  }

  # Reject any (width, height) that does not tile the length exactly.
  # NOTE(review): the message pieces after paste(...) are passed as extra
  # arguments to stop(), which concatenates them -- confirm the closing
  # parenthesis after "got: ") is where the author intended it.
  if(!list(c(width, height)) %in% pairs) {
    stop(paste(
      "(width, height) must be a pair that",
      "divides (length) evenly, got: "),
      paste0("(", width, ", ", height, ")"),
      "\nPlease select (height) from one of the following values:\n",
      list(unlist(pairs)[seq(length(pairs) * 2, by = -2)]))
  }

  c(as.integer(width), as.integer(height))
}

#' Calculates the padding mask based on which embeddings are all zero.
#'
#' emb: Tensor with shape [..., depth]
#'
#' Returns a float Tensor with shape [...]; each element is 1 if its
#' corresponding embedding vector is all zero, and 0 otherwise.
embedding_to_padding <- function(emb) {
  # Sum of absolute values is zero iff every component of the vector is zero.
  emb_sum <- tf$reduce_sum(tf$abs(emb), axis = -1L)
  tf$to_float(tf$equal(emb_sum, 0))
}

#' Reshape input by splitting length over blocks of memory_block_size.
#'
#' x: Tensor [batch, heads, length, depth]
#' x_shape: shape of x as a list (as produced by shape_list2)
#' memory_block_size: integer; length is assumed divisible by it
#'
#' Returns a Tensor
#' [batch, heads, length %/% memory_block_size, memory_block_size, depth].
reshape_by_blocks <- function(x, x_shape, memory_block_size) {
  x <- tf$reshape(x, list(x_shape[[1]], x_shape[[2]],
                          as.integer(x_shape[[3]] %/% memory_block_size),
                          memory_block_size, x_shape[[4]]))
  x
}

#' Reshape x so that the last dimension becomes two dimensions.
split_last_dimension <- function(x, n) {
  x_shape <- shape_list2(x)
  n <- as.integer(n)
  m <- x_shape[[length(x_shape)]]
  # The last dimension must be exactly divisible by n.
  stopifnot(m %% n == 0)
  out <- tf$reshape(x, c(x_shape[-length(x_shape)], list(n, as.integer(m %/% n))))
  out
}

#' Split channels (dimension 2) into multiple heads (becomes dimension 1).
#'
#' x: Tensor shape [batch, length, channels]
#' num_heads: integer
split_heads <- function(x, num_heads) {
  # [batch, length, heads, depth] -> [batch, heads, length, depth]
  out <- tf$transpose(split_last_dimension(x, num_heads),
                      perm = list(0L, 2L, 1L, 3L))
  out
}

#' Reshape x so that the last two dimensions become one.
combine_last_two_dimensions <- function(x) {
  x_shape <- shape_list2(x)
  c(a, b) %<-% x_shape[-c(1:(length(x_shape)-2))]
  tf$reshape(x, c(x_shape[c(1,2)], as.integer(a * b)))
}

#' Inverse of split_heads.
combine_heads <- function(x) {
  combine_last_two_dimensions(tf$transpose(x, list(0L, 2L, 1L, 3L)))
}

# TODO: make this an R6 layer?
#' Takes input tensor of shape [batch, seqlen, channels] and
#' creates query, key, and value tensors to pass to attention
#' mechanisms downstream.
#'
#' query shape [batch, seqlen, filter_depth]
#' key shape [batch, seqlen, filter_depth]
#' value shape [batch, seqlen, filter_depth]
#' @export
create_qkv <- function(x,
                       filter_depth,
                       num_parts = 1L,
                       share_kv = FALSE) {
  x_shape <- shape_list2(x)
  part_depth <- as.integer(floor(filter_depth / num_parts))
  if (!share_kv) {
    # One dense projection produces q, k and v in a single matmul,
    # then split into three equal tensors along the channel axis.
    combined <- layer_dense(
      x,
      filter_depth * 3L,
      use_bias = FALSE,
      name = "qkv_transform")
    c(q, k, v) %<-% tf$split(combined, 3L, axis = 2L)
  } else {
    # Shared key/value weights: q has its own projection; k and v come
    # from one projection applied to the input duplicated along axis 1.
    q <- layer_dense(
      x,
      filter_depth,
      use_bias = FALSE,
      name = "q_transform")
    kv_combined <- layer_dense(
      tf$concat(list(x, x), axis = 1L),
      filter_depth,
      use_bias = FALSE,
      name = "kv_transform")
    c(k, v) %<-% tf$split(kv_combined, list(x_shape[[2]], x_shape[[2]]),
                          axis = 1L)
  }
  # Scale queries by 1/sqrt(part_depth) (standard attention scaling).
  q <- q * tf$pow(tf$cast(part_depth, tf$float32), tf$constant(-0.5))
  c(q, k, v)
}

#' query [batch, length_q, channels]
#' memory [batch, length_m, channels] (optional, usually RNN hidden states)
#' return [batch, length_q, *_depth] (q, k, v) tensors
compute_qkv <- function(query,
                        memory = NULL,
                        key_depth = 64L,
                        value_depth = 64L,
                        q_filter_width = 1L,
                        kv_filter_width = 1L,
                        q_padding = 'same',
                        kv_padding = 'same',
                        vars_3d_num_heads = 0L) {
  # Self-attention when no external memory is given.
  if (is.null(memory)) memory <- query
  q <- compute_attention_component(query, key_depth, q_filter_width,
                                   q_padding, "q", vars_3d_num_heads)
  k <- compute_attention_component(memory, key_depth, kv_filter_width,
                                   kv_padding, "k", vars_3d_num_heads)
  v <- compute_attention_component(memory, value_depth, kv_filter_width,
                                   kv_padding, "v", vars_3d_num_heads)
  c(q, k, v)
}

#' antecedent: Tensor with shape [batch, length, channels]
#' depth: specifying projection layer depth
#' filter_width: how wide should the attention component be
#' padding: must be in: c("valid", "same", "left")
#' vars_3d_num_heads: if > 0, use a per-head 3-D weight variable instead of
#'   a dense/conv layer (only valid with filter_width == 1).
compute_attention_component <- function(antecedent,
                                        depth,
                                        filter_width = 1L,
                                        padding = 'same',
                                        name = 'c',
                                        vars_3d_num_heads = 0L) {
  if (vars_3d_num_heads > 0) {
    # 3-D variable path: explicit [input_depth, heads, depth/heads] weight.
    stopifnot(filter_width == 1)
    input_shape <- shape_list2(antecedent)
    input_depth <- input_shape[[length(input_shape)]]
    stddev <- input_depth ^ (-0.5)
    depth_per_head <- depth %/% vars_3d_num_heads
    # Queries get an extra 1/sqrt(depth_per_head) scaling baked into init.
    if ("q" %in% name) stddev %<>% `*`(depth_per_head ^ (-0.5))
    var <- tf$Variable(
      tf$random$normal(
        shape = list(
          input_depth,
          vars_3d_num_heads,
          as.integer(depth %/% vars_3d_num_heads)
        ),
        stddev = stddev,
        dtype = antecedent$dtype,
        name = name
      ),
      name = name
    )
    # Original TF1-style variable creation, kept for reference:
    # var <- tf$compat$v1$get_variable(
    #   name = name,
    #   shape = list(
    #     input_depth,
    #     vars_3d_num_heads,
    #     as.integer(depth %/% vars_3d_num_heads)
    #   ),
    #   initializer = tf$random_normal_initializer(stddev = stddev),
    #   dtype = antecedent$dtype
    # )
    var <- var %>% tf$reshape(shape = list(input_depth, depth))
    return(tf$tensordot(antecedent, var, axes = 1L))
  }
  # Standard path: dense for a 1x1 "filter", conv1d for wider windows.
  out <- if (filter_width == 1L)
    layer_dense(antecedent, depth, use_bias = FALSE, name = name)
  else
    layer_conv_1d(antecedent, depth, filter_width, padding = padding, name = name)
  out
}

#' Pools for an area in features_2d.
.pool_one_shape <- function(features_2d,
                            area_width,
                            area_height,
                            batch,
                            width,
                            height,
                            depth,
                            fn = tf$reduce_max,
                            name = NULL) {
  # One shifted view per (y_shift, x_shift) offset inside the area window;
  # reducing across the stacked views pools each area of the given shape.
  images <- vector("list", area_height * area_width)
  i <- 1L
  for (y_shift in seq(0L, area_height-1L)) {
    img_height <- tf$maximum(height - area_height + 1L + y_shift, 0L)
    for (x_shift in seq(0L, area_width-1L)) {
      img_width <- tf$maximum(width - area_width + 1L + x_shift, 0L)
      # Shifted slice of the feature map for this offset.
      area <- features_2d[ , y_shift:img_height, x_shift:img_width, ,
                          style = "python"]
      flatten_area <- tf$reshape(area, list(batch, -1L, depth, 1L))
      images[[i]] <- flatten_area
      i <- i + 1L
    }
  }
  # Stack the shifted views on a new axis and reduce across it with `fn`.
  img_tensor <- tf$concat(images, axis = 3L)
  max_tensor <- fn(img_tensor, axis = 3L)
  max_tensor
}

#' Pools for each area based on a given pooling function (fn)
#' @export
#'
#' @param features a Tensor in a shape of [batch_size, height * width, depth]
#' @param max_area_width the max width allowed for an area.
#' @param max_area_height the max height allowed for an area.
#' @param height the height of the image (default 1 for the 1-D case).
#' @param fn the TF function for the pooling.
#' @param name the namescope.
#'
#' @return pool_results: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_heights: A Tensor of shape [batch_size, num_areas, 1]
#' @return area_widths: A Tensor of shape [batch_size, num_areas, 1]
basic_pool <- function(features,
                       max_area_width,
                       max_area_height = 1L,
                       height = 1L,
                       fn = tf$reduce_max,
                       name = NULL) {
  feature_shape <- shape_list2(features)
  batch <- feature_shape[[1]]
  length <- feature_shape[[length(feature_shape) - 1L]]
  depth <- feature_shape[[length(feature_shape)]]
  height <- as.integer(height)
  width <- as.integer(length %/% height)
  c(width, height) %<-% validate_area_parameters(width, height, feature_shape)
  # if (is.null(max_area_width) || is.null(max_area_height))
  #   c(max_area_width, max_area_height) %<-% c(width %/% 2, height %/% 2)
  # Recover the 2-D spatial layout from the flattened length axis.
  features_2d <- tf$reshape(features, list(batch, height, width, depth))
  height_list <- list()
  width_list <- list()
  pool_list <- list()
  # Integer "ones" map used to record each area's height/width per position.
  size_tensor <- tf$ones_like(features_2d[, , , 0L, style = "python"],
                              dtype = tf$int32)
  i <- 1L
  # Pool every area shape up to (max_area_height x max_area_width).
  for (area_height in seq(0L, max_area_height - 1L)) {
    for (area_width in seq(0L, max_area_width - 1L)) {
      pool_tensor = .pool_one_shape(
        features_2d,
        area_width = area_width + 1L,
        area_height = area_height + 1L,
        batch = batch,
        width = width,
        height = height,
        depth = depth,
        fn = fn
      )
      pool_list[[i]] <- tf$reshape(pool_tensor, list(batch, -1L, depth))
      h <- size_tensor[, area_height:NULL, area_width:NULL, style = "python"] *
        tf$cast((area_height + 1L), tf$int32)
      w <- size_tensor[, area_height:NULL, area_width:NULL, style = "python"] *
        tf$cast((area_width + 1), tf$int32)
      height_list[[i]] <- tf$reshape(h, list(batch, -1L))
      width_list[[i]] <- tf$reshape(w, list(batch, -1L))
      i <- i + 1L
    }
  }
  # Concatenate all area shapes along the "num_areas" axis.
  pool_results <- tf$concat(pool_list, axis = 1L)
  area_heights <- tf$expand_dims(tf$concat(height_list, axis = 1L), 2L)
  area_widths <- tf$expand_dims(tf$concat(width_list, axis = 1L), 2L)
  c(pool = pool_results, heights = area_heights, widths = area_widths)
}

#' Compute area sums for features
#'
#' Builds a 2-D summed-area table (integral image) so each area sum can be
#' read from four corner lookups instead of an explicit reduction.
#'
#' @param features a Tensor in a shape of [batch_size, height * width, depth].
#' @param max_area_width the max width allowed for an area.
#' @param max_area_height the max height allowed for an area. (default for 1D case)
#' @param .height the height of the image. (default for 1D case)
#' @return sum_image: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_heights: A Tensor of shape [batch_size, num_areas, 1]
#' @return area_widths: A Tensor of shape [batch_size, num_areas, 1]
.compute_sum_image <- function(features,
                               max_area_width,
                               max_area_height = 1L,
                               .height = 1L) {
  features_shape <- shape_list2(features)
  batch <- features_shape[[1]]
  length <- features_shape[[length(features_shape)-1L]]
  depth <- features_shape[[length(features_shape)]]
  .width <- length %/% .height
  c(.width, .height) %<-% validate_area_parameters(.width, .height,
                                                   features_shape)
  features_2d <- tf$reshape(features, list(batch, .height, .width, depth))
  # Integral image: cumulative sums along width then height.
  width_cum <- tf$cumsum(features_2d, axis = -2L, name = "compute_integral_h")
  integral_image <- tf$cumsum(width_cum, axis = -3L,
                              name = "compute_integral_v")
  # Pad with a zero row/column so corner lookups at the edges are valid.
  padded_image <- tf$pad(integral_image,
                         list(c(0L, 0L), c(1L, 0L), c(1L, 0L), c(0L, 0L)),
                         constant_values = 0L)
  length.out <- max_area_width * max_area_height
  height_list <- vector("list", length.out)
  width_list <- vector("list", length.out)
  dst_images <- vector("list", length.out)
  src_images_diag <- vector("list", length.out)
  src_images_h <- vector("list", length.out)
  src_images_v <- vector("list", length.out)
  image_shape <- shape_list2(padded_image)
  size_tensor <- tf$ones(shape = image_shape[1:length(image_shape)-1],
                         dtype = tf$int32)
  i <- 1L
  for (height in seq(0L, max_area_height-1L)) {
    for (width in seq(0L, max_area_width-1L)) {
      # Four corner lookups of the summed-area table for this area shape.
      dst_images[[i]] <-
        padded_image[, `(height + 1):`, `(width + 1):`, , style="python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_diag[[i]] <-
        padded_image[, `:-height - 1`, `:-width - 1`, , style="python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_h[[i]] <-
        padded_image[, `(height + 1):`, `:-width - 1`, , style = "python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_v[[i]] <-
        padded_image[, `:-height - 1`, `width + 1:`, , style = "python"] %>%
        tf$reshape(list(batch, -1L, depth))
      height_list[[i]] <- tf$reshape(
        size_tensor[, `height + 1:`, `width + 1:`, style = "python"] *
          (height + 1L),
        list(batch, -1L))
      # NOTE(review): this multiplies by (height + 1L), identical to the
      # height_list line above -- it looks like a copy-paste bug and should
      # probably be (width + 1L) (compare basic_pool, which uses
      # area_width + 1 here). Confirm before relying on area_widths.
      width_list[[i]] <- tf$reshape(
        size_tensor[, `height + 1:`, `width + 1:`, style = "python"] *
          (height + 1L),
        list(batch, -1L))
      # print(paste("dst: ", dst_images[[i]]))
      # print(paste("src_diag:", src_images_diag[[i]]))
      # print(paste("src_v: ", src_images_v[[i]]))
      # print(paste("src_h: ", src_images_h[[i]]))
      # print("")
      i <- i + 1L
    }
  }
  # Summed-area identity: sum = dst + diag - vertical - horizontal.
  sum_image <- tf$subtract(
    tf$concat(dst_images, axis = 1L) + tf$concat(src_images_diag, axis = 1L),
    tf$concat(src_images_v, axis = 1L) + tf$concat(src_images_h, axis = 1L))
  area_heights <- tf$expand_dims(tf$concat(height_list, axis = 1L), 2L)
  area_widths <- tf$expand_dims(tf$concat(width_list, axis = 1L), 2L)
  c(sum = sum_image, heights = area_heights, widths = area_widths)
}

#' Computes features for each area.
#'
#' @return area_mean: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_std: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_sum: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_heights: A Tensor of shape [batch_size, num_areas, 1]
#' @return area_widths: A Tensor of shape [batch_size, num_areas, 1]
compute_area_features <- function(features,
                                  max_area_width = NULL,
                                  max_area_height = NULL,
                                  epsilon = 1e-6) {
  # Sums of x and x^2 per area, via the integral-image helper.
  c(area_sum, area_heights, area_widths) %<-%
    .compute_sum_image(features, max_area_width, max_area_height)
  c(area_sq_sum, unused1, unused2) %<-%
    .compute_sum_image(tf$pow(features, 2L), max_area_width, max_area_height)
  sizes <- tf$multiply(area_heights, area_widths) %>%
    tf$cast(dtype = tf$float32)
  area_mean <- tf$math$divide(area_sum, sizes)
  sq_area_mean <- tf$math$divide(area_sq_sum, sizes)
  # Var = E[x^2] - E[x]^2; abs() + epsilon guards against tiny negatives
  # from floating-point cancellation before the sqrt.
  area_variance <- tf$subtract(sq_area_mean, tf$pow(area_mean, 2L))
  area_std <- tf$sqrt(tf$abs(area_variance) + epsilon)
  c(mean = area_mean, stddev = area_std, sum = area_sum,
    heights = area_heights, widths = area_widths)
}

#' Computes the key for each area.
#'
#' @param features a Tensor in a shape of [batch_size, height * width, depth].
#' @param max_area_width the max width allowed for an area.
#' @param max_area_height the max height allowed for an area.
#' @param height the height of the image.
#' @param mode whether to combine different area features or only use
#'   the vector mean of each area, which can be "mean", "concat", "sum",
#'   "sample_concat", and "sample_sum".
#' @return Tensor of shape [batch, num_areas, depth]
compute_area_key <- function(features,
                             max_area_width,
                             max_area_height = 1L,
                             height = 1L,
                             mode = "sample_concat",
                             hidden_activation = "relu",
                             training = TRUE,
                             name = NULL) {
  stopifnot(mode %in% c("mean", "max", "concat", "sum", "sample",
                        "sample_concat", "sample_sum", "max_concat"))
  if (mode %in% c("concat", "max_concat"))
    warning(sprintf("Mode '%s' uses tf$layers$dense and is deprecated", mode))
  # NOTE(review): compute_area_features()'s fourth parameter is `epsilon`,
  # so `height` is being passed as epsilon here -- this looks like a bug;
  # presumably the call should name the arguments. Confirm with the author.
  c(area_mean, area_std, unused, area_heights, area_widths) %<-%
    compute_area_features(features, max_area_width, max_area_height, height)
  if (mode == "mean") return(area_mean)
  else if (mode == "max") {
    c(area_max, unused, unused2) %<-%
      basic_pool(features, max_area_width, max_area_height, height)
    return(area_max)
  } else if (mode == "sample") {
    # Reparameterized sample: mean + std * N(0, 1), only during training.
    if (training)
      area_mean <- area_mean + (area_std * tf$random$normal(tf$shape(area_std)))
    return(area_mean)
  }
  depth <- tail(shape_list2(area_mean), 1)[[1]]
  # Learned embeddings of each area's height and width (half depth each).
  height_embed <- tf$nn$embedding_lookup(
    params = tf$Variable(
      tf$zeros(shape = list(max_area_height, depth %/% 2)),
      name = "area_height_emb"),
    ids = area_heights[, , 0, style = "python"] - 1L
  )
  # NOTE(review): the width embedding is looked up with `area_heights`,
  # not `area_widths` -- likely a copy-paste bug; confirm before relying
  # on the size embedding.
  width_embed <- tf$nn$embedding_lookup(
    params = tf$Variable(
      tf$zeros(shape = list(max_area_width, depth %/% 2)),
      name = "area_width_emb"),
    ids = area_heights[, , 0, style = "python"] - 1L
  )
  size_embed <- tf$concat(list(height_embed, width_embed), -1L)
  if (mode == "concat")
    feature_concat <- tf$concat(list(area_mean, area_std, size_embed), -1L)
  else if (mode == "max_concat") {
    area_max <- basic_pool(features, max_area_width, max_area_height,
                           height)[[1]]
    feature_concat <- tf$concat(list(area_max, size_embed), -1L)
  } else if (mode == "sum")
    feature_concat <- size_embed + area_mean + area_std
  else if (mode == "sample_concat") {
    if (training)
      area_mean <- area_mean + (area_std * tf$random$normal(tf$shape(area_std)))
    feature_concat <- area_mean + size_embed
  } else if (mode == "sample_sum") {
    # NOTE(review): here the noise is MULTIPLIED into the mean
    # (area_mean * ...), unlike the ADDITIVE form used in "sample" and
    # "sample_concat" -- possibly intentional, possibly a typo. Confirm.
    if (training)
      area_mean <- area_mean * (area_std * tf$random$normal(tf$shape(area_std)))
    feature_concat <- area_mean + size_embed
  } else stop(sprintf("Unsupported area key mode %s", mode))
  feature_hidden <- layer_dense(feature_concat, depth,
                                activation = hidden_activation)
  # Shape issue with calling keras_layer vs tf dense layer?
  if (mode %in% c("concat", "max_concat"))
    area_key <- tf$layers$dense(feature_hidden, depth)
  else
    area_key <- layer_dense(feature_hidden, depth)
  area_key
}
/R/attention-utils.R
no_license
ifrit98/attention-layers
R
false
false
19,557
r
#' Maybe set max_area_height/width parameters if invalid.
#'
#' If either `width` or `height` is NULL, a default divisor pair of the
#' feature length (features_shape[[2]]) is chosen and announced via print().
#' Errors when the supplied pair does not evenly divide that length.
#' Depends on calculate_divisor_pairs(), defined elsewhere in this package.
#' Returns an integer vector c(width, height).
validate_area_parameters <- function(width, height, features_shape) {
  pairs <- calculate_divisor_pairs(features_shape[[2]])

  # Default: pick the "middle" divisor pair when no dimensions are given.
  if (is.null(width) || is.null(height)) {
    n <- length(pairs) %/% 2
    print(sprintf(
      "Setting (width, height) to (%s, %s)",
      pairs[[n]][[1]], pairs[[n]][[2]]
    ))
    height <- pairs[[n]][[1]]
    width <- pairs[[n]][[2]]
  }

  # Reject any (width, height) that does not tile the length exactly.
  # NOTE(review): the message pieces after paste(...) are passed as extra
  # arguments to stop(), which concatenates them -- confirm the closing
  # parenthesis after "got: ") is where the author intended it.
  if(!list(c(width, height)) %in% pairs) {
    stop(paste(
      "(width, height) must be a pair that",
      "divides (length) evenly, got: "),
      paste0("(", width, ", ", height, ")"),
      "\nPlease select (height) from one of the following values:\n",
      list(unlist(pairs)[seq(length(pairs) * 2, by = -2)]))
  }

  c(as.integer(width), as.integer(height))
}

#' Calculates the padding mask based on which embeddings are all zero.
#'
#' emb: Tensor with shape [..., depth]
#'
#' Returns a float Tensor with shape [...]; each element is 1 if its
#' corresponding embedding vector is all zero, and 0 otherwise.
embedding_to_padding <- function(emb) {
  # Sum of absolute values is zero iff every component of the vector is zero.
  emb_sum <- tf$reduce_sum(tf$abs(emb), axis = -1L)
  tf$to_float(tf$equal(emb_sum, 0))
}

#' Reshape input by splitting length over blocks of memory_block_size.
#'
#' x: Tensor [batch, heads, length, depth]
#' x_shape: shape of x as a list (as produced by shape_list2)
#' memory_block_size: integer; length is assumed divisible by it
#'
#' Returns a Tensor
#' [batch, heads, length %/% memory_block_size, memory_block_size, depth].
reshape_by_blocks <- function(x, x_shape, memory_block_size) {
  x <- tf$reshape(x, list(x_shape[[1]], x_shape[[2]],
                          as.integer(x_shape[[3]] %/% memory_block_size),
                          memory_block_size, x_shape[[4]]))
  x
}

#' Reshape x so that the last dimension becomes two dimensions.
split_last_dimension <- function(x, n) {
  x_shape <- shape_list2(x)
  n <- as.integer(n)
  m <- x_shape[[length(x_shape)]]
  # The last dimension must be exactly divisible by n.
  stopifnot(m %% n == 0)
  out <- tf$reshape(x, c(x_shape[-length(x_shape)], list(n, as.integer(m %/% n))))
  out
}

#' Split channels (dimension 2) into multiple heads (becomes dimension 1).
#'
#' x: Tensor shape [batch, length, channels]
#' num_heads: integer
split_heads <- function(x, num_heads) {
  # [batch, length, heads, depth] -> [batch, heads, length, depth]
  out <- tf$transpose(split_last_dimension(x, num_heads),
                      perm = list(0L, 2L, 1L, 3L))
  out
}

#' Reshape x so that the last two dimensions become one.
combine_last_two_dimensions <- function(x) {
  x_shape <- shape_list2(x)
  c(a, b) %<-% x_shape[-c(1:(length(x_shape)-2))]
  tf$reshape(x, c(x_shape[c(1,2)], as.integer(a * b)))
}

#' Inverse of split_heads.
combine_heads <- function(x) {
  combine_last_two_dimensions(tf$transpose(x, list(0L, 2L, 1L, 3L)))
}

# TODO: make this an R6 layer?
#' Takes input tensor of shape [batch, seqlen, channels] and
#' creates query, key, and value tensors to pass to attention
#' mechanisms downstream.
#'
#' query shape [batch, seqlen, filter_depth]
#' key shape [batch, seqlen, filter_depth]
#' value shape [batch, seqlen, filter_depth]
#' @export
create_qkv <- function(x,
                       filter_depth,
                       num_parts = 1L,
                       share_kv = FALSE) {
  x_shape <- shape_list2(x)
  part_depth <- as.integer(floor(filter_depth / num_parts))
  if (!share_kv) {
    # One dense projection produces q, k and v in a single matmul,
    # then split into three equal tensors along the channel axis.
    combined <- layer_dense(
      x,
      filter_depth * 3L,
      use_bias = FALSE,
      name = "qkv_transform")
    c(q, k, v) %<-% tf$split(combined, 3L, axis = 2L)
  } else {
    # Shared key/value weights: q has its own projection; k and v come
    # from one projection applied to the input duplicated along axis 1.
    q <- layer_dense(
      x,
      filter_depth,
      use_bias = FALSE,
      name = "q_transform")
    kv_combined <- layer_dense(
      tf$concat(list(x, x), axis = 1L),
      filter_depth,
      use_bias = FALSE,
      name = "kv_transform")
    c(k, v) %<-% tf$split(kv_combined, list(x_shape[[2]], x_shape[[2]]),
                          axis = 1L)
  }
  # Scale queries by 1/sqrt(part_depth) (standard attention scaling).
  q <- q * tf$pow(tf$cast(part_depth, tf$float32), tf$constant(-0.5))
  c(q, k, v)
}

#' query [batch, length_q, channels]
#' memory [batch, length_m, channels] (optional, usually RNN hidden states)
#' return [batch, length_q, *_depth] (q, k, v) tensors
compute_qkv <- function(query,
                        memory = NULL,
                        key_depth = 64L,
                        value_depth = 64L,
                        q_filter_width = 1L,
                        kv_filter_width = 1L,
                        q_padding = 'same',
                        kv_padding = 'same',
                        vars_3d_num_heads = 0L) {
  # Self-attention when no external memory is given.
  if (is.null(memory)) memory <- query
  q <- compute_attention_component(query, key_depth, q_filter_width,
                                   q_padding, "q", vars_3d_num_heads)
  k <- compute_attention_component(memory, key_depth, kv_filter_width,
                                   kv_padding, "k", vars_3d_num_heads)
  v <- compute_attention_component(memory, value_depth, kv_filter_width,
                                   kv_padding, "v", vars_3d_num_heads)
  c(q, k, v)
}

#' antecedent: Tensor with shape [batch, length, channels]
#' depth: specifying projection layer depth
#' filter_width: how wide should the attention component be
#' padding: must be in: c("valid", "same", "left")
#' vars_3d_num_heads: if > 0, use a per-head 3-D weight variable instead of
#'   a dense/conv layer (only valid with filter_width == 1).
compute_attention_component <- function(antecedent,
                                        depth,
                                        filter_width = 1L,
                                        padding = 'same',
                                        name = 'c',
                                        vars_3d_num_heads = 0L) {
  if (vars_3d_num_heads > 0) {
    # 3-D variable path: explicit [input_depth, heads, depth/heads] weight.
    stopifnot(filter_width == 1)
    input_shape <- shape_list2(antecedent)
    input_depth <- input_shape[[length(input_shape)]]
    stddev <- input_depth ^ (-0.5)
    depth_per_head <- depth %/% vars_3d_num_heads
    # Queries get an extra 1/sqrt(depth_per_head) scaling baked into init.
    if ("q" %in% name) stddev %<>% `*`(depth_per_head ^ (-0.5))
    var <- tf$Variable(
      tf$random$normal(
        shape = list(
          input_depth,
          vars_3d_num_heads,
          as.integer(depth %/% vars_3d_num_heads)
        ),
        stddev = stddev,
        dtype = antecedent$dtype,
        name = name
      ),
      name = name
    )
    # Original TF1-style variable creation, kept for reference:
    # var <- tf$compat$v1$get_variable(
    #   name = name,
    #   shape = list(
    #     input_depth,
    #     vars_3d_num_heads,
    #     as.integer(depth %/% vars_3d_num_heads)
    #   ),
    #   initializer = tf$random_normal_initializer(stddev = stddev),
    #   dtype = antecedent$dtype
    # )
    var <- var %>% tf$reshape(shape = list(input_depth, depth))
    return(tf$tensordot(antecedent, var, axes = 1L))
  }
  # Standard path: dense for a 1x1 "filter", conv1d for wider windows.
  out <- if (filter_width == 1L)
    layer_dense(antecedent, depth, use_bias = FALSE, name = name)
  else
    layer_conv_1d(antecedent, depth, filter_width, padding = padding, name = name)
  out
}

#' Pools for an area in features_2d.
.pool_one_shape <- function(features_2d, area_width, area_height, batch, width, height, depth, fn = tf$reduce_max, name = NULL) { images <- vector("list", area_height * area_width) i <- 1L for (y_shift in seq(0L, area_height-1L)) { img_height <- tf$maximum(height - area_height + 1L + y_shift, 0L) for (x_shift in seq(0L, area_width-1L)) { img_width <- tf$maximum(width - area_width + 1L + x_shift, 0L) area <- features_2d[ , y_shift:img_height, x_shift:img_width, , style = "python"] flatten_area <- tf$reshape(area, list(batch, -1L, depth, 1L)) images[[i]] <- flatten_area i <- i + 1L } } img_tensor <- tf$concat(images, axis = 3L) max_tensor <- fn(img_tensor, axis = 3L) max_tensor } #' Pools for each area based on a given pooling function (fn) #' @export #' #' @param features a Tensor in a shape of [batch_size, height * width, depth] #' @param max_area_width the max width allowed for an area. #' @param max_area_height the max height allowed for an area. #' @param fn the TF function for the pooling. #' @param name the namescope. 
#'
#' @return pool_results: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_heights: A Tensor of shape [batch_size, num_areas, 1]
#' @return area_widths: A Tensor of shape [batch_size, num_areas, 1]
basic_pool <- function(features, max_area_width, max_area_height = 1L,
                       height = 1L, fn = tf$reduce_max, name = NULL) {
  feature_shape <- shape_list2(features)
  batch <- feature_shape[[1]]
  # NOTE: `length` shadows base::length() as a value; calls such as
  # length(feature_shape) still resolve to the function in R.
  length <- feature_shape[[length(feature_shape) - 1L]]
  depth <- feature_shape[[length(feature_shape)]]
  height <- as.integer(height)
  width <- as.integer(length %/% height)
  c(width, height) %<-% validate_area_parameters(width, height, feature_shape)
  # if (is.null(max_area_width) || is.null(max_area_height))
  #   c(max_area_width, max_area_height) %<-% c(width %/% 2, height %/% 2)
  features_2d <- tf$reshape(features, list(batch, height, width, depth))
  height_list <- list()
  width_list <- list()
  pool_list <- list()
  # Ones with the spatial layout of features_2d; used below to materialize
  # each area's height/width as tensors of matching shape.
  size_tensor <- tf$ones_like(features_2d[, , , 0L, style = "python"],
                              dtype = tf$int32)
  i <- 1L
  for (area_height in seq(0L, max_area_height - 1L)) {
    for (area_width in seq(0L, max_area_width - 1L)) {
      # `<-` instead of `=` for consistency with the rest of the file.
      pool_tensor <- .pool_one_shape(
        features_2d,
        area_width = area_width + 1L,
        area_height = area_height + 1L,
        batch = batch,
        width = width,
        height = height,
        depth = depth,
        fn = fn
      )
      pool_list[[i]] <- tf$reshape(pool_tensor, list(batch, -1L, depth))
      h <- size_tensor[, area_height:NULL, area_width:NULL, style = "python"] *
        tf$cast((area_height + 1L), tf$int32)
      # Integer literal 1L (was plain 1) for consistency with the height
      # branch above; the surrounding cast made the double harmless but
      # inconsistent.
      w <- size_tensor[, area_height:NULL, area_width:NULL, style = "python"] *
        tf$cast((area_width + 1L), tf$int32)
      height_list[[i]] <- tf$reshape(h, list(batch, -1L))
      width_list[[i]] <- tf$reshape(w, list(batch, -1L))
      i <- i + 1L
    }
  }
  pool_results <- tf$concat(pool_list, axis = 1L)
  area_heights <- tf$expand_dims(tf$concat(height_list, axis = 1L), 2L)
  area_widths <- tf$expand_dims(tf$concat(width_list, axis = 1L), 2L)
  c(pool = pool_results, heights = area_heights, widths = area_widths)
}

#' Compute area sums for features
#' @param
#' features: a Tensor in a shape of [batch_size, height * width, depth].
#' @param max_area_width the max width allowed for an area.
#' @param max_area_height the max height allowed for an area. (default for 1D case)
#' @param height the height of the image. (default for 1D case)
#' @param name the namescope.
#' @return sum_image
#' @return area_heights
#' @return area_widths
.compute_sum_image <- function(features, max_area_width, max_area_height = 1L,
                               .height = 1L) {
  features_shape <- shape_list2(features)
  batch <- features_shape[[1]]
  length <- features_shape[[length(features_shape)-1L]]
  depth <- features_shape[[length(features_shape)]]
  .width <- length %/% .height
  c(.width, .height) %<-% validate_area_parameters(.width, .height, features_shape)
  features_2d <- tf$reshape(features, list(batch, .height, .width, depth))
  # Summed-area table (integral image): cumulative sums along width, then
  # along height.
  width_cum <- tf$cumsum(features_2d, axis = -2L, name = "compute_integral_h")
  integral_image <- tf$cumsum(width_cum, axis = -3L, name = "compute_integral_v")
  # Zero-pad one leading row and column so every area sum can be written as
  # dst + diag - vertical - horizontal corner lookups without edge cases.
  padded_image <- tf$pad(integral_image,
                         list(c(0L, 0L), c(1L, 0L), c(1L, 0L), c(0L, 0L)),
                         constant_values = 0L)
  length.out <- max_area_width * max_area_height
  height_list <- vector("list", length.out)
  width_list <- vector("list", length.out)
  dst_images <- vector("list", length.out)
  src_images_diag <- vector("list", length.out)
  src_images_h <- vector("list", length.out)
  src_images_v <- vector("list", length.out)
  image_shape <- shape_list2(padded_image)
  # All dims except the channel axis; seq_len() replaces the fragile
  # `1:length(image_shape)-1`, which only worked because indexing with 0
  # happens to be a no-op in R.
  size_tensor <- tf$ones(shape = image_shape[seq_len(length(image_shape) - 1L)],
                         dtype = tf$int32)
  i <- 1L
  for (height in seq(0L, max_area_height-1L)) {
    for (width in seq(0L, max_area_width-1L)) {
      # Four corner lookups of the padded integral image for this area shape.
      dst_images[[i]] <- padded_image[, `(height + 1):`, `(width + 1):`, ,
                                      style="python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_diag[[i]] <- padded_image[, `:-height - 1`, `:-width - 1`, ,
                                           style="python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_h[[i]] <- padded_image[, `(height + 1):`, `:-width - 1`, ,
                                        style = "python"] %>%
        tf$reshape(list(batch, -1L, depth))
      src_images_v[[i]] <- padded_image[, `:-height - 1`, `width + 1:`, ,
                                        style = "python"] %>%
        tf$reshape(list(batch, -1L, depth))
      height_list[[i]] <- tf$reshape(
        size_tensor[, `height + 1:`, `width + 1:`, style = "python"] *
          (height + 1L),
        list(batch, -1L))
      # BUG FIX: the width list previously multiplied by (height + 1L), so
      # every area was reported as square; record the actual area width.
      width_list[[i]] <- tf$reshape(
        size_tensor[, `height + 1:`, `width + 1:`, style = "python"] *
          (width + 1L),
        list(batch, -1L))
      i <- i + 1L
    }
  }
  # Integral-image identity: area sum = dst + diag - vertical - horizontal.
  sum_image <- tf$subtract(
    tf$concat(dst_images, axis = 1L) + tf$concat(src_images_diag, axis = 1L),
    tf$concat(src_images_v, axis = 1L) + tf$concat(src_images_h, axis = 1L))
  area_heights <- tf$expand_dims(tf$concat(height_list, axis = 1L), 2L)
  area_widths <- tf$expand_dims(tf$concat(width_list, axis = 1L), 2L)
  c(sum = sum_image, heights = area_heights, widths = area_widths)
}

#' Computes features for each area.
#' @return area_mean: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_std: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_sum: A Tensor of shape [batch_size, num_areas, depth]
#' @return area_heights: A Tensor of shape [batch_size, num_areas, 1]
#' @return area_widths: A Tensor of shape [batch_size, num_areas, 1]
compute_area_features <- function(features, max_area_width = NULL,
                                  max_area_height = NULL, height = 1L,
                                  epsilon = 1e-6) {
  # BUG FIX: `height` was missing from the signature, so compute_area_key's
  # positional call compute_area_features(features, max_area_width,
  # max_area_height, height) silently overwrote `epsilon` with the image
  # height, and the 2D layout was never forwarded to .compute_sum_image.
  c(area_sum, area_heights, area_widths) %<-%
    .compute_sum_image(features, max_area_width, max_area_height, height)
  c(area_sq_sum, unused1, unused2) %<-%
    .compute_sum_image(tf$pow(features, 2L), max_area_width, max_area_height,
                       height)
  # Element count per area, used to convert sums into means.
  sizes <- tf$multiply(area_heights, area_widths) %>%
    tf$cast(dtype = tf$float32)
  area_mean <- tf$math$divide(area_sum, sizes)
  sq_area_mean <- tf$math$divide(area_sq_sum, sizes)
  # Var(X) = E[X^2] - E[X]^2; abs() + epsilon guards the sqrt against tiny
  # negative values caused by floating-point cancellation.
  area_variance <- tf$subtract(sq_area_mean, tf$pow(area_mean, 2L))
  area_std <- tf$sqrt(tf$abs(area_variance) + epsilon)
  c(mean = area_mean, stddev = area_std, sum = area_sum,
    heights = area_heights, widths = area_widths)
}

#' Computes the key for each area.
#'
#' @param features a Tensor in a shape of [batch_size, height * width, depth].
#' @param max_area_width: the max width allowed for an area.
#' @param max_area_height: the max height allowed for an area.
#' @param height: the height of the image.
#' @param mode: whether to combine different area features or only use
#'   the vector mean of each area, which can be "mean", "concat", "sum",
#'   "sample_concat", and "sample_sum".
#' @return Tensor of shape [batch, num_areas, depth]
compute_area_key <- function(features, max_area_width, max_area_height = 1L,
                             height = 1L, mode = "sample_concat",
                             hidden_activation = "relu", training = TRUE,
                             name = NULL) {
  stopifnot(mode %in% c("mean", "max", "concat", "sum", "sample",
                        "sample_concat", "sample_sum", "max_concat"))
  if (mode %in% c("concat", "max_concat"))
    warning(sprintf("Mode '%s' uses tf$layers$dense and is deprecated", mode))
  c(area_mean, area_std, unused, area_heights, area_widths) %<-%
    compute_area_features(features, max_area_width, max_area_height, height)
  if (mode == "mean")
    return(area_mean)
  else if (mode == "max") {
    c(area_max, unused, unused2) %<-%
      basic_pool(features, max_area_width, max_area_height, height)
    return(area_max)
  } else if (mode == "sample") {
    # Reparameterized sample from N(mean, std), training time only.
    if (training)
      area_mean <- area_mean + (area_std * tf$random$normal(tf$shape(area_std)))
    return(area_mean)
  }
  depth <- tail(shape_list2(area_mean), 1)[[1]]
  # Learned embeddings of each area's height and width (zero-initialized);
  # indices are 1-based sizes shifted to 0-based lookup ids.
  height_embed <- tf$nn$embedding_lookup(
    params = tf$Variable(
      tf$zeros(shape = list(max_area_height, depth %/% 2)),
      name = "area_height_emb"),
    ids = area_heights[, , 0, style = "python"] - 1L
  )
  # BUG FIX: the width embedding previously looked up area_heights, so the
  # width half of size_embed duplicated the height information.
  width_embed <- tf$nn$embedding_lookup(
    params = tf$Variable(
      tf$zeros(shape = list(max_area_width, depth %/% 2)),
      name = "area_width_emb"),
    ids = area_widths[, , 0, style = "python"] - 1L
  )
  size_embed <- tf$concat(list(height_embed, width_embed), -1L)
  if (mode == "concat")
    feature_concat <- tf$concat(list(area_mean, area_std, size_embed), -1L)
  else if (mode == "max_concat") {
    area_max <- basic_pool(features, max_area_width, max_area_height, height)[[1]]
    feature_concat <- tf$concat(list(area_max, size_embed), -1L)
  } else if (mode == "sum")
    feature_concat <- size_embed + area_mean + area_std
  else if (mode == "sample_concat") {
    if (training)
      area_mean <- area_mean + (area_std * tf$random$normal(tf$shape(area_std)))
    # BUG FIX: "sample_concat" concatenates the size embedding; it previously
    # summed, making it indistinguishable from "sample_sum".  This matches the
    # tensor2tensor area_attention reference implementation.
    feature_concat <- tf$concat(list(area_mean, size_embed), -1L)
  } else if (mode == "sample_sum") {
    # BUG FIX: noise is added (was multiplied), consistent with the "sample"
    # and "sample_concat" branches above.
    if (training)
      area_mean <- area_mean + (area_std * tf$random$normal(tf$shape(area_std)))
    feature_concat <- area_mean + size_embed
  } else
    stop(sprintf("Unsupported area key mode %s", mode))
  feature_hidden <- layer_dense(feature_concat, depth,
                                activation = hidden_activation)
  # Shape issue with calling keras_layer vs tf dense layer?
  if (mode %in% c("concat", "max_concat"))
    area_key <- tf$layers$dense(feature_hidden, depth)
  else
    area_key <- layer_dense(feature_hidden, depth)
  area_key
}
##
## coerce
##

# Helper for common logic underlying .as.matrix() and .as.sparseMatrix()
#
# Reads the CSC-style (indptr / indices / data) HDF5 layout backing `x` and
# returns the non-zero values restricted to the object's current row/column
# selection as a triplet list:
#   values - the stored counts,
#   ridx   - 1-based row positions within .rowidx(x),
#   cidx   - 1-based column positions within .colidx(x).
.values_and_indices <- function(x, ..., withDimnames=TRUE) {
    stopifnot(
        is.logical(withDimnames), length(withDimnames) == 1L,
        !is.na(withDimnames)
    )

    h5f <- H5Fopen(.h5path(x))
    on.exit(H5Fclose(h5f))
    ## maximum index; needed when selecting last individual
    h5indptr <- H5Dopen(h5f, .indptr(x))
    on.exit(H5Dclose(h5indptr), add=TRUE)
    indlen <- H5Sget_simple_extent_dims(H5Dget_space(h5indptr))$size

    ## get all rows for selected columns
    cidx <- .colidx(x)
    # bit64conversion = "double": indptr offsets can exceed 32-bit range.
    startidx <- h5read(
        h5f, .indptr(x), list(cidx), bit64conversion = "double"
    )
    endidx <- h5read( # indptr contains the last index, too
        h5f, .indptr(x), list(cidx + 1L), bit64conversion = "double"
    ) - 1L
    # Per-column runs of element offsets into the data/indices datasets.
    idx <- Map(seq, startidx, endidx, MoreArgs=list(by = 1))
    lens <- lengths(idx)
    idx <- unlist(idx) + 1L    # 0-based HDF5 offsets -> 1-based R indices
    ridx <- h5read(
        h5f, .indices(x), list(idx), bit64conversion = "double"
    ) + 1L

    ## get values for rows of interest
    keep <- ridx %in% .rowidx(x)
    idx <- idx[keep]
    values <- as.vector(h5read(h5f, .dataname(x), index=list(idx)))
    # Map absolute row numbers to positions within the row selection.
    ridx <- match(ridx, .rowidx(x))[keep]
    # Expand column positions to one entry per non-zero, then filter.
    cidx <- rep(seq_along(cidx), lens)[keep]

    list(values = values, ridx = ridx, cidx = cidx)
}

#' @rdname TENxGenomics-class
#'
#' @param withDimnames logical(1) Include dimnames on returned matrix?
#'
#' @return \code{as.matrix(tenx)} and \code{as(tenx, "matrix")} return
#'     a matrix with dim and dimnames equal to \code{tenx}, and values
#'     the read counts overlapping corresponding genes and
#'     samples. Use \code{as.matrix(withDimnames=FALSE)} to suppress
#'     dimnames on the returned matrix. NOTE: consider the size of the
#'     matrix, \code{prod(as.numeric(dim(tenx)))} before invoking this
#'     function.
#'
#' @method as.matrix TENxGenomics
#'
#' @export
as.matrix.TENxGenomics <- function(x, ..., withDimnames=TRUE) {
    values_and_indices <- .values_and_indices(
        x=x, ..., withDimnames=withDimnames
    )
    values <- values_and_indices[["values"]]
    ridx <- values_and_indices[["ridx"]]
    cidx <- values_and_indices[["cidx"]]

    ## formulate result as matrix
    # Dense zero matrix; non-zero counts are filled in via matrix indexing
    # with (row, col) pairs.
    m <- matrix(
        0L, nrow(x), ncol(x),
        dimnames = if (withDimnames) dimnames(x) else list(NULL, NULL)
    )
    m[cbind(ridx, cidx)] <- values
    m
}

#' @rdname TENxGenomics-class
#'
#' @name coerce,TENxGenomics,matrix-method
#'
#' @exportMethod coerce
setAs("TENxGenomics", "matrix", function(from) as.matrix.TENxGenomics(from))

#' @rdname TENxGenomics-class
#'
#' @return \code{as.dgCMatrix(tenx)} and \code{as(tenx, "dgCMatrix")}
#'     return a sparse matrix (from the Matrix package) with dim and
#'     dimnames equal to \code{tenx}, and values the read counts
#'     overlapping corresponding genes and samples. Use
#'     \code{as.matrix(withDimnames=FALSE)} to suppress dimnames on
#'     the returned matrix.
#'
#' @export
as.dgCMatrix <- function(x, ..., withDimnames=TRUE) {
    .requireMatrix()
    # TODO: Support withDimnames
    stopifnot(withDimnames)
    values_and_indices <- .values_and_indices(
        x, ..., withDimnames = withDimnames
    )
    # NOTE(review): `giveCsparse` is deprecated in recent Matrix releases in
    # favor of `repr = "C"` -- confirm against the package's minimum
    # supported Matrix version.
    Matrix::sparseMatrix(
        i = values_and_indices[["ridx"]],
        j = values_and_indices[["cidx"]],
        x = values_and_indices[["values"]],
        dims = dim(x), dimnames = dimnames(x), giveCsparse = TRUE
    )
}

## NOTE: This uses a dgCMatrix, a compressed, sparse, column-oriented
##       numeric (double) matrix. What we really want to use to store
##       these data is a igCMatrix, a compressed, sparse,
##       column-oriented *integer* matrix. However, the igCMatrix
##       class, while defined in the Matrix package, is not actually
##       implemented.

#' @rdname TENxGenomics-class
#'
#' @name coerce,TENxGenomics,dgCMatrix-method
#'
#' @exportMethod coerce
setAs("TENxGenomics", "dgCMatrix", function(from) as.dgCMatrix(from))
/R/coerce.R
no_license
ttriche/TENxGenomics
R
false
false
4,072
r
## ## coerce ## # Helper for common logic underlying .as.matrix() and .as.sparseMatrix() .values_and_indices <- function(x, ..., withDimnames=TRUE) { stopifnot( is.logical(withDimnames), length(withDimnames) == 1L, !is.na(withDimnames) ) h5f <- H5Fopen(.h5path(x)) on.exit(H5Fclose(h5f)) ## maximum index; needed when selecting last individual h5indptr <- H5Dopen(h5f, .indptr(x)) on.exit(H5Dclose(h5indptr), add=TRUE) indlen <- H5Sget_simple_extent_dims(H5Dget_space(h5indptr))$size ## get all rows for selected columns cidx <- .colidx(x) startidx <- h5read( h5f, .indptr(x), list(cidx), bit64conversion = "double" ) endidx <- h5read( # indptr contains the last index, too h5f, .indptr(x), list(cidx + 1L), bit64conversion = "double" ) - 1L idx <- Map(seq, startidx, endidx, MoreArgs=list(by = 1)) lens <- lengths(idx) idx <- unlist(idx) + 1L ridx <- h5read( h5f, .indices(x), list(idx), bit64conversion = "double" ) + 1L ## get values for rows of interest keep <- ridx %in% .rowidx(x) idx <- idx[keep] values <-as.vector(h5read(h5f, .dataname(x), index=list(idx))) ridx <- match(ridx, .rowidx(x))[keep] cidx <- rep(seq_along(cidx), lens)[keep] list(values = values, ridx = ridx, cidx = cidx) } #' @rdname TENxGenomics-class #' #' @param withDimnames logical(1) Include dimnames on returned matrix? #' #' @return \code{as.matrix(tenx)} and \code{as(tenx, "matrix")} return #' a matrix with dim and dimnames equal to \code{tenx}, and values #' the read counts overlapping corresponding genes and #' samples. Use \code{as.matrix(withDimnames=FALSE)} to suppress #' dimnames on the returned matrix. NOTE: consider the size of the #' matrix, \code{prod(as.numeric(dim(tenx)))} before invoking this #' function. 
#' #' @method as.matrix TENxGenomics #' #' @export as.matrix.TENxGenomics <- function(x, ..., withDimnames=TRUE) { values_and_indices <- .values_and_indices( x=x, ..., withDimnames=withDimnames ) values <- values_and_indices[["values"]] ridx <- values_and_indices[["ridx"]] cidx <- values_and_indices[["cidx"]] ## formulate result as matrix m <- matrix( 0L, nrow(x), ncol(x), dimnames = if (withDimnames) dimnames(x) else list(NULL, NULL) ) m[cbind(ridx, cidx)] <- values m } #' @rdname TENxGenomics-class #' #' @name coerce,TENxGenomics,matrix-method #' #' @exportMethod coerce setAs("TENxGenomics", "matrix", function(from) as.matrix.TENxGenomics(from)) #' @rdname TENxGenomics-class #' #' @return \code{as.dgCMatrix(tenx)} and \code{as(tenx, "dgCMatrix")} #' return a sparse matrix (from the Matrix package) with dim and #' dimnames equal to \code{tenx}, and values the read counts #' overlapping corresponding genes and samples. Use #' \code{as.matrix(withDimnames=FALSE)} to suppress dimnames on #' the returned matrix. #' #' @export as.dgCMatrix <- function(x, ..., withDimnames=TRUE) { .requireMatrix() # TODO: Support withDimnames stopifnot(withDimnames) values_and_indices <- .values_and_indices( x, ..., withDimnames = withDimnames ) Matrix::sparseMatrix( i = values_and_indices[["ridx"]], j = values_and_indices[["cidx"]], x = values_and_indices[["values"]], dims = dim(x), dimnames = dimnames(x), giveCsparse = TRUE ) } ## NOTE: This uses a dgCMatrix, a compressed, sparse, column-oriented ## numeric (double) matrix. What we really want to use to store ## these data is a igCMatrix, a compressed, sparse, ## column-oriented *integer* matrix. However, the igCMatrix ## class, while defined in the Matrix package, is not actually ## implemented.' @rdname TENxGenomics-class ## #' @rdname TENxGenomics-class #' #' @name coerce,TENxGenomics,dgCMatrix-method #' #' @exportMethod coerce setAs("TENxGenomics", "dgCMatrix", function(from) as.dgCMatrix(from))
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LCAs.R
\name{LCAs}
\alias{LCAs}
\title{Construct a poset of latent class analysis models.}
\arguments{
\item{maxNumClasses}{the number of classes in the largest LCA model to be
considered.}

\item{numVariables}{the number of observed variables.}

\item{numStatesForVariables}{the number of states for each observed variable,
at the moment these must all be equal.}

\item{phi}{parameter controlling the strength of the sBIC penalty.}
}
\value{
An object representing the collection.
}
\description{
Creates an object representing a collection of latent class analysis models.
There is one model for each fixed number of latent classes from 1 to some
specified maximum. In particular each model is identified by a single number
specifying the number of latent classes in the model. Models are naturally
ordered by inclusion so that, for example, a model with 2 latent classes
comes before a model with 3 or more latent classes.
}
/man/LCAs.Rd
no_license
yuinityk/sBIC
R
false
true
1,005
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/LCAs.R \name{LCAs} \alias{LCAs} \title{Construct a poset of latent class analysis models.} \arguments{ \item{maxNumClasses}{the number of classes in the largest LCA model to considered.} \item{numVariables}{the number of observed variables.} \item{numStatesForVariables}{the number of states for each observed variable, at the moment these must all be equal.} \item{phi}{parameter controlling the strength of the sBIC penalty.} } \value{ An object representing the collection. } \description{ Creates an object representing a collection of latent class analysis models. There is one model for each fixed number of latent classes from 1 to some specified maximum. In particular each model is identified by a single number specifiying the number of latent classes in the model. Models are naturally ordered by inclusion so that, for example, a model with 2 latent classes comes before a model with 3 or more latent classes. }
# Load data --------------------------
# Downloads the UCI household power consumption data and reads the raw
# semicolon-separated file straight from the zip archive.
fileUrl <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
filename <- 'household_power_consumption.txt'
download.file(fileUrl, destfile = "household_power_consumption.zip")
# Fix: spell out stringsAsFactors (was `stringsAsFactor`, which only worked
# through partial argument matching).
dataset <- read.table(unz('household_power_consumption.zip', filename),
                      header = TRUE, sep = ';', stringsAsFactors = FALSE)

# Convert Date from Character to Date type and subset data
# Keep only the two analysis days: 2007-02-01 and 2007-02-02.
dataset$Date <- as.Date(strptime(dataset$Date, format = "%d/%m/%Y"))
subdataset <- subset(dataset, Date >= "2007-02-01" & Date <= "2007-02-02" )

# Obtain numeric values for column 3:9
# Raw values are character ("?" marks missing); coerce column-wise.
subdataset[3:9] <- sapply(3:9, function(x,y) {as.numeric(y[, x])}, y = subdataset)

# Get weekday to count how many observations per day
# countt gives per-day observation counts, used to place axis tick marks.
subdataset$weekday <- weekdays(subdataset$Date)
countt <- table(subdataset$weekday)

# Plotting -----------------------------
# 2x2 panel layout written to plot4.png.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow=c(2, 2), mar = c(4,4,4,1), oma = c(0, 0, 0, 0))

# Plot 1: global active power over time
plot(subdataset$Global_active_power, col='black', xaxt='n', type ='l',
     xlab ="", ylab = 'Global Active Power', cex.lab=0.8)
axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat"))

# Plot 2: voltage over time
# Fix: axis label typo "datatime" -> "datetime" (now matches Plot 4).
plot(subdataset$Voltage, col='black', xaxt='n', type ='l',
     xlab ="datetime", ylab = 'Voltage', cex.lab=0.8)
axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat"))

# Plot 3: the three sub-metering series overlaid, with legend
plot(subdataset$Sub_metering_1, col='black', xaxt='n', type ='l',
     xlab ="", ylab = 'Energy sub metering', cex.lab=0.8)
lines(subdataset$Sub_metering_2, col='red')
lines(subdataset$Sub_metering_3, col='blue')
axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat"))
legend(nrow(subdataset)/2,
       max(subdataset$Sub_metering_1, subdataset$Sub_metering_2, subdataset$Sub_metering_3),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", box.lwd = 0, lty = rep(1, 3), cex = 0.8,
       col= c('black', 'red', 'blue'), bg='transparent')

# Plot 4: global reactive power over time
plot(subdataset$Global_reactive_power, col='black', xaxt='n', type ='l',
     xlab ="datetime", ylab = 'Global_reactive_power' , cex.lab=0.8)
axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat"))

dev.off()
/plot4.R
no_license
juanlp/ExData_Plotting1
R
false
false
2,488
r
# Load data -------------------------- fileUrl <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' filename <- 'household_power_consumption.txt' download.file(fileUrl, destfile = "household_power_consumption.zip") dataset <- read.table(unz('household_power_consumption.zip', filename),header = TRUE, sep=';', stringsAsFactor = FALSE) # Convert Date from Character to Date type and subset data dataset$Date <- as.Date(strptime(dataset$Date, format = "%d/%m/%Y")) subdataset <- subset(dataset, Date >= "2007-02-01" & Date <= "2007-02-02" ) # Obtain numeric values for column 3:9 subdataset[3:9] <- sapply(3:9, function(x,y) {as.numeric(y[, x])}, y = subdataset) # Get weekday to count how many observations per day subdataset$weekday <- weekdays(subdataset$Date) countt <- table(subdataset$weekday) # Plotting ----------------------------- png(filename = "plot4.png", width = 480, height = 480, units = "px") par(mfrow=c(2, 2), mar = c(4,4,4,1), oma = c(0, 0, 0, 0)) # Plot 1 plot(subdataset$Global_active_power, col='black', xaxt='n', type ='l', xlab ="", ylab = 'Global Active Power', cex.lab=0.8) axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat")) # Plot 2 plot(subdataset$Voltage, col='black', xaxt='n', type ='l', xlab ="datatime", ylab = 'Voltage', cex.lab=0.8) axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat")) # Plot 3 plot(subdataset$Sub_metering_1, col='black', xaxt='n', type ='l', xlab ="", ylab = 'Energy sub metering', cex.lab=0.8) lines(subdataset$Sub_metering_2, col='red') lines(subdataset$Sub_metering_3, col='blue') axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat")) legend(nrow(subdataset)/2, max(subdataset$Sub_metering_1, subdataset$Sub_metering_2, subdataset$Sub_metering_3), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", box.lwd = 0, lty = rep(1, 3), cex = 0.8, col= c('black', 'red', 'blue'), bg='transparent') # Plot 4 
plot(subdataset$Global_reactive_power, col='black', xaxt='n', type ='l', xlab ="datetime", ylab = 'Global_reactive_power' , cex.lab=0.8) axis(1, at=c(1, countt[1]+1, nrow(subdataset)+1), labels=c("Thu", "Fri", "Sat")) dev.off()
# CpG: How does R2 change if I used individual beh vs Noisy composite?
# March 21, 2018
#
# Compares R-squared from individual behaviour measures against a noisy
# composite, using OXTR CpG1 methylation as the predictor of interest and
# `compound` as a covariate.

library(pscl)          # pR2() pseudo R-squared for GLMs
library(lmtest)
library(BaylorEdPsych)

setwd("~/Documents/Behavioral Genetics Study/R Projects/simulations/")

# 1) Read file with Methylation and Behavior
Data3<- read.csv(file ="~/Documents/Behavioral Genetics Study/Shota Methylation Study/OXTR epi file_2018.02.08.csv")
str(Data3)

# 1a) Drop rows missing the CpG1 methylation value or the `sc` behaviour.
Data2 <- Data3[!is.na(Data3$CpG1),] #Removes Missing Values
Data<- Data2[!is.na(Data2$sc),] #Removes Missing Values
str(Data)

# 1) Prox
# Residualize proximity measures on `compound`, then regress the residuals
# on CpG1; x2/x4 hold the resulting R-squared values for comparison.
Data$resid_px<-residuals(lm(px_adultfem~compound, data=Data, na.action=na.exclude))
x1<-summary(lm(px_adultfem~CpG1+compound, data=Data))
x2<-summary(lm(resid_px~CpG1, data=Data))$r.squared

Data$resid_px1<-residuals(lm(px_all_notownix~compound, data=Data, na.action=na.exclude))
x3<-summary(lm(px_all_notownix~CpG1+compound, data=Data))
x4<-summary(lm(resid_px1~CpG1, data=Data))$r.squared

x2
x4

# 3) cpg1 and Anxiety
# Negative-binomial count models with an exposure-time offset.
SS<-summary(glm.nb((sc)~CpG1+compound+offset(log(Time)), data=Data))

# Pseudo R2
pR2(glm.nb((sc)~CpG1+compound+offset(log(Time)), data=Data))
pR2(glm.nb((sum_anx)~CpG1+compound+offset(log(Time)), data=Data))
/CpG R2 composite comparisons.R
no_license
dcossyleon/Stats-Simulations
R
false
false
1,160
r
#CpG: How does R2 change if I used individual beh vs Noisy composite? #March 21, 2018 library(pscl) library(lmtest) library(BaylorEdPsych) setwd("~/Documents/Behavioral Genetics Study/R Projects/simulations/") #1) Read file with Methylation and Behavior Data3<- read.csv(file ="~/Documents/Behavioral Genetics Study/Shota Methylation Study/OXTR epi file_2018.02.08.csv") str(Data3) #1a) Data2 <- Data3[!is.na(Data3$CpG1),] #Removes Missing Values Data<- Data2[!is.na(Data2$sc),] #Removes Missing Values str(Data) #1) Prox Data$resid_px<-residuals(lm(px_adultfem~compound, data=Data, na.action=na.exclude)) x1<-summary(lm(px_adultfem~CpG1+compound, data=Data)) x2<-summary(lm(resid_px~CpG1, data=Data))$r.squared Data$resid_px1<-residuals(lm(px_all_notownix~compound, data=Data, na.action=na.exclude)) x3<-summary(lm(px_all_notownix~CpG1+compound, data=Data)) x4<-summary(lm(resid_px1~CpG1, data=Data))$r.squared x2 x4 #3) cpg1 and Anxiety SS<-summary(glm.nb((sc)~CpG1+compound+offset(log(Time)), data=Data)) #Pseudo R2 pR2(glm.nb((sc)~CpG1+compound+offset(log(Time)), data=Data)) pR2(glm.nb((sum_anx)~CpG1+compound+offset(log(Time)), data=Data))
#================ Read arguments =========================#
arg <- commandArgs(T)
if ((arg[1]=="-h")|(length(arg)==0)) {
  cat("Usage : Rscript Variant_qc.r input_patient_id sample_list (output_dir)\n")
  quit("no")
}
# sample_list :
# patient time2 tissueType tissue
# P27 2 tumor2 tumor2_1a
#================ Define constants =========================#
patient_data <- "/work/shared/GeneticTest/"
cnv_dir <- "/work/user/zhanggh/Work/CNVresult/CNVreport/"
sample_dir <- "/work/shared/Analysis/"
sample_raw_file <- "/work/data/projects/sequencing_data_list.txt"
# arg <- c("P4008","/work/user/gemh/Tools/mytools/QC_jikou/sample_list_P4008.txt","/work/user/gemh/Tools/mytools/QC_jikou/")
#================ Load packages ===========================#
tryCatch({
  library(dplyr,quietly = T,verbose = F,warn.conflicts = F)
  library(stringr,quietly = T,verbose = F,warn.conflicts = F)
  library(data.table,quietly = T,verbose = F,warn.conflicts = F)
},error = function(e){
  cat("Need R packages: dplyr,stringr,data.table\n")
  quit("no")
})
# Default output dir is the patient's own directory when none is given.
if (length(arg)==2) {
  arg[3] <- paste(patient_data,arg[1],sep = "")
}
result_dir <- paste(arg[3],"/qc_data/",sep = "")
if (!file.exists(result_dir)){
  dir.create(result_dir,recursive = TRUE)
}
sample_list <- read.table(arg[2],sep = "\t",header = F,stringsAsFactors = F)
#=====================
# Read each sample's recheck file, annotate with sample metadata, and
# collect a single per-variant QC table written to variant_qc.txt.
tryCatch({
  recheck_file_dir <- paste(patient_data,arg[1],"/recheck/",
                            sample_list$V4,".filtered.sites.gz",sep = "")
  recheck_data <- c()
  for (i in 1:length(recheck_file_dir)) {
    if (!file.exists(recheck_file_dir[i])) {
      cat(paste("Missing ",arg[1]," file : ",recheck_file_dir[i],sep = ""),"\n")
      quit("no")
    }
    # fread via "zcat" to stream the gzipped sites file; placeholder ""
    # columns are kept so the output schema is stable across runs.
    temp_data <- fread(paste("zcat ",recheck_file_dir[i],sep=""),
                       sep="\t",header=T) %>%
      mutate(patient_id=sample_list[i,1],
             submission_time=sample_list[i,2],
             tissue=sample_list[i,3],
             variant_bases_sd_quality="",
             mkdup_variant_bases_sd_quality="",
             mismatch_count_in_support_reads=""
      )
    recheck_data <- rbind(recheck_data,
                          select(temp_data,patient_id,submission_time,tissue,
                                 chrom,pos,ref,alt,gene,exon,mutation=mutation.c,
                                 protein_change=mutation.p,variant_vaf=freq,
                                 supports_reads=reads,total_depth=depth,variant_positive=reads_plus,
                                 variant_negative=reads_minus,variant_head=reads_begin,variant_tail=reads_end,
                                 variant_bases_mean_quality=avg_qual,variant_bases_sd_quality,
                                 mkdup_variant_vaf=freq.mkdup ,mkdup_supports_reads=reads.mkdup,
                                 mkdup_total_depth=depth.mkdup,mkdup_variant_positive=reads_plus.mkdup,
                                 mkdup_variant_negative=reads_minus.mkdup,mkdup_variant_head=reads_begin.mkdup,
                                 mkdup_variant_tail=reads_end.mkdup,mkdup_variant_bases_mean_quality=avg_qual.mkdup,
                                 mkdup_variant_bases_sd_quality,mismatch_count_in_support_reads,
                                 recheck=confident
                          ))
  }
  write.table(recheck_data,paste(result_dir,"variant_qc.txt",sep = ""),sep = "\t",quote = F,row.names = F)
  cat("successful!\n")
},error = function(e){
  cat("Bug!\n")
  quit("no")})
/Variant_qc.r
no_license
GenecastGMH/genecast
R
false
false
3,706
r
#================读取参数=========================# arg <- commandArgs(T) if ((arg[1]=="-h")|(length(arg)==0)) { cat("Usage : Rscript Variant_qc.r input_patient_id sample_list (output_dir)\n") quit("no") } # sample_list : # patient time2 tissueType tissue # P27 2 tumor2 tumor2_1a #================定义常量=========================# patient_data <- "/work/shared/GeneticTest/" cnv_dir <- "/work/user/zhanggh/Work/CNVresult/CNVreport/" sample_dir <- "/work/shared/Analysis/" sample_raw_file <- "/work/data/projects/sequencing_data_list.txt" # arg <- c("P4008","/work/user/gemh/Tools/mytools/QC_jikou/sample_list_P4008.txt","/work/user/gemh/Tools/mytools/QC_jikou/") #================加载包===========================# tryCatch({ library(dplyr,quietly = T,verbose = F,warn.conflicts = F) library(stringr,quietly = T,verbose = F,warn.conflicts = F) library(data.table,quietly = T,verbose = F,warn.conflicts = F) },error = function(e){ cat("Need R packages: dplyr,stringr,data.table\n") quit("no") }) if (length(arg)==2) { arg[3] <- paste(patient_data,arg[1],sep = "") } result_dir <- paste(arg[3],"/qc_data/",sep = "") if (!file.exists(result_dir)){ dir.create(result_dir,recursive = TRUE) } sample_list <- read.table(arg[2],sep = "\t",header = F,stringsAsFactors = F) #===================== tryCatch({ recheck_file_dir <- paste(patient_data,arg[1],"/recheck/", sample_list$V4,".filtered.sites.gz",sep = "") recheck_data <- c() for (i in 1:length(recheck_file_dir)) { if (!file.exists(recheck_file_dir[i])) { cat(paste("Missing ",arg[1]," file : ",recheck_file_dir[i],sep = ""),"\n") quit("no") } temp_data <- fread(paste("zcat ",recheck_file_dir[i],sep=""), sep="\t",header=T) %>% mutate(patient_id=sample_list[i,1], submission_time=sample_list[i,2], tissue=sample_list[i,3], variant_bases_sd_quality="", mkdup_variant_bases_sd_quality="", mismatch_count_in_support_reads="" ) recheck_data <- rbind(recheck_data, select(temp_data,patient_id,submission_time,tissue, 
chrom,pos,ref,alt,gene,exon,mutation=mutation.c, protein_change=mutation.p,variant_vaf=freq, supports_reads=reads,total_depth=depth,variant_positive=reads_plus, variant_negative=reads_minus,variant_head=reads_begin,variant_tail=reads_end, variant_bases_mean_quality=avg_qual,variant_bases_sd_quality, mkdup_variant_vaf=freq.mkdup ,mkdup_supports_reads=reads.mkdup, mkdup_total_depth=depth.mkdup,mkdup_variant_positive=reads_plus.mkdup, mkdup_variant_negative=reads_minus.mkdup,mkdup_variant_head=reads_begin.mkdup, mkdup_variant_tail=reads_end.mkdup,mkdup_variant_bases_mean_quality=avg_qual.mkdup, mkdup_variant_bases_sd_quality,mismatch_count_in_support_reads, recheck=confident )) } write.table(recheck_data,paste(result_dir,"variant_qc.txt",sep = ""),sep = "\t",quote = F,row.names = F) cat("successful!\n") },error = function(e){ cat("Bug!\n") quit("no")})
library(dplyr)
library(tidyr)
library(readr)
library(stringr)
library(scales)

# Total number of polling stations (mesas) in the election.
CANT_MESAS <- 8653

############# Mesas
# Result and raw data for each polling station (mesa).
# One column per political party.
mesas <- read_csv("resultados-parciales-gobernador-cordoba-2019.csv")
colnames(mesas) <- str_replace_all(tolower(colnames(mesas))," ", "_")

############# Resultados
# Long format of the votes obtained by each party at each polling station.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
resultados <- mesas %>%
  select(1:20) %>%
  gather(key="nombre_lista",value = "votos", 7:20) %>%
  mutate(lista = stringr::word(nombre_lista, -1, sep = "_"))

# Party standings: total votes and percentage share, sorted descending.
posiciones <- resultados %>%
  group_by(nombre_lista) %>%
  summarise(votos = sum(votos)) %>%
  mutate(total_votos = sum(votos), `%` = percent(votos / total_votos)) %>%
  select(nombre_lista, votos, `%`) %>%
  arrange(desc(votos))

############# Circuitos
# The commented block below documents how circuitos_geo.csv was originally
# exported from the shared Google Sheet.
#
# url_circuitos <- "https://docs.google.com/feeds/download/spreadsheets/Export?key=1Afrsxj_eJwk4-eF8A3I9uEJG57i5-twdLBDVI15I_RU&exportFormat=csv&gid=0"
# column_types = cols(LONGITUD = col_double(), LATITUD = col_double())
# circuitos_raw <- read_csv(url_circuitos, locale = locale(decimal_mark = ","), col_types = column_types)
# circuitos_geo <- circuitos_raw %>%
#     select(circuito_id = circuito, circuito_nombre = Circuito, lng = LONGITUD, lat = LATITUD)
# write_csv(circuitos_geo, "circuitos_geo.csv")
# Electoral circuits with coordinates; rows without latitude are dropped.
circuitos_geo <- read_csv("circuitos_geo.csv") %>% filter(!is.na(lat))

# Total votes per geolocated electoral circuit.
circuitos_resultados <- resultados %>%
  left_join(circuitos_geo, by=c("codigo_circuito" = "circuito_id")) %>%
  group_by(codigo_circuito, circuito_nombre, lng, lat) %>%
  summarise(votos = sum(votos))

############# Sección
# Party vote totals aggregated per electoral section; column names are then
# shortened to the list code (the last "_"-separated token).
seccion <- mesas %>%
  select(seccion, unite_por_la_libertad_y_la_dignidad_p195:`mst_-_nueva_izquierda_a300`) %>%
  group_by(seccion) %>%
  summarise_if(is.numeric, sum)
colnames(seccion) <- word(colnames(seccion), -1, sep="_")
/r_scripts/data.R
no_license
OpenDataCordoba/elecciones-2019-gobernador
R
false
false
1,934
r
library(dplyr) library(tidyr) library(readr) library(stringr) library(scales) CANT_MESAS <- 8653 ############# Mesas # Resultado y datos de cada mesa. # Una columna por partido polítco. mesas <- read_csv("resultados-parciales-gobernador-cordoba-2019.csv") colnames(mesas) <- str_replace_all(tolower(colnames(mesas))," ", "_") ############# Resultados # Long Format de votos obtenidos por partido en cada mesa resultados <- mesas %>% select(1:20) %>% gather(key="nombre_lista",value = "votos", 7:20) %>% mutate(lista = stringr::word(nombre_lista, -1, sep = "_")) posiciones <- resultados %>% group_by(nombre_lista) %>% summarise(votos = sum(votos)) %>% mutate(total_votos = sum(votos), `%` = percent(votos / total_votos)) %>% select(nombre_lista, votos, `%`) %>% arrange(desc(votos)) ############# Circuitos # # url_circuitos <- "https://docs.google.com/feeds/download/spreadsheets/Export?key=1Afrsxj_eJwk4-eF8A3I9uEJG57i5-twdLBDVI15I_RU&exportFormat=csv&gid=0" # column_types = cols(LONGITUD = col_double(), LATITUD = col_double()) # circuitos_raw <- read_csv(url_circuitos, locale = locale(decimal_mark = ","), col_types = column_types) # circuitos_geo <- circuitos_raw %>% # select(circuito_id = circuito, circuito_nombre = Circuito, lng = LONGITUD, lat = LATITUD) # write_csv(circuitos_geo, "circuitos_geo.csv") circuitos_geo <- read_csv("circuitos_geo.csv") %>% filter(!is.na(lat)) circuitos_resultados <- resultados %>% left_join(circuitos_geo, by=c("codigo_circuito" = "circuito_id")) %>% group_by(codigo_circuito, circuito_nombre, lng, lat) %>% summarise(votos = sum(votos)) ############# Sección # Mesas Geolocalizadas por circuito eletoral seccion <- mesas %>% select(seccion, unite_por_la_libertad_y_la_dignidad_p195:`mst_-_nueva_izquierda_a300`) %>% group_by(seccion) %>% summarise_if(is.numeric, sum) colnames(seccion) <- word(colnames(seccion), -1, sep="_")
#server.R source("datarep.R",local=TRUE) library(shiny) library(qualityTools) # Define a server for the Shiny app shinyServer(function(input, output){ source("datarep.R",local=TRUE) # Fill in the spot we created for a plot output$instr <- renderPrint({ cat("This is Gage R&R Tool.\nEach parameter was collected under electrical test of semiconductor device.\nYou can see an example of the Table output in the Example Tab and how the parameteres were collected.\nYou have a column for the Tester used, a column for the Deviced tested and the following columns are each one a different electrical test, i.e., parameter.\nFor this data, 10 devices were used. They were tested 3 times in each tester, i.e., overall there are 30 data-points for each device.\nThe reason it's measure 3x in each tester is in order to estimate the within tester repeatability. The reason in repeating this in 3 different testers is in order to estimate tester to tester reproducibility, thus Gage R&R.\n \n \nYou need to select a parameter each time you do an evaluation for the Repeatabiltiy and Reproducibility values.\nThe goal in the industry in to keep the TotalRR variance lower than 30% the total Variance. Everything above that you should disqualify the measurement tools, fix, calibrate and perform the test again.\n\nHere an example from the semiconductor industry industry was brought, however this can be applied in a variety of fields. ") }) output$table <- renderDataTable(testDat[,1:12]) output$sum <-renderPrint({ source("datarep.R",local=TRUE) gdo = gageRRDesign(3,10, 3, method = "crossed",sigma=6,randomize = TRUE) response(gdo)<-testDat[,input$var] gdo<-gageRR(gdo) }) output$plot<- renderPlot({ source("datarep.R",local=TRUE) gdo = gageRRDesign(3,10, 3, method = "crossed",sigma=6,randomize = TRUE) response(gdo)<-testDat[,input$var] gdo<-gageRR(gdo) plot(gdo)}) })
/server.R
no_license
Meny007/Meny007.github.io
R
false
false
1,946
r
#server.R source("datarep.R",local=TRUE) library(shiny) library(qualityTools) # Define a server for the Shiny app shinyServer(function(input, output){ source("datarep.R",local=TRUE) # Fill in the spot we created for a plot output$instr <- renderPrint({ cat("This is Gage R&R Tool.\nEach parameter was collected under electrical test of semiconductor device.\nYou can see an example of the Table output in the Example Tab and how the parameteres were collected.\nYou have a column for the Tester used, a column for the Deviced tested and the following columns are each one a different electrical test, i.e., parameter.\nFor this data, 10 devices were used. They were tested 3 times in each tester, i.e., overall there are 30 data-points for each device.\nThe reason it's measure 3x in each tester is in order to estimate the within tester repeatability. The reason in repeating this in 3 different testers is in order to estimate tester to tester reproducibility, thus Gage R&R.\n \n \nYou need to select a parameter each time you do an evaluation for the Repeatabiltiy and Reproducibility values.\nThe goal in the industry in to keep the TotalRR variance lower than 30% the total Variance. Everything above that you should disqualify the measurement tools, fix, calibrate and perform the test again.\n\nHere an example from the semiconductor industry industry was brought, however this can be applied in a variety of fields. ") }) output$table <- renderDataTable(testDat[,1:12]) output$sum <-renderPrint({ source("datarep.R",local=TRUE) gdo = gageRRDesign(3,10, 3, method = "crossed",sigma=6,randomize = TRUE) response(gdo)<-testDat[,input$var] gdo<-gageRR(gdo) }) output$plot<- renderPlot({ source("datarep.R",local=TRUE) gdo = gageRRDesign(3,10, 3, method = "crossed",sigma=6,randomize = TRUE) response(gdo)<-testDat[,input$var] gdo<-gageRR(gdo) plot(gdo)}) })
library(glmnet) mydata = read.table("./TrainingSet/Correlation/NSCLC.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.15,family="gaussian",standardize=TRUE) sink('./Model/EN/Correlation/NSCLC/NSCLC_029.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/Correlation/NSCLC/NSCLC_029.R
no_license
leon1003/QSMART
R
false
false
357
r
library(glmnet) mydata = read.table("./TrainingSet/Correlation/NSCLC.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.15,family="gaussian",standardize=TRUE) sink('./Model/EN/Correlation/NSCLC/NSCLC_029.txt',append=TRUE) print(glm$glmnet.fit) sink()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.gtfs} \alias{plot.gtfs} \title{Plot GTFS object routes and their frequencies} \usage{ \method{plot}{gtfs}(x, ...) } \arguments{ \item{x}{a gtfs_obj as read by read_gtfs()} \item{...}{further specifications} } \description{ Plot GTFS object routes and their frequencies } \examples{ \donttest{ local_gtfs_path <- system.file("extdata", "google_transit_nyc_subway.zip", package = "tidytransit") nyc <- read_gtfs(local_gtfs_path) plot(nyc) } }
/man/plot.gtfs.Rd
no_license
mpadge/tidytransit
R
false
true
598
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.gtfs} \alias{plot.gtfs} \title{Plot GTFS object routes and their frequencies} \usage{ \method{plot}{gtfs}(x, ...) } \arguments{ \item{x}{a gtfs_obj as read by read_gtfs()} \item{...}{further specifications} } \description{ Plot GTFS object routes and their frequencies } \examples{ \donttest{ local_gtfs_path <- system.file("extdata", "google_transit_nyc_subway.zip", package = "tidytransit") nyc <- read_gtfs(local_gtfs_path) plot(nyc) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hasUpperCase.R \name{hasUpperCase} \alias{hasUpperCase} \title{Contains uppercase?} \usage{ hasUpperCase(string) } \arguments{ \item{string}{the string to evaluate} } \value{ TRUE or FALSE if the string has an upper case letter } \description{ Simply change if there is any uppercase letter in a string } \examples{ strToTest1 <- 'obiWentToSchool' strToTest2 <- 'obiwenttoschool' hasUpperCase(strToTest1) hasUpperCase(strToTest2) }
/man/hasUpperCase.Rd
no_license
cran/r2shortcode
R
false
true
512
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hasUpperCase.R \name{hasUpperCase} \alias{hasUpperCase} \title{Contains uppercase?} \usage{ hasUpperCase(string) } \arguments{ \item{string}{the string to evaluate} } \value{ TRUE or FALSE if the string has an upper case letter } \description{ Simply change if there is any uppercase letter in a string } \examples{ strToTest1 <- 'obiWentToSchool' strToTest2 <- 'obiwenttoschool' hasUpperCase(strToTest1) hasUpperCase(strToTest2) }
library(shiny) library(shinydashboard) library(dplyr) library(shinyWidgets) library(DT) library(shinythemes) library(shinyalert) library(odbc) library(RMySQL) library(shinyjs) #data <- read.csv("Customer_Data.csv") isValidEmail <- function(x) { grepl("\\<[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\>", as.character(x), ignore.case=TRUE) } server <- function(input, output, session) { ############################DAILY VALUS##################################################################################################################################################################################### output$damount <- renderText({ p <- (input$ddatep - input$ddated)*100 paste0("Amount Owned:",p) }) observeEvent(input$dfirst,{ output$d_first <- renderText({ print(paste0("First Name: ", input$dfirst)) })}) observeEvent(input$dlast,{ output$d_last <- renderText({ print(paste0("Second Name: ", input$dlast)) })}) observeEvent(input$dstore, { output$d_store <- renderText({ print(paste0("Name of The Store: ", input$dstore)) })}) observeEvent(input$dnumber,{ output$d_number <- renderText({ print(paste0("Phone Number:", input$dnumber)) })}) observeEvent(input$ddescription,{ output$d_description <- renderText({ print(paste0("Item Description:", input$ddescription)) })}) observeEvent(input$dquantity,{ output$d_quantity <- renderText({ print(paste0("Quantity:", input$dquantity)) })}) output$value1 <- renderText({paste0("Date Of Drop Off:",Sys.Date()) }) output$d_confirmation_text<- renderText({ print(paste0("Please Confirm That The Detail Provided Above Are Correct Before Pressing The Submit Button")) }) observeEvent(input$submit,{ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) date <- as.Date(Sys.Date()) dbGetQuery(con ,paste0("Insert daily_data values 
('",input$dfirst,"','",input$dlast,"','",input$dstore,"','",input$dnumber,"','",input$dquantity,"','",input$ddescription,"', '",date,"','",input$dpaidamount,"','","Pending","')")) #dbSendQuery(con, query) #dbGetQuery(con, query) dbDisconnect(con) updateTextInput(session, "dfirst", value = "") updateTextInput(session, "dlast", value = "") updateTextInput(session,"dstore",value = "") updateTextInput(session, "dnumber",value = "" ,placeholder = "+254") #pdateTextInput(session, "email", value = "") # updateDateInput(session, "dob", value = Sys.Date()) # updateNumericInput(session, "quantity", value = 1) # updateTextInput(session, "mpesa", value = "") shinyalert(title = "Registration Successful!", type = "success") updateTextAreaInput(session, "ddescription", "Item Description", value = "",placeholder = "Please Describe the Item") }) #####################################DAILY PICK UP#################################################################### observeEvent(input$drefresh,{output$tabledaily <- DT::renderDataTable({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) datatable(data) %>% formatStyle( 'Status', backgroundColor = styleEqual(c("Pending", "Picked"), c('green', 'red'))) #dbDisconnect(con) }) observeEvent(input$dailypayment,{ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) a = input$tabledaily_rows_selected b = data[a, 4] c = data[a, 1] e <- as.character(c) # dbGetQuery(con, statement = # paste0(" # UPDATE customer_data # SET Status = ","Picked"," # WHERE `Phone Number` = ","705098186","")) dbGetQuery(con, paste0("UPDATE daily_data SET Status = 'Picked' WHERE `Phone Number` = '",b,"' AND 
`First Name` = '",e,"'" )) }) output$amountowned = renderPrint({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) m = input$tabledaily_rows_selected n = data[m, 1] cash <- data[m,8] datepaid = data[m,7] p <- cash - ((Sys.Date() - datepaid)*100) cat('Amount Owned by:',n,"is",p) #cat(n, sep = ', ') }) }) ###################################################################################################################### output$hyper <- renderUI({ tags$iframe(src= "index.html", style="width: 100vw;height: 100vh;position:fixed;") }) output$tableforpatient <- renderText({ validate( need(input$first !="", paste("First Name: Please Input your firstname")), need(input$last !="", paste("Second Name: Please Input your lastname")), need(input$numbers !="", paste("Phone Number: Please Input your Phone Number")), need(input$stores1 !="", paste("Store Name: Please Provide the Store Name")), # need(isValidEmail(input$email), # paste("Email Address: Please Input a valid E-mail address")) need(input$description !="", paste("Item Description: Please Describe the Item")), need(input$mpesa !="", paste("MPESA Code: Please Provide the MPESA verification Code")) )}) # observeEvent(input$subit, { # showModal(modalDialog( # title = "Please Confirm Details", # print(paste("Fisrt Name: ",input$first)), # print(paste("Second Name: ",input$last)) # )) # }) output$value <- renderText({paste0("Date Of Drop Off:",Sys.Date()) }) observeEvent(input$btn, { shinyalert( title = "Please Confirm Details",print(paste0("Fisrt Name: ",input$first,sep="\n")), # print(paste0("Second Name: ",input$last,sep="\n")), # print(paste0("Store Name:",input$stores1, sep="\n")), # callbackR = function() { shinyalert(paste("Registration Successful!")) } callbackR = function() { actionButton("data","Confirm") } ) }) 
observeEvent(input$subit,{ # query <-"INSERT INTO customer_data # VALUES(%s,input$first # %s,input$stores1 # %d,input$numbers);" con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) dbGetQuery(con ,paste0("Insert customer_data values ('",input$first,"','",input$stores1,"','",input$numbers,"','","Pending","')")) #dbSendQuery(con, query) #dbGetQuery(con, query) shinyalert(title = "Registration Successful!", type = "success") dbDisconnect(con) updateTextInput(session, "first", value = "") updateTextInput(session, "last", value = "") updateSelectInput(session,"stores1","Select A Store", choices= c("","Malazi Store", "Majengo Store","Online Dress","Online Furniture")) updateTextInput(session, "numbers",value = "" ,placeholder = "+254") updateTextInput(session, "email", value = "") updateDateInput(session, "dob", value = Sys.Date()) updateNumericInput(session, "quantity", value = 1) updateTextInput(session, "mpesa", value = "") updateTextAreaInput(session, "description", "Item Description", value = "",placeholder = "Please Describe the Item") }) ################################################################################################################################################################################ observeEvent(input$refresh,{ output$table <- DT::renderDataTable({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) datatable(data) %>% formatStyle( 'Status', backgroundColor = styleEqual(c("Pending", "Picked"), c('green', 'red'))) #dbDisconnect(con) })}) output$x4 = renderPrint({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", 
host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) s = input$table_rows_selected t = data[s, 1] if (length(t)) { cat('Please Enter The MPESA Payment Code For:') cat(t, sep = ', ') } }) observeEvent(input$verify, { output$mpesaconfirmation = renderPrint({ req(input$table_rows_selected) req(input$verify) con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) s = input$table_rows_selected t = data[s, 3] if (input$mpesacon != t) { cat('SORRY THE CODES DO NOT MATCH') } else{cat('THE CODES MATCH')} })}) observeEvent(input$verify,{ req(input$mpesacon) con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) a = input$table_rows_selected b = data[a, 3] c = data[a, 1] if(input$mpesacon == b){ # dbGetQuery(con, statement = # paste0(" # UPDATE customer_data # SET Status = ","Picked"," # WHERE `Phone Number` = ","705098186","")) dbGetQuery(con, paste0("UPDATE customer_data SET Status = 'Picked' WHERE `Phone Number` = ",b,"")) } }) ################################################################################################################################################################################# observeEvent(input$first,{ output$first_name <- renderText({ print(paste0("First Name: ", input$first)) })}) output$second_name <- renderText({ print(paste0("Second Name: ", input$last)) }) output$store_name <- renderText({ print(paste0("Store Name: ",input$stores1)) }) output$phone_number <- renderText({ 
print(paste0("Phone Number: ", input$numbers)) }) output$item_desc <- renderText({ print(paste0("Item Description: ",input$description)) }) output$item_quantity<- renderText({ print(paste0("Number Of Items: ", input$quantity)) }) output$mpesa_code <- renderText({ print(paste0("MPESA Code: ", input$mpesa)) }) output$confirmation_text<- renderText({ print(paste0("Please Confirm That The Detail Provided Above Are Correct Before Pressing The Submit Button")) }) }
/server.r
no_license
MonginaDolphine8/DianaStore
R
false
false
13,394
r
library(shiny) library(shinydashboard) library(dplyr) library(shinyWidgets) library(DT) library(shinythemes) library(shinyalert) library(odbc) library(RMySQL) library(shinyjs) #data <- read.csv("Customer_Data.csv") isValidEmail <- function(x) { grepl("\\<[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\>", as.character(x), ignore.case=TRUE) } server <- function(input, output, session) { ############################DAILY VALUS##################################################################################################################################################################################### output$damount <- renderText({ p <- (input$ddatep - input$ddated)*100 paste0("Amount Owned:",p) }) observeEvent(input$dfirst,{ output$d_first <- renderText({ print(paste0("First Name: ", input$dfirst)) })}) observeEvent(input$dlast,{ output$d_last <- renderText({ print(paste0("Second Name: ", input$dlast)) })}) observeEvent(input$dstore, { output$d_store <- renderText({ print(paste0("Name of The Store: ", input$dstore)) })}) observeEvent(input$dnumber,{ output$d_number <- renderText({ print(paste0("Phone Number:", input$dnumber)) })}) observeEvent(input$ddescription,{ output$d_description <- renderText({ print(paste0("Item Description:", input$ddescription)) })}) observeEvent(input$dquantity,{ output$d_quantity <- renderText({ print(paste0("Quantity:", input$dquantity)) })}) output$value1 <- renderText({paste0("Date Of Drop Off:",Sys.Date()) }) output$d_confirmation_text<- renderText({ print(paste0("Please Confirm That The Detail Provided Above Are Correct Before Pressing The Submit Button")) }) observeEvent(input$submit,{ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) date <- as.Date(Sys.Date()) dbGetQuery(con ,paste0("Insert daily_data values 
('",input$dfirst,"','",input$dlast,"','",input$dstore,"','",input$dnumber,"','",input$dquantity,"','",input$ddescription,"', '",date,"','",input$dpaidamount,"','","Pending","')")) #dbSendQuery(con, query) #dbGetQuery(con, query) dbDisconnect(con) updateTextInput(session, "dfirst", value = "") updateTextInput(session, "dlast", value = "") updateTextInput(session,"dstore",value = "") updateTextInput(session, "dnumber",value = "" ,placeholder = "+254") #pdateTextInput(session, "email", value = "") # updateDateInput(session, "dob", value = Sys.Date()) # updateNumericInput(session, "quantity", value = 1) # updateTextInput(session, "mpesa", value = "") shinyalert(title = "Registration Successful!", type = "success") updateTextAreaInput(session, "ddescription", "Item Description", value = "",placeholder = "Please Describe the Item") }) #####################################DAILY PICK UP#################################################################### observeEvent(input$drefresh,{output$tabledaily <- DT::renderDataTable({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) datatable(data) %>% formatStyle( 'Status', backgroundColor = styleEqual(c("Pending", "Picked"), c('green', 'red'))) #dbDisconnect(con) }) observeEvent(input$dailypayment,{ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) a = input$tabledaily_rows_selected b = data[a, 4] c = data[a, 1] e <- as.character(c) # dbGetQuery(con, statement = # paste0(" # UPDATE customer_data # SET Status = ","Picked"," # WHERE `Phone Number` = ","705098186","")) dbGetQuery(con, paste0("UPDATE daily_data SET Status = 'Picked' WHERE `Phone Number` = '",b,"' AND 
`First Name` = '",e,"'" )) }) output$amountowned = renderPrint({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM daily_data") data <- dbFetch(rs) m = input$tabledaily_rows_selected n = data[m, 1] cash <- data[m,8] datepaid = data[m,7] p <- cash - ((Sys.Date() - datepaid)*100) cat('Amount Owned by:',n,"is",p) #cat(n, sep = ', ') }) }) ###################################################################################################################### output$hyper <- renderUI({ tags$iframe(src= "index.html", style="width: 100vw;height: 100vh;position:fixed;") }) output$tableforpatient <- renderText({ validate( need(input$first !="", paste("First Name: Please Input your firstname")), need(input$last !="", paste("Second Name: Please Input your lastname")), need(input$numbers !="", paste("Phone Number: Please Input your Phone Number")), need(input$stores1 !="", paste("Store Name: Please Provide the Store Name")), # need(isValidEmail(input$email), # paste("Email Address: Please Input a valid E-mail address")) need(input$description !="", paste("Item Description: Please Describe the Item")), need(input$mpesa !="", paste("MPESA Code: Please Provide the MPESA verification Code")) )}) # observeEvent(input$subit, { # showModal(modalDialog( # title = "Please Confirm Details", # print(paste("Fisrt Name: ",input$first)), # print(paste("Second Name: ",input$last)) # )) # }) output$value <- renderText({paste0("Date Of Drop Off:",Sys.Date()) }) observeEvent(input$btn, { shinyalert( title = "Please Confirm Details",print(paste0("Fisrt Name: ",input$first,sep="\n")), # print(paste0("Second Name: ",input$last,sep="\n")), # print(paste0("Store Name:",input$stores1, sep="\n")), # callbackR = function() { shinyalert(paste("Registration Successful!")) } callbackR = function() { actionButton("data","Confirm") } ) }) 
observeEvent(input$subit,{ # query <-"INSERT INTO customer_data # VALUES(%s,input$first # %s,input$stores1 # %d,input$numbers);" con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) dbGetQuery(con ,paste0("Insert customer_data values ('",input$first,"','",input$stores1,"','",input$numbers,"','","Pending","')")) #dbSendQuery(con, query) #dbGetQuery(con, query) shinyalert(title = "Registration Successful!", type = "success") dbDisconnect(con) updateTextInput(session, "first", value = "") updateTextInput(session, "last", value = "") updateSelectInput(session,"stores1","Select A Store", choices= c("","Malazi Store", "Majengo Store","Online Dress","Online Furniture")) updateTextInput(session, "numbers",value = "" ,placeholder = "+254") updateTextInput(session, "email", value = "") updateDateInput(session, "dob", value = Sys.Date()) updateNumericInput(session, "quantity", value = 1) updateTextInput(session, "mpesa", value = "") updateTextAreaInput(session, "description", "Item Description", value = "",placeholder = "Please Describe the Item") }) ################################################################################################################################################################################ observeEvent(input$refresh,{ output$table <- DT::renderDataTable({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) datatable(data) %>% formatStyle( 'Status', backgroundColor = styleEqual(c("Pending", "Picked"), c('green', 'red'))) #dbDisconnect(con) })}) output$x4 = renderPrint({ con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", 
host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) s = input$table_rows_selected t = data[s, 1] if (length(t)) { cat('Please Enter The MPESA Payment Code For:') cat(t, sep = ', ') } }) observeEvent(input$verify, { output$mpesaconfirmation = renderPrint({ req(input$table_rows_selected) req(input$verify) con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) s = input$table_rows_selected t = data[s, 3] if (input$mpesacon != t) { cat('SORRY THE CODES DO NOT MATCH') } else{cat('THE CODES MATCH')} })}) observeEvent(input$verify,{ req(input$mpesacon) con <- DBI::dbConnect(odbc::odbc(), driver = "MySQL ODBC 8.0 Unicode Driver", database = "test_db", UID = "root", PWD = "Purity@8", host = "localhost", port = 3306) rs <- dbSendQuery(con, "SELECT * FROM customer_data") data <- dbFetch(rs) data <- filter(data, data$Store == input$stores) a = input$table_rows_selected b = data[a, 3] c = data[a, 1] if(input$mpesacon == b){ # dbGetQuery(con, statement = # paste0(" # UPDATE customer_data # SET Status = ","Picked"," # WHERE `Phone Number` = ","705098186","")) dbGetQuery(con, paste0("UPDATE customer_data SET Status = 'Picked' WHERE `Phone Number` = ",b,"")) } }) ################################################################################################################################################################################# observeEvent(input$first,{ output$first_name <- renderText({ print(paste0("First Name: ", input$first)) })}) output$second_name <- renderText({ print(paste0("Second Name: ", input$last)) }) output$store_name <- renderText({ print(paste0("Store Name: ",input$stores1)) }) output$phone_number <- renderText({ 
print(paste0("Phone Number: ", input$numbers)) }) output$item_desc <- renderText({ print(paste0("Item Description: ",input$description)) }) output$item_quantity<- renderText({ print(paste0("Number Of Items: ", input$quantity)) }) output$mpesa_code <- renderText({ print(paste0("MPESA Code: ", input$mpesa)) }) output$confirmation_text<- renderText({ print(paste0("Please Confirm That The Detail Provided Above Are Correct Before Pressing The Submit Button")) }) }
#' Konwersja mile na kilometry. metoda alternatywna #' #' #' @description Funkcja sluzacaca do przeliczania podanej #' odleglosci w milach na odleglosci w kilometrach #' #' @param par zawieracjacy wartosci numeryczne (mile) #' #' @return wektor num #' @export #' #' @examples #' mil_do_km(230) #' mil_do_km(34.22) #' mil_do_km(c(44,66,88)) mil_do_km2=function(par){ km=par/0.62137 km }
/R/mil_do_km2.R
permissive
lpmacniak/konwerter
R
false
false
393
r
#' Konwersja mile na kilometry. metoda alternatywna #' #' #' @description Funkcja sluzacaca do przeliczania podanej #' odleglosci w milach na odleglosci w kilometrach #' #' @param par zawieracjacy wartosci numeryczne (mile) #' #' @return wektor num #' @export #' #' @examples #' mil_do_km(230) #' mil_do_km(34.22) #' mil_do_km(c(44,66,88)) mil_do_km2=function(par){ km=par/0.62137 km }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mscale.R \name{mscale} \alias{mscale} \title{Robust M-estimate of Scale} \usage{ mscale( x, delta = 0.5, rho = c("bisquare", "huber", "gauss"), cc, eps = 1e-08, maxit = 200 ) } \arguments{ \item{x}{numeric vector.} \item{delta}{desired value for the right-hand side of the M-estimation equation.} \item{rho}{rho function to use in the M-estimation equation. Valid options are \code{bisquare}, \code{huber} and \code{gauss}.} \item{cc}{non-negative constant for the chosen rho function. If missing, it will be chosen such that the expected value of the rho function under the normal model is equal to \code{delta}.} \item{eps}{threshold for convergence. Defaults to \code{1e-8}.} \item{maxit}{maximum number of iterations. Defaults to \code{200}.} } \value{ Numeric vector of length one containing the solution \code{s_n} to the equation above. } \description{ Compute the M-estimate of scale using the MAD as initial estimate. } \details{ This solves the M-estimation equation given by \deqn{\sum_{i=1}^n \rho( x_i / s_n; cc ) = n delta} All \code{NA} values in \code{x} are removed before calculating the scale. }
/man/mscale.Rd
no_license
dakep/pyinit
R
false
true
1,211
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mscale.R \name{mscale} \alias{mscale} \title{Robust M-estimate of Scale} \usage{ mscale( x, delta = 0.5, rho = c("bisquare", "huber", "gauss"), cc, eps = 1e-08, maxit = 200 ) } \arguments{ \item{x}{numeric vector.} \item{delta}{desired value for the right-hand side of the M-estimation equation.} \item{rho}{rho function to use in the M-estimation equation. Valid options are \code{bisquare}, \code{huber} and \code{gauss}.} \item{cc}{non-negative constant for the chosen rho function. If missing, it will be chosen such that the expected value of the rho function under the normal model is equal to \code{delta}.} \item{eps}{threshold for convergence. Defaults to \code{1e-8}.} \item{maxit}{maximum number of iterations. Defaults to \code{200}.} } \value{ Numeric vector of length one containing the solution \code{s_n} to the equation above. } \description{ Compute the M-estimate of scale using the MAD as initial estimate. } \details{ This solves the M-estimation equation given by \deqn{\sum_{i=1}^n \rho( x_i / s_n; cc ) = n delta} All \code{NA} values in \code{x} are removed before calculating the scale. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/endpoints-soap.R \name{make_base_soap_url} \alias{make_base_soap_url} \title{Base SOAP API URL Generator} \usage{ make_base_soap_url() } \value{ \code{character}; a complete URL (as a string) that will be used to send SOAP API calls to. This URL is specific to your instance and the API version being used. } \description{ Base SOAP API URL Generator } \note{ This function is meant to be used internally. Only use when debugging. } \keyword{internal}
/man/make_base_soap_url.Rd
permissive
StevenMMortimer/salesforcer
R
false
true
530
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/endpoints-soap.R \name{make_base_soap_url} \alias{make_base_soap_url} \title{Base SOAP API URL Generator} \usage{ make_base_soap_url() } \value{ \code{character}; a complete URL (as a string) that will be used to send SOAP API calls to. This URL is specific to your instance and the API version being used. } \description{ Base SOAP API URL Generator } \note{ This function is meant to be used internally. Only use when debugging. } \keyword{internal}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/message.R \name{import_message} \alias{import_message} \title{Import a message into the gmail mailbox from a mime message} \usage{ import_message(mail, label_ids, type = c("multipart", "media", "resumable"), internal_date_source = c("dateHeader", "recievedTime"), user_id = "me") } \arguments{ \item{mail}{mime mail message created by mime} \item{label_ids}{optional label ids to apply to the message} \item{type}{the type of upload to perform} \item{internal_date_source}{whether to date the object based on the date of the message or when it was received by gmail.} \item{user_id}{gmail user_id to access, special value of 'me' indicates the authenticated user.} } \description{ Import a message into the gmail mailbox from a mime message } \examples{ \dontrun{ import_message(mime(from="you@me.com", to="any@one.com", subject='hello", "how are you doing?")) } } \references{ \url{https://developers.google.com/gmail/api/v1/reference/users/messages/import} }
/man/import_message.Rd
no_license
data-steve/gmailr
R
false
false
1,080
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/message.R \name{import_message} \alias{import_message} \title{Import a message into the gmail mailbox from a mime message} \usage{ import_message(mail, label_ids, type = c("multipart", "media", "resumable"), internal_date_source = c("dateHeader", "recievedTime"), user_id = "me") } \arguments{ \item{mail}{mime mail message created by mime} \item{label_ids}{optional label ids to apply to the message} \item{type}{the type of upload to perform} \item{internal_date_source}{whether to date the object based on the date of the message or when it was received by gmail.} \item{user_id}{gmail user_id to access, special value of 'me' indicates the authenticated user.} } \description{ Import a message into the gmail mailbox from a mime message } \examples{ \dontrun{ import_message(mime(from="you@me.com", to="any@one.com", subject='hello", "how are you doing?")) } } \references{ \url{https://developers.google.com/gmail/api/v1/reference/users/messages/import} }
TrackingError_edit = function (Ra, Rb, scale = NA) { Ra = checkData(Ra) Rb = checkData(Rb) Ra.ncols = NCOL(Ra) Rb.ncols = NCOL(Rb) pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols) if (is.na(scale)) { freq = periodicity(Ra) switch(freq$scale, minute = { stop("Data periodicity too high") }, hourly = { stop("Data periodicity too high") }, daily = { scale = 252 }, weekly = { scale = 52 }, monthly = { scale = 12 }, quarterly = { scale = 4 }, yearly = { scale = 1 }) } terr <- function(Ra, Rb, scale) { TE = sd(Return.excess_edit(Ra, Rb), na.rm = TRUE) * sqrt(scale) return(TE) } result = apply(pairs, 1, FUN = function(n, Ra, Rb, scale) terr(Ra[, n[1]], Rb[, n[2]], scale), Ra = Ra, Rb = Rb, scale = scale) if (length(result) == 1) return(result) else { dim(result) = c(Ra.ncols, Rb.ncols) colnames(result) = paste("Tracking Error:", colnames(Rb)) rownames(result) = colnames(Ra) return(t(result)) } }
/Functions/trackingError_edit.R
no_license
bplloyd/R-risk-mgmt
R
false
false
1,096
r
TrackingError_edit = function (Ra, Rb, scale = NA) { Ra = checkData(Ra) Rb = checkData(Rb) Ra.ncols = NCOL(Ra) Rb.ncols = NCOL(Rb) pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols) if (is.na(scale)) { freq = periodicity(Ra) switch(freq$scale, minute = { stop("Data periodicity too high") }, hourly = { stop("Data periodicity too high") }, daily = { scale = 252 }, weekly = { scale = 52 }, monthly = { scale = 12 }, quarterly = { scale = 4 }, yearly = { scale = 1 }) } terr <- function(Ra, Rb, scale) { TE = sd(Return.excess_edit(Ra, Rb), na.rm = TRUE) * sqrt(scale) return(TE) } result = apply(pairs, 1, FUN = function(n, Ra, Rb, scale) terr(Ra[, n[1]], Rb[, n[2]], scale), Ra = Ra, Rb = Rb, scale = scale) if (length(result) == 1) return(result) else { dim(result) = c(Ra.ncols, Rb.ncols) colnames(result) = paste("Tracking Error:", colnames(Rb)) rownames(result) = colnames(Ra) return(t(result)) } }
\name{print.evolution} \Rdversion{1.1} \alias{print.evolution} \title{ Summarize Some Properties of the Simulation } \description{ A summary of some important properties of the simulation. More is to be added. Same as \code{link{summary.evolution}} and \code{\link{print.summary.evolution}} } \usage{ \method{print}{evolution} (x, \dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ the simulation result generated by \code{\link{multispin.evol}} } \item{...}{ additional arguments, passed to \code{\link{print}} } } \value{ The result is a list, whose elements are attributes of \code{g}: \code{n}, \code{time}, \code{dt}, \code{call}. The result is defined as a "summary.evolution" object. } \author{ Hai Qian, Electrical Engineering Department, UCLA } \seealso{ \code{\link{multispin.evol}}, \code{\link{summary.evolution}}, \code{\link{print.summary.evolution}}, }
/man/print.evolution.Rd
no_license
cran/magnets
R
false
false
944
rd
\name{print.evolution} \Rdversion{1.1} \alias{print.evolution} \title{ Summarize Some Properties of the Simulation } \description{ A summary of some important properties of the simulation. More is to be added. Same as \code{link{summary.evolution}} and \code{\link{print.summary.evolution}} } \usage{ \method{print}{evolution} (x, \dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ the simulation result generated by \code{\link{multispin.evol}} } \item{...}{ additional arguments, passed to \code{\link{print}} } } \value{ The result is a list, whose elements are attributes of \code{g}: \code{n}, \code{time}, \code{dt}, \code{call}. The result is defined as a "summary.evolution" object. } \author{ Hai Qian, Electrical Engineering Department, UCLA } \seealso{ \code{\link{multispin.evol}}, \code{\link{summary.evolution}}, \code{\link{print.summary.evolution}}, }
nrow=20 ncol=30 n = nrow*ncol s.noise = 1 zi.mat = matrix(NA,nrow=nrow,ncol=ncol) i=1:nrow for(j in 1:ncol) zi.mat[i,j] = 3*exp(-(i-j)^2/4) ## iid noise noise.mat=matrix(rnorm(nrow*ncol, sd=s.noise),nrow,ncol) ## make simulated data with no spatial component y.mat = zi.mat + noise.mat ## convert matrices to the internal representation in INLA y = inla.matrix2vector(y.mat) node = 1:n formula= y ~ 1+ f(node, model="matern2d", nu=1, nrow=nrow, ncol=ncol, hyper = list(range = list(param =c(1, 1), prior = "loggamma", initial=1), prec = list(param=c(1, 1)))) data=data.frame(y=y,node=node) ## fit the model result=inla(formula, family="gaussian", data=data, verbose=TRUE, control.predictor = list(compute = TRUE), control.family = list(hyper = list(theta = list(initial = log(1/s.noise^2), fixed = FALSE))), keep=T) ## plot the posterior mean for `predictor' and compare with the truth dev.new() INLA:::inla.display.matrix(zi.mat) dev.new() INLA:::inla.display.matrix(INLA:::inla.vector2matrix(result$summary.linear.predictor$mean,nrow,ncol))
/r-inla.org/doc/latent/matern2d-example.R
no_license
HughParsonage/R-INLA-mirror
R
false
false
1,223
r
nrow=20 ncol=30 n = nrow*ncol s.noise = 1 zi.mat = matrix(NA,nrow=nrow,ncol=ncol) i=1:nrow for(j in 1:ncol) zi.mat[i,j] = 3*exp(-(i-j)^2/4) ## iid noise noise.mat=matrix(rnorm(nrow*ncol, sd=s.noise),nrow,ncol) ## make simulated data with no spatial component y.mat = zi.mat + noise.mat ## convert matrices to the internal representation in INLA y = inla.matrix2vector(y.mat) node = 1:n formula= y ~ 1+ f(node, model="matern2d", nu=1, nrow=nrow, ncol=ncol, hyper = list(range = list(param =c(1, 1), prior = "loggamma", initial=1), prec = list(param=c(1, 1)))) data=data.frame(y=y,node=node) ## fit the model result=inla(formula, family="gaussian", data=data, verbose=TRUE, control.predictor = list(compute = TRUE), control.family = list(hyper = list(theta = list(initial = log(1/s.noise^2), fixed = FALSE))), keep=T) ## plot the posterior mean for `predictor' and compare with the truth dev.new() INLA:::inla.display.matrix(zi.mat) dev.new() INLA:::inla.display.matrix(INLA:::inla.vector2matrix(result$summary.linear.predictor$mean,nrow,ncol))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterluts.R \name{cutshades} \alias{cutshades} \title{prepare shades and reorder to match cut} \usage{ cutshades(cut, col.fun = default.rgb, ...) } \arguments{ \item{cut}{vector of cluster assignments} \item{col.fun}{function mapping a number to that many colors from a palette} \item{...}{passed to \code{\link{color.shades}}} } \value{ color shades reordered (see details) } \description{ For the given cut \code{cut} analyze the internal structure of each cluster and use that as basis for a shading through \code{\link{color.shades}}. Then reorder the shading to arrange according to cluster members. } \examples{ set.seed(42); (cs <- cutshades(ct <- sample(6, 15, TRUE))) op <- par(mfrow=2:1) show.colmat(cs, width=15) show.shades(cs) par(op) } \author{ Benno Pütz \email{puetz@psych.mpg.de} }
/man/cutshades.Rd
no_license
bennop/clusterLUTs
R
false
true
883
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterluts.R \name{cutshades} \alias{cutshades} \title{prepare shades and reorder to match cut} \usage{ cutshades(cut, col.fun = default.rgb, ...) } \arguments{ \item{cut}{vector of cluster assignments} \item{col.fun}{function mapping a number to that many colors from a palette} \item{...}{passed to \code{\link{color.shades}}} } \value{ color shades reordered (see details) } \description{ For the given cut \code{cut} analyze the internal structure of each cluster and use that as basis for a shading through \code{\link{color.shades}}. Then reorder the shading to arrange according to cluster members. } \examples{ set.seed(42); (cs <- cutshades(ct <- sample(6, 15, TRUE))) op <- par(mfrow=2:1) show.colmat(cs, width=15) show.shades(cs) par(op) } \author{ Benno Pütz \email{puetz@psych.mpg.de} }
# install.packages("mplot") library(mplot) library(MASS) library(ggplot2) ############################################################################################################ # Example - diabetes ############################################################################################################ head(diabetes) # interactive version # http://garthtarr.com/apps/mplot # Linear Regression lm.d <- lm(y ~ ., data = diabetes) # 200 bootstrap sample vis.d <- vis(lm.d, B = 200, seed = 1) af.d <- af(lm.d, B = 200, n.c = 100, c.max = 100, seed = 1) # Variable Inclusion Plot plot(vis.d, interactive = FALSE, which = "vip") # Comment: As the penalty value increases, and a more parsimonious model is sought, the hdl variable is # selected more frequently while at the same time other variables with similar information are # dropped. Such paths occur when a group of variables contains similar information to another # variable. # The bmi and ltg paths are horizontal with a bootstrap probability of 1 for all penalty values indicating that they are # very important variables, as are map and sex. # The path for the age variable lies below the path for the redundant # variable, indicating that it does not provide any useful information. # Bootstrap plot plot(vis.d, interactive = FALSE, which = "boot", max.circle = 10, highlight = "hdl") + scale_x_continuous(breaks = c(2, 4, 6, 8, 10, 12)) vis.d # for example: In models of size four (including theintercept), the model with bmi, ltg and map # was selected in 70% of bootstrap resamples. # Adaptive fence plot plot(af.d, interactive = FALSE, best.only = TRUE, legend.position = "right") plot(af.d, interactive = FALSE, best.only = FALSE, legend.position = "right") # interactive version mplot(lm.d, vis.d, af.d)
/PrezentacjePakietow/mplot_code_example.R
no_license
pbiecek/AdvancedR2018
R
false
false
1,811
r
# install.packages("mplot") library(mplot) library(MASS) library(ggplot2) ############################################################################################################ # Example - diabetes ############################################################################################################ head(diabetes) # interactive version # http://garthtarr.com/apps/mplot # Linear Regression lm.d <- lm(y ~ ., data = diabetes) # 200 bootstrap sample vis.d <- vis(lm.d, B = 200, seed = 1) af.d <- af(lm.d, B = 200, n.c = 100, c.max = 100, seed = 1) # Variable Inclusion Plot plot(vis.d, interactive = FALSE, which = "vip") # Comment: As the penalty value increases, and a more parsimonious model is sought, the hdl variable is # selected more frequently while at the same time other variables with similar information are # dropped. Such paths occur when a group of variables contains similar information to another # variable. # The bmi and ltg paths are horizontal with a bootstrap probability of 1 for all penalty values indicating that they are # very important variables, as are map and sex. # The path for the age variable lies below the path for the redundant # variable, indicating that it does not provide any useful information. # Bootstrap plot plot(vis.d, interactive = FALSE, which = "boot", max.circle = 10, highlight = "hdl") + scale_x_continuous(breaks = c(2, 4, 6, 8, 10, 12)) vis.d # for example: In models of size four (including theintercept), the model with bmi, ltg and map # was selected in 70% of bootstrap resamples. # Adaptive fence plot plot(af.d, interactive = FALSE, best.only = TRUE, legend.position = "right") plot(af.d, interactive = FALSE, best.only = FALSE, legend.position = "right") # interactive version mplot(lm.d, vis.d, af.d)
#' Estimate required sample size for accuracy in parameter estimation using bootES #' #' This function uses [bootES::bootES()] to compute #' #' @param data The dataset, as you would normally supply to [bootES::bootES()]; #' you will probably have to simulate this. #' @param ci.type The estimation method; by default, the default of #' [bootES::bootES()] is used ('bca'), but this is changed to 'basic' if it #' encounters problems. #' @param ... Other options for [bootES::bootES()] (see that help page). #' @param w The desired 'halfwidth' of the confidence interval. #' @param silent Whether to provide a lot of information about progress #' ('FALSE') or not ('TRUE'). #' #' @return A single numeric value (the sample size). #' #' @references #' Kirby, K. N., & Gerlanc, D. (2013). BootES: An R package for bootstrap confidence #' intervals on effect sizes. *Behavior Research Methods, 45*, 905–927. \doi{10.3758/s13428-013-0330-5} #' #' @export #' #' @examples ### This requires the bootES package #' if (requireNamespace("bootES", quietly = TRUE)) { #' #' ### To estimate a mean #' x <- rnorm(500, mean=8, sd=3); #' pwr.bootES(data.frame(x=x), #' R=500, #' w=.5); #' #' ### To estimate a correlation (the 'effect.type' parameter is #' ### redundant here; with two columns in the data frame, computing #' ### the confidence interval for the Pearson correlation is the default #' ### ehavior of bootES) #' y <- x+rnorm(500, mean=0, sd=5); #' cor(x, y); #' requiredN <- #' pwr.bootES(data.frame(x=x, #' y=y), #' effect.type='r', #' R=500, #' w=.2); #' print(requiredN); #' ### Compare to parametric confidence interval #' ### based on the computed required sample size #' confIntR(r = cor(x, y), #' N = requiredN); #' ### Width of obtained confidence interval #' print(round(diff(as.numeric(confIntR(r = cor(x, y), #' N = requiredN))), 2)); #' } pwr.bootES <- function(data=data, ci.type="bca", ..., w=.1, silent=TRUE) { if (!requireNamespace("bootES", quietly = TRUE)) { message("To build a tree, 
the \"bootES\" package is required. ", "Please install it using `install.packages('bootES');`."); return(invisible(FALSE)); } if (length(w) != 1) { warning("Multiple widths not supported (yet); only the first one is used!\n", "You can use sapply to approximate this vectorization, for example,\n\n", "sapply(c(", vecTxt(w, lastElements = 0), "), pwr.cohensdCI, d=.5)", "\n"); w <- w[1]; } ### From a post at the R-help mailig list by Luke Tierney, see ### http://stackoverflow.com/questions/3903157/how-can-i-check-whether-a-function-call-results-in-a-warning wHandler <- function(w) { myWarnings <<- c(myWarnings, list(w)); invokeRestart("muffleWarning"); } eHandler <- function(e) { myErrors <<- c(myErrors, list(e)); } myWarnings <- NULL; myErrors <- NULL; n <- 4; if (!silent) { cat0("Setting n to 4 to start.\n"); } for (steps in c(1000, 100, 10, 1)) { ciWidth <- 3*w; while (ciWidth > 2*w) { n <- n + steps; if (!silent) { if (!silent) { cat0("Adding ", steps, " to n.\n"); } cat0("Taking ", n, " samples from `data` and running `bootES::bootES`: "); } sampledData <- data[sample(1:nrow(data), size=n, replace=TRUE), ]; tryCatch({ bootESres <- bootES::bootES(data = sampledData, ci.type = ci.type, ...); }, error=eHandler, warning=wHandler); if (!is.null(myErrors) && ci.type=="bca") { ci.type <- "basic"; n <- n - steps; bcaMsg <- paste0("You specified the 'bca' type of bootstrapped confidence ", "intervals (the default, so maybe you didn't specify anything). 
", "However, `bootES::bootES` ran into an error, so resetting the ", "bootstrapping method to 'basic' and trying to continue."); if (!silent) { cat0(bcaMsg, "\n"); } warning(bcaMsg); } if (exists('bootESres')) { obtainedCI <- bootESres$bounds; ciWidth <- abs(diff(obtainedCI)); if (!silent) { cat0("Obtained CI of ", formatCI(obtainedCI), "; width=", round(ciWidth, 2), "."); if (ciWidth < w*2) { cat0(" This is smaller than the margin of error (2*w, or ", 2*w, ").\n"); } else { cat0(" This is larger than the margin of error (2*w, or ", 2*w, ").\n"); } } } } if (!silent) { cat0("Done with this cycle; subtracting ", steps, " from n.\n"); } n <- n - steps; } return(n); }
/R/pwr.bootES.R
no_license
cran/ufs
R
false
false
5,385
r
#' Estimate required sample size for accuracy in parameter estimation using bootES #' #' This function uses [bootES::bootES()] to compute #' #' @param data The dataset, as you would normally supply to [bootES::bootES()]; #' you will probably have to simulate this. #' @param ci.type The estimation method; by default, the default of #' [bootES::bootES()] is used ('bca'), but this is changed to 'basic' if it #' encounters problems. #' @param ... Other options for [bootES::bootES()] (see that help page). #' @param w The desired 'halfwidth' of the confidence interval. #' @param silent Whether to provide a lot of information about progress #' ('FALSE') or not ('TRUE'). #' #' @return A single numeric value (the sample size). #' #' @references #' Kirby, K. N., & Gerlanc, D. (2013). BootES: An R package for bootstrap confidence #' intervals on effect sizes. *Behavior Research Methods, 45*, 905–927. \doi{10.3758/s13428-013-0330-5} #' #' @export #' #' @examples ### This requires the bootES package #' if (requireNamespace("bootES", quietly = TRUE)) { #' #' ### To estimate a mean #' x <- rnorm(500, mean=8, sd=3); #' pwr.bootES(data.frame(x=x), #' R=500, #' w=.5); #' #' ### To estimate a correlation (the 'effect.type' parameter is #' ### redundant here; with two columns in the data frame, computing #' ### the confidence interval for the Pearson correlation is the default #' ### ehavior of bootES) #' y <- x+rnorm(500, mean=0, sd=5); #' cor(x, y); #' requiredN <- #' pwr.bootES(data.frame(x=x, #' y=y), #' effect.type='r', #' R=500, #' w=.2); #' print(requiredN); #' ### Compare to parametric confidence interval #' ### based on the computed required sample size #' confIntR(r = cor(x, y), #' N = requiredN); #' ### Width of obtained confidence interval #' print(round(diff(as.numeric(confIntR(r = cor(x, y), #' N = requiredN))), 2)); #' } pwr.bootES <- function(data=data, ci.type="bca", ..., w=.1, silent=TRUE) { if (!requireNamespace("bootES", quietly = TRUE)) { message("To build a tree, 
the \"bootES\" package is required. ", "Please install it using `install.packages('bootES');`."); return(invisible(FALSE)); } if (length(w) != 1) { warning("Multiple widths not supported (yet); only the first one is used!\n", "You can use sapply to approximate this vectorization, for example,\n\n", "sapply(c(", vecTxt(w, lastElements = 0), "), pwr.cohensdCI, d=.5)", "\n"); w <- w[1]; } ### From a post at the R-help mailig list by Luke Tierney, see ### http://stackoverflow.com/questions/3903157/how-can-i-check-whether-a-function-call-results-in-a-warning wHandler <- function(w) { myWarnings <<- c(myWarnings, list(w)); invokeRestart("muffleWarning"); } eHandler <- function(e) { myErrors <<- c(myErrors, list(e)); } myWarnings <- NULL; myErrors <- NULL; n <- 4; if (!silent) { cat0("Setting n to 4 to start.\n"); } for (steps in c(1000, 100, 10, 1)) { ciWidth <- 3*w; while (ciWidth > 2*w) { n <- n + steps; if (!silent) { if (!silent) { cat0("Adding ", steps, " to n.\n"); } cat0("Taking ", n, " samples from `data` and running `bootES::bootES`: "); } sampledData <- data[sample(1:nrow(data), size=n, replace=TRUE), ]; tryCatch({ bootESres <- bootES::bootES(data = sampledData, ci.type = ci.type, ...); }, error=eHandler, warning=wHandler); if (!is.null(myErrors) && ci.type=="bca") { ci.type <- "basic"; n <- n - steps; bcaMsg <- paste0("You specified the 'bca' type of bootstrapped confidence ", "intervals (the default, so maybe you didn't specify anything). 
", "However, `bootES::bootES` ran into an error, so resetting the ", "bootstrapping method to 'basic' and trying to continue."); if (!silent) { cat0(bcaMsg, "\n"); } warning(bcaMsg); } if (exists('bootESres')) { obtainedCI <- bootESres$bounds; ciWidth <- abs(diff(obtainedCI)); if (!silent) { cat0("Obtained CI of ", formatCI(obtainedCI), "; width=", round(ciWidth, 2), "."); if (ciWidth < w*2) { cat0(" This is smaller than the margin of error (2*w, or ", 2*w, ").\n"); } else { cat0(" This is larger than the margin of error (2*w, or ", 2*w, ").\n"); } } } } if (!silent) { cat0("Done with this cycle; subtracting ", steps, " from n.\n"); } n <- n - steps; } return(n); }
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392674e+77, 1877.24227362572, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615776584-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
357
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392674e+77, 1877.24227362572, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
# CROSS-SPECIES ANALYSIS PIPELINE # Adrian Baez-Ortega, 2020 # STEP 5.5: OBTAIN REGIONS ABOVE MAX. COV. PERCENTILE IN EACH SAMPLE # Input file paths INPUT = list( SAMPLE.INFO = "../data/original/CrossSpecies_ProjectInfo.txt", COV.PCT = "../data/processed/Coverage_99Percentiles.RData", BAM.PATHS = "../data/original/Path_SampleBAMs.txt" ) # Output file paths OUTPUT = list( LOG.DIR = "logs_5.5", OUT.DIR = "../data/processed/CoverageFilters", COV.PREFIX = "CovAbove99Pct_" ) # Memory, queue and command templates for job submission MEM = 5000 QUEUE = "long" COV.CMD = "bedtools genomecov -ibam ${BAM} -bg | awk '\\$4 > ${MAXCOV}' | bedtools merge > ${OUTFILE}" BSUB.CMD = "bsub -G team78-grp -o ${LOG}/log.%J -q ${QUEUE} -n 1 -R \"span[hosts=1] select[mem>=${MEM}] rusage[mem=${MEM}]\" -M ${MEM} \"${CMD}\"" # Create output and log directories dir.create(OUTPUT$LOG.DIR, showWarnings=F) dir.create(OUTPUT$OUT.DIR, showWarnings=F) cat("Loading data...\n") load(INPUT$COV.PCT) sample.info = read.table(INPUT$SAMPLE.INFO, sep="\t", header=T, as.is=T) bam.paths = read.table(INPUT$BAM.PATHS, sep="\t", header=T, as.is=T) stopifnot(!any(duplicated(sample.info$SAMPLE_NAME))) cat("Loaded\n") # For each species for (species in unique(sample.info$SPECIES_NAME)) { cat("\nProcessing species:", species, "\n") species.idx = sample.info$SPECIES_NAME == species ref.name = sample.info$REFERENCE_GENOME[species.idx][1] project = sample.info$PROJECT_ID[species.idx][1] # Use bedtools genomecov to measure coverage per position in each sample for (sample.id in unique(c(sample.info$SAMPLE_NAME[species.idx], sample.info$NORMAL_NAME[species.idx]))) { cat("Processing sample", sample.id, "\n") cat("99th cov. 
percentile =", coverage.99percentiles[[species]][sample.id], "\n") if (species %in% bam.paths$SPECIES) { bam.path = bam.paths$PATH[bam.paths$SPECIES == species] } else { bam.path = bam.paths$PATH[bam.paths$SPECIES == "default"] } bam = gsub("${SPECIES}", species, gsub("${REFGENOME}", ref.name, gsub("${PROJECT}", project, gsub("${SAMPLE}", sample.id, bam.path, fixed=T), fixed=T), fixed=T), fixed=T) if (file.exists(bam)) { cmd = gsub("${MAXCOV}", coverage.99percentiles[[species]][sample.id], gsub("${BAM}", bam, gsub("${OUTFILE}", paste0(OUTPUT$OUT.DIR, "/", OUTPUT$COV.PREFIX, sample.id, ".bed"), COV.CMD, fixed=T), fixed=T), fixed=T) system(gsub("${QUEUE}", QUEUE, gsub("${MEM}", MEM, gsub("${LOG}", OUTPUT$LOG.DIR, gsub("${CMD}", cmd, BSUB.CMD, fixed=T), fixed=T), fixed=T), fixed=T)) } else { cat("WARNING: File", bam, "not found\n") } } } cat("\nDone\n")
/5_CoverageFilter/5.5_MaxCoverage_PerSample.R
no_license
baezortega/cross-species_farm
R
false
false
3,204
r
# CROSS-SPECIES ANALYSIS PIPELINE # Adrian Baez-Ortega, 2020 # STEP 5.5: OBTAIN REGIONS ABOVE MAX. COV. PERCENTILE IN EACH SAMPLE # Input file paths INPUT = list( SAMPLE.INFO = "../data/original/CrossSpecies_ProjectInfo.txt", COV.PCT = "../data/processed/Coverage_99Percentiles.RData", BAM.PATHS = "../data/original/Path_SampleBAMs.txt" ) # Output file paths OUTPUT = list( LOG.DIR = "logs_5.5", OUT.DIR = "../data/processed/CoverageFilters", COV.PREFIX = "CovAbove99Pct_" ) # Memory, queue and command templates for job submission MEM = 5000 QUEUE = "long" COV.CMD = "bedtools genomecov -ibam ${BAM} -bg | awk '\\$4 > ${MAXCOV}' | bedtools merge > ${OUTFILE}" BSUB.CMD = "bsub -G team78-grp -o ${LOG}/log.%J -q ${QUEUE} -n 1 -R \"span[hosts=1] select[mem>=${MEM}] rusage[mem=${MEM}]\" -M ${MEM} \"${CMD}\"" # Create output and log directories dir.create(OUTPUT$LOG.DIR, showWarnings=F) dir.create(OUTPUT$OUT.DIR, showWarnings=F) cat("Loading data...\n") load(INPUT$COV.PCT) sample.info = read.table(INPUT$SAMPLE.INFO, sep="\t", header=T, as.is=T) bam.paths = read.table(INPUT$BAM.PATHS, sep="\t", header=T, as.is=T) stopifnot(!any(duplicated(sample.info$SAMPLE_NAME))) cat("Loaded\n") # For each species for (species in unique(sample.info$SPECIES_NAME)) { cat("\nProcessing species:", species, "\n") species.idx = sample.info$SPECIES_NAME == species ref.name = sample.info$REFERENCE_GENOME[species.idx][1] project = sample.info$PROJECT_ID[species.idx][1] # Use bedtools genomecov to measure coverage per position in each sample for (sample.id in unique(c(sample.info$SAMPLE_NAME[species.idx], sample.info$NORMAL_NAME[species.idx]))) { cat("Processing sample", sample.id, "\n") cat("99th cov. 
percentile =", coverage.99percentiles[[species]][sample.id], "\n") if (species %in% bam.paths$SPECIES) { bam.path = bam.paths$PATH[bam.paths$SPECIES == species] } else { bam.path = bam.paths$PATH[bam.paths$SPECIES == "default"] } bam = gsub("${SPECIES}", species, gsub("${REFGENOME}", ref.name, gsub("${PROJECT}", project, gsub("${SAMPLE}", sample.id, bam.path, fixed=T), fixed=T), fixed=T), fixed=T) if (file.exists(bam)) { cmd = gsub("${MAXCOV}", coverage.99percentiles[[species]][sample.id], gsub("${BAM}", bam, gsub("${OUTFILE}", paste0(OUTPUT$OUT.DIR, "/", OUTPUT$COV.PREFIX, sample.id, ".bed"), COV.CMD, fixed=T), fixed=T), fixed=T) system(gsub("${QUEUE}", QUEUE, gsub("${MEM}", MEM, gsub("${LOG}", OUTPUT$LOG.DIR, gsub("${CMD}", cmd, BSUB.CMD, fixed=T), fixed=T), fixed=T), fixed=T)) } else { cat("WARNING: File", bam, "not found\n") } } } cat("\nDone\n")
# Natural language processing # Downloading data dataset_org = read.delim('Restaurant_Reviews.tsv', quote = '', stringsAsFactors = FALSE) # Cleaning the texts library(SnowballC) library(NLP) library(tm) corpus = VCorpus(VectorSource(dataset_org$Review)) corpus = tm_map(corpus, content_transformer(tolower)) corpus = tm_map(corpus, removeNumbers) corpus = tm_map(corpus, removePunctuation) corpus = tm_map(corpus, removeWords, stopwords()) corpus = tm_map(corpus, stemDocument) corpus = tm_map(corpus, stripWhitespace) # Creating the bag of words model dtm = DocumentTermMatrix(corpus) dtm = removeSparseTerms(dtm, 0.999) dataset = as.data.frame(as.matrix(dtm)) dataset$Liked = dataset_org$Liked # Encoding the target feature as factor dataset$Liked = factor(dataset$Liked, levels = c(0, 1)) # Splitting the dataset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) split = sample.split(dataset$Liked, SplitRatio = 0.8) training_set = subset(dataset, split == TRUE) test_set = subset(dataset, split == FALSE) # Fitting classifier to the Training set library(randomForest) classifier = randomForest(x = training_set[-692], y = training_set$Liked, ntree = 10) # Predicting the Test set results y_pred = predict(classifier, newdata = test_set[-692]) # Making the Confusion Matrix cm = table(test_set[, 692], y_pred) accuracy = (cm[1,1]+cm[2,2])/(cm[1,1]+cm[2,2]+cm[1,2]+cm[2,1])
/Code_and_Data/natural_language_processing.R
no_license
jhsvendsen/ML_A-Z_templates
R
false
false
1,523
r
# Natural language processing # Downloading data dataset_org = read.delim('Restaurant_Reviews.tsv', quote = '', stringsAsFactors = FALSE) # Cleaning the texts library(SnowballC) library(NLP) library(tm) corpus = VCorpus(VectorSource(dataset_org$Review)) corpus = tm_map(corpus, content_transformer(tolower)) corpus = tm_map(corpus, removeNumbers) corpus = tm_map(corpus, removePunctuation) corpus = tm_map(corpus, removeWords, stopwords()) corpus = tm_map(corpus, stemDocument) corpus = tm_map(corpus, stripWhitespace) # Creating the bag of words model dtm = DocumentTermMatrix(corpus) dtm = removeSparseTerms(dtm, 0.999) dataset = as.data.frame(as.matrix(dtm)) dataset$Liked = dataset_org$Liked # Encoding the target feature as factor dataset$Liked = factor(dataset$Liked, levels = c(0, 1)) # Splitting the dataset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) split = sample.split(dataset$Liked, SplitRatio = 0.8) training_set = subset(dataset, split == TRUE) test_set = subset(dataset, split == FALSE) # Fitting classifier to the Training set library(randomForest) classifier = randomForest(x = training_set[-692], y = training_set$Liked, ntree = 10) # Predicting the Test set results y_pred = predict(classifier, newdata = test_set[-692]) # Making the Confusion Matrix cm = table(test_set[, 692], y_pred) accuracy = (cm[1,1]+cm[2,2])/(cm[1,1]+cm[2,2]+cm[1,2]+cm[2,1])
# ui.R -- Shiny UI for the Titanic survival-prediction app.
#
# The seven feature selectors share the same choice list except for the
# value of their "None" entry (each needs a distinct dummy value so the
# server can treat the inputs independently), so they are built with a
# small helper instead of seven copy-pasted selectInput calls.

# Choices common to every feature selector; the per-input "None" value is
# appended by featureInput().
base_feature_choices <- c(
  "Sex" = "Sex",
  "Fare in $" = "Fare",
  "Number of Siblings/Spouses" = "SibSp",
  "Number of Parents/Children" = "ParentsChildren",
  "Port of Embarkation" = "Embarked",
  "Passenger Class" = "PassengerClass",
  "Age" = "Age"
)

# Build one feature selectInput. `none_value` is the dummy value backing
# the "None" choice; by default it is also the initially selected value.
featureInput <- function(input_id, label, none_value, selected = none_value) {
  selectInput(input_id, label,
              c(base_feature_choices, "None" = none_value),
              selected = selected)
}

shinyUI(fluidPage(
  tags$head(
    tags$style(HTML("
      @import url('//fonts.googleapis.com/css?family=Abril+Fatface|Open+Sans|Courgette');
      body { background-color: #bfbfbf; background-image: url(titanic-P.jpg); background-repeat: no-repeat; }
      h1 { height: 30px; width: 900px; font-family: 'Abril Fatface'; line-height: 1.1; font-size: 3em; margin: 0px; }
      h2 { width: 300px; background-color: rgba(0,0,0,0.5); color: white; border-radius: 10px; margin-left: 125px; }
      h6 { margin-top: 0px; }
      body { font-family: Open Sans; }
      .accuracy { width: 500px; background-color: rgba(0,0,0,0.5); color: white; border-radius: 10px; margin-left: 50px; margin-top: 40px; }
      .plottree { margin-top: 30px; }
      .about { height: 400px; background-color: white; padding: 10px; }
      .shiny-output-error { visibility: hidden; }
      .shiny-output-error:before { visibility: hidden; }
      /* 'font-size' was declared twice in the original rule; kept once. */
      .subtitel { font-size: 0.5em; align: left; border: -20px; margin-left: 4px; }
      p { height: 690px; background-color: white; padding: 10px; }
      .shiny-progress { top: 50% !important; left: 50% !important; margin-top: -100px !important; margin-left: -250px !important; }
    "))
  ),

  headerPanel("Titanic", "Predicting the Survivors"),
  h5(class = "subtitel", headerPanel("Predicting the Survivors")),

  sidebarLayout(
    sidebarPanel(
      featureInput("var1", "Feature 1:", "Dummyr", selected = "Sex"),
      featureInput("var2", "Feature 2", "Dummys"),
      featureInput("var3", "Feature 3", "Dummyt"),
      featureInput("var4", "Feature 4", "Dummyu"),
      featureInput("var5", "Feature 5", "Dummyx"),
      featureInput("var6", "Feature 6", "Dummyy"),
      featureInput("var7", "Feature 7", "Dummyz"),
      br(),
      sliderInput("minsplit", label = h6("Minimum of Splits"),
                  min = 2, max = 200, value = 2, step = 10, ticks = FALSE),
      br(),
      sliderInput("cp", label = h6("Complexity Parameter"),
                  min = 0.01, max = 0.05, value = 0.01, step = 0.01,
                  ticks = TRUE)
    ),

    mainPanel(
      tabsetPanel(
        tabPanel("Output",
                 div(class = "plottree", plotOutput("tree")),
                 h3(class = "accuracy", textOutput("accuracy"),
                    align = "center")),
        tabPanel("Help",
                 p(class = "help",
                   paste("With the Titanic app you can predict the rate of",
                         "survival or death of the Titanic Disaster. The app",
                         "makes use of a training set and a test set. The",
                         "training set is used for training the model by using",
                         "the decision tree technique. After selecting the",
                         "features it will plot a tree. After that it uses the",
                         "model to make a prediction with the use of the test",
                         "set. The results are shown in the accuracy box."),
                   img(src = "screenshot.png", height = 530, width = 530),
                   br(),
                   paste("With the Minimum of Splits you can set the minimum",
                         "number of observations that must exist in a node in",
                         "order for a split to be attempted."),
                   br(), br(),
                   paste("With the Complexity Parameter you can set a",
                         "threshold. Any split that does not decrease the",
                         "overall lack of fit by a factor of the threshold is",
                         "not attempted."))),
        tabPanel("About",
                 p(class = "about",
                   br(), br(),
                   "Title: Titanic", br(),
                   "Author: Karlan Astrego", br(),
                   "Version 1.0", br(), br(),
                   "The data used in this app is from Kaggle.com.", br(), br(),
                   paste("This app was a project for the 'Developing Data",
                         "Products' Course from Coursera and the Johns Hopkins",
                         "University."),
                   br(), br(),
                   "You can find the code on Github:",
                   a(href = "https://github.com/astrego/Titanic",
                     span("https://github.com/astrego/Titanic")),
                   # The original nested the Kaggle link inside the argument
                   # list of the Coursera image, producing invalid markup;
                   # they are now two separate sibling links.
                   a(href = "https://www.coursera.org/specialization/jhudatascience/1?utm_medium=listingPage",
                     img(src = "coursera.jpg")),
                   a(href = "https://www.kaggle.com",
                     img(src = "kaggle.png", height = 200, width = 190))))
      )
    )
  )
))
/ui.R
no_license
astrego/Titanic
R
false
false
6,532
r
# ui.R -- Shiny UI for the Titanic survival-prediction app.
#
# The seven feature selectors share the same choice list except for the
# value of their "None" entry (each needs a distinct dummy value so the
# server can treat the inputs independently), so they are built with a
# small helper instead of seven copy-pasted selectInput calls.

# Choices common to every feature selector; the per-input "None" value is
# appended by featureInput().
base_feature_choices <- c(
  "Sex" = "Sex",
  "Fare in $" = "Fare",
  "Number of Siblings/Spouses" = "SibSp",
  "Number of Parents/Children" = "ParentsChildren",
  "Port of Embarkation" = "Embarked",
  "Passenger Class" = "PassengerClass",
  "Age" = "Age"
)

# Build one feature selectInput. `none_value` is the dummy value backing
# the "None" choice; by default it is also the initially selected value.
featureInput <- function(input_id, label, none_value, selected = none_value) {
  selectInput(input_id, label,
              c(base_feature_choices, "None" = none_value),
              selected = selected)
}

shinyUI(fluidPage(
  tags$head(
    tags$style(HTML("
      @import url('//fonts.googleapis.com/css?family=Abril+Fatface|Open+Sans|Courgette');
      body { background-color: #bfbfbf; background-image: url(titanic-P.jpg); background-repeat: no-repeat; }
      h1 { height: 30px; width: 900px; font-family: 'Abril Fatface'; line-height: 1.1; font-size: 3em; margin: 0px; }
      h2 { width: 300px; background-color: rgba(0,0,0,0.5); color: white; border-radius: 10px; margin-left: 125px; }
      h6 { margin-top: 0px; }
      body { font-family: Open Sans; }
      .accuracy { width: 500px; background-color: rgba(0,0,0,0.5); color: white; border-radius: 10px; margin-left: 50px; margin-top: 40px; }
      .plottree { margin-top: 30px; }
      .about { height: 400px; background-color: white; padding: 10px; }
      .shiny-output-error { visibility: hidden; }
      .shiny-output-error:before { visibility: hidden; }
      /* 'font-size' was declared twice in the original rule; kept once. */
      .subtitel { font-size: 0.5em; align: left; border: -20px; margin-left: 4px; }
      p { height: 690px; background-color: white; padding: 10px; }
      .shiny-progress { top: 50% !important; left: 50% !important; margin-top: -100px !important; margin-left: -250px !important; }
    "))
  ),

  headerPanel("Titanic", "Predicting the Survivors"),
  h5(class = "subtitel", headerPanel("Predicting the Survivors")),

  sidebarLayout(
    sidebarPanel(
      featureInput("var1", "Feature 1:", "Dummyr", selected = "Sex"),
      featureInput("var2", "Feature 2", "Dummys"),
      featureInput("var3", "Feature 3", "Dummyt"),
      featureInput("var4", "Feature 4", "Dummyu"),
      featureInput("var5", "Feature 5", "Dummyx"),
      featureInput("var6", "Feature 6", "Dummyy"),
      featureInput("var7", "Feature 7", "Dummyz"),
      br(),
      sliderInput("minsplit", label = h6("Minimum of Splits"),
                  min = 2, max = 200, value = 2, step = 10, ticks = FALSE),
      br(),
      sliderInput("cp", label = h6("Complexity Parameter"),
                  min = 0.01, max = 0.05, value = 0.01, step = 0.01,
                  ticks = TRUE)
    ),

    mainPanel(
      tabsetPanel(
        tabPanel("Output",
                 div(class = "plottree", plotOutput("tree")),
                 h3(class = "accuracy", textOutput("accuracy"),
                    align = "center")),
        tabPanel("Help",
                 p(class = "help",
                   paste("With the Titanic app you can predict the rate of",
                         "survival or death of the Titanic Disaster. The app",
                         "makes use of a training set and a test set. The",
                         "training set is used for training the model by using",
                         "the decision tree technique. After selecting the",
                         "features it will plot a tree. After that it uses the",
                         "model to make a prediction with the use of the test",
                         "set. The results are shown in the accuracy box."),
                   img(src = "screenshot.png", height = 530, width = 530),
                   br(),
                   paste("With the Minimum of Splits you can set the minimum",
                         "number of observations that must exist in a node in",
                         "order for a split to be attempted."),
                   br(), br(),
                   paste("With the Complexity Parameter you can set a",
                         "threshold. Any split that does not decrease the",
                         "overall lack of fit by a factor of the threshold is",
                         "not attempted."))),
        tabPanel("About",
                 p(class = "about",
                   br(), br(),
                   "Title: Titanic", br(),
                   "Author: Karlan Astrego", br(),
                   "Version 1.0", br(), br(),
                   "The data used in this app is from Kaggle.com.", br(), br(),
                   paste("This app was a project for the 'Developing Data",
                         "Products' Course from Coursera and the Johns Hopkins",
                         "University."),
                   br(), br(),
                   "You can find the code on Github:",
                   a(href = "https://github.com/astrego/Titanic",
                     span("https://github.com/astrego/Titanic")),
                   # The original nested the Kaggle link inside the argument
                   # list of the Coursera image, producing invalid markup;
                   # they are now two separate sibling links.
                   a(href = "https://www.coursera.org/specialization/jhudatascience/1?utm_medium=listingPage",
                     img(src = "coursera.jpg")),
                   a(href = "https://www.kaggle.com",
                     img(src = "kaggle.png", height = 200, width = 190))))
      )
    )
  )
))
# K-means clustering of item descriptions via a TF-IDF bag-of-words model.
#
# One-time installation notes kept from the original author:
# install.packages('devtools')
# library(devtools)
# slam_url <- "https://cran.r-project.org/src/contrib/Archive/slam/slam_0.1-37.tar.gz"
# install_url(slam_url)
# update.packages('Rcpp')
# install.packages('tm')
# install.packages('quanteda')

# NOTE(review): a hard-coded, machine-specific working directory makes the
# script non-portable -- prefer running from the project root or using a
# relative path. Kept for behavioral compatibility.
setwd("C:/Users/vijay.bhaskar/Desktop/MLA LADS/")

# This csv is the output of the data-prep code.
dataset <- read.csv('Combined_data_v1.csv')
dataset$Project <- as.character(dataset$Project)

library(tm)

# ---- Build and normalise the corpus ----
corpus <- Corpus(VectorSource(dataset$item_description))
corpus
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, removeWords, stopwords('english'))

# ---- Document-term matrix with TF-IDF weighting ----
mat <- DocumentTermMatrix(corpus)
mat
mat4 <- as.matrix(weightTfIdf(mat))

# Scale each row to unit Euclidean length so k-means distances behave like
# cosine distances. Rows with zero norm (documents that lost every term
# during cleaning) would otherwise produce NaN and crash kmeans(), so
# their divisor is forced to 1, leaving them as zero vectors.
norm_eucl <- function(m) {
  norms <- sqrt(rowSums(m ^ 2))
  norms[norms == 0] <- 1
  m / norms
}
mat_norm <- norm_eucl(mat4)

# ---- K-means clustering ----
set.seed(5)
k <- 10
# 'iter.max' spelled out: the original relied on partial matching of 'iter'.
kmeansResult <- kmeans(mat_norm, k, iter.max = 100)
table(kmeansResult$cluster)

# ---- Output: selected metadata columns plus cluster assignment ----
output1 <- cbind(dataset[, c(2, 5:6)], kmeansResult$cluster)
write.csv(output1, "output_combined.csv")
/KMeans_ Model_MLA_Combined.R
no_license
mvbreddy/DataKind
R
false
false
1,318
r
# K-means clustering of item descriptions via a TF-IDF bag-of-words model.
#
# One-time installation notes kept from the original author:
# install.packages('devtools')
# library(devtools)
# slam_url <- "https://cran.r-project.org/src/contrib/Archive/slam/slam_0.1-37.tar.gz"
# install_url(slam_url)
# update.packages('Rcpp')
# install.packages('tm')
# install.packages('quanteda')

# NOTE(review): a hard-coded, machine-specific working directory makes the
# script non-portable -- prefer running from the project root or using a
# relative path. Kept for behavioral compatibility.
setwd("C:/Users/vijay.bhaskar/Desktop/MLA LADS/")

# This csv is the output of the data-prep code.
dataset <- read.csv('Combined_data_v1.csv')
dataset$Project <- as.character(dataset$Project)

library(tm)

# ---- Build and normalise the corpus ----
corpus <- Corpus(VectorSource(dataset$item_description))
corpus
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, removeWords, stopwords('english'))

# ---- Document-term matrix with TF-IDF weighting ----
mat <- DocumentTermMatrix(corpus)
mat
mat4 <- as.matrix(weightTfIdf(mat))

# Scale each row to unit Euclidean length so k-means distances behave like
# cosine distances. Rows with zero norm (documents that lost every term
# during cleaning) would otherwise produce NaN and crash kmeans(), so
# their divisor is forced to 1, leaving them as zero vectors.
norm_eucl <- function(m) {
  norms <- sqrt(rowSums(m ^ 2))
  norms[norms == 0] <- 1
  m / norms
}
mat_norm <- norm_eucl(mat4)

# ---- K-means clustering ----
set.seed(5)
k <- 10
# 'iter.max' spelled out: the original relied on partial matching of 'iter'.
kmeansResult <- kmeans(mat_norm, k, iter.max = 100)
table(kmeansResult$cluster)

# ---- Output: selected metadata columns plus cluster assignment ----
output1 <- cbind(dataset[, c(2, 5:6)], kmeansResult$cluster)
write.csv(output1, "output_combined.csv")
# SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.

library(testthat)

#
# UNIT TEST: print.dataCompareRobject
#
# The aim of this test is to check the print output object,
# which has a certain structure based on the content of the dataCompareR
# output. It is not a complete test.
#

context("OutputComparisons : Print")

# Shared expectation for comparisons in which at least one table has no
# rows: print() should produce exactly two lines, reporting that no rows
# were compared and that no variables match.
expect_no_rows_compared <- function(txt) {
  expect_true(length(txt) == 2)
  expect_true(any(grepl(
    "All columns were compared, no rows compared because at least one table has no rows",
    txt, fixed = TRUE)))
  expect_true(any(grepl("No variables match", txt, fixed = TRUE)))
}

if (require(titanic)) {

  source('createTitanicDatasets.R')

  test_that("print only generates message when data sets match", {

    # The object has an already pre-determined structure.
    compareObject <- rCompare(titanic2, titanic2)

    # Check the print output with different parameters; in every case a
    # message goes to the console and NULL is returned.
    expect_output(p0 <- print(compareObject))
    expect_output(p1 <- print(compareObject, nObs = 1))
    expect_output(p2 <- print(compareObject, nObs = 2, nVars = 1))
    expect_output(p3 <- print(compareObject, verbose = TRUE))
    expect_output(p4 <- print(compareObject, nObs = 2, nVars = 1,
                              verbose = TRUE))

    nullInfo <- paste("Expect a NULL object to be created and a message",
                      "sent to the console 'All variables match'")
    expect_null(p0, info = nullInfo)
    expect_null(p1, info = nullInfo)
    expect_null(p2, info = nullInfo)
    expect_null(p3, info = nullInfo)
    expect_null(p4, info = nullInfo)
  })

  test_that("print returns message and data when mismatches occur", {

    # The object has an already pre-determined structure.
    b1 <- rCompare(titanic, titanic2, trimChars = FALSE,
                   keys = 'PassengerId')

    # Generate print output objects.
    expect_output(p0 <- print(b1))
    expect_output(p1 <- print(b1, nObs = 1))
    expect_output(p2 <- print(b1, nObs = 2, nVars = 1))
    expect_output(p3 <- print(b1, verbose = TRUE))
    expect_output(p4 <- print(b1, nObs = 2, nVars = 1, verbose = TRUE))

    # Default call: a character data frame of the mismatches.
    expect_is(p0, "data.frame")
    expect_output(str(p0), "20 obs")
    expect_output(str(p0), "7 variables")
    expect_is(p0$PASSENGERID, "character")
    expect_is(p0$valueA, "character")
    # The original asserted valueA twice; the second check is for valueB.
    expect_is(p0$valueB, "character")
    expect_is(p0$variable, "character")
    expect_is(p0$typeA, "character")
    expect_is(p0$typeB, "character")
    expect_is(p0$diffAB, "character")
    expect_equal(p0$PASSENGERID,
                 as.character(c(2, 10, 20, 27, 31, 867, 875, 876, 880, 890,
                                5, 1, 2, 3, 4, 5, 6, 7, 8, 9)))
    expect_equal(p0$variable,
                 c(rep("EMBARKED", 10), "HASSURVIVED", rep("NAME", 9)))
    expect_equal(p0$typeA, c(rep("character", 20)))
    expect_equal(p0$typeB, c(rep("character", 20)))
    expect_equal(p0$diffAB, c(rep("", 20)))

    # nObs = 1: one mismatching observation kept per variable per side.
    expect_output(str(p1), "5 obs")
    expect_output(str(p1), "7 variables")
    expect_equal(p1$PASSENGERID, as.character(c(2, 890, 5, 1, 9)))
    expect_equal(p1$variable,
                 c("EMBARKED", "EMBARKED", "HASSURVIVED", "NAME", "NAME"))

    # nObs = 2, nVars = 1: two observations, one variable per side.
    expect_output(str(p2), "8 obs")
    expect_output(str(p2), "7 variables")
    expect_equal(p2$PASSENGERID,
                 as.character(c(2, 10, 880, 890, 1, 2, 8, 9)))
    expect_equal(p2$variable, c(rep("EMBARKED", 4), rep("NAME", 4)))

    # verbose = TRUE returns every mismatch.
    expect_output(str(p3), "180 obs")
    expect_output(str(p3), "7 variables")
    expect_output(str(p4), "180 obs")
    expect_output(str(p4), "7 variables")
  })

} else {
  print("Part of OutputComparisons : Print test context not run, due to lack of titanic data")
}

test_that("print rcomp object", {

  # Create a couple of R compare objects.
  testSame <- rCompare(iris, iris)
  iris2 <- iris[1:140, ]
  iris2[3, 3] <- 1.5
  testDiff <- rCompare(iris, iris2)

  # Capture the outputs of createReportText as text.
  textSame <- capture.output(print(testSame))
  textDiff <- capture.output(print(testDiff))

  # For now we won't hard code each - instead, we will just check a few
  # points. We should look for the all-match label.
  expect_true(any(textSame == "All compared variables match "))
  expect_true(any(textSame == " Number of rows compared: 150 "))
  expect_true(any(textSame == " Number of columns compared: 5"))

  # Expect the report headers to differ between the two comparisons.
  expect_false(textSame[[1]] == textDiff[[1]])
  expect_false(textSame[[2]] == textDiff[[2]])
  expect_false(textSame[[3]] == textDiff[[3]])
  expect_false(textSame[[4]] == textDiff[[4]])

  # Check that the textDiff has more content.
  expect_true(length(textDiff) > 2)
})

test_that("test print rcompobj rows columns dropped messages", {

  # We'll use the pressure dataset for comparison.

  # A copy of pressure with missing rows.
  pressure2 <- pressure[1:15, ]

  # A copy of pressure with an extra column.
  pressure3 <- pressure
  pressure3$randomCol <- 1

  # A copy with missing rows AND extra columns.
  pressure4 <- pressure[1:15, ]
  pressure4$randomCol <- 1
  pressure4$randomCol2 <- 3

  comp1 <- rCompare(pressure, pressure)
  comp2 <- rCompare(pressure, pressure2)
  comp3 <- rCompare(pressure, pressure3)
  comp4 <- rCompare(pressure, pressure4)

  text1 <- capture.output(print(comp1))
  text2 <- capture.output(print(comp2))
  text3 <- capture.output(print(comp3))
  text4 <- capture.output(print(comp4))

  # Check we see what we expect.
  expect_true(any(grepl(
    "All columns were compared, all rows were compared",
    text1, fixed = TRUE)))
  expect_true(any(grepl(
    "All columns were compared, 4 row(s) were dropped from comparison",
    text2, fixed = TRUE)))
  expect_true(any(grepl(
    "1 column(s) were dropped, all rows were compared",
    text3, fixed = TRUE)))
  expect_true(any(grepl(
    "2 column(s) were dropped, 4 row(s) were dropped from comparison",
    text4, fixed = TRUE)))
})

test_that("test print argument validation", {
  aaa <- rCompare(iris, iris)

  # Non-numeric or negative control arguments should be rejected.
  expect_error(print(aaa, nVars = "A"))
  expect_error(print(aaa, nObs = "A"))
  expect_error(print(aaa, verbose = "A"))
  expect_error(print(aaa, nVars = -1))
  expect_error(print(aaa, nObs = -1))
})

test_that("test print with two empty data frames", {

  # A data frame with columns but no rows.
  df_empty <- data.frame(ColA = character(),
                         ColB = as.Date(character()),
                         ColC = character(),
                         stringsAsFactors = FALSE)

  comp1 <- rCompare(df_empty, df_empty)
  comp2 <- rCompare(df_empty, df_empty, keys = "ColA")
  comp3 <- rCompare(df_empty, df_empty, keys = c("ColA", "ColB"))
  comp4 <- rCompare(df_empty, df_empty, keys = c("ColA", "ColB", "ColC"))

  # Every key combination reports the same "no rows" result.
  for (comp in list(comp1, comp2, comp3, comp4)) {
    expect_no_rows_compared(capture.output(print(comp)))
  }
})

test_that("test print with one empty data frames", {

  # Two test data frames - one with no rows, one populated.
  df_empty <- data.frame(ColA = character(),
                         ColB = as.Date(character()),
                         ColC = character(),
                         stringsAsFactors = FALSE)
  df_not_empty <- data.frame(ColA = c("A", "B"),
                             ColB = c(Sys.Date(), Sys.Date()),
                             ColC = c(1, 1),
                             stringsAsFactors = FALSE)

  # Run a set of comparisons on them, in both argument orders.
  comp1 <- rCompare(df_empty, df_not_empty)
  comp2 <- rCompare(df_empty, df_not_empty, keys = "ColA")
  comp3 <- rCompare(df_empty, df_not_empty, keys = c("ColA", "ColB"))
  comp4 <- rCompare(df_empty, df_not_empty, keys = c("ColA", "ColB", "ColC"))
  comp5 <- rCompare(df_not_empty, df_empty)
  comp6 <- rCompare(df_not_empty, df_empty, keys = "ColA")
  comp7 <- rCompare(df_not_empty, df_empty, keys = c("ColA", "ColB"))
  comp8 <- rCompare(df_not_empty, df_empty, keys = c("ColA", "ColB", "ColC"))

  # Every combination reports the same "no rows" result.
  for (comp in list(comp1, comp2, comp3, comp4,
                    comp5, comp6, comp7, comp8)) {
    expect_no_rows_compared(capture.output(print(comp)))
  }
})
/tests/testthat/testCheckPrintObject.R
permissive
cran/dataCompareR
R
false
false
12,089
r
# SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.

library(testthat)

#
# UNIT TEST: print.dataCompareRobject
#
# The aim of this test is to check the print output object,
# which has a certain structure based on the content of the dataCompareR
# output. It is not a complete test.
#

context("OutputComparisons : Print")

# Shared expectation for comparisons in which at least one table has no
# rows: print() should produce exactly two lines, reporting that no rows
# were compared and that no variables match.
expect_no_rows_compared <- function(txt) {
  expect_true(length(txt) == 2)
  expect_true(any(grepl(
    "All columns were compared, no rows compared because at least one table has no rows",
    txt, fixed = TRUE)))
  expect_true(any(grepl("No variables match", txt, fixed = TRUE)))
}

if (require(titanic)) {

  source('createTitanicDatasets.R')

  test_that("print only generates message when data sets match", {

    # The object has an already pre-determined structure.
    compareObject <- rCompare(titanic2, titanic2)

    # Check the print output with different parameters; in every case a
    # message goes to the console and NULL is returned.
    expect_output(p0 <- print(compareObject))
    expect_output(p1 <- print(compareObject, nObs = 1))
    expect_output(p2 <- print(compareObject, nObs = 2, nVars = 1))
    expect_output(p3 <- print(compareObject, verbose = TRUE))
    expect_output(p4 <- print(compareObject, nObs = 2, nVars = 1,
                              verbose = TRUE))

    nullInfo <- paste("Expect a NULL object to be created and a message",
                      "sent to the console 'All variables match'")
    expect_null(p0, info = nullInfo)
    expect_null(p1, info = nullInfo)
    expect_null(p2, info = nullInfo)
    expect_null(p3, info = nullInfo)
    expect_null(p4, info = nullInfo)
  })

  test_that("print returns message and data when mismatches occur", {

    # The object has an already pre-determined structure.
    b1 <- rCompare(titanic, titanic2, trimChars = FALSE,
                   keys = 'PassengerId')

    # Generate print output objects.
    expect_output(p0 <- print(b1))
    expect_output(p1 <- print(b1, nObs = 1))
    expect_output(p2 <- print(b1, nObs = 2, nVars = 1))
    expect_output(p3 <- print(b1, verbose = TRUE))
    expect_output(p4 <- print(b1, nObs = 2, nVars = 1, verbose = TRUE))

    # Default call: a character data frame of the mismatches.
    expect_is(p0, "data.frame")
    expect_output(str(p0), "20 obs")
    expect_output(str(p0), "7 variables")
    expect_is(p0$PASSENGERID, "character")
    expect_is(p0$valueA, "character")
    # The original asserted valueA twice; the second check is for valueB.
    expect_is(p0$valueB, "character")
    expect_is(p0$variable, "character")
    expect_is(p0$typeA, "character")
    expect_is(p0$typeB, "character")
    expect_is(p0$diffAB, "character")
    expect_equal(p0$PASSENGERID,
                 as.character(c(2, 10, 20, 27, 31, 867, 875, 876, 880, 890,
                                5, 1, 2, 3, 4, 5, 6, 7, 8, 9)))
    expect_equal(p0$variable,
                 c(rep("EMBARKED", 10), "HASSURVIVED", rep("NAME", 9)))
    expect_equal(p0$typeA, c(rep("character", 20)))
    expect_equal(p0$typeB, c(rep("character", 20)))
    expect_equal(p0$diffAB, c(rep("", 20)))

    # nObs = 1: one mismatching observation kept per variable per side.
    expect_output(str(p1), "5 obs")
    expect_output(str(p1), "7 variables")
    expect_equal(p1$PASSENGERID, as.character(c(2, 890, 5, 1, 9)))
    expect_equal(p1$variable,
                 c("EMBARKED", "EMBARKED", "HASSURVIVED", "NAME", "NAME"))

    # nObs = 2, nVars = 1: two observations, one variable per side.
    expect_output(str(p2), "8 obs")
    expect_output(str(p2), "7 variables")
    expect_equal(p2$PASSENGERID,
                 as.character(c(2, 10, 880, 890, 1, 2, 8, 9)))
    expect_equal(p2$variable, c(rep("EMBARKED", 4), rep("NAME", 4)))

    # verbose = TRUE returns every mismatch.
    expect_output(str(p3), "180 obs")
    expect_output(str(p3), "7 variables")
    expect_output(str(p4), "180 obs")
    expect_output(str(p4), "7 variables")
  })

} else {
  print("Part of OutputComparisons : Print test context not run, due to lack of titanic data")
}

test_that("print rcomp object", {

  # Create a couple of R compare objects.
  testSame <- rCompare(iris, iris)
  iris2 <- iris[1:140, ]
  iris2[3, 3] <- 1.5
  testDiff <- rCompare(iris, iris2)

  # Capture the outputs of createReportText as text.
  textSame <- capture.output(print(testSame))
  textDiff <- capture.output(print(testDiff))

  # For now we won't hard code each - instead, we will just check a few
  # points. We should look for the all-match label.
  expect_true(any(textSame == "All compared variables match "))
  expect_true(any(textSame == " Number of rows compared: 150 "))
  expect_true(any(textSame == " Number of columns compared: 5"))

  # Expect the report headers to differ between the two comparisons.
  expect_false(textSame[[1]] == textDiff[[1]])
  expect_false(textSame[[2]] == textDiff[[2]])
  expect_false(textSame[[3]] == textDiff[[3]])
  expect_false(textSame[[4]] == textDiff[[4]])

  # Check that the textDiff has more content.
  expect_true(length(textDiff) > 2)
})

test_that("test print rcompobj rows columns dropped messages", {

  # We'll use the pressure dataset for comparison.

  # A copy of pressure with missing rows.
  pressure2 <- pressure[1:15, ]

  # A copy of pressure with an extra column.
  pressure3 <- pressure
  pressure3$randomCol <- 1

  # A copy with missing rows AND extra columns.
  pressure4 <- pressure[1:15, ]
  pressure4$randomCol <- 1
  pressure4$randomCol2 <- 3

  comp1 <- rCompare(pressure, pressure)
  comp2 <- rCompare(pressure, pressure2)
  comp3 <- rCompare(pressure, pressure3)
  comp4 <- rCompare(pressure, pressure4)

  text1 <- capture.output(print(comp1))
  text2 <- capture.output(print(comp2))
  text3 <- capture.output(print(comp3))
  text4 <- capture.output(print(comp4))

  # Check we see what we expect.
  expect_true(any(grepl(
    "All columns were compared, all rows were compared",
    text1, fixed = TRUE)))
  expect_true(any(grepl(
    "All columns were compared, 4 row(s) were dropped from comparison",
    text2, fixed = TRUE)))
  expect_true(any(grepl(
    "1 column(s) were dropped, all rows were compared",
    text3, fixed = TRUE)))
  expect_true(any(grepl(
    "2 column(s) were dropped, 4 row(s) were dropped from comparison",
    text4, fixed = TRUE)))
})

test_that("test print argument validation", {
  aaa <- rCompare(iris, iris)

  # Non-numeric or negative control arguments should be rejected.
  expect_error(print(aaa, nVars = "A"))
  expect_error(print(aaa, nObs = "A"))
  expect_error(print(aaa, verbose = "A"))
  expect_error(print(aaa, nVars = -1))
  expect_error(print(aaa, nObs = -1))
})

test_that("test print with two empty data frames", {

  # A data frame with columns but no rows.
  df_empty <- data.frame(ColA = character(),
                         ColB = as.Date(character()),
                         ColC = character(),
                         stringsAsFactors = FALSE)

  comp1 <- rCompare(df_empty, df_empty)
  comp2 <- rCompare(df_empty, df_empty, keys = "ColA")
  comp3 <- rCompare(df_empty, df_empty, keys = c("ColA", "ColB"))
  comp4 <- rCompare(df_empty, df_empty, keys = c("ColA", "ColB", "ColC"))

  # Every key combination reports the same "no rows" result.
  for (comp in list(comp1, comp2, comp3, comp4)) {
    expect_no_rows_compared(capture.output(print(comp)))
  }
})

test_that("test print with one empty data frames", {

  # Two test data frames - one with no rows, one populated.
  df_empty <- data.frame(ColA = character(),
                         ColB = as.Date(character()),
                         ColC = character(),
                         stringsAsFactors = FALSE)
  df_not_empty <- data.frame(ColA = c("A", "B"),
                             ColB = c(Sys.Date(), Sys.Date()),
                             ColC = c(1, 1),
                             stringsAsFactors = FALSE)

  # Run a set of comparisons on them, in both argument orders.
  comp1 <- rCompare(df_empty, df_not_empty)
  comp2 <- rCompare(df_empty, df_not_empty, keys = "ColA")
  comp3 <- rCompare(df_empty, df_not_empty, keys = c("ColA", "ColB"))
  comp4 <- rCompare(df_empty, df_not_empty, keys = c("ColA", "ColB", "ColC"))
  comp5 <- rCompare(df_not_empty, df_empty)
  comp6 <- rCompare(df_not_empty, df_empty, keys = "ColA")
  comp7 <- rCompare(df_not_empty, df_empty, keys = c("ColA", "ColB"))
  comp8 <- rCompare(df_not_empty, df_empty, keys = c("ColA", "ColB", "ColC"))

  # Every combination reports the same "no rows" result.
  for (comp in list(comp1, comp2, comp3, comp4,
                    comp5, comp6, comp7, comp8)) {
    expect_no_rows_compared(capture.output(print(comp)))
  }
})
#' @note #' As this code may add and remove observations, numerical imprecision #' may result in negative estimates of squared quantities, like the #' second or fourth moments. We do not currently correct for this #' issue, although it may be somewhat mitigated by setting a smaller #' \code{restart_period}. In the future we will add a check for #' this case. Post an issue if you experience this bug.
/man-roxygen/note-heywood.R
no_license
shabbychef/fromo
R
false
false
404
r
#' @note #' As this code may add and remove observations, numerical imprecision #' may result in negative estimates of squared quantities, like the #' second or fourth moments. We do not currently correct for this #' issue, although it may be somewhat mitigated by setting a smaller #' \code{restart_period}. In the future we will add a check for #' this case. Post an issue if you experience this bug.
# NumberWrong.R
# For each participant and each condition (FF, FS, SF, I), count how many
# trial columns have zero looking time for each gaze AOI (eyes, nose, mouth,
# hair, jaw, and per-trial total), across the two sheets of each condition,
# then write one worksheet of per-participant counts per condition.
#
# This version replaces four near-identical hand-unrolled foreach blocks with
# a single helper.  It also removes the original first pass (assigned to `x`,
# never used afterwards), which additionally incremented the wrong counters:
# row 8 (RF mouth) went to zeroMouthLF, row 9 (LF hair) to zeroHairRF, and
# rows 10/11 shifted the hair counts into the jaw counters.

library(PNLabStats)
library(readxl)
library(openxlsx)
library(foreach)
library(doParallel)

rm(list = ls())

# Interactively pick the workbook holding the master sheets.
file <- file.choose()

# Two sheets per condition (one per presentation).
system.time({
  MFF  <- master_sheets(file, 2)
  MFF2 <- master_sheets(file, 3)
  MFS  <- master_sheets(file, 5)
  MFS2 <- master_sheets(file, 6)
  MSF  <- master_sheets(file, 7)
  MSF2 <- master_sheets(file, 8)
  MI   <- master_sheets(file, 10)
  MI2  <- master_sheets(file, 11)
})

parlist <- getUniqueParlist(MFF, 1)
parlist <- na.omit(parlist)

registerDoParallel(8)

# Row index of each AOI inside a participant's column block.
# Rows 3:12 are the ten AOIs; row 13 is the per-trial total.
aoi_rows <- c(LFeye = 3, RFeye = 4, LFNose = 5, RFNose = 6,
              LFMouth = 7, RFMouth = 8, LFHair = 9, RFHair = 10,
              LFJaw = 11, RFJaw = 12, Total = 13)

# Count, for one participant (sheets p1/p2), how many trial columns hold a
# zero in each AOI row, plus how many trial columns are zero across ALL ten
# AOI rows ("All").  Trial data start in column 2 and repeat every 3 columns,
# matching the original seq(2, ncol, 3) walk.  Returns a named list of 12
# counts so that foreach/rbind builds the same row structure as before.
count_zero_trials <- function(p1, p2) {
  counts <- setNames(as.list(rep(0, length(aoi_rows) + 1)),
                     c(names(aoi_rows), "All"))
  aoi_only <- aoi_rows[1:10]  # "All" test excludes the Total row
  for (j in seq(2, ncol(p1), 3)) {
    for (k in seq_along(aoi_rows)) {
      r <- aoi_rows[[k]]
      counts[[k]] <- counts[[k]] + (p1[r, j] == 0) + (p2[r, j] == 0)
    }
    if (all(p1[aoi_only, j] == 0)) counts[["All"]] <- counts[["All"]] + 1
    if (all(p2[aoi_only, j] == 0)) counts[["All"]] <- counts[["All"]] + 1
  }
  counts
}

# Build the per-participant zero-gaze table for one condition (a sheet pair).
# Participant columns are located by matching the participant id against the
# sheet's column names, exactly as in the original code.
never_gaze_table <- function(sheet1, sheet2) {
  res <- foreach(i = seq_along(parlist), .combine = rbind) %dopar% {
    p1 <- sheet1[, grepl(parlist[i], names(sheet1))]
    p2 <- sheet2[, grepl(parlist[i], names(sheet2))]
    count_zero_trials(p1, p2)
  }
  res <- as.data.frame(res)
  colnames(res) <- c("LFeye", "RFeye", "LFNose", "RFNose", "LFMouth", "RFMouth",
                     "LFHair", "RFHair", "LFJaw", "RFJaw", "Total", "All")
  rownames(res) <- parlist
  res
}

FFNeverGaze <- never_gaze_table(MFF, MFF2)
FSNeverGaze <- never_gaze_table(MFS, MFS2)
SFNeverGaze <- never_gaze_table(MSF, MSF2)
INeverGaze  <- never_gaze_table(MI,  MI2)

dataframes <- list(FFNeverGaze, FSNeverGaze, SFNeverGaze, INeverGaze)
conditions <- list("FF", "FS", "SF", "I")

# One worksheet per condition, then save and export.
wb <- createWorkbook()
for (i in seq_along(conditions)) {
  addWorksheet(wb, conditions[[i]])
}
for (j in seq_along(dataframes)) {
  writeDataTable(wb, conditions[[j]], as.data.frame(dataframes[[j]]),
                 rowNames = TRUE)
}
saveWorkbook(wb, file.path(getwd(), "NeverGazed.xls"), overwrite = TRUE)
writeToExcel(dataframes, file.path(getwd(), "NeverGazed.xls"), conditions)
/NumberWrong.R
no_license
RScicomp/PNLab_Optimized
R
false
false
15,601
r
# NumberWrong.R
# For each participant and each condition (FF, FS, SF, I), count how many
# trial columns have zero looking time for each gaze AOI (eyes, nose, mouth,
# hair, jaw, and per-trial total), across the two sheets of each condition,
# then write one worksheet of per-participant counts per condition.
#
# This version replaces four near-identical hand-unrolled foreach blocks with
# a single helper.  It also removes the original first pass (assigned to `x`,
# never used afterwards), which additionally incremented the wrong counters:
# row 8 (RF mouth) went to zeroMouthLF, row 9 (LF hair) to zeroHairRF, and
# rows 10/11 shifted the hair counts into the jaw counters.

library(PNLabStats)
library(readxl)
library(openxlsx)
library(foreach)
library(doParallel)

rm(list = ls())

# Interactively pick the workbook holding the master sheets.
file <- file.choose()

# Two sheets per condition (one per presentation).
system.time({
  MFF  <- master_sheets(file, 2)
  MFF2 <- master_sheets(file, 3)
  MFS  <- master_sheets(file, 5)
  MFS2 <- master_sheets(file, 6)
  MSF  <- master_sheets(file, 7)
  MSF2 <- master_sheets(file, 8)
  MI   <- master_sheets(file, 10)
  MI2  <- master_sheets(file, 11)
})

parlist <- getUniqueParlist(MFF, 1)
parlist <- na.omit(parlist)

registerDoParallel(8)

# Row index of each AOI inside a participant's column block.
# Rows 3:12 are the ten AOIs; row 13 is the per-trial total.
aoi_rows <- c(LFeye = 3, RFeye = 4, LFNose = 5, RFNose = 6,
              LFMouth = 7, RFMouth = 8, LFHair = 9, RFHair = 10,
              LFJaw = 11, RFJaw = 12, Total = 13)

# Count, for one participant (sheets p1/p2), how many trial columns hold a
# zero in each AOI row, plus how many trial columns are zero across ALL ten
# AOI rows ("All").  Trial data start in column 2 and repeat every 3 columns,
# matching the original seq(2, ncol, 3) walk.  Returns a named list of 12
# counts so that foreach/rbind builds the same row structure as before.
count_zero_trials <- function(p1, p2) {
  counts <- setNames(as.list(rep(0, length(aoi_rows) + 1)),
                     c(names(aoi_rows), "All"))
  aoi_only <- aoi_rows[1:10]  # "All" test excludes the Total row
  for (j in seq(2, ncol(p1), 3)) {
    for (k in seq_along(aoi_rows)) {
      r <- aoi_rows[[k]]
      counts[[k]] <- counts[[k]] + (p1[r, j] == 0) + (p2[r, j] == 0)
    }
    if (all(p1[aoi_only, j] == 0)) counts[["All"]] <- counts[["All"]] + 1
    if (all(p2[aoi_only, j] == 0)) counts[["All"]] <- counts[["All"]] + 1
  }
  counts
}

# Build the per-participant zero-gaze table for one condition (a sheet pair).
# Participant columns are located by matching the participant id against the
# sheet's column names, exactly as in the original code.
never_gaze_table <- function(sheet1, sheet2) {
  res <- foreach(i = seq_along(parlist), .combine = rbind) %dopar% {
    p1 <- sheet1[, grepl(parlist[i], names(sheet1))]
    p2 <- sheet2[, grepl(parlist[i], names(sheet2))]
    count_zero_trials(p1, p2)
  }
  res <- as.data.frame(res)
  colnames(res) <- c("LFeye", "RFeye", "LFNose", "RFNose", "LFMouth", "RFMouth",
                     "LFHair", "RFHair", "LFJaw", "RFJaw", "Total", "All")
  rownames(res) <- parlist
  res
}

FFNeverGaze <- never_gaze_table(MFF, MFF2)
FSNeverGaze <- never_gaze_table(MFS, MFS2)
SFNeverGaze <- never_gaze_table(MSF, MSF2)
INeverGaze  <- never_gaze_table(MI,  MI2)

dataframes <- list(FFNeverGaze, FSNeverGaze, SFNeverGaze, INeverGaze)
conditions <- list("FF", "FS", "SF", "I")

# One worksheet per condition, then save and export.
wb <- createWorkbook()
for (i in seq_along(conditions)) {
  addWorksheet(wb, conditions[[i]])
}
for (j in seq_along(dataframes)) {
  writeDataTable(wb, conditions[[j]], as.data.frame(dataframes[[j]]),
                 rowNames = TRUE)
}
saveWorkbook(wb, file.path(getwd(), "NeverGazed.xls"), overwrite = TRUE)
writeToExcel(dataframes, file.path(getwd(), "NeverGazed.xls"), conditions)
x=list(a=1:3,b="hello!") # x is a list save(x,file="x.Rdata",ascii=TRUE) # save into working directory rm(x) # remove an object print(x) # gives an error load("x.Rdata") # x now exists! print(x) # show x t=readLines("x.Rdata") # read all text file cat("first line:",t[1],"\n") # show 1st line cat("first line:",readLines("x.Rdata",n=1),"\n") # write a text file using writeLines: conn=file("demo.txt") # create a connection writeLines("hello!", conn) # write something close(conn) # close connection # write a text file using sink: sink("demo2.txt") # divert output cat("hello!\n") # write something sink() # stop sink
/Chapter2/session-11.R
no_license
ajholguin/Modern-Optimization-with-R
R
false
false
842
r
x=list(a=1:3,b="hello!") # x is a list save(x,file="x.Rdata",ascii=TRUE) # save into working directory rm(x) # remove an object print(x) # gives an error load("x.Rdata") # x now exists! print(x) # show x t=readLines("x.Rdata") # read all text file cat("first line:",t[1],"\n") # show 1st line cat("first line:",readLines("x.Rdata",n=1),"\n") # write a text file using writeLines: conn=file("demo.txt") # create a connection writeLines("hello!", conn) # write something close(conn) # close connection # write a text file using sink: sink("demo2.txt") # divert output cat("hello!\n") # write something sink() # stop sink
/xgboost/prueba.R
no_license
raulcarlomagno/dmuba-dm-economiafinanzas
R
false
false
2,341
r
# check that the function returns a dataframe testthat::test_that( "conversion dataframe expectation", { testthat::expect_s3_class( convertVector2Df("A1,C2,A3,B12,C42",3,42,"blank"), "data.frame" ) } ) # check that the function returns an error message when user gives a wrong object testthat::test_that( "Wrong object passed for argument chr_wells", { testthat::expect_error(convertVector2Df(48,3,42,"blank")) } ) # check that the function returns an error when the user gives wells that do # not correspond to the dimensions of the plate he has provided. # In this test, we give a wrong number of columns (5), we should have given # a number> = 42 because of well C42. testthat::test_that( "incompatibility between plate dimensions and wells.", { testthat::expect_null( convertVector2Df("A1,C2,A3,B12,C42",3,5,"blank") ) testthat::expect_null( convertVector2Df("A1,C2,A3,B12,C42",2,45,"blank") ) } )
/tests/testthat/test-conversion.R
permissive
HelBor/wpm
R
false
false
1,048
r
# check that the function returns a dataframe testthat::test_that( "conversion dataframe expectation", { testthat::expect_s3_class( convertVector2Df("A1,C2,A3,B12,C42",3,42,"blank"), "data.frame" ) } ) # check that the function returns an error message when user gives a wrong object testthat::test_that( "Wrong object passed for argument chr_wells", { testthat::expect_error(convertVector2Df(48,3,42,"blank")) } ) # check that the function returns an error when the user gives wells that do # not correspond to the dimensions of the plate he has provided. # In this test, we give a wrong number of columns (5), we should have given # a number> = 42 because of well C42. testthat::test_that( "incompatibility between plate dimensions and wells.", { testthat::expect_null( convertVector2Df("A1,C2,A3,B12,C42",3,5,"blank") ) testthat::expect_null( convertVector2Df("A1,C2,A3,B12,C42",2,45,"blank") ) } )
# mrema.R -- MREMA: gene-set testing by fitting three-component Gaussian
# mixture models (non-DE / up-regulated / down-regulated) to per-gene effect
# sizes with EM, then comparing a set-specific model against an all-genes
# model with a likelihood-ratio test.
#
# External names used but not defined here: dplyr (pull, filter, %>%),
# tibble(), plus lr.test() (presumably extRemes) and progress() (presumably
# svMisc) -- confirm against the package's imports.

# Run the MREMA test over a collection of gene sets.
#
# postdata:   data frame; col 1 = gene id, col 2 = effect size (log2 scale),
#             col 3 = sampling variance of that effect.
# raw.gs:     named list of gene sets (character vectors of gene ids).
# set_number: optional index; if given, only that one set is tested.
# DF:         1, 2 or 6 -- which nested-model comparison to run (also the
#             degrees of freedom of the likelihood-ratio test).
# params:     if non-NULL, return the H1 parameters of the last fitted set
#             instead of the results table.
# threshold:  fold-change cut-off separating DE from non-DE (log2 is taken
#             internally).
# overlap:    fraction of a DE component's mass allowed past the threshold.
#
# Returns a tibble with one row per gene set (direction criterion, weight
# difference, set size, LRT p-value, BH-adjusted p-value), or the H1
# parameter list when `params` is supplied.
mrema <- function(postdata, raw.gs, set_number = NULL, DF = NULL, params = NULL, threshold = NULL, overlap = 0.25){
  # Keep only genes with complete rows.
  postdata <- postdata[complete.cases(postdata),]
  effect <- dplyr::pull(postdata,2)
  variance<- dplyr::pull(postdata,3)
  # Restrict raw.gs to a single named set when set_number is given.
  if(is.null(set_number) == FALSE){
    set_name <- names(raw.gs[set_number])
    raw.gs <- raw.gs[which(names(raw.gs) == set_name)]}
  ## threshold max for middle component
  # Grid-search the largest variance for the zero-mean non-DE component such
  # that pnorm(log2(threshold), 0, sd) first drops below 0.975, i.e. ~97.5%
  # of that component stays below the DE threshold.
  comp1_var_max <- seq(0,1, by = 0.00001)
  comp1_var_max <- comp1_var_max[which(pnorm(log2(threshold), 0, sqrt(comp1_var_max)) < 0.975)[1]]
  # fit ggm to all genes without regard for set membership (null model)
  all_genes_mixture <- .EM_6FP_fixed(effect, variance, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
  loglike_all_genes <- all_genes_mixture$loglike
  # Per-set result accumulators.
  v<- rep(1,length(raw.gs))                     # LRT p-values
  nonDE_criterion<- rep(FALSE, length(raw.gs))  # TRUE if DE proportion larger in set
  tol<- rep(0, length(raw.gs))                  # number of genes per set
  weight_diff <- rep(1,length(raw.gs))          # non-DE weight difference
  BIC<- rep(FALSE, length(raw.gs))              # TRUE if set model wins on BIC
  for(j in 1:length(raw.gs)){
    tryCatch({
      if(DF == 1){
        ### run 1DF test: in-set and out-of-set genes share component means,
        ### variances and the up/down split; only the non-DE weight differs
        ### (one extra free parameter vs the null).
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        effect_set <- pull(set_specific_post,2)
        variance_set<- pull(set_specific_post,3)
        set<- set_specific_post$set
        set_mixture <- .EM_1FP_fixed(effect_set, variance_set, set, comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_set_genes <- set_mixture$loglike
        set_parameters <- set_mixture$param
        ll_trace <- set_mixture$ll.vector
        # compare the two models (LRT with 1 df; lr.test takes negative log-likelihoods)
        aa<-lr.test(-loglike_all_genes, -loglike_set_genes,alpha = 0.05, 1)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 7*log(nrow(postdata))-2*(loglike_set_genes)
        parameters_h1 <- set_parameters
        # alpha[1] = in-set non-DE weight, alpha[4] = out-of-set non-DE weight.
        nonDE_criterion[j] <- set_parameters$alpha[1] < set_parameters$alpha[4]
        weight_diff[j] <- (set_parameters$alpha[4] - set_parameters$alpha[1])
        v[j]<- aa$p.value[[1]]
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      } else if(DF == 6) {
        ### run 6DF approach: fit fully separate 6-parameter mixtures to the
        ### in-set and out-of-set genes.
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        set_specific_post_in<- set_specific_post %>% filter(set==1)
        effect_inset <- pull(set_specific_post_in,2)
        variance_inset<- pull(set_specific_post_in,3)
        # fit the gmm to the genes in the gene set
        inset_mixture <- .EM_6FP_fixed(effect_inset, variance_inset, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_Inset_genes <- inset_mixture$loglike
        inset_parameters <- inset_mixture$param
        # get all genes in the outset
        set_specific_post_out<- set_specific_post %>% filter(set==0)
        effect_outset <- pull(set_specific_post_out,2)
        variance_outset<- pull(set_specific_post_out,3)
        # fit the gmm to genes outside the gene set
        outset_mixture <- .EM_6FP_fixed(effect_outset, variance_outset, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_Outset_genes <- outset_mixture$loglike
        outset_parameters <- outset_mixture$param
        parameters_h1 <- c(inset_parameters, outset_parameters)
        # compare the two models (LRT with 6 df)
        aa<-lr.test(-loglike_all_genes, -(loglike_Inset_genes+loglike_Outset_genes),alpha = 0.05, 6)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 12*log(nrow(postdata))-2*(loglike_Inset_genes+loglike_Outset_genes)
        v[j]<- aa$p.value[[1]]
        nonDE_criterion[j] <- inset_parameters$alpha[1] < outset_parameters$alpha[1]
        weight_diff[j] <- (outset_parameters$alpha[1] - inset_parameters$alpha[1])
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      } else if(DF == 2){
        ### run 2DF approach: shared means/variances, but independent
        ### three-way weights for in-set vs out-of-set (two extra free
        ### parameters vs the null).
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        effect_set <- pull(set_specific_post,2)
        variance_set<- pull(set_specific_post,3)
        set<- set_specific_post$set
        set_mixture <- .EM_2FP_fixed(effect_set, variance_set, set, comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_set_genes <- set_mixture$loglike
        set_parameters <- set_mixture$param
        ll_trace <- set_mixture$ll.vector
        # compare the two models (LRT with 2 df)
        aa<-lr.test(-loglike_all_genes, -loglike_set_genes,alpha = 0.05, 2)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 8*log(nrow(postdata))-2*(loglike_set_genes)
        parameters_h1 <- set_parameters
        nonDE_criterion[j] <- set_parameters$alpha[1] < set_parameters$alpha[4]
        weight_diff[j] <- (set_parameters$alpha[4] - set_parameters$alpha[1])
        v[j]<- aa$p.value[[1]]
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      }
    }, error = function(e){
      cat("Error in ", names(raw.gs[j]),"set\n")
      # NOTE(review): these `<-` assignments create locals inside the
      # handler's own environment; they do NOT update the outer result
      # vectors (that would need `<<-`), so a failed set silently keeps its
      # initial placeholder values. Also `nonDE_weight_upper_CI` is not
      # defined anywhere in this file -- confirm intent.
      nonDE_criterion[j]<- NA
      v[j]<- NA
      tol[j]<- NA
      BIC[j]<- NA
      nonDE_weight_upper_CI[j] <- NA
      progress(j, max.value = length(raw.gs))
    })
  }
  # Benjamini-Hochberg correction across all tested sets.
  p_adj <- p.adjust(v, method = "BH", n = length(v))
  detected_gene_sets<- tibble("GENE.SET"= names(raw.gs), "Prop.DE.Increased"= as.numeric(nonDE_criterion),"Estimated.Difference" = weight_diff, "NR.GENES"=tol, "PVAL"= v, "ADJ.PVAL" = p_adj)#, "BIC.Value"=BIC, "Adj.Pval" = p_adj, "Enrichment" = weight_diff)
  # `parameters_h1` holds whatever the LAST successfully fitted set produced.
  if(is.null(params) == TRUE) return(detected_gene_sets)
  else return(parameters_h1)
}

# fitting the ggm with all parameters (apart from non-DE component mean and variance) free
# Fit the 3-component mixture to one vector of effects by EM.
#   effect, variance:   per-gene effects and their sampling variances.
#   effect_summary_df:  unused -- every caller in this file skips it by
#                       passing the remaining arguments by name.
#   comp1_var_max:      fixed variance for the non-DE component.
#   threshold, overlap: forwarded to the M-step mean constraints.
# NOTE(review): the default `overlap = overlap` is self-referential and would
# error if the argument were ever omitted; callers always supply it.
# Returns list(loglike = final log-likelihood, param = last M-step output).
.EM_6FP_fixed <- function(effect, variance, effect_summary_df, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance on the log-likelihood change
  iter<- 10  # inner mean/variance alternations per M-step
  for (i in 1:n) {
    if (i == 1) {
      # Initialization: means (0, 1, -1), variances 0.005, weights (.8, .1, .1).
      e.step <- .e_step_iter(effect, variance, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1))
      m.step <- .m_step_iter_fixed(effect,variance, c(0.005, 0.005, 0.005),iter, e.step[["posterior_df"]], comp1_var_max, threshold, overlap = overlap)
      cur.loglik <- e.step[["loglik"]]
      loglik.vector <- e.step[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step <- .e_step_iter(effect, variance, m.step[["mu"]], m.step[["var"]], m.step[["alpha"]])
      m.step <- .m_step_iter_fixed(effect,variance, m.step[["var"]],iter, e.step[["posterior_df"]], comp1_var_max, threshold, overlap = overlap)
      loglik.vector <- c(loglik.vector, e.step[["loglik"]])
      loglik.diff <- abs((cur.loglik - e.step[["loglik"]]))
      if(loglik.diff < m) {
        break
      } else {
        cur.loglik <- e.step[["loglik"]]
      }
    }
  }
  # Last recorded log-likelihood is the converged (or final-iteration) value.
  loglike_all_genes<-tail(loglik.vector, n=1)
  parameters <- list("loglike" = loglike_all_genes, "param" = m.step)
  return(parameters)
}

## # fitting the ggm 1DF approach
# EM fit of the set-aware mixture where only the non-DE weight may differ
# between in-set and out-of-set genes (see .m_step_set_iter_fixed).
# Returns list(loglike, param, ll.vector = full log-likelihood trace).
.EM_1FP_fixed <- function(effect_set, variance_set, set, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance
  iter<- 10  # inner M-step alternations
  for (i in 1:n) {
    if (i == 1) {
      # Initialization: six weights -- (.8, .1, .1) for in-set, (0.8, 0.1, 0.1) for out-of-set.
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1, 0.8, 0.1, 0.1))
      m.step.set <- .m_step_set_iter_fixed(effect_set, variance_set, set, c(0.005, 0.005, 0.005),iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      cur.loglik.set <- e.step.set[["loglik"]]
      loglik.vector.set <- e.step.set[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, m.step.set[["mu"]], m.step.set[["var"]],m.step.set[["alpha"]])
      m.step.set <- .m_step_set_iter_fixed(effect_set,variance_set,set,m.step.set[["var"]],iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      loglik.vector.set <- c(loglik.vector.set, e.step.set[["loglik"]])
      loglik.diff.set <- abs((cur.loglik.set - e.step.set[["loglik"]]))
      if(loglik.diff.set < m) {
        break
      } else {
        cur.loglik.set <- e.step.set[["loglik"]]
      }
    }
  }
  loglike_set_genes<-tail(loglik.vector.set, n=1)
  parameters <- list("loglike" = loglike_set_genes, "param" = m.step.set, "ll.vector" = loglik.vector.set)
  return(parameters)
}

# the e_step and m_step functions in the EM algorithm
# E-step: per-gene posterior membership probabilities for the 3 components.
# x = effects, lfc_var = per-gene sampling variance (vector), mu/component
# variance/alpha vectors are length 3. Each gene's total variance is
# component variance + its own sampling variance.
.e_step_iter <- function(x,lfc_var, mu_vector, component_var, alpha_vector) {
  # both lfc_var and component_var contribute to total variance
  comp1_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[1]
  comp2_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[2]
  comp3_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[3]
  sum_of_comps <- comp1_prod + comp2_prod + comp3_prod
  # Floor exact zeros to avoid division by zero and log(0) below.
  sum_of_comps[which(sum_of_comps == 0)] <- 1e-200
  comp1_post <- comp1_prod / sum_of_comps
  comp2_post <- comp2_prod / sum_of_comps
  comp3_post <- comp3_prod / sum_of_comps
  sum_of_comps_ln <- log(sum_of_comps, base = exp(1))
  sum_of_comps_ln_sum <- sum(sum_of_comps_ln)
  list("loglik" = sum_of_comps_ln_sum, "posterior_df" = cbind(comp1_post, comp2_post, comp3_post), "prod_df" = cbind(comp1_prod, comp2_prod, comp3_prod))
}

# the e_step and m_step functions in the EM algorithm for w_o_mrema (weights only)
# Set-aware E-step: components 1-3 apply to in-set genes (set == 1),
# components 4-6 (same means/variances, weights alpha[4:6]) to out-of-set
# genes; the set indicator zeroes out the other side for each gene.
.e_step_set_iter <- function(x,lfc_var,set, mu_vector, component_var, alpha_vector) {
  # posterior components for genes inside the set
  comp1_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[1]*set
  comp2_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[2]*set
  comp3_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[3]*set
  # posterior dist for genes outside of the set
  comp4_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[4]*(1-set)
  comp5_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[5]*(1-set)
  comp6_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[6]*(1-set)
  sum_of_comps1 <- comp1_prod + comp2_prod + comp3_prod
  sum_of_comps2 <- comp4_prod + comp5_prod + comp6_prod
  sum_of_comps<- sum_of_comps1 + sum_of_comps2
  sum_of_comps[which(sum_of_comps == 0)] <- 1e-200
  # Posteriors are normalised within each side; the NA replacements handle
  # the 0/0 cases created by the set indicator zeroing the other side.
  comp1_post <- comp1_prod / sum_of_comps1
  comp1_post[is.na(comp1_post)] <- 0
  comp2_post <- comp2_prod / sum_of_comps1
  comp2_post[is.na(comp2_post)] <- 0
  comp3_post <- comp3_prod / sum_of_comps1
  comp3_post[is.na(comp3_post)] <- 0
  comp4_post <- comp4_prod / sum_of_comps2
  comp4_post[is.na(comp4_post)] <- 0
  comp5_post <- comp5_prod / sum_of_comps2
  comp5_post[is.na(comp5_post)] <- 0
  comp6_post <- comp6_prod / sum_of_comps2
  comp6_post[is.na(comp6_post)] <- 0
  sum_of_comps_ln <- log(sum_of_comps, base = exp(1))
  sum_of_comps_ln_sum <- sum(sum_of_comps_ln)
  list("loglik" = sum_of_comps_ln_sum, "posterior_df" = cbind(comp1_post, comp2_post, comp3_post, comp4_post, comp5_post, comp6_post))
}

# M-step for the all-genes model: alternates t times between updating the DE
# components' means and variances under the threshold/overlap constraints;
# the non-DE component is pinned at mean 0 / variance comp1_var_max.
# Weights are floored at the file-level global `lower_bound` (defined at the
# bottom of this file).
.m_step_iter_fixed <- function(x, lfc_var, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization
      comp2_var <- component_var[2]
      ## inverse-variance weights
      w2_i <- 1/(comp2_var+lfc_var)
      # gets the minimum mean allowed when variance is comp2_var to keep
      # (1 - overlap) of the distribution above the threshold
      comp2_mean_min <- qnorm((1 - overlap), log2(threshold), sqrt(comp2_var))
      ## use either minimum value or mean estimate if bigger
      comp2_mu <- max(comp2_mean_min, sum(posterior_df[,2] * w2_i * x)/sum(posterior_df[,2] * w2_i))
      comp3_var <- component_var[3]
      ## inverse-variance weights
      w3_i <- 1/(comp3_var+lfc_var)
      # gets the maximum mean allowed when variance is comp3_var to keep
      # (1 - overlap) of the distribution below the negative threshold
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      ## use either maximum value or mean estimate if smaller
      comp3_mu <- min(comp3_mean_max,sum(posterior_df[,3] * w3_i * x)/sum(posterior_df[,3] * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Variance update (method-of-moments, sampling variance subtracted),
      # floored at 0.005; then recompute weights and the constrained mean.
      comp2_var <- max(0.005,sum(posterior_df[, 2] * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(posterior_df[, 2]*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min,sum(posterior_df[, 2] * w2_i * x)/sum(posterior_df[, 2] * w2_i))
      comp3_var <- max(0.005,sum(posterior_df[, 3] * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(posterior_df[, 3]*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(posterior_df[, 3] * w3_i * x)/sum(posterior_df[, 3] * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  comp1_alpha <- max(comp1_n / length(x),lower_bound)
  comp2_alpha <- max(comp2_n / length(x),lower_bound)
  comp3_alpha <- max(comp3_n / length(x),lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha))
}

# Set-aware M-step (1DF model): in-set and out-of-set halves share the three
# means/variances and a common up-vs-down split `c`; only the non-DE weight
# (alpha 1 vs alpha 4) is free to differ between the halves.
.m_step_set_iter_fixed <- function(x, lfc_var, set, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  # Effective component counts from the posterior memberships.
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  comp4_n <- sum(posterior_df[, 4])
  comp5_n <- sum(posterior_df[, 5])
  comp6_n <- sum(posterior_df[, 6])
  comp1_alpha <- max(comp1_n / sum(set),lower_bound)
  comp4_alpha <- max(comp4_n / (length(set)-sum(set)),lower_bound)
  # c = shared proportion of DE mass that is up-regulated (pooled over both halves).
  c <- (comp2_n + comp5_n)/(comp2_n + comp5_n + comp3_n + comp6_n)
  comp2_alpha <- (1 - comp1_alpha)*(c)
  comp3_alpha <- (1 - comp1_alpha)*(1-c)
  comp5_alpha <- (1 - comp4_alpha)*(c)
  comp6_alpha <- (1 - comp4_alpha)*(1-c)
  # the following for loop iterates between mean and variance, the number of iterations is t.
  # Posteriors are pooled across halves since means/variances are shared.
  # NOTE(review): comp1_pd is computed but never used below -- confirm.
  comp1_pd <- posterior_df[,1] + posterior_df[,4]
  comp2_pd <- posterior_df[,2] + posterior_df[,5]
  comp3_pd <- posterior_df[,3] + posterior_df[,6]
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization with the incoming variances; constrained means as in
      # .m_step_iter_fixed.
      comp2_var <- component_var[2]
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm(1-overlap, log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- component_var[3]
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Alternate variance and constrained-mean updates.
      comp2_var <- max(0.005,sum(comp2_pd * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(comp2_pd*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <-max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- max(0.005,sum(comp3_pd * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(comp3_pd*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  # Floor every weight at the file-level global `lower_bound`.
  comp1_alpha <- max(comp1_alpha,lower_bound)
  comp2_alpha <- max(comp2_alpha,lower_bound)
  comp3_alpha <- max(comp3_alpha,lower_bound)
  comp4_alpha <- max(comp4_alpha,lower_bound)
  comp5_alpha <- max(comp5_alpha,lower_bound)
  comp6_alpha <- max(comp6_alpha,lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha, comp4_alpha, comp5_alpha, comp6_alpha ))
}

## # fitting the ggm 2DF approach
# EM fit of the set-aware mixture where in-set and out-of-set halves get
# fully independent three-way weights (see .m_step_set_iter_fixed_2DF).
# Returns list(loglike, param, ll.vector = full log-likelihood trace).
.EM_2FP_fixed <- function(effect_set, variance_set, set, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance
  iter<- 10  # inner M-step alternations
  for (i in 1:n) {
    if (i == 1) {
      # Initialization
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1, 0.8, 0.1, 0.1))
      m.step.set <- .m_step_set_iter_fixed_2DF(effect_set, variance_set, set, c(0.005, 0.005, 0.005),iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      cur.loglik.set <- e.step.set[["loglik"]]
      loglik.vector.set <- e.step.set[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, m.step.set[["mu"]], m.step.set[["var"]],m.step.set[["alpha"]])
      m.step.set <- .m_step_set_iter_fixed_2DF(effect_set,variance_set,set,m.step.set[["var"]],iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      loglik.vector.set <- c(loglik.vector.set, e.step.set[["loglik"]])
      loglik.diff.set <- abs((cur.loglik.set - e.step.set[["loglik"]]))
      if(loglik.diff.set < m) {
        break
      } else {
        cur.loglik.set <- e.step.set[["loglik"]]
      }
    }
  }
  loglike_set_genes<-tail(loglik.vector.set, n=1)
  parameters <- list("loglike" = loglike_set_genes, "param" = m.step.set, "ll.vector" = loglik.vector.set)
  return(parameters)
}

# Set-aware M-step (2DF model): identical shared mean/variance updates to
# .m_step_set_iter_fixed, but all six weights are estimated independently
# (normalised within each half) rather than tied through a common split.
.m_step_set_iter_fixed_2DF <- function(x, lfc_var, set, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  comp4_n <- sum(posterior_df[, 4])
  comp5_n <- sum(posterior_df[, 5])
  comp6_n <- sum(posterior_df[, 6])
  # the following for loop iterates between mean and variance, the number of iterations is t.
  # NOTE(review): comp1_pd is computed but never used below -- confirm.
  comp1_pd <- posterior_df[,1] + posterior_df[,4]
  comp2_pd <- posterior_df[,2] + posterior_df[,5]
  comp3_pd <- posterior_df[,3] + posterior_df[,6]
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization with the incoming variances; constrained means as in
      # .m_step_iter_fixed.
      comp2_var <- component_var[2]
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm(1-overlap, log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- component_var[3]
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Alternate variance and constrained-mean updates.
      comp2_var <- max(0.005,sum(comp2_pd * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(comp2_pd*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <-max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- max(0.005,sum(comp3_pd * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(comp3_pd*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  # Independent weights per half: in-set normalised by sum(set), out-of-set
  # by length(set) - sum(set); all floored at `lower_bound`.
  comp1_alpha <- max(comp1_n / sum(set),lower_bound)
  comp2_alpha <- max(comp2_n / sum(set),lower_bound)
  comp3_alpha <- max(comp3_n / sum(set),lower_bound)
  comp4_alpha <- max(comp4_n / (length(set)-sum(set)),lower_bound)
  comp5_alpha <- max(comp5_n / (length(set)-sum(set)),lower_bound)
  comp6_alpha <- max(comp6_n / (length(set)-sum(set)),lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha, comp4_alpha, comp5_alpha, comp6_alpha ))
}

# File-level floor applied to all mixture weights in the M-step functions
# above (5e-9), preventing any component weight from collapsing to zero.
lower_bound<- 0.000000005
/mrema.R
no_license
osedo/GSA-MREMA
R
false
false
20,919
r
# mrema.R -- MREMA: gene-set testing by fitting three-component Gaussian
# mixture models (non-DE / up-regulated / down-regulated) to per-gene effect
# sizes with EM, then comparing a set-specific model against an all-genes
# model with a likelihood-ratio test.
#
# External names used but not defined here: dplyr (pull, filter, %>%),
# tibble(), plus lr.test() (presumably extRemes) and progress() (presumably
# svMisc) -- confirm against the package's imports.

# Run the MREMA test over a collection of gene sets.
#
# postdata:   data frame; col 1 = gene id, col 2 = effect size (log2 scale),
#             col 3 = sampling variance of that effect.
# raw.gs:     named list of gene sets (character vectors of gene ids).
# set_number: optional index; if given, only that one set is tested.
# DF:         1, 2 or 6 -- which nested-model comparison to run (also the
#             degrees of freedom of the likelihood-ratio test).
# params:     if non-NULL, return the H1 parameters of the last fitted set
#             instead of the results table.
# threshold:  fold-change cut-off separating DE from non-DE (log2 is taken
#             internally).
# overlap:    fraction of a DE component's mass allowed past the threshold.
#
# Returns a tibble with one row per gene set (direction criterion, weight
# difference, set size, LRT p-value, BH-adjusted p-value), or the H1
# parameter list when `params` is supplied.
mrema <- function(postdata, raw.gs, set_number = NULL, DF = NULL, params = NULL, threshold = NULL, overlap = 0.25){
  # Keep only genes with complete rows.
  postdata <- postdata[complete.cases(postdata),]
  effect <- dplyr::pull(postdata,2)
  variance<- dplyr::pull(postdata,3)
  # Restrict raw.gs to a single named set when set_number is given.
  if(is.null(set_number) == FALSE){
    set_name <- names(raw.gs[set_number])
    raw.gs <- raw.gs[which(names(raw.gs) == set_name)]}
  ## threshold max for middle component
  # Grid-search the largest variance for the zero-mean non-DE component such
  # that pnorm(log2(threshold), 0, sd) first drops below 0.975, i.e. ~97.5%
  # of that component stays below the DE threshold.
  comp1_var_max <- seq(0,1, by = 0.00001)
  comp1_var_max <- comp1_var_max[which(pnorm(log2(threshold), 0, sqrt(comp1_var_max)) < 0.975)[1]]
  # fit ggm to all genes without regard for set membership (null model)
  all_genes_mixture <- .EM_6FP_fixed(effect, variance, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
  loglike_all_genes <- all_genes_mixture$loglike
  # Per-set result accumulators.
  v<- rep(1,length(raw.gs))                     # LRT p-values
  nonDE_criterion<- rep(FALSE, length(raw.gs))  # TRUE if DE proportion larger in set
  tol<- rep(0, length(raw.gs))                  # number of genes per set
  weight_diff <- rep(1,length(raw.gs))          # non-DE weight difference
  BIC<- rep(FALSE, length(raw.gs))              # TRUE if set model wins on BIC
  for(j in 1:length(raw.gs)){
    tryCatch({
      if(DF == 1){
        ### run 1DF test: in-set and out-of-set genes share component means,
        ### variances and the up/down split; only the non-DE weight differs
        ### (one extra free parameter vs the null).
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        effect_set <- pull(set_specific_post,2)
        variance_set<- pull(set_specific_post,3)
        set<- set_specific_post$set
        set_mixture <- .EM_1FP_fixed(effect_set, variance_set, set, comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_set_genes <- set_mixture$loglike
        set_parameters <- set_mixture$param
        ll_trace <- set_mixture$ll.vector
        # compare the two models (LRT with 1 df; lr.test takes negative log-likelihoods)
        aa<-lr.test(-loglike_all_genes, -loglike_set_genes,alpha = 0.05, 1)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 7*log(nrow(postdata))-2*(loglike_set_genes)
        parameters_h1 <- set_parameters
        # alpha[1] = in-set non-DE weight, alpha[4] = out-of-set non-DE weight.
        nonDE_criterion[j] <- set_parameters$alpha[1] < set_parameters$alpha[4]
        weight_diff[j] <- (set_parameters$alpha[4] - set_parameters$alpha[1])
        v[j]<- aa$p.value[[1]]
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      } else if(DF == 6) {
        ### run 6DF approach: fit fully separate 6-parameter mixtures to the
        ### in-set and out-of-set genes.
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        set_specific_post_in<- set_specific_post %>% filter(set==1)
        effect_inset <- pull(set_specific_post_in,2)
        variance_inset<- pull(set_specific_post_in,3)
        # fit the gmm to the genes in the gene set
        inset_mixture <- .EM_6FP_fixed(effect_inset, variance_inset, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_Inset_genes <- inset_mixture$loglike
        inset_parameters <- inset_mixture$param
        # get all genes in the outset
        set_specific_post_out<- set_specific_post %>% filter(set==0)
        effect_outset <- pull(set_specific_post_out,2)
        variance_outset<- pull(set_specific_post_out,3)
        # fit the gmm to genes outside the gene set
        outset_mixture <- .EM_6FP_fixed(effect_outset, variance_outset, comp1_var_max = comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_Outset_genes <- outset_mixture$loglike
        outset_parameters <- outset_mixture$param
        parameters_h1 <- c(inset_parameters, outset_parameters)
        # compare the two models (LRT with 6 df)
        aa<-lr.test(-loglike_all_genes, -(loglike_Inset_genes+loglike_Outset_genes),alpha = 0.05, 6)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 12*log(nrow(postdata))-2*(loglike_Inset_genes+loglike_Outset_genes)
        v[j]<- aa$p.value[[1]]
        nonDE_criterion[j] <- inset_parameters$alpha[1] < outset_parameters$alpha[1]
        weight_diff[j] <- (outset_parameters$alpha[1] - inset_parameters$alpha[1])
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      } else if(DF == 2){
        ### run 2DF approach: shared means/variances, but independent
        ### three-way weights for in-set vs out-of-set (two extra free
        ### parameters vs the null).
        set_specific_post<- postdata
        set_specific_post$set<- ifelse(pull(postdata,1) %in% raw.gs[[j]], 1, 0)
        effect_set <- pull(set_specific_post,2)
        variance_set<- pull(set_specific_post,3)
        set<- set_specific_post$set
        set_mixture <- .EM_2FP_fixed(effect_set, variance_set, set, comp1_var_max, threshold = threshold, overlap = overlap)
        loglike_set_genes <- set_mixture$loglike
        set_parameters <- set_mixture$param
        ll_trace <- set_mixture$ll.vector
        # compare the two models (LRT with 2 df)
        aa<-lr.test(-loglike_all_genes, -loglike_set_genes,alpha = 0.05, 2)
        BIC_all<- 6*log(nrow(postdata))-2*loglike_all_genes
        BIC_set<- 8*log(nrow(postdata))-2*(loglike_set_genes)
        parameters_h1 <- set_parameters
        nonDE_criterion[j] <- set_parameters$alpha[1] < set_parameters$alpha[4]
        weight_diff[j] <- (set_parameters$alpha[4] - set_parameters$alpha[1])
        v[j]<- aa$p.value[[1]]
        tol[j]<- length(raw.gs[[j]])
        BIC[j]<- BIC_set < BIC_all
        progress(j, max.value = length(raw.gs))
      }
    }, error = function(e){
      cat("Error in ", names(raw.gs[j]),"set\n")
      # NOTE(review): these `<-` assignments create locals inside the
      # handler's own environment; they do NOT update the outer result
      # vectors (that would need `<<-`), so a failed set silently keeps its
      # initial placeholder values. Also `nonDE_weight_upper_CI` is not
      # defined anywhere in this file -- confirm intent.
      nonDE_criterion[j]<- NA
      v[j]<- NA
      tol[j]<- NA
      BIC[j]<- NA
      nonDE_weight_upper_CI[j] <- NA
      progress(j, max.value = length(raw.gs))
    })
  }
  # Benjamini-Hochberg correction across all tested sets.
  p_adj <- p.adjust(v, method = "BH", n = length(v))
  detected_gene_sets<- tibble("GENE.SET"= names(raw.gs), "Prop.DE.Increased"= as.numeric(nonDE_criterion),"Estimated.Difference" = weight_diff, "NR.GENES"=tol, "PVAL"= v, "ADJ.PVAL" = p_adj)#, "BIC.Value"=BIC, "Adj.Pval" = p_adj, "Enrichment" = weight_diff)
  # `parameters_h1` holds whatever the LAST successfully fitted set produced.
  if(is.null(params) == TRUE) return(detected_gene_sets)
  else return(parameters_h1)
}

# fitting the ggm with all parameters (apart from non-DE component mean and variance) free
# Fit the 3-component mixture to one vector of effects by EM.
#   effect, variance:   per-gene effects and their sampling variances.
#   effect_summary_df:  unused -- every caller in this file skips it by
#                       passing the remaining arguments by name.
#   comp1_var_max:      fixed variance for the non-DE component.
#   threshold, overlap: forwarded to the M-step mean constraints.
# NOTE(review): the default `overlap = overlap` is self-referential and would
# error if the argument were ever omitted; callers always supply it.
# Returns list(loglike = final log-likelihood, param = last M-step output).
.EM_6FP_fixed <- function(effect, variance, effect_summary_df, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance on the log-likelihood change
  iter<- 10  # inner mean/variance alternations per M-step
  for (i in 1:n) {
    if (i == 1) {
      # Initialization: means (0, 1, -1), variances 0.005, weights (.8, .1, .1).
      e.step <- .e_step_iter(effect, variance, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1))
      m.step <- .m_step_iter_fixed(effect,variance, c(0.005, 0.005, 0.005),iter, e.step[["posterior_df"]], comp1_var_max, threshold, overlap = overlap)
      cur.loglik <- e.step[["loglik"]]
      loglik.vector <- e.step[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step <- .e_step_iter(effect, variance, m.step[["mu"]], m.step[["var"]], m.step[["alpha"]])
      m.step <- .m_step_iter_fixed(effect,variance, m.step[["var"]],iter, e.step[["posterior_df"]], comp1_var_max, threshold, overlap = overlap)
      loglik.vector <- c(loglik.vector, e.step[["loglik"]])
      loglik.diff <- abs((cur.loglik - e.step[["loglik"]]))
      if(loglik.diff < m) {
        break
      } else {
        cur.loglik <- e.step[["loglik"]]
      }
    }
  }
  # Last recorded log-likelihood is the converged (or final-iteration) value.
  loglike_all_genes<-tail(loglik.vector, n=1)
  parameters <- list("loglike" = loglike_all_genes, "param" = m.step)
  return(parameters)
}

## # fitting the ggm 1DF approach
# EM fit of the set-aware mixture where only the non-DE weight may differ
# between in-set and out-of-set genes (see .m_step_set_iter_fixed).
# Returns list(loglike, param, ll.vector = full log-likelihood trace).
.EM_1FP_fixed <- function(effect_set, variance_set, set, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance
  iter<- 10  # inner M-step alternations
  for (i in 1:n) {
    if (i == 1) {
      # Initialization: six weights -- (.8, .1, .1) for in-set, (0.8, 0.1, 0.1) for out-of-set.
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1, 0.8, 0.1, 0.1))
      m.step.set <- .m_step_set_iter_fixed(effect_set, variance_set, set, c(0.005, 0.005, 0.005),iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      cur.loglik.set <- e.step.set[["loglik"]]
      loglik.vector.set <- e.step.set[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, m.step.set[["mu"]], m.step.set[["var"]],m.step.set[["alpha"]])
      m.step.set <- .m_step_set_iter_fixed(effect_set,variance_set,set,m.step.set[["var"]],iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      loglik.vector.set <- c(loglik.vector.set, e.step.set[["loglik"]])
      loglik.diff.set <- abs((cur.loglik.set - e.step.set[["loglik"]]))
      if(loglik.diff.set < m) {
        break
      } else {
        cur.loglik.set <- e.step.set[["loglik"]]
      }
    }
  }
  loglike_set_genes<-tail(loglik.vector.set, n=1)
  parameters <- list("loglike" = loglike_set_genes, "param" = m.step.set, "ll.vector" = loglik.vector.set)
  return(parameters)
}

# the e_step and m_step functions in the EM algorithm
# E-step: per-gene posterior membership probabilities for the 3 components.
# x = effects, lfc_var = per-gene sampling variance (vector), mu/component
# variance/alpha vectors are length 3. Each gene's total variance is
# component variance + its own sampling variance.
.e_step_iter <- function(x,lfc_var, mu_vector, component_var, alpha_vector) {
  # both lfc_var and component_var contribute to total variance
  comp1_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[1]
  comp2_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[2]
  comp3_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[3]
  sum_of_comps <- comp1_prod + comp2_prod + comp3_prod
  # Floor exact zeros to avoid division by zero and log(0) below.
  sum_of_comps[which(sum_of_comps == 0)] <- 1e-200
  comp1_post <- comp1_prod / sum_of_comps
  comp2_post <- comp2_prod / sum_of_comps
  comp3_post <- comp3_prod / sum_of_comps
  sum_of_comps_ln <- log(sum_of_comps, base = exp(1))
  sum_of_comps_ln_sum <- sum(sum_of_comps_ln)
  list("loglik" = sum_of_comps_ln_sum, "posterior_df" = cbind(comp1_post, comp2_post, comp3_post), "prod_df" = cbind(comp1_prod, comp2_prod, comp3_prod))
}

# the e_step and m_step functions in the EM algorithm for w_o_mrema (weights only)
# Set-aware E-step: components 1-3 apply to in-set genes (set == 1),
# components 4-6 (same means/variances, weights alpha[4:6]) to out-of-set
# genes; the set indicator zeroes out the other side for each gene.
.e_step_set_iter <- function(x,lfc_var,set, mu_vector, component_var, alpha_vector) {
  # posterior components for genes inside the set
  comp1_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[1]*set
  comp2_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[2]*set
  comp3_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[3]*set
  # posterior dist for genes outside of the set
  comp4_prod <- dnorm(x, mu_vector[1], sqrt(component_var[1]+lfc_var)) * alpha_vector[4]*(1-set)
  comp5_prod <- dnorm(x, mu_vector[2], sqrt(component_var[2]+lfc_var)) * alpha_vector[5]*(1-set)
  comp6_prod <- dnorm(x, mu_vector[3], sqrt(component_var[3]+lfc_var)) * alpha_vector[6]*(1-set)
  sum_of_comps1 <- comp1_prod + comp2_prod + comp3_prod
  sum_of_comps2 <- comp4_prod + comp5_prod + comp6_prod
  sum_of_comps<- sum_of_comps1 + sum_of_comps2
  sum_of_comps[which(sum_of_comps == 0)] <- 1e-200
  # Posteriors are normalised within each side; the NA replacements handle
  # the 0/0 cases created by the set indicator zeroing the other side.
  comp1_post <- comp1_prod / sum_of_comps1
  comp1_post[is.na(comp1_post)] <- 0
  comp2_post <- comp2_prod / sum_of_comps1
  comp2_post[is.na(comp2_post)] <- 0
  comp3_post <- comp3_prod / sum_of_comps1
  comp3_post[is.na(comp3_post)] <- 0
  comp4_post <- comp4_prod / sum_of_comps2
  comp4_post[is.na(comp4_post)] <- 0
  comp5_post <- comp5_prod / sum_of_comps2
  comp5_post[is.na(comp5_post)] <- 0
  comp6_post <- comp6_prod / sum_of_comps2
  comp6_post[is.na(comp6_post)] <- 0
  sum_of_comps_ln <- log(sum_of_comps, base = exp(1))
  sum_of_comps_ln_sum <- sum(sum_of_comps_ln)
  list("loglik" = sum_of_comps_ln_sum, "posterior_df" = cbind(comp1_post, comp2_post, comp3_post, comp4_post, comp5_post, comp6_post))
}

# M-step for the all-genes model: alternates t times between updating the DE
# components' means and variances under the threshold/overlap constraints;
# the non-DE component is pinned at mean 0 / variance comp1_var_max.
# Weights are floored at the file-level global `lower_bound` (defined at the
# bottom of this file).
.m_step_iter_fixed <- function(x, lfc_var, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization
      comp2_var <- component_var[2]
      ## inverse-variance weights
      w2_i <- 1/(comp2_var+lfc_var)
      # gets the minimum mean allowed when variance is comp2_var to keep
      # (1 - overlap) of the distribution above the threshold
      comp2_mean_min <- qnorm((1 - overlap), log2(threshold), sqrt(comp2_var))
      ## use either minimum value or mean estimate if bigger
      comp2_mu <- max(comp2_mean_min, sum(posterior_df[,2] * w2_i * x)/sum(posterior_df[,2] * w2_i))
      comp3_var <- component_var[3]
      ## inverse-variance weights
      w3_i <- 1/(comp3_var+lfc_var)
      # gets the maximum mean allowed when variance is comp3_var to keep
      # (1 - overlap) of the distribution below the negative threshold
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      ## use either maximum value or mean estimate if smaller
      comp3_mu <- min(comp3_mean_max,sum(posterior_df[,3] * w3_i * x)/sum(posterior_df[,3] * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Variance update (method-of-moments, sampling variance subtracted),
      # floored at 0.005; then recompute weights and the constrained mean.
      comp2_var <- max(0.005,sum(posterior_df[, 2] * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(posterior_df[, 2]*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min,sum(posterior_df[, 2] * w2_i * x)/sum(posterior_df[, 2] * w2_i))
      comp3_var <- max(0.005,sum(posterior_df[, 3] * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(posterior_df[, 3]*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(posterior_df[, 3] * w3_i * x)/sum(posterior_df[, 3] * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  comp1_alpha <- max(comp1_n / length(x),lower_bound)
  comp2_alpha <- max(comp2_n / length(x),lower_bound)
  comp3_alpha <- max(comp3_n / length(x),lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha))
}

# Set-aware M-step (1DF model): in-set and out-of-set halves share the three
# means/variances and a common up-vs-down split `c`; only the non-DE weight
# (alpha 1 vs alpha 4) is free to differ between the halves.
.m_step_set_iter_fixed <- function(x, lfc_var, set, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  # Effective component counts from the posterior memberships.
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  comp4_n <- sum(posterior_df[, 4])
  comp5_n <- sum(posterior_df[, 5])
  comp6_n <- sum(posterior_df[, 6])
  comp1_alpha <- max(comp1_n / sum(set),lower_bound)
  comp4_alpha <- max(comp4_n / (length(set)-sum(set)),lower_bound)
  # c = shared proportion of DE mass that is up-regulated (pooled over both halves).
  c <- (comp2_n + comp5_n)/(comp2_n + comp5_n + comp3_n + comp6_n)
  comp2_alpha <- (1 - comp1_alpha)*(c)
  comp3_alpha <- (1 - comp1_alpha)*(1-c)
  comp5_alpha <- (1 - comp4_alpha)*(c)
  comp6_alpha <- (1 - comp4_alpha)*(1-c)
  # the following for loop iterates between mean and variance, the number of iterations is t.
  # Posteriors are pooled across halves since means/variances are shared.
  # NOTE(review): comp1_pd is computed but never used below -- confirm.
  comp1_pd <- posterior_df[,1] + posterior_df[,4]
  comp2_pd <- posterior_df[,2] + posterior_df[,5]
  comp3_pd <- posterior_df[,3] + posterior_df[,6]
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization with the incoming variances; constrained means as in
      # .m_step_iter_fixed.
      comp2_var <- component_var[2]
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm(1-overlap, log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- component_var[3]
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Alternate variance and constrained-mean updates.
      comp2_var <- max(0.005,sum(comp2_pd * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(comp2_pd*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <-max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- max(0.005,sum(comp3_pd * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(comp3_pd*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  # Floor every weight at the file-level global `lower_bound`.
  comp1_alpha <- max(comp1_alpha,lower_bound)
  comp2_alpha <- max(comp2_alpha,lower_bound)
  comp3_alpha <- max(comp3_alpha,lower_bound)
  comp4_alpha <- max(comp4_alpha,lower_bound)
  comp5_alpha <- max(comp5_alpha,lower_bound)
  comp6_alpha <- max(comp6_alpha,lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha, comp4_alpha, comp5_alpha, comp6_alpha ))
}

## # fitting the ggm 2DF approach
# EM fit of the set-aware mixture where in-set and out-of-set halves get
# fully independent three-way weights (see .m_step_set_iter_fixed_2DF).
# Returns list(loglike, param, ll.vector = full log-likelihood trace).
.EM_2FP_fixed <- function(effect_set, variance_set, set, comp1_var_max, threshold, overlap = overlap){
  n<- 1000   # maximum number of EM iterations
  m<- 1e-6   # convergence tolerance
  iter<- 10  # inner M-step alternations
  for (i in 1:n) {
    if (i == 1) {
      # Initialization
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, c(0, 1, -1), c(0.005, 0.005, 0.005), c(.80, 0.1, 0.1, 0.8, 0.1, 0.1))
      m.step.set <- .m_step_set_iter_fixed_2DF(effect_set, variance_set, set, c(0.005, 0.005, 0.005),iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      cur.loglik.set <- e.step.set[["loglik"]]
      loglik.vector.set <- e.step.set[["loglik"]]
    } else {
      # Repeat E and M steps till convergence
      e.step.set <- .e_step_set_iter(effect_set, variance_set, set, m.step.set[["mu"]], m.step.set[["var"]],m.step.set[["alpha"]])
      m.step.set <- .m_step_set_iter_fixed_2DF(effect_set,variance_set,set,m.step.set[["var"]],iter, e.step.set[["posterior_df"]], comp1_var_max, threshold = threshold, overlap = overlap)
      loglik.vector.set <- c(loglik.vector.set, e.step.set[["loglik"]])
      loglik.diff.set <- abs((cur.loglik.set - e.step.set[["loglik"]]))
      if(loglik.diff.set < m) {
        break
      } else {
        cur.loglik.set <- e.step.set[["loglik"]]
      }
    }
  }
  loglike_set_genes<-tail(loglik.vector.set, n=1)
  parameters <- list("loglike" = loglike_set_genes, "param" = m.step.set, "ll.vector" = loglik.vector.set)
  return(parameters)
}

# Set-aware M-step (2DF model): identical shared mean/variance updates to
# .m_step_set_iter_fixed, but all six weights are estimated independently
# (normalised within each half) rather than tied through a common split.
.m_step_set_iter_fixed_2DF <- function(x, lfc_var, set, component_var, t, posterior_df, comp1_var_max, threshold, overlap = overlap) {
  comp1_n <- sum(posterior_df[, 1])
  comp2_n <- sum(posterior_df[, 2])
  comp3_n <- sum(posterior_df[, 3])
  comp4_n <- sum(posterior_df[, 4])
  comp5_n <- sum(posterior_df[, 5])
  comp6_n <- sum(posterior_df[, 6])
  # the following for loop iterates between mean and variance, the number of iterations is t.
  # NOTE(review): comp1_pd is computed but never used below -- confirm.
  comp1_pd <- posterior_df[,1] + posterior_df[,4]
  comp2_pd <- posterior_df[,2] + posterior_df[,5]
  comp3_pd <- posterior_df[,3] + posterior_df[,6]
  ###########################
  for (i in 1:t) {
    if (i == 1) {
      # Initialization with the incoming variances; constrained means as in
      # .m_step_iter_fixed.
      comp2_var <- component_var[2]
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm(1-overlap, log2(threshold), sqrt(comp2_var))
      comp2_mu <- max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- component_var[3]
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    } else {
      # Alternate variance and constrained-mean updates.
      comp2_var <- max(0.005,sum(comp2_pd * ((w2_i)^2) * (((x - comp2_mu)^2)-lfc_var))/(sum(comp2_pd*w2_i^2)))
      w2_i <- 1/(comp2_var+lfc_var)
      comp2_mean_min <- qnorm((1-overlap), log2(threshold), sqrt(comp2_var))
      comp2_mu <-max(comp2_mean_min, sum(comp2_pd * w2_i * x)/sum(comp2_pd * w2_i))
      comp3_var <- max(0.005,sum(comp3_pd * ((w3_i)^2) * (((x - comp3_mu)^2)-lfc_var))/(sum(comp3_pd*w3_i^2)))
      w3_i <- 1/(comp3_var+lfc_var)
      comp3_mean_max <- qnorm(overlap, -log2(threshold), sqrt(comp3_var))
      comp3_mu <- min(comp3_mean_max, sum(comp3_pd * w3_i * x)/sum(comp3_pd * w3_i))
      comp1_var <- comp1_var_max
      comp1_mu<- 0
    }
  }
  # Independent weights per half: in-set normalised by sum(set), out-of-set
  # by length(set) - sum(set); all floored at `lower_bound`.
  comp1_alpha <- max(comp1_n / sum(set),lower_bound)
  comp2_alpha <- max(comp2_n / sum(set),lower_bound)
  comp3_alpha <- max(comp3_n / sum(set),lower_bound)
  comp4_alpha <- max(comp4_n / (length(set)-sum(set)),lower_bound)
  comp5_alpha <- max(comp5_n / (length(set)-sum(set)),lower_bound)
  comp6_alpha <- max(comp6_n / (length(set)-sum(set)),lower_bound)
  list("mu" = c(comp1_mu, comp2_mu, comp3_mu), "var" = c(comp1_var, comp2_var, comp3_var), "alpha" = c(comp1_alpha, comp2_alpha, comp3_alpha, comp4_alpha, comp5_alpha, comp6_alpha ))
}

# File-level floor applied to all mixture weights in the M-step functions
# above (5e-9), preventing any component weight from collapsing to zero.
lower_bound<- 0.000000005
# This is the server logic of a Shiny web application. You can run the application by clicking 'Run App' above. library(shiny) library(dplyr) library(ggplot2) df <- read.csv("hgs.csv", header = TRUE) df <- df %>% dplyr::select("X","latitude","longitude","tlpop","area") model <- lm( log(area) ~ log(tlpop) + abs(latitude), data = df) # Define server logic server = function(input,output,session){ observeEvent( input$Enter, { latitude = input$latitude tlpop = input$tlpop t = data.frame(latitude,tlpop) t$area <- predict(model, t) t$area <- round(exp(t$area),1) output$plot_foo = renderPlot({ ggplot(df, aes(x=tlpop, y=area)) + geom_point() + geom_smooth() + scale_x_continuous(trans="log10") + scale_y_continuous(trans="log10") + geom_point(data = t, col = 'red', size = 5) + ylab("Home range area (sq km)") + xlab("Total population") }) output$text<-renderText({ return(paste("<span style=\"color:red; font-size: 20px\">Predicted home range area in square kilometers is </span>", t$area)) }) }) }
/server.R
no_license
RobertSWalker/shiny-hunter-gatherer-area-scaling
R
false
false
1,144
r
# This is the server logic of a Shiny web application. You can run the application by clicking 'Run App' above. library(shiny) library(dplyr) library(ggplot2) df <- read.csv("hgs.csv", header = TRUE) df <- df %>% dplyr::select("X","latitude","longitude","tlpop","area") model <- lm( log(area) ~ log(tlpop) + abs(latitude), data = df) # Define server logic server = function(input,output,session){ observeEvent( input$Enter, { latitude = input$latitude tlpop = input$tlpop t = data.frame(latitude,tlpop) t$area <- predict(model, t) t$area <- round(exp(t$area),1) output$plot_foo = renderPlot({ ggplot(df, aes(x=tlpop, y=area)) + geom_point() + geom_smooth() + scale_x_continuous(trans="log10") + scale_y_continuous(trans="log10") + geom_point(data = t, col = 'red', size = 5) + ylab("Home range area (sq km)") + xlab("Total population") }) output$text<-renderText({ return(paste("<span style=\"color:red; font-size: 20px\">Predicted home range area in square kilometers is </span>", t$area)) }) }) }
#' @include FLMatrix.R NULL #' Singular Value Decomposition of a Matrix. #' #' \code{svd} computes the singular value decomposition for FLMatrix objects. #' #' @param object is of class FLMatrix #' @param ... has nu number of left singular vectors to be computed.This must between 0 and nrow(object). #' nv number of right singular vectors to be computed.This must between 0 and ncol(object). #' @section Constraints: #' Input can only be with maximum dimension limitations of (550 x 550). #' @return \code{svd} returns a list of three components: #' \item{d}{a FLVector containing the singular values of x, of size min(n, p).} #' \item{u}{a FLMatrix whose columns contain the left singular vectors of x, present if nu > 0. Dimension c(n, nu).} #' \item{v}{a FLMatrix whose columns contain the right singular vectors of x, present if nv > 0. Dimension c(p, nv).} #' @examples #' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL") #' resultList <- svd(flmatrix) #' resultList$d #' resultList$u #' resultList$v #' @export svd<-function(object, ...){ UseMethod("svd",object) } #' @export svd.default<-base::svd #' @export svd.FLMatrix<-function(object,nu=c(),nv=c()) { connection<-getConnection(object) flag1Check(connection) flag3Check(connection) sqlstr <- paste0( viewSelectMatrix(object, "a","z"), outputSelectMatrix("FLSVDUdt",viewName="z",localName="a", outColNames=list("OutputMatrixID","OutputRowNum", "OutputColNum","OutUVal","OutSVal","OutVVal"), whereClause="") ) sqlstr <- gsub("'%insertIDhere%'",1,sqlstr) sqlstr <- ensureQuerySize(pResult=sqlstr, pInput=list(object,nu,nv), pOperator="svd") tempResultTable <- createTable(pTableName=gen_unique_table_name("SVD"), pSelect=sqlstr) UMatrix <- FLMatrix(connection = connection, table_name = tempResultTable, matrix_id_value = "", matrix_id_colname = "", row_id_colname = "OutputRowNum", col_id_colname = "OutputColNum", cell_val_colname = "OutUVal", whereconditions=paste0(tempResultTable,".OutUVal IS NOT NULL 
") ) VMatrix <- FLMatrix(connection = connection, table_name = tempResultTable, matrix_id_value = "", matrix_id_colname = "", row_id_colname = "OutputRowNum", col_id_colname = "OutputColNum", cell_val_colname = "OutVVal", whereconditions= paste0(tempResultTable,".OutVVal IS NOT NULL ") ) table <- FLTable( tempResultTable, "OutputRowNum", whereconditions=c(paste0(tempResultTable,".OutputRowNum = ", tempResultTable,".OutputColNum "), paste0(tempResultTable,".OutSVal IS NOT NULL ")) ) SVector <- table[,"OutSVal"] if (is.null(nu) && is.null(nv)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),ncol(object))], v = VMatrix[1:ncol(object),1:min(nrow(object),ncol(object))]) } else if (is.null(nu)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),ncol(object))], v = VMatrix[1:ncol(object),1:min(nv,ncol(object))]) } else if (is.null(nv)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),nu)], v = VMatrix[1:ncol(object),1:min(nrow(object),ncol(object))]) } else { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),nu)], v = VMatrix[1:ncol(object),1:min(nv,ncol(object))]) } result }
/R/FLSVDecomp.R
no_license
amalshri/AdapteR
R
false
false
3,830
r
#' @include FLMatrix.R NULL #' Singular Value Decomposition of a Matrix. #' #' \code{svd} computes the singular value decomposition for FLMatrix objects. #' #' @param object is of class FLMatrix #' @param ... has nu number of left singular vectors to be computed.This must between 0 and nrow(object). #' nv number of right singular vectors to be computed.This must between 0 and ncol(object). #' @section Constraints: #' Input can only be with maximum dimension limitations of (550 x 550). #' @return \code{svd} returns a list of three components: #' \item{d}{a FLVector containing the singular values of x, of size min(n, p).} #' \item{u}{a FLMatrix whose columns contain the left singular vectors of x, present if nu > 0. Dimension c(n, nu).} #' \item{v}{a FLMatrix whose columns contain the right singular vectors of x, present if nv > 0. Dimension c(p, nv).} #' @examples #' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL") #' resultList <- svd(flmatrix) #' resultList$d #' resultList$u #' resultList$v #' @export svd<-function(object, ...){ UseMethod("svd",object) } #' @export svd.default<-base::svd #' @export svd.FLMatrix<-function(object,nu=c(),nv=c()) { connection<-getConnection(object) flag1Check(connection) flag3Check(connection) sqlstr <- paste0( viewSelectMatrix(object, "a","z"), outputSelectMatrix("FLSVDUdt",viewName="z",localName="a", outColNames=list("OutputMatrixID","OutputRowNum", "OutputColNum","OutUVal","OutSVal","OutVVal"), whereClause="") ) sqlstr <- gsub("'%insertIDhere%'",1,sqlstr) sqlstr <- ensureQuerySize(pResult=sqlstr, pInput=list(object,nu,nv), pOperator="svd") tempResultTable <- createTable(pTableName=gen_unique_table_name("SVD"), pSelect=sqlstr) UMatrix <- FLMatrix(connection = connection, table_name = tempResultTable, matrix_id_value = "", matrix_id_colname = "", row_id_colname = "OutputRowNum", col_id_colname = "OutputColNum", cell_val_colname = "OutUVal", whereconditions=paste0(tempResultTable,".OutUVal IS NOT NULL 
") ) VMatrix <- FLMatrix(connection = connection, table_name = tempResultTable, matrix_id_value = "", matrix_id_colname = "", row_id_colname = "OutputRowNum", col_id_colname = "OutputColNum", cell_val_colname = "OutVVal", whereconditions= paste0(tempResultTable,".OutVVal IS NOT NULL ") ) table <- FLTable( tempResultTable, "OutputRowNum", whereconditions=c(paste0(tempResultTable,".OutputRowNum = ", tempResultTable,".OutputColNum "), paste0(tempResultTable,".OutSVal IS NOT NULL ")) ) SVector <- table[,"OutSVal"] if (is.null(nu) && is.null(nv)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),ncol(object))], v = VMatrix[1:ncol(object),1:min(nrow(object),ncol(object))]) } else if (is.null(nu)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),ncol(object))], v = VMatrix[1:ncol(object),1:min(nv,ncol(object))]) } else if (is.null(nv)) { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),nu)], v = VMatrix[1:ncol(object),1:min(nrow(object),ncol(object))]) } else { result<-list(d = SVector, u = UMatrix[1:nrow(object),1:min(nrow(object),nu)], v = VMatrix[1:ncol(object),1:min(nv,ncol(object))]) } result }
/R_Samsonov/Others_Work/9.r
no_license
aar44i/R_geographic
R
false
false
5,530
r
set.seed(123) ### Gerando dados dados<-data.frame(Sp=c("Columbina talpacoti","Scardafella squammata"),N=c(62,57), Prevalence=c(0.516,0.193)) dados$simul<-0 for(i in 1:2) { dados[i,"simul"]<-rbinom(1, dados[i,"N"], prob = dados[i,"Prevalence"]) } #figura 1 layout(matrix(1:2,ncol=1,nrow=2)) for(i in 1:2) { plot(dbinom(x=0:dados$N[i],size=dados$N[i],prob=dados$Prevalence[i])~c(0:dados$N[i]),frame=F,pch=19, main=paste(dados$Sp[i]," N=",dados$N[i],sep=""),ylab="Frequência",xlim=c(0,70),xlab="",ylim=c(0,0.15)) legend("topright",legend=paste("Prevalência =",dados$Prevalence[i]*100,"%"),bty="n") lines(c(dados$N[i],dados$N[i]),c(0.05,0),col="blue",lty=2,lwd=3) text(dados$N[i]+5,0.05,paste("Limite N =",dados$N[i]),cex=0.7) } tabela<- as.table(cbind(dados$simul, dados$N - dados$simul)) dimnames(tabela) <- list(Sp = c("Columbina talpacoti","Scardafella squammata"),Parasita = c("Sim","Não")) tabela teste.chi <- chisq.test(tabela) str(teste.chi) teste.chi$observed # Contagens observadas teste.chi$expected # Contagens esperada sobre a hipotese nula teste.chi$residuals # Residuo de Pearson teste.chi$stdres # Residos "standardized" ### Análise usando Bugs # Definindo Modelo sink("Binomial.t.test.txt") cat(" model { # Priors alpha ~ dnorm(0,0.01) beta ~ dnorm(0,0.01) # Likelihood for (i in 1:n) { C[i] ~ dbin(p[i], N[i]) # Note p before N logit(p[i]) <- alpha + beta *species[i] } # Derived quantities prev.C_talpacoti <- exp(alpha) / (1 + exp(alpha)) prev.S_squammata <- exp(alpha + beta) / (1 + exp(alpha + beta)) prev.Diff <- prev.C_talpacoti - prev.S_squammata # Teste } ",fill=TRUE) sink() # Juntando os dados numa lista bugs.dados <- list(C = dados$simul, N = dados$N, species = c(0,1), n = length(dados$Sp)) # Função geradora de parametros iniciais para as cadeias. 
inits <- function(){ list(alpha=rlnorm(1), beta=rlnorm(1))} # Parametros a estimar params <- c("alpha", "beta", "prev.C_talpacoti", "prev.S_squammata", "prev.Diff") # Configurações do MCMC nc <- 3 ni <- 1200 nb <- 200 nt <- 2 # Iniciando o Gibbs sampling library(R2OpenBUGS) out <- bugs(data=bugs.dados, inits=inits, parameters.to.save=params,model.file="Binomial.t.test.txt", n.thin=nt, n.chains=nc, n.burnin=nb,n.iter=ni) print(out, dig = 3) #Onde estão os dados out$summary #Figura 2 str(out) par(mfrow = c(3,1)) hist(out$sims.list$prev.C_talpacoti, col = "grey", xlim = c(0,1), xlab="", main = "Prevalência de C. talpacoti", breaks = 30,ylim=c(0,300)) abline(v = out$mean$prev.C_talpacoti, lwd = 3, col = "red") hist(out$sims.list$prev.S_squammata, col = "grey", xlim = c(0,1), xlab= "", main = "Prevalência de S. squammata", breaks = 30,ylim=c(0,300)) abline(v = out$mean$prev.S_squammata, lwd = 3, col = "red") hist(out$sims.list$prev.Diff, col = "grey", xlim = c(0,1), xlab = "", main = "Diferença nas Prevalências", breaks = 30,ylim=c(0,300)) abline(v = 0, lwd = 3, col = "red") ### Analise usando GLM modelo.glm<-glm(cbind(dados$simul, dados$N - dados$simul) ~ dados$Sp, family = binomial) summary(modelo.glm)
/teste_t_binomial.r
no_license
Squiercg/recologia
R
false
false
3,124
r
set.seed(123) ### Gerando dados dados<-data.frame(Sp=c("Columbina talpacoti","Scardafella squammata"),N=c(62,57), Prevalence=c(0.516,0.193)) dados$simul<-0 for(i in 1:2) { dados[i,"simul"]<-rbinom(1, dados[i,"N"], prob = dados[i,"Prevalence"]) } #figura 1 layout(matrix(1:2,ncol=1,nrow=2)) for(i in 1:2) { plot(dbinom(x=0:dados$N[i],size=dados$N[i],prob=dados$Prevalence[i])~c(0:dados$N[i]),frame=F,pch=19, main=paste(dados$Sp[i]," N=",dados$N[i],sep=""),ylab="Frequência",xlim=c(0,70),xlab="",ylim=c(0,0.15)) legend("topright",legend=paste("Prevalência =",dados$Prevalence[i]*100,"%"),bty="n") lines(c(dados$N[i],dados$N[i]),c(0.05,0),col="blue",lty=2,lwd=3) text(dados$N[i]+5,0.05,paste("Limite N =",dados$N[i]),cex=0.7) } tabela<- as.table(cbind(dados$simul, dados$N - dados$simul)) dimnames(tabela) <- list(Sp = c("Columbina talpacoti","Scardafella squammata"),Parasita = c("Sim","Não")) tabela teste.chi <- chisq.test(tabela) str(teste.chi) teste.chi$observed # Contagens observadas teste.chi$expected # Contagens esperada sobre a hipotese nula teste.chi$residuals # Residuo de Pearson teste.chi$stdres # Residos "standardized" ### Análise usando Bugs # Definindo Modelo sink("Binomial.t.test.txt") cat(" model { # Priors alpha ~ dnorm(0,0.01) beta ~ dnorm(0,0.01) # Likelihood for (i in 1:n) { C[i] ~ dbin(p[i], N[i]) # Note p before N logit(p[i]) <- alpha + beta *species[i] } # Derived quantities prev.C_talpacoti <- exp(alpha) / (1 + exp(alpha)) prev.S_squammata <- exp(alpha + beta) / (1 + exp(alpha + beta)) prev.Diff <- prev.C_talpacoti - prev.S_squammata # Teste } ",fill=TRUE) sink() # Juntando os dados numa lista bugs.dados <- list(C = dados$simul, N = dados$N, species = c(0,1), n = length(dados$Sp)) # Função geradora de parametros iniciais para as cadeias. 
inits <- function(){ list(alpha=rlnorm(1), beta=rlnorm(1))} # Parametros a estimar params <- c("alpha", "beta", "prev.C_talpacoti", "prev.S_squammata", "prev.Diff") # Configurações do MCMC nc <- 3 ni <- 1200 nb <- 200 nt <- 2 # Iniciando o Gibbs sampling library(R2OpenBUGS) out <- bugs(data=bugs.dados, inits=inits, parameters.to.save=params,model.file="Binomial.t.test.txt", n.thin=nt, n.chains=nc, n.burnin=nb,n.iter=ni) print(out, dig = 3) #Onde estão os dados out$summary #Figura 2 str(out) par(mfrow = c(3,1)) hist(out$sims.list$prev.C_talpacoti, col = "grey", xlim = c(0,1), xlab="", main = "Prevalência de C. talpacoti", breaks = 30,ylim=c(0,300)) abline(v = out$mean$prev.C_talpacoti, lwd = 3, col = "red") hist(out$sims.list$prev.S_squammata, col = "grey", xlim = c(0,1), xlab= "", main = "Prevalência de S. squammata", breaks = 30,ylim=c(0,300)) abline(v = out$mean$prev.S_squammata, lwd = 3, col = "red") hist(out$sims.list$prev.Diff, col = "grey", xlim = c(0,1), xlab = "", main = "Diferença nas Prevalências", breaks = 30,ylim=c(0,300)) abline(v = 0, lwd = 3, col = "red") ### Analise usando GLM modelo.glm<-glm(cbind(dados$simul, dados$N - dados$simul) ~ dados$Sp, family = binomial) summary(modelo.glm)
#' Lookup details for specific names in all taxonomies in GBIF. #' #' @export #' @template occ #' @template nameusage #' @param limit Number of records to return. Default: 100. #' @param start Record number to start at. Default: 0. #' @param return Defunct. All components are returned; index to the #' one(s) you want #' @return An object of class gbif, which is a S3 class list, with slots for #' metadata (\code{meta}) and the data itself (\code{data}). In addition, the #' object has attributes listing the user supplied arguments and type of #' search, which is, differently from occurrence data, always equals to #' 'single' even if multiple values for some parameters are given. \code{meta} #' is a list of length four with offset, limit, endOfRecords and count fields. #' \code{data} is a tibble (aka data.frame) containing all information about #' the found taxa. #' #' @references <https://www.gbif.org/developer/species#nameUsages> #' @details #' This service uses fuzzy lookup so that you can put in partial names and #' you should get back those things that match. See examples below. #' #' This function is different from [name_lookup()] in that that function #' searches for names. This function encompasses a bunch of API endpoints, #' most of which require that you already have a taxon key, but there is one #' endpoint that allows name searches (see examples below). #' #' Note that `data="verbatim"` hasn't been working. #' #' Options for the data parameter are: 'all', 'verbatim', 'name', 'parents', #' 'children', 'related', 'synonyms', 'descriptions','distributions', 'media', #' 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens', 'root', #' 'iucnRedListCategory' #' #' This function used to be vectorized with respect to the `data` #' parameter, where you could pass in multiple values and the function #' internally loops over each option making separate requests. This has been #' removed. 
You can still loop over many options for the `data` parameter, #' just use an `lapply` family function, or a for loop, etc. #' #' See [name_issues()] for more information about issues in `issues` column. #' #' @examples \dontrun{ #' # A single name usage #' name_usage(key=1) #' #' # Name usage for a taxonomic name #' name_usage(name='Puma', rank="GENUS") #' #' # Name usage for all taxa in a dataset #' # (set sufficient high limit, but less than 100000) #' # name_usage(datasetKey = "9ff7d317-609b-4c08-bd86-3bc404b77c42", #' # limit = 10000) #' # All name usages #' name_usage() #' #' # References for a name usage #' name_usage(key=2435099, data='references') #' #' # Species profiles, descriptions #' name_usage(key=3119195, data='speciesProfiles') #' name_usage(key=3119195, data='descriptions') #' name_usage(key=2435099, data='children') #' #' # Vernacular names for a name usage #' name_usage(key=3119195, data='vernacularNames') #' #' # Limit number of results returned #' name_usage(key=3119195, data='vernacularNames', limit=3) #' #' # Search for names by dataset with datasetKey parameter #' name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c") #' #' # Search for a particular language #' name_usage(key=3119195, language="FRENCH", data='vernacularNames') #' #' # get root usage with a uuid #' name_usage(data = "root", uuid = "73605f3a-af85-4ade-bbc5-522bfb90d847") #' #' # search by language #' name_usage(language = "spanish") #' #' # Pass on curl options #' name_usage(name='Puma concolor', limit=300, curlopts = list(verbose=TRUE)) #' #' # look up iucn red list category #' name_usage(key = 7707728, data = 'iucnRedListCategory') #' } name_usage <- function(key=NULL, name=NULL, data='all', language=NULL, datasetKey=NULL, uuid=NULL, rank=NULL, shortname=NULL, start=0, limit=100, return=NULL, curlopts = list()) { pchk(return, "name_usage") # check limit and start params check_vals(limit, "limit") check_vals(start, "start") # each of these args must be length=1 if 
(!is.null(rank)) stopifnot(length(rank) == 1) if (!is.null(name)) stopifnot(length(name) == 1) if (!is.null(language)) stopifnot(length(language) == 1) if (!is.null(datasetKey)) stopifnot(length(datasetKey) == 1) args <- rgbif_compact(list(offset = start, limit = limit, rank = rank, name = name, language = language, datasetKey = datasetKey)) data <- match.arg(data, choices = c('all', 'verbatim', 'name', 'parents', 'children', 'related', 'synonyms', 'descriptions', 'distributions', 'media', 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens', 'root', 'iucnRedListCategory'), several.ok = FALSE) # paging implementation iter <- NULL if (limit > 1000) { iter <- 0 sumreturned <- 0 numreturned <- 0 outout <- list() while (sumreturned < limit) { iter <- iter + 1 tt <- getdata(data, key, uuid, shortname, args, curlopts) # if no results, assign numreturned var with 0 if (identical(tt$results, list())) { numreturned <- 0} else { numreturned <- length(tt$results)} sumreturned <- sumreturned + numreturned # if less results than maximum if ((numreturned > 0) && (numreturned < 1000)) { # update limit for metadata before exiting limit <- numreturned args$limit <- limit } if (sumreturned < limit) { # update args for next query args$offset <- args$offset + numreturned args$limit <- limit - sumreturned } outout[[iter]] <- tt } out <- list() out$results <- do.call(c, lapply(outout, "[[", "results")) out$offset <- args$offset out$limit <- args$limit out$endOfRecords <- outout[[iter]]$endOfRecords } else { # retrieve data in a single query out <- getdata(data, key, uuid, shortname, args, curlopts) } out <- list(meta = get_meta_nu(out), data = tibble::as_tibble(name_usage_parse(out, data))) structure(out, class = "gbif", args = args, type = "single") } get_meta_nu <- function(x) { if (has_meta(x)) { tibble::as_tibble(data.frame(x[c('offset','limit','endOfRecords')], stringsAsFactors = FALSE)) } else { tibble::tibble() } } has_meta <- function(x) 
any(c('offset','limit','endOfRecords') %in% names(x)) getdata <- function(x, key, uuid, shortname, args, curlopts = list()){ if (!x == 'all' && is.null(key)) { # data can == 'root' if uuid is not null if (x != 'root' && !is.null(uuid) || x != 'root' && !is.null(shortname)) { stop('You must specify a key if data does not equal "all"', call. = FALSE) } } if (x == 'all' && is.null(key)) { url <- paste0(gbif_base(), '/species') } else { if (x == 'all' && !is.null(key)) { url <- sprintf('%s/species/%s', gbif_base(), key) } else if (x %in% c('verbatim', 'name', 'parents', 'children', 'related', 'synonyms', 'descriptions', 'distributions', 'media', 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens','iucnRedListCategory')) { url <- sprintf('%s/species/%s/%s', gbif_base(), key, x) } else if (x == 'root') { z <- if (is.null(uuid)) shortname else uuid url <- sprintf('%s/species/root/%s', gbif_base(), z) } } gbif_GET(url, args, FALSE, curlopts) } name_usage_parse <- function(x, y) { many <- "parents" if (has_meta(x) || y %in% many) { if (y %in% many) { (outtt <- data.table::setDF( data.table::rbindlist( lapply(x, no_zero), use.names = TRUE, fill = TRUE))) } else { (outtt <- data.table::setDF( data.table::rbindlist( lapply(x$results, function(x) { # reduce multiple element slots to comma sep if ("issues" %in% names(x)) { x[names(x) %in% "issues"] <- collapse_issues(x) } lapply(x, function(x) { if (length(x) == 0) { NA } else { x } }) }), use.names = TRUE, fill = TRUE))) } } else { nameusageparser(x) } } no_zero <- function(x) Filter(function(z) length(z) != 0, x)
/R/name_usage.r
permissive
ropensci/rgbif
R
false
false
8,389
r
#' Lookup details for specific names in all taxonomies in GBIF. #' #' @export #' @template occ #' @template nameusage #' @param limit Number of records to return. Default: 100. #' @param start Record number to start at. Default: 0. #' @param return Defunct. All components are returned; index to the #' one(s) you want #' @return An object of class gbif, which is a S3 class list, with slots for #' metadata (\code{meta}) and the data itself (\code{data}). In addition, the #' object has attributes listing the user supplied arguments and type of #' search, which is, differently from occurrence data, always equals to #' 'single' even if multiple values for some parameters are given. \code{meta} #' is a list of length four with offset, limit, endOfRecords and count fields. #' \code{data} is a tibble (aka data.frame) containing all information about #' the found taxa. #' #' @references <https://www.gbif.org/developer/species#nameUsages> #' @details #' This service uses fuzzy lookup so that you can put in partial names and #' you should get back those things that match. See examples below. #' #' This function is different from [name_lookup()] in that that function #' searches for names. This function encompasses a bunch of API endpoints, #' most of which require that you already have a taxon key, but there is one #' endpoint that allows name searches (see examples below). #' #' Note that `data="verbatim"` hasn't been working. #' #' Options for the data parameter are: 'all', 'verbatim', 'name', 'parents', #' 'children', 'related', 'synonyms', 'descriptions','distributions', 'media', #' 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens', 'root', #' 'iucnRedListCategory' #' #' This function used to be vectorized with respect to the `data` #' parameter, where you could pass in multiple values and the function #' internally loops over each option making separate requests. This has been #' removed. 
You can still loop over many options for the `data` parameter, #' just use an `lapply` family function, or a for loop, etc. #' #' See [name_issues()] for more information about issues in `issues` column. #' #' @examples \dontrun{ #' # A single name usage #' name_usage(key=1) #' #' # Name usage for a taxonomic name #' name_usage(name='Puma', rank="GENUS") #' #' # Name usage for all taxa in a dataset #' # (set sufficient high limit, but less than 100000) #' # name_usage(datasetKey = "9ff7d317-609b-4c08-bd86-3bc404b77c42", #' # limit = 10000) #' # All name usages #' name_usage() #' #' # References for a name usage #' name_usage(key=2435099, data='references') #' #' # Species profiles, descriptions #' name_usage(key=3119195, data='speciesProfiles') #' name_usage(key=3119195, data='descriptions') #' name_usage(key=2435099, data='children') #' #' # Vernacular names for a name usage #' name_usage(key=3119195, data='vernacularNames') #' #' # Limit number of results returned #' name_usage(key=3119195, data='vernacularNames', limit=3) #' #' # Search for names by dataset with datasetKey parameter #' name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c") #' #' # Search for a particular language #' name_usage(key=3119195, language="FRENCH", data='vernacularNames') #' #' # get root usage with a uuid #' name_usage(data = "root", uuid = "73605f3a-af85-4ade-bbc5-522bfb90d847") #' #' # search by language #' name_usage(language = "spanish") #' #' # Pass on curl options #' name_usage(name='Puma concolor', limit=300, curlopts = list(verbose=TRUE)) #' #' # look up iucn red list category #' name_usage(key = 7707728, data = 'iucnRedListCategory') #' } name_usage <- function(key=NULL, name=NULL, data='all', language=NULL, datasetKey=NULL, uuid=NULL, rank=NULL, shortname=NULL, start=0, limit=100, return=NULL, curlopts = list()) { pchk(return, "name_usage") # check limit and start params check_vals(limit, "limit") check_vals(start, "start") # each of these args must be length=1 if 
(!is.null(rank)) stopifnot(length(rank) == 1) if (!is.null(name)) stopifnot(length(name) == 1) if (!is.null(language)) stopifnot(length(language) == 1) if (!is.null(datasetKey)) stopifnot(length(datasetKey) == 1) args <- rgbif_compact(list(offset = start, limit = limit, rank = rank, name = name, language = language, datasetKey = datasetKey)) data <- match.arg(data, choices = c('all', 'verbatim', 'name', 'parents', 'children', 'related', 'synonyms', 'descriptions', 'distributions', 'media', 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens', 'root', 'iucnRedListCategory'), several.ok = FALSE) # paging implementation iter <- NULL if (limit > 1000) { iter <- 0 sumreturned <- 0 numreturned <- 0 outout <- list() while (sumreturned < limit) { iter <- iter + 1 tt <- getdata(data, key, uuid, shortname, args, curlopts) # if no results, assign numreturned var with 0 if (identical(tt$results, list())) { numreturned <- 0} else { numreturned <- length(tt$results)} sumreturned <- sumreturned + numreturned # if less results than maximum if ((numreturned > 0) && (numreturned < 1000)) { # update limit for metadata before exiting limit <- numreturned args$limit <- limit } if (sumreturned < limit) { # update args for next query args$offset <- args$offset + numreturned args$limit <- limit - sumreturned } outout[[iter]] <- tt } out <- list() out$results <- do.call(c, lapply(outout, "[[", "results")) out$offset <- args$offset out$limit <- args$limit out$endOfRecords <- outout[[iter]]$endOfRecords } else { # retrieve data in a single query out <- getdata(data, key, uuid, shortname, args, curlopts) } out <- list(meta = get_meta_nu(out), data = tibble::as_tibble(name_usage_parse(out, data))) structure(out, class = "gbif", args = args, type = "single") } get_meta_nu <- function(x) { if (has_meta(x)) { tibble::as_tibble(data.frame(x[c('offset','limit','endOfRecords')], stringsAsFactors = FALSE)) } else { tibble::tibble() } } has_meta <- function(x) 
any(c('offset','limit','endOfRecords') %in% names(x)) getdata <- function(x, key, uuid, shortname, args, curlopts = list()){ if (!x == 'all' && is.null(key)) { # data can == 'root' if uuid is not null if (x != 'root' && !is.null(uuid) || x != 'root' && !is.null(shortname)) { stop('You must specify a key if data does not equal "all"', call. = FALSE) } } if (x == 'all' && is.null(key)) { url <- paste0(gbif_base(), '/species') } else { if (x == 'all' && !is.null(key)) { url <- sprintf('%s/species/%s', gbif_base(), key) } else if (x %in% c('verbatim', 'name', 'parents', 'children', 'related', 'synonyms', 'descriptions', 'distributions', 'media', 'references', 'speciesProfiles', 'vernacularNames', 'typeSpecimens','iucnRedListCategory')) { url <- sprintf('%s/species/%s/%s', gbif_base(), key, x) } else if (x == 'root') { z <- if (is.null(uuid)) shortname else uuid url <- sprintf('%s/species/root/%s', gbif_base(), z) } } gbif_GET(url, args, FALSE, curlopts) } name_usage_parse <- function(x, y) { many <- "parents" if (has_meta(x) || y %in% many) { if (y %in% many) { (outtt <- data.table::setDF( data.table::rbindlist( lapply(x, no_zero), use.names = TRUE, fill = TRUE))) } else { (outtt <- data.table::setDF( data.table::rbindlist( lapply(x$results, function(x) { # reduce multiple element slots to comma sep if ("issues" %in% names(x)) { x[names(x) %in% "issues"] <- collapse_issues(x) } lapply(x, function(x) { if (length(x) == 0) { NA } else { x } }) }), use.names = TRUE, fill = TRUE))) } } else { nameusageparser(x) } } no_zero <- function(x) Filter(function(z) length(z) != 0, x)
x <- as.matrix(all_results) t(x) heatmap(x, Rowv=NA, Colv = NA, scale="column")
/heatmap.R
no_license
laurenexcell/hello-world
R
false
false
80
r
x <- as.matrix(all_results) t(x) heatmap(x, Rowv=NA, Colv = NA, scale="column")
/26FEB.R
no_license
guillerosas/claseseriesdetiempo
R
false
false
1,713
r