Dataset schema:

  column         type           stats
  ------------   ------------   ---------------------
  content        large_string   lengths 0 to 6.46M
  path           large_string   lengths 3 to 331
  license_type   large_string   2 classes
  repo_name      large_string   lengths 5 to 125
  language       large_string   1 value
  is_vendor      bool           2 classes
  is_generated   bool           2 classes
  length_bytes   int64          4 to 6.46M
  extension      large_string   75 classes
  text           string         lengths 0 to 6.46M
# plot1.R
# Exploratory Data Analysis
# Project 1 part 1
# Lars Rahm
# August 2014

# Grab just the rows we need instead of reading the whole dataset.
# See the link for an explanation
# Wei Zhang - https://class.coursera.org/exdata-005/forum/thread?thread_id=56
startrow <- 47 * 60 * 24 - 17 * 60 - 24
endrow <- 2 * 24 * 60

energy <- read.table("data/household_power_consumption.txt", sep = ";",
                     header = TRUE, na.strings = "?",
                     skip = startrow, nrows = endrow,
                     colClasses = c("character", "character", "numeric", "numeric",
                                    "numeric", "numeric", "numeric", "numeric",
                                    "numeric"))
colnames(energy) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                      "Voltage", "Global_intensity", "Sub_metering_1",
                      "Sub_metering_2", "Sub_metering_3")

hist(energy$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")

dev.copy(png, file = "plot1.png", height = 480, width = 480)  ## Copy my plot to a PNG file
dev.off()  ## Don't forget to close the PNG device!
path: /plot1.R | license: no_license | repo: lrahm/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,240 | extension: r
library(rpf)

### Name: rpf.sample
### Title: Randomly sample response patterns given a list of items
### Aliases: rpf.sample

### ** Examples

# 1 dimensional items
i1 <- rpf.drm()
i1.p <- rpf.rparam(i1)
i2 <- rpf.nrm(outcomes=3)
i2.p <- rpf.rparam(i2)

rpf.sample(5, list(i1,i2), list(i1.p, i2.p))
path: /data/genthat_extracted_code/rpf/examples/rpf.sample.Rd.R | license: no_license | repo: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 303 | extension: r
library(dplyr)

setwd("C:\\Users\\savet\\OneDrive\\Documenti\\R\\Esercizi")

overall <- read.delim("household_power_consumption.txt", sep = ";",
                      stringsAsFactors = FALSE, header = TRUE, na.strings = "?")
overall$Date <- as.Date(overall$Date, format = "%d/%m/%Y")
dataset <- overall %>% filter(Date=="2007-02-01" | Date=="2007-02-02")

##### PLOT2
dataset$Time <- paste(dataset$Date, dataset$Time)
dataset$Time <- strptime(dataset$Time, format = "%Y-%m-%d %H:%M:%S")

plot(dataset$Time, dataset$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")

dev.copy(png, file="plot2.png")
dev.off()
path: /Plot2.R | license: no_license | repo: tommasoanastasia/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 646 | extension: r
#' R.SamBada: A package for running samBada within R, with a pipeline from pre- to post-processing
#'
#' The R.SamBada package provides functions that can be classified into four categories:
#' installing samBada, preprocessing, running samBada, and post-processing.
#'
#' @section Install samBada functions:
#' You can download samBada (if it is not already on your computer) from GitHub using the
#' function \code{\link{downloadSambada}}.
#'
#' @section Preprocessing functions:
#' There are four preprocessing functions:
#' \itemize{
#' \item{\code{\link{prepareGeno}}: translate a genomic file into samBada's input format while applying genomic filters}
#' \item{\code{\link{setLocation}}: open a local web page with an interactive map to assign sample locations}
#' \item{\code{\link{createEnv}}: create your environmental file from sample locations, using local rasters or the global WorldClim database}
#' \item{\code{\link{prepareEnv}}: reduce the environmental file by removing correlated variables, and analyse population structure}
#' }
#'
#' @section Running samBada function:
#' To run samBada, you will want to use the function \code{\link{sambadaParallel}}.
#'
#' @section Postprocessing functions:
#' There are four postprocessing functions:
#' \itemize{
#' \item{\code{\link{prepareOutput}}: calculate p- and q-values from samBada's output, and retrieve SNP positions for manhattan plots}
#' \item{\code{\link{plotManhattan}}: create a manhattan plot of one or several environmental variables}
#' \item{\code{\link{plotResultInteractive}}: start an interactive local web page to query a manhattan plot with maps, plots and Ensembl query results}
#' \item{\code{\link{plotMap}}: create a map of marker, population structure or environmental variable distribution}
#' }
#'
#' @importFrom grDevices colorRampPalette colors dev.off dev.size pdf png terrain.colors
#' @importFrom graphics abline axis barplot boxplot hist image layout lines par plot plot.new points text
#' @importFrom stats aggregate complete.cases cor cutree hclust kmeans na.exclude pchisq splinefun
#' @importFrom utils data read.csv read.table write.table
#'
#' @docType package
#' @name R.SamBada-package
NULL
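The help page above describes the pipeline only in prose. A hedged sketch of how the four stages might chain together follows; the function names are taken from the sections above, but every argument name, file name and value is an illustrative assumption, not the package's documented signature.

library(R.SamBada)

# Hypothetical end-to-end run; all argument names and file names are assumptions.
prepareGeno("sheep.ped", outputFile = "sheep-geno.csv")            # genomic filters (assumed args)
createEnv(locationFileName = "sheep-locations.csv")                # env. data, e.g. WorldClim (assumed args)
prepareEnv(envFile = "sheep-env.csv", maxCorr = 0.8)               # drop correlated variables (assumed args)
sambadaParallel(genoFile = "sheep-geno.csv", envFile = "sheep-env.csv", cores = 4)  # assumed args
out <- prepareOutput("sheep-geno")                                 # p/q-values (assumed args)
plotManhattan(out, env = "bio1")                                   # assumed args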
path: /R/RSamBada.R | license: no_license | repo: SolangeD/R.SamBada | language: R | is_vendor: false | is_generated: false | length_bytes: 2,198 | extension: r
load("../../data/07_data_for_SIS.rda") source("../../common/08_SIS_common_funcs.R") x = x.diag_dsd y = y.diag_dsd seed = 1234567890 library(plyr) library(doMC) registerDoMC(32) grid.tau = seq(0.6, 0.99, 0.01) grid.gamma = seq(5, 15, 1) grid.tasks = expand.grid(tau = grid.tau, gamma = grid.gamma) x.std = (x - rowMeans(x)) / apply(x, 1, sd) set.seed(seed) results = aaply(grid.tasks, 1, function(params) { print(params) nonperm = CPSS(x.std, y, SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel # nonperm_boot = sapply(1:24, function(i) { # samp = sample.int(ncol(x), replace = TRUE) # CPSS(x.std[,samp], y[samp,], SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel # }) perm = sapply(1:25, function(i) { CPSS(x.std, y[sample.int(nrow(y)),], SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel }) # c(sum(nonperm), apply(nonperm_boot, 2, sum), apply(perm, 2, sum)) c(sum(nonperm), apply(perm, 2, sum)) }, .parallel = TRUE) nvar.total = aaply(results[,,1], 1:2, median) nvar.false = aaply(results[,,-1], 1:2, median) nvar.true = nvar.total - nvar.false fdr = nvar.false / nvar.total library(fields) library(reshape2) data = melt(nvar.total) colnames(data)[3] = "nTotal" data = cbind(data, "nFalse" = melt(nvar.false)[,3]) data$fdr = data$nFalse / data$nTotal data$nTrue = data$nTotal - data$nFalse data$nTotalSmth = predict(Tps(x = data[,1:2], Y = data[,3])) data$nFalseSmth = predict(Tps(x = data[,1:2], Y = data[,4])) data$fdrSmth = data$nFalseSmth / data$nTotalSmth data$nTrueSmth = data$nTotalSmth - data$nFalseSmth nvar.trueSmth = acast(data, tau ~ gamma, value.var = "nTrueSmth") fdrSmth = acast(data, tau ~ gamma, value.var = "fdrSmth") pdf("varsel.pdf", height = 10, width = 10) contour(x = as.numeric(rownames(fdr)), y = as.numeric(colnames(fdr)), z = fdr, zlim = c(0, 0.4), nlevels = 10, col = "red") contour(x = as.numeric(rownames(nvar.true)), y = as.numeric(colnames(nvar.true)), z = nvar.true, add = TRUE, col = "blue") contour(x = as.numeric(rownames(fdrSmth)), y = as.numeric(colnames(fdr)), z = fdrSmth, zlim = c(0, 0.4), nlevels = 10, col = "red") contour(x = as.numeric(rownames(nvar.trueSmth)), y = as.numeric(colnames(nvar.trueSmth)), z = nvar.true, add = TRUE, col = "blue") plot(fdr ~ nTrue, data) plot(fdrSmth ~ nTrueSmth, data, ylim = c(0, 1)) dev.off() save.image("varsel.rda")
path: /analysis/biosurv/reports/18_SIS_diag_dsd_final/varsel.R | license: no_license | repo: mpinese/phd-thesis | language: R | is_vendor: false | is_generated: false | length_bytes: 2,377 | extension: r
load("../../data/07_data_for_SIS.rda") source("../../common/08_SIS_common_funcs.R") x = x.diag_dsd y = y.diag_dsd seed = 1234567890 library(plyr) library(doMC) registerDoMC(32) grid.tau = seq(0.6, 0.99, 0.01) grid.gamma = seq(5, 15, 1) grid.tasks = expand.grid(tau = grid.tau, gamma = grid.gamma) x.std = (x - rowMeans(x)) / apply(x, 1, sd) set.seed(seed) results = aaply(grid.tasks, 1, function(params) { print(params) nonperm = CPSS(x.std, y, SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel # nonperm_boot = sapply(1:24, function(i) { # samp = sample.int(ncol(x), replace = TRUE) # CPSS(x.std[,samp], y[samp,], SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel # }) perm = sapply(1:25, function(i) { CPSS(x.std, y[sample.int(nrow(y)),], SIS.FAST, params$tau, 50, gamma = params$gamma, scale = FALSE)$sel }) # c(sum(nonperm), apply(nonperm_boot, 2, sum), apply(perm, 2, sum)) c(sum(nonperm), apply(perm, 2, sum)) }, .parallel = TRUE) nvar.total = aaply(results[,,1], 1:2, median) nvar.false = aaply(results[,,-1], 1:2, median) nvar.true = nvar.total - nvar.false fdr = nvar.false / nvar.total library(fields) library(reshape2) data = melt(nvar.total) colnames(data)[3] = "nTotal" data = cbind(data, "nFalse" = melt(nvar.false)[,3]) data$fdr = data$nFalse / data$nTotal data$nTrue = data$nTotal - data$nFalse data$nTotalSmth = predict(Tps(x = data[,1:2], Y = data[,3])) data$nFalseSmth = predict(Tps(x = data[,1:2], Y = data[,4])) data$fdrSmth = data$nFalseSmth / data$nTotalSmth data$nTrueSmth = data$nTotalSmth - data$nFalseSmth nvar.trueSmth = acast(data, tau ~ gamma, value.var = "nTrueSmth") fdrSmth = acast(data, tau ~ gamma, value.var = "fdrSmth") pdf("varsel.pdf", height = 10, width = 10) contour(x = as.numeric(rownames(fdr)), y = as.numeric(colnames(fdr)), z = fdr, zlim = c(0, 0.4), nlevels = 10, col = "red") contour(x = as.numeric(rownames(nvar.true)), y = as.numeric(colnames(nvar.true)), z = nvar.true, add = TRUE, col = "blue") contour(x = as.numeric(rownames(fdrSmth)), y = as.numeric(colnames(fdr)), z = fdrSmth, zlim = c(0, 0.4), nlevels = 10, col = "red") contour(x = as.numeric(rownames(nvar.trueSmth)), y = as.numeric(colnames(nvar.trueSmth)), z = nvar.true, add = TRUE, col = "blue") plot(fdr ~ nTrue, data) plot(fdrSmth ~ nTrueSmth, data, ylim = c(0, 1)) dev.off() save.image("varsel.rda")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/limma_model_d.R
\name{limma_model_d}
\alias{limma_model_d}
\title{Run a limma model on an ExpressionSet and formula}
\usage{
limma_model_d(x, d, isFormula = FALSE)
}
\arguments{
\item{x}{ExpressionSet (can be created with make_filtered_eset)}

\item{d}{design matrix, or a formula to be used to create one}

\item{isFormula}{boolean specifying whether d is a design matrix (isFormula = FALSE) or a formula (default = FALSE)}
}
\value{
MArrayLM object ("fit")
}
\description{
Reads an ExpressionSet and formula, builds the model design, fits the model, and runs eBayes.
Returns the MArrayLM object ("fit").
If d is a design matrix, it is passed directly to \code{limma::lmFit};
if d is a formula (set isFormula = TRUE), a design matrix is created using model.matrix(d).
}
\examples{
dfList <- load_RawCountsGroup(counts_file = file.path(dir_data, file_data),
                              groups_file = file.path(dir_data, file_groups))
raw <- dfList[[1]]
groupsDF <- dfList[[2]]
rm(dfList)

names(covar_levels) <- covars
covarMtx <- factordf_to_mtx(groupsDF[, covars], cnames = covars, clevels = covar_levels)
eset <- make_filtered_eset(x = raw, y = covarMtx, g = groupsDF$Group,
                           minc = filter_minCPM, mins = filter_minSampleCount,
                           nmethod = "quantile")

Genotype <- covarMtx[, "Genotype"]
Treatment <- covarMtx[, "Treatment"]
myF <- as.formula("~ Genotype + Treatment")
myFit <- limma_model_d(x = eset, d = myF, isFormula = TRUE)
}
\author{
Susan Huse \email{susan.huse@nih.gov}
}
\keyword{Pipeliner}
\keyword{RNASeq}
\keyword{limma}
path: /NCBR.RTools/man/limma_model_d.Rd | license: no_license | repo: arunbodd/NIAID | language: R | is_vendor: false | is_generated: true | length_bytes: 1,631 | extension: rd
## "best" finds the name of the hospital that has the lowest 30-day mortality ## for the specified outcome in that state ## this R script will be making use of the dplyr package library(dplyr) best <- function(state, outcome){ ## arguments: ## state = abbreviated name of a state to query from ## outcome = a specified condition to query outcomes from ## note: [,11] = heart attack ## [,17] = heart failure ## [,23] = pneumonia ## read outcome data caremeasures <- read.csv("outcome-of-care-measures.csv", header = TRUE, na.strings = "Not Available", stringsAsFactors = FALSE) caremeasures <- tbl_df(caremeasures) ## check that outcome are valid validoutcomes <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23) if(outcome %in% names(validoutcomes)){ outcomecol <- validoutcomes[outcome] } else{ print("invalid outcome") stop() } ## check that state is valid if(state %in% caremeasures$State == FALSE){ print("invalid state") stop() } ## subset and rank relevant parameters queryfields <- caremeasures %>% select(2, 7, outcomecol) %>% filter(State == state) colnames(queryfields) <- c("Name", "State", "Rate") is.numeric(queryfields$Rate) ranked <- arrange(queryfields, Rate, Name) print(as.character(ranked[1,1])) }
path: /best.R | license: no_license | repo: MarielM93/RProgrammingAssignment3 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,712 | extension: r
## "best" finds the name of the hospital that has the lowest 30-day mortality ## for the specified outcome in that state ## this R script will be making use of the dplyr package library(dplyr) best <- function(state, outcome){ ## arguments: ## state = abbreviated name of a state to query from ## outcome = a specified condition to query outcomes from ## note: [,11] = heart attack ## [,17] = heart failure ## [,23] = pneumonia ## read outcome data caremeasures <- read.csv("outcome-of-care-measures.csv", header = TRUE, na.strings = "Not Available", stringsAsFactors = FALSE) caremeasures <- tbl_df(caremeasures) ## check that outcome are valid validoutcomes <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23) if(outcome %in% names(validoutcomes)){ outcomecol <- validoutcomes[outcome] } else{ print("invalid outcome") stop() } ## check that state is valid if(state %in% caremeasures$State == FALSE){ print("invalid state") stop() } ## subset and rank relevant parameters queryfields <- caremeasures %>% select(2, 7, outcomecol) %>% filter(State == state) colnames(queryfields) <- c("Name", "State", "Rate") is.numeric(queryfields$Rate) ranked <- arrange(queryfields, Rate, Name) print(as.character(ranked[1,1])) }
\name{predict.evmOpt}
\alias{predict.evmOpt}
\alias{predict.evmSim}
\alias{predict.evmBoot}
\alias{linearPredictors}
\alias{linearPredictors.evmOpt}
\alias{linearPredictors.evmSim}
\alias{linearPredictors.evmBoot}
\alias{plot.lp.evmOpt}
\alias{plot.lp.evmSim}
\alias{plot.lp.evmBoot}
\alias{print.lp.evmOpt}
\alias{print.lp.evmSim}
\alias{print.lp.evmBoot}
\alias{summary.lp.evmOpt}
\alias{summary.lp.evmSim}
\alias{summary.lp.evmBoot}
\title{Predict return levels from extreme value models, or obtain the linear predictors.}
\description{Predict return levels from extreme value models, or obtain the linear predictors.}
\usage{
\method{predict}{evmOpt}(object, M = 1000, newdata = NULL, type = "return level",
    se.fit = FALSE, ci.fit = FALSE, alpha = 0.05, unique. = TRUE, ...)
\method{predict}{evmSim}(object, M = 1000, newdata = NULL, type = "return level",
    se.fit = FALSE, ci.fit = FALSE, alpha = 0.050, unique. = TRUE, all = FALSE,
    sumfun = NULL, ...)
\method{predict}{evmBoot}(object, M = 1000, newdata = NULL, type = "return level",
    se.fit = FALSE, ci.fit = FALSE, alpha = 0.050, unique. = TRUE, all = FALSE,
    sumfun = NULL, ...)
linearPredictors(object, newdata = NULL, se.fit = FALSE, ci.fit = FALSE,
    alpha = 0.050, unique. = TRUE, ...)
\method{linearPredictors}{evmOpt}(object, newdata = NULL, se.fit = FALSE,
    ci.fit = FALSE, alpha = 0.05, unique. = TRUE, full.cov = FALSE, ...)
\method{linearPredictors}{evmSim}(object, newdata = NULL, se.fit = FALSE,
    ci.fit = FALSE, alpha = 0.050, unique. = TRUE, all = FALSE, sumfun = NULL, ...)
\method{linearPredictors}{evmBoot}(object, newdata = NULL, se.fit = FALSE,
    ci.fit = FALSE, alpha = 0.050, unique. = TRUE, all = FALSE, sumfun = NULL, ...)
\method{print}{lp.evmOpt}(x, digits = 3, ...)
\method{print}{lp.evmSim}(x, digits = 3, ...)
\method{print}{lp.evmBoot}(x, digits = 3, ...)
\method{summary}{lp.evmOpt}(object, digits = 3, ...)
\method{summary}{lp.evmSim}(object, digits = 3, ...)
\method{summary}{lp.evmBoot}(object, digits = 3, ...)
\method{plot}{lp.evmOpt}(x, main = NULL, pch = 1, ptcol = 2, cex = .75, linecol = 4,
    cicol = 1, polycol = 15, ...)
\method{plot}{lp.evmSim}(x, type = "median", ...)
\method{plot}{lp.evmBoot}(x, type = "median", ...)
}
\arguments{
\item{object}{An object of class \code{evmOpt}, \code{evmSim} or \code{evmBoot}.}
\item{newdata}{The new data that you want to make the prediction for. Defaults to \code{newdata = NULL}, in which case the data used in fitting the model will be used. Column names must match those of the original data matrix used for model fitting.}
\item{type}{For the predict methods, the type of prediction, either "return level" (or "rl") or "link" (or "lp"). Defaults to \code{type = "return level"}. When a return level is wanted, the user can specify the associated return period via the \code{M} argument. If \code{type = "link"}, the linear predictor(s) for \code{phi} and \code{xi} (or whatever other parameters are in your \code{texmexFamily}) are returned.

For the plot methods for simulation-based estimation of underlying distributions (i.e. objects derived from the "evmSim" and "evmBoot" classes), whether to use the sample median (\code{type = "median"}) or mean (\code{type = "mean"}) estimate of the parameter.}
\item{se.fit}{Whether or not to return the standard error of the predicted value. Defaults to \code{se.fit = FALSE} and is not implemented for \code{predict.evmSim} or \code{predict.evmBoot}.}
\item{ci.fit}{Whether or not to return a confidence interval for the predicted value. Defaults to \code{ci.fit = FALSE}. For objects of class \code{evmOpt}, if set to \code{TRUE} then the confidence interval is a simple symmetric confidence interval based on the estimated approximate standard error. For the \code{evmSim} and \code{evmBoot} methods, the confidence interval represents quantiles of the simulated distribution of the parameters.}
\item{M}{The return level: units are number of observations. Defaults to \code{M = 1000}. If a vector is passed, a list is returned, with items corresponding to the different values of the vector \code{M}.}
\item{alpha}{If \code{ci.fit = TRUE}, a 100(1 - alpha)\% confidence interval is returned. Defaults to \code{alpha = 0.050}.}
\item{unique.}{If \code{unique. = TRUE}, predictions for only the unique values of the linear predictors are returned, rather than for every row of \code{newdata}. Defaults to \code{unique. = TRUE}.}
\item{all}{For the \code{evmSim} and \code{evmBoot} methods, if \code{all = TRUE}, the predictions are returned for every simulated parameter vector. Otherwise, only a summary of the posterior/bootstrap distribution is returned. Defaults to \code{all = FALSE}.}
\item{full.cov}{Should the full covariance matrix be returned as part of a \code{list} object? This is used internally and not intended for direct use. Defaults to \code{full.cov = FALSE}.}
\item{sumfun}{For the \code{evmSim} and \code{evmBoot} methods, a summary function can be passed in. If \code{sumfun = NULL}, the default, the summary function used returns the estimated mean and median, and the quantiles implied by \code{alpha}.}
\item{x}{An object of class \code{lp.evmOpt}, \code{lp.evmSim} or \code{lp.evmBoot}, to be passed to methods for these classes.}
\item{main, pch, ptcol, cex, linecol, cicol, polycol}{Further arguments to plot methods.}
\item{digits}{Number of digits to show when printing objects.}
\item{...}{Further arguments to methods.}
}
\details{
By default, return levels predicted from the unique values of the linear predictors are returned. For the \code{evmBoot} method, estimates of confidence intervals are simply quantiles of the bootstrap sample. The \code{evmBoot} method is just a wrapper for the \code{evmSim} method.
}
\value{A list with one entry for each value of \code{M}.}
\author{Harry Southworth and Janet E. Heffernan}
\note{
At present, the confidence intervals returned for an object of class \code{evmOpt} are simple confidence intervals based on assumptions of normality that are likely to be far from the truth in many cases. A better approach would be to use profile likelihood, and we intend to implement this method at a future date. Alternatively, the credible intervals returned by using Bayesian estimation and the predict method for class "evmSim" will tend to give a better representation of the asymmetry of the estimated intervals around the parameter point estimates.
}
\keyword{methods}
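A short usage sketch for these methods, using the rain data shipped with texmex; the threshold and return periods are illustrative choices, not values from this help page:

library(texmex)

# Fit a GPD to exceedances of the rain series (an evmOpt object), then ask for
# return levels with symmetric confidence intervals; th = 30 and the M values
# are illustrative.
mod <- evm(rain, th = 30)
predict(mod, M = c(100, 1000), ci.fit = TRUE)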
path: /man/predict.evm.Rd | license: no_license | repo: angrycoffeemonster/texmex | language: R | is_vendor: false | is_generated: false | length_bytes: 6,674 | extension: rd
library(pmxTools)

### Name: read_nmcov
### Title: Read in the NONMEM variance-covariance matrix.
### Aliases: read_nmcov

### ** Examples

## Not run:
##D nmVcov <- read_nmcov("run315")
## End(Not run)
path: /data/genthat_extracted_code/pmxTools/examples/read_nmcov.Rd.R | license: no_license | repo: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 208 | extension: r
context('s3path')

# TODO: (RK) Fill this in.
path: /tests/testthat/test-s3path.R | license: permissive | repo: ithayer/s3mpi | language: R | is_vendor: false | is_generated: false | length_bytes: 46 | extension: r
shinyUI(pageWithSidebar(
  headerPanel('Shear Matrices and Their Effects'),
  sidebarPanel(
    h3('How to Use the App:'),
    helpText('There are two areas of user input in the app: selecting the axis to shear and the b value for the shear matrix.'),
    helpText('Selecting the axis in the drop-down menu (from options "X" and "Y") changes the way the shear transformation affects the barn image on the right.'),
    helpText('Selecting a b value changes the magnitude of the shear transformation. Note how changing this value affects the transformation of the barn.'),
    helpText('Hit the "Submit" button to see the image change.'),
    a('For more information on shear matrices', href = 'http://en.wikipedia.org/wiki/Shear_matrix'),
    br(), br(), br(),
    selectInput('shearType', 'Along which axis would you like to shear?',
                choices = c('X' = 'X', 'Y' = 'Y')),
    sliderInput('b', 'b Value for Shearing the Barn', min = -2, max = 2, value = 0, step = .2),
    submitButton('Submit'),
    h4('You decided to shear along...'),
    verbatimTextOutput('shearType'),
    h4('The value for b was...'),
    verbatimTextOutput('b')
  ),
  mainPanel(
    h3('Results of the Shear Matrix'),
    h4('The original barn...'),
    plotOutput('barnPlot'),
    h4('After being transformed by the shear matrix...'),
    plotOutput('shearPlot')
  )
))
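The record holds only ui.R; a minimal hedged sketch of a matching server.R follows. The input/output names come from the UI above, but the barn polygon and the exact shear matrices are assumptions about what the original server did.

library(shiny)

# Hypothetical companion server.R; the 'barn' polygon is invented for illustration.
barn <- rbind(c(0, 0), c(4, 0), c(4, 3), c(2, 5), c(0, 3), c(0, 0))

shinyServer(function(input, output) {
  # 2x2 shear matrix: [1 b; 0 1] shears along x, [1 0; b 1] shears along y
  shearMat <- reactive({
    if (input$shearType == "X") matrix(c(1, 0, input$b, 1), 2, 2)
    else                        matrix(c(1, input$b, 0, 1), 2, 2)
  })
  output$barnPlot <- renderPlot({
    plot(barn, type = "l", asp = 1, main = "Original barn")
  })
  output$shearPlot <- renderPlot({
    sheared <- barn %*% t(shearMat())   # apply the shear to each (x, y) row
    plot(sheared, type = "l", asp = 1, main = "Sheared barn")
  })
})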
path: /DataProducts/ShinyAppProject/ui.R | license: no_license | repo: 2ook/datasciencecoursera | language: R | is_vendor: false | is_generated: false | length_bytes: 1,435 | extension: r
F <- "./data/household_power_consumption.txt" data <- read.table(F, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") D <- data[data$Date %in% c("1/2/2007","2/2/2007") ,] #str(D) datetime <- strptime(paste(D$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S") GAP <- as.numeric(D$Global_active_power) png("plot2.png", width=480, height=480) plot(datetime, GAP , type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.off()
path: /plot2.R | license: no_license | repo: KanchanIIT/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 439 | extension: r
library(glmnet)

mydata = read.table("./TrainingSet/RF/large_intestine.csv", header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

set.seed(123)
# 10-fold cross-validated elastic net (alpha = 0.7), Gaussian response
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.7,
                family = "gaussian", standardize = FALSE)

sink('./Model/EN/Classifier/large_intestine/large_intestine_074.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
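The script above only prints the whole coefficient path. A short follow-up sketch showing how the selected variables could be read off the fit; these are standard glmnet accessors, but choosing lambda.min is an assumption, not something the script does:

# Nonzero coefficients at the cross-validated lambda (lambda.min is one common choice)
cf <- as.matrix(coef(glm, s = "lambda.min"))
selected <- rownames(cf)[cf[, 1] != 0]
selected[selected != "(Intercept)"]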
path: /Model/EN/Classifier/large_intestine/large_intestine_074.R | license: no_license | repo: leon1003/QSMART | language: R | is_vendor: false | is_generated: false | length_bytes: 377 | extension: r
# produce summary graphs
source("code/format_data.R")
library(dplyr)
library(tidyr)
library(data.table)
library(magrittr)
library(testthat)
library(viridis)
library(egg)
library(ggplot2)
options(stringsAsFactors = F)

today_date <- gsub("-", "_", Sys.time() %>% as.Date %>% as.character)
m_back <- 36
interval <- NULL
pre_chelsa <- NULL  # '_pre_chelsa'

# plot standardized betas ------------------------------------------------

# standardized betas from simulations
betas_st_05 <- read.csv('results/simulations/beta_st_0.5_sim.csv')
betas_st_03 <- read.csv('results/simulations/beta_st_0.3_sim.csv')

# standardized betas from data
betas_st_df <- read.csv('I:/sie/101_data_AC/betas_st.csv') %>%
                 mutate( species = substr(species, 1, 20) )

# standardized betas
ggplot(betas_st_df, aes(x = species, y = b_st)) +
  geom_point() +
  facet_grid(~ model) +
  geom_hline(yintercept = 0) +
  theme(axis.text = element_text(angle = 90, size = 5),
        axis.title.x = element_blank()) +
  ylab(expression('Standardized ' * beta)) +
  ggsave('results/beta_stand.tiff', height = 3, width = 6.3, compression = 'lzw')

# absolute betas
ggplot(betas_st_df, aes(x = species, y = beta)) +
  geom_point() +
  facet_grid(~ model) +
  geom_hline(yintercept = 0) +
  theme(axis.text = element_text(angle = 90, size = 5),
        axis.title.x = element_blank()) +
  ylab(expression(beta)) +
  ggsave('results/beta.tiff', height = 3, width = 6.3, compression = 'lzw')

# sd of x_antecedent
ggplot(betas_st_df, aes(x = species, y = sd_x)) +
  geom_point() +
  facet_grid(~ model) +
  theme(axis.text = element_text(angle = 90, size = 5),
        axis.title.x = element_blank()) +
  ylab('sd of x_antecedent') +
  ggsave('results/sd_x.tiff', height = 3, width = 6.3, compression = 'lzw')

# residual standard deviation
ggplot(betas_st_df, aes(x = species, y = sd_y)) +
  geom_point() +
  facet_grid(~ model) +
  geom_hline(yintercept = 0) +
  theme(axis.text = element_text(angle = 90, size = 5),
        axis.title.x = element_blank()) +
  ylab('sd of y') +
  ggsave('results/sd_y.tiff', height = 3, width = 6.3, compression = 'lzw')

# plot histograms
beta_h_df <- betas_st_df %>% gather(measure, value, b_st:sd_y)

# calculate moments
beta_mom <- beta_h_df %>%
              group_by(measure) %>%
              summarise(mean = mean(value),
                        med  = median(value)) %>%
              ungroup

# histograms, with means and medians
beta_h_df %>%
  left_join(beta_mom) %>%
  ggplot() +
  geom_histogram(aes(value)) +
  geom_vline(aes(xintercept = mean)) +
  geom_vline(aes(xintercept = med), linetype = 2) +
  facet_grid(~ measure) +
  ggsave('results/beta_hist1.tiff', height = 3, width = 6.3, compression = 'lzw')

# exploratory
betas_st_df %>% select(beta, sd_x, sd_y) %>% pairs

# relationship between sd_x and sd_y
ggplot(betas_st_df) +
  geom_point(aes(x = sd_x, y = sd_y)) +
  ggsave('results/sd_y_vs_sd_x.tiff', height = 3, width = 6.3, compression = 'lzw')

# beta_st from simulations and data
beta_h_05 <- betas_st_05 %>% gather(measure, value, b_st:sd_y) %>% mutate(Origin = 'Sim_sd_05')
beta_h_03 <- betas_st_03 %>% gather(measure, value, b_st:sd_y) %>% mutate(Origin = 'Sim_sd_03')

# only beta_st from sims with true beta 0.2 or 0.45
beta_02_03 <- beta_h_03 %>% subset(b_sim < 0.5)
beta_02_05 <- beta_h_05 %>% subset(b_sim < 0.5)

# plot only beta standardized
bind_rows(list(beta_h_df, beta_02_03, beta_02_05)) %>%
  mutate(Origin = replace(Origin, is.na(b_sim), 'data')) %>%
  subset(measure == 'b_st') %>%
  ggplot() +
  geom_density(aes(value, color = Origin), size = 1) +
  scale_color_viridis(discrete = TRUE) +
  ggtitle(expression('Sim true ' * beta * ' = 0.2 or 0.45')) +
  ggsave('results/beta_st_data_vs_sim_0.2_0.45.tiff', height = 3, width = 6.3, compression = 'lzw')

# beta_st: data vs. simulations
bind_rows(list(beta_h_df, beta_h_05, beta_h_03)) %>%
  mutate(Origin = replace(Origin, is.na(b_sim), 'data')) %>%
  ggplot() +
  geom_density(aes(value, color = Origin), size = 1) +
  scale_color_viridis(discrete = TRUE) +
  facet_grid(~ measure) +
  ggsave('results/beta_data_vs_sim.tiff', height = 3, width = 6.3, compression = 'lzw')

# Summarize moving windows results by climate variable --------------------

par_post <- function(ii){

  clim_var  <- input_df$clim_var[ii]
  response  <- input_df$response[ii]
  interval  <- input_df$interval[ii]
  resp_clim <- paste0("_", response, "_", clim_var)

  # read lambda/clim data
  lam <- read.csv("C:/cloud/Dropbox/sAPROPOS/all_demog_6tr.csv",
                  stringsAsFactors = F) #%>%
           #subset( SpeciesAuthor != "Purshia_subintegra" )

  # summary info

  # result folder name
  res_folder <- paste0("C:/cloud/Dropbox/sAPROPOS/results/moving_windows/",
                       response, "/", clim_var, pre_chelsa, interval)

  # summary file names
  sum_files <- list.files(res_folder)[grep("posterior", list.files(res_folder))] %>%
                 stringr::str_subset(resp_clim)

  # read files
  mod_summ <- lapply(sum_files, function(x) fread(paste0(res_folder, "/", x))) %>%
                setNames(gsub("posterior", "", sum_files)) %>%
                setNames(gsub(paste0(resp_clim, ".csv"), "", names(.)))

  # all model selection summaries
  all_sums <- Map(function(x, y) tibble::add_column(x, species = y, .before = 1),
                  mod_summ, names(mod_summ)) %>%
                # select ONLY these model selection variables
                lapply(function(x) x %>% dplyr::select(species, model, beta)) %>%
                bind_rows %>%
                mutate(clim_var = clim_var)

  all_sums

}

# all models results
input_df <- expand.grid(clim_var = c("precip", "airt"),
                        response = "log_lambda",  #c("surv","grow","fec",
                        interval = "",
                        stringsAsFactors = F)

# ALL model information
post_df <- lapply(1:nrow(input_df), par_post) %>%
             bind_rows %>%
             arrange(species) %>%
             mutate(species = gsub('^_', '', species))

# Beta means and sd
post_summ <- post_df %>%
               subset(!(model %in% c('ctrl1', 'yr_bet'))) %>%
               group_by(species, model, clim_var) %>%
               summarise(beta_m  = mean(beta, na.rm = T),
                         beta_05 = quantile(beta, prob = 0.05, na.rm = T),
                         beta_95 = quantile(beta, prob = 0.95, na.rm = T)) %>%
               ungroup %>%
               # beta's CI
               mutate(beta_ci = beta_95 - beta_05) %>%
               # trim species names
               mutate(species = strtrim(species, 20)) %>%
               # make factors
               mutate(species  = as.factor(species),
                      model    = as.factor(model),
                      clim_var = as.factor(clim_var))

# plots ----------------------------------------------------

# plot beta means vs. model
ggplot(post_summ, aes(model, beta_m)) +
  geom_point(aes(color = clim_var)) +
  scale_color_viridis(discrete = TRUE) +
  theme(axis.text.x = element_text(angle = 70, hjust = 1)) +
  geom_hline(yintercept = 0.5, linetype = 'dashed') +
  geom_hline(yintercept = -0.5, linetype = 'dashed') +
  ylab("mean beta") +
  ggsave('results/beta_m_by_mod.tiff', width = 6.3, height = 6.3)

# plot beta means vs. species
ggplot(post_summ, aes(species, beta_m)) +
  geom_point(aes(color = clim_var)) +
  scale_color_viridis(discrete = TRUE) +
  theme(axis.text.x = element_text(angle = 70, hjust = 1)) +
  geom_hline(yintercept = 0.5, linetype = 'dashed') +
  geom_hline(yintercept = -0.5, linetype = 'dashed') +
  ggsave('results/betas_m_by_spp.tiff', width = 6.3, height = 5)

# plot beta CI width vs. model
ggplot(post_summ, aes(model, beta_ci)) +
  geom_point(aes(color = clim_var)) +
  scale_color_viridis(discrete = TRUE) +
  theme(axis.text.x = element_text(angle = 70, hjust = 1)) +
  ylab("beta CI width") +
  ggsave('results/beta_ciw_by_mod.tiff', width = 6.3, height = 6.3)

# plot beta CI width vs. species
ggplot(post_summ, aes(species, beta_ci)) +
  geom_point(aes(color = clim_var)) +
  scale_color_viridis(discrete = TRUE) +
  theme(axis.text.x = element_text(angle = 70, hjust = 1)) +
  ylab("beta CI width") +
  ggsave('results/beta_ciw_by_spp.tiff', width = 6.3, height = 5)
path: /R/mod_params.R | license: no_license | repo: AldoCompagnoni/climate_drivers_methods | language: R | is_vendor: false | is_generated: false | length_bytes: 9,220 | extension: r
Sys.setlocale("LC_ALL", "C")

if (dir.exists(file.path("H:","COURSERA","INCOMING EDX & OTHER","DATA SCIENCE - EDX - The Analytics Edge","DATA"))) {
  default_path = file.path("H:","COURSERA","INCOMING EDX & OTHER","DATA SCIENCE - EDX - The Analytics Edge","DATA")
} else if (dir.exists(file.path("C:","COURSERA","DATA SCIENCE - EDX - The Analytics Edge","DATA"))) {
  default_path = file.path("C:","COURSERA","DATA SCIENCE - EDX - The Analytics Edge","DATA")
} else {
  stop("data directory not found")
}

# Read the files; stringsAsFactors=TRUE turns the text fields into factors for modeling
eBayTrain = read.csv(file.path(default_path, "eBayiPadTrain.csv"), stringsAsFactors=TRUE)
eBayTest = read.csv(file.path(default_path, "eBayiPadTest.csv"), stringsAsFactors=TRUE)
eBayTrain$description = NULL
eBayTest$description = NULL

library(caTools)
spl = sample.split(eBayTrain$sold, SplitRatio = 0.7)
Train = subset(eBayTrain, spl==TRUE)
Test = subset(eBayTrain, spl==FALSE)

# Logistic regression model on the train part of eBayTrain
logreg1 = glm(sold ~ biddable + startprice + condition + storage + productline,
              data = Train, family = "binomial")

#install.packages("car")
# collinearity reduces the accuracy of estimates
# vif = 1: no collinearity; vif > 5: too much collinearity, drop or combine a variable
library(car)
vif(logreg1)

# make a prediction on the test part of eBayTrain, and compute accuracy at a 0.5 cutoff
pred_int = predict(logreg1, newdata=Test, type = "response")
sum(diag(table(Test$sold, pred_int > 0.5))) / sum(table(Test$sold, pred_int > 0.5))

# delete the sold variable so eBayTrain and eBayTest have the same variables
eBayTrain$sold = NULL
# combine eBayTrain and eBayTest into one data frame
eBayAll = rbind(eBayTrain, eBayTest)
# make the external prediction on eBayTrain + eBayTest
pred_ext = predict(logreg1, newdata=eBayAll, type = "response")

MySubmission = data.frame(UniqueID = eBayTest$UniqueID, Probability1 = pred_ext)
write.csv(MySubmission, file.path(substr(default_path, 0, 51), "Submission1.csv"), row.names=FALSE)
path: /r-edx-analytics_edge/KAGGLE-GLM-GOOD.R | license: no_license | repo: ao1/course_notes | language: R | is_vendor: false | is_generated: false | length_bytes: 1,979 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RRlog.R
\name{predict.RRlog}
\alias{predict.RRlog}
\title{Predict Individual Prevalences of the RR Attribute}
\usage{
\method{predict}{RRlog}(object, newdata = NULL, se.fit = FALSE, ci = 0.95, ...)
}
\arguments{
\item{object}{A fitted \code{\link{RRlog}} model}

\item{newdata}{An optional vector, matrix, or data.frame with values on the predictor variables. Note that for matrices, the order of predictors should match the order of predictors in the formula. Uses the fitted values of the model if omitted.}

\item{se.fit}{Get standard errors for the fitted/predicted values (using the error variance and df of the original RR model).}

\item{ci}{Confidence level for the confidence interval. If 0, no boundaries are returned.}

\item{...}{ignored}
}
\description{
Predictions of the loglinear RR model for the individual probabilities of having the sensitive RR attribute.
}
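A hedged usage sketch for this method; the data frame `df`, its columns, and the model/p values are illustrative assumptions rather than a documented example:

library(RRreg)

# `df` is assumed to hold a randomized-response outcome `response` and a
# predictor `cov`; the model and p are illustrative Warner-design values.
fit <- RRlog(response ~ cov, data = df, model = "Warner", p = 0.8)
predict(fit, se.fit = TRUE, ci = 0.95)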
/man/predict.RRlog.Rd
no_license
unDocUMeantIt/RRreg
R
false
true
955
rd
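A hedged usage sketch for the documented method. Only the predict arguments come from this help file; the RRgen and RRlog calls below are assumptions about the wider RRreg API.

library(RRreg)
set.seed(1)
# Simulate Warner-design randomized responses (RRgen arguments are assumed)
dat <- RRgen(n = 500, pi.true = 0.30, model = "Warner", p = 0.80)
dat$cov <- rnorm(500) + dat$true
fit <- RRlog(response ~ cov, data = dat, model = "Warner", p = 0.80)
# The method documented above: individual predictions with SEs and a 95% CI
predict(fit, newdata = data.frame(cov = c(-1, 0, 1)), se.fit = TRUE, ci = 0.95)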
crossValidationFeatureSelection_Bin <- function(size=10, fraction=1.0, pvalue=0.05, loops=100,
    covariates="1", Outcome, timeOutcome="Time", variableList, data, maxTrainModelSize=20,
    type=c("LM","LOGIT","COX"), selectionType=c("zIDI","zNRI"), startOffset=0,
    elimination.bootstrap.steps=100, trainFraction=0.67, trainRepetition=9, bootstrap.steps=100,
    nk=0, unirank=NULL, print=TRUE, plots=TRUE, lambda="lambda.1se", equivalent=FALSE,
    bswimsCycles=10, usrFitFun=NULL, featureSize=0)
{
  if (!requireNamespace("cvTools", quietly = TRUE)) {
    install.packages("cvTools", dependencies = TRUE)
  }
  if (!requireNamespace("glmnet", quietly = TRUE)) {
    install.packages("glmnet", dependencies = TRUE)
  }
  nlength <- ncol(data)-1;
  enetSamples <- NULL;
  enetTrainSamples <- NULL;
  casesample = subset(data,get(Outcome) == 1);
  controlsample = subset(data,get(Outcome) == 0);
  casesamplesize <- nrow(casesample);
  controlsamplesize <- nrow(controlsample);
  filter.p.value = 2.0*pvalue;
  if (length(pvalue)>1) {
    filter.p.value = pvalue[2];
    pvalue=pvalue[1];
  }
  K <- as.integer(1.0/(1.0-trainFraction) + 0.5);
  acc = 0.0;
  sen = 0.0;
  spe = 0.0;
  sizecases = 0;
  sizecontrol = 0;
  totsize = 0;
  paracc = 0;
  psen = 0;
  pspe = 0;
  Full.acc = 0.0;
  Full.sen = 0.0;
  Full.spe = 0.0;
  Full.paracc = 0;
  Full.psen = 0;
  Full.pspe = 0;
  formulas <- character();
  AtOptFormulas <- character();
  ForwardFormulas <- character();
  baggFormulas <- character();
  equiFormulas <- character();
  allBSWIMSFormulas <- character();
  trainCorrelations <- vector();
  trainAccuracy <- vector();
  trainSensitivity <- vector();
  trainSpecificity <- vector();
  trainAUC <- vector();
  testAccuracy <- vector();
  testSensitivity <- vector();
  testSpecificity <- vector();
  testAUC <- vector();
  blindCorrelations <- vector();
  WholeFoldBlindAccuracy <- vector();
  WholeFoldBlindSpecificity <- vector();
  WholeFoldBlindSensitivity <- vector();
  WholeFoldBlindAUC <- vector();
  FoldBlindAccuracy <- vector();
  FoldBlindSpecificity <- vector();
  FoldBlindSensitivity <- vector();
  TopUniCoherenceTest <- vector();
  selection.pValue <- pvalue;
  CVselection.pValue <- pvalue;
  par(mfrow=c(1,1))
  if (!is.null(unirank)) {
    uprank <- update.uniRankVar(unirank,data=data,FullAnalysis=FALSE)
    variableList <- uprank$orderframe;
    # print(variableList[1:20,])
  }
  if (size>nrow(variableList)) size <- nrow(variableList);
  if (size<5) size=min(c(5,nrow(variableList)));
  shortVarList <- as.vector(variableList[1:size,1]);
  varlist <- paste(Outcome,"~1");
  for (i in 1:length(shortVarList)) {
    varlist <- paste(varlist,shortVarList[i],sep="+");
  }
  varlist <- all.vars(formula(varlist))[-1];
  shortVarList <- varlist[varlist %in% colnames(data)];
  enetshortVarList <- shortVarList;
  Fullenet <- try(glmnet::cv.glmnet(as.matrix(data[,shortVarList]),as.vector(data[,Outcome]),family="binomial"));
  LASSOVariables <- NULL;
  if (inherits(Fullenet, "try-error")) {
    cat("enet Error")
    Fullenet <- NULL;
  } else {
    cenet <- as.matrix(coef(Fullenet,s=lambda))
    lanames <- names(cenet[as.vector(cenet[,1] != 0),])
    print(LASSOVariables <- paste(lanames[lanames != "(Intercept)" ],collapse=" + "))
  }
  # cat ("END LASSO\n")
  selType = selectionType;
  ex_covariates=covariates;
  if (type!="COX") {
    baseformula <- paste(Outcome,"~",covariates);
    abaseformula <- paste(Outcome,"~ .");
    extvar <- Outcome;
  } else {
    baseformula <- paste("Surv(",timeOutcome,",",Outcome,") ~",covariates);
    abaseformula <- paste("Surv(",timeOutcome,",",Outcome,") ~ .");
    extvar <- c(timeOutcome,Outcome);
  }
  # cat ("Data:",ncol(data),"Features:",nrow(variableList),"Size:",size,"\n")
  FULLBSWiMS.models <- BSWiMS.model(formula=baseformula,data=data,type=type,testType=selectionType,
      pvalue=pvalue,variableList=variableList,size=size,loops=loops,
      elimination.bootstrap.steps=elimination.bootstrap.steps,fraction=fraction,
      maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,
      featureSize=featureSize);
  CurModel_Full <- FULLBSWiMS.models$forward.model;
  UCurModel_Full <- FULLBSWiMS.models$update.model;
  redCurmodel_Full <- FULLBSWiMS.models$BSWiMS.model;
  Full_formula <- redCurmodel_Full$back.model;
  FullBootCross <- bootstrapValidation_Bin(1.0000,bootstrap.steps,redCurmodel_Full$back.formula,Outcome,data,type,plots=plots)
  redBootCross <- FullBootCross;
  cat ("CV pvalue :",CVselection.pValue,"\n")
  cat ("Update :",UCurModel_Full$formula,"\n")
  cat ("At Accuracy:",redCurmodel_Full$at.Accuracy.formula,"\n")
  cat ("B:SWiMS :",redCurmodel_Full$back.formula,"\n")
  if (is.null(FullBootCross)) {
    stop("no initial model found\n");
  }
  if (print) summary(FullBootCross,2)
  inserted = 0;
  rocadded = 0;
  split.blindSen <- NULL;
  blindreboot <- NULL;
  KNNSamples <- NULL;
  Full.KNNSamples <- NULL;
  totSamples <- NULL;
  Full.totSamples <- NULL;
  totTrainSamples <- NULL;
  Full.totTrainSamples <- NULL;
  uniTrainAccuracy <- NULL;
  uniTestAccuracy <- NULL;
  totSamples <- NULL;
  Fullsamples <- min(casesamplesize,controlsamplesize);
  if ( K > Fullsamples) K=Fullsamples
  # cat("Number of folds: ",K,"\n");
  specificities <- c(0.975,0.95,0.90,0.80,0.70,0.60,0.50,0.40,0.30,0.20,0.10,0.05);
  for (i in 1:trainRepetition) {
    j <- 1 + ((i-1) %% K)
    if ( j == 1) {
      casefolds <- cvTools::cvFolds(casesamplesize, K,1, "random");
      controlfolds <- cvTools::cvFolds(controlsamplesize, K,1, "random");
      cycleinsert=0;
      totalUniCor=0;
    }
    CaseTrainSet <- casesample[casefolds$subsets[casefolds$which != j,],];
    CaseBlindSet <- casesample[casefolds$subsets[casefolds$which == j,],];
    ControlTrainSet <- controlsample[controlfolds$subsets[controlfolds$which != j,],];
    ControlBlindSet <- controlsample[controlfolds$subsets[controlfolds$which == j,],];
    TrainSet <- rbind(CaseTrainSet,ControlTrainSet);
    BlindSet <- rbind(CaseBlindSet,ControlBlindSet);
    framesize <- nrow(BlindSet);
    minTrainSamples <- min(nrow(CaseTrainSet),nrow(ControlTrainSet));
    if (nk==0) {
      nk = 2*as.integer(sqrt(minTrainSamples/2)) + 1;
    }
    KnnTrainSet <- rbind(CaseTrainSet[sample(1:nrow(CaseTrainSet),minTrainSamples,replace=FALSE),],
                         ControlTrainSet[sample(1:nrow(ControlTrainSet),minTrainSamples,replace=FALSE),])
    par(mfrow=c(1,1))
    redBootCross <- bootstrapValidation_Bin(1.0000,bootstrap.steps,redCurmodel_Full$back.formula,Outcome,TrainSet,type,plots=plots)
    Full.p <- predict.fitFRESA(redBootCross$boot.model,BlindSet, 'linear');
    Fullknnclass <- getKNNpredictionFromFormula(FULLBSWiMS.models$bagging$formula,KnnTrainSet,BlindSet,Outcome,nk)
    if (!is.null(unirank)) {
      # cat("Unirank\n")
      uprank <- update.uniRankVar(unirank,data=TrainSet,FullAnalysis=FALSE)
      variableList <- uprank$orderframe;
      unitPvalues <- (1.0-pnorm(variableList$ZUni));
      names(unitPvalues) <- variableList$Name;
      if (unirank$categorizationType == "Raw") {
        adjPvalues <- p.adjust(unitPvalues,"BH")
        gadjPvalues <- adjPvalues[adjPvalues < 2*filter.p.value]
        noncornames <- correlated_Remove(data,names(gadjPvalues),thr=0.99);
        if (length(noncornames) > 1) featureSize <- featureSize*length(noncornames)/length(gadjPvalues);
        # cat(length(noncornames),":",length(gadjPvalues),":",length(noncornames)/length(gadjPvalues),"\n");
      }
      filter.z.value <- abs(qnorm(filter.p.value))
      varMax <- sum(variableList$ZUni >= filter.z.value);
      pvarMax <- sum(p.adjust(unitPvalues,"BH") < 2*filter.p.value);
      cat(ncol(TrainSet),": Unadjusted size:",varMax," Adjusted Size:",pvarMax,"\n")
      size= min(c(pvarMax,varMax));
      if (size<5) size=min(c(5,nrow(variableList)));
      shortVarList <- as.vector(variableList[1:size,1]);
      varlist <- paste(Outcome,"~1");
      for (nn in 1:length(shortVarList)) {
        varlist <- paste(varlist,shortVarList[nn],sep="+");
      }
      varlist <- all.vars(formula(varlist))[-1];
      shortVarList <- varlist[varlist %in% colnames(TrainSet)];
      # print(variableList[,1]);
    }
    if (!is.null(Fullenet)) {
      # cat(length(shortVarList)," :In elastic Net\n")
      foldenet <- try(glmnet::cv.glmnet(as.matrix(TrainSet[,shortVarList]),as.vector(TrainSet[,Outcome]),family="binomial"));
      cenet <- as.matrix(coef(foldenet,s=lambda))
      lanames <- names(cenet[as.vector(cenet[,1] != 0),])
      LASSOVariables <- append(LASSOVariables,paste(lanames[lanames != "(Intercept)" ],collapse=" + "))
      if (i == 1) {
        enetSamples <- cbind(BlindSet[,Outcome],predict(foldenet,as.matrix(BlindSet[,shortVarList]),s=lambda),i);
        enetTrainSamples <- cbind(TrainSet[,Outcome],predict(foldenet,as.matrix(TrainSet[,shortVarList]),s=lambda),i);
      } else {
        enetSamples <- rbind(enetSamples,cbind(BlindSet[,Outcome],predict(foldenet,as.matrix(BlindSet[,shortVarList]),s=lambda),i));
        enetTrainSamples <- rbind(enetTrainSamples,cbind(TrainSet[,Outcome],predict(foldenet,as.matrix(TrainSet[,shortVarList]),s=lambda),i));
      }
      # print(LASSOVariables)
    }
    cat ("Loop :",i,"Input Cases =",sum(data[,Outcome] > 0 ),"Input Control =",sum(data[,Outcome] == 0),"\n")
    cat ("Loop :",i,"Train Cases =",sum(TrainSet[,Outcome] > 0 ),"Train Control =",sum(TrainSet[,Outcome] == 0),"\n")
    cat ("Loop :",i,"Blind Cases =",sum(BlindSet[,Outcome] > 0 ),"Blind Control =",sum(BlindSet[,Outcome] == 0),"\n")
    cat ("K :",nk,"KNN T Cases =",sum(KnnTrainSet[,Outcome] > 0 ),"KNN T Control =",sum(KnnTrainSet[,Outcome] == 0),"\n")
    lastinserted = inserted;
    BSWiMS.models <- BSWiMS.model(formula=baseformula,data=TrainSet,type=type,testType=selectionType,
        pvalue=CVselection.pValue,variableList=variableList,size=size,loops=loops,
        elimination.bootstrap.steps=elimination.bootstrap.steps,fraction=fraction,
        maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,
        featureSize=featureSize);
    CurModel_S <- BSWiMS.models$forward.model;
    UCurModel_S <- BSWiMS.models$update.model;
    redCurmodel_S <- BSWiMS.models$BSWiMS.model;
    redBootCross_S <- redCurmodel_S$bootCV;
    if (length(CurModel_S$var.names)>0) {
      # let's use the before-FSC model for prediction
      redfoldmodel.AtOpt <- redCurmodel_S$at.opt.model;
      forwardmodel <- UCurModel_S$final.model;
      baggedfoldmodel <- BSWiMS.models$bagging$bagged.model;
      redfoldmodel <- redCurmodel_S$back.model;
      cat ("Update :",UCurModel_S$formula,"\n")
      cat ("At Accuracy:",redCurmodel_S$at.Accuracy.formula,"\n")
      cat ("B:SWiMS :",redCurmodel_S$back.formula,"\n")
      if (!is.null(forwardmodel)) {
        if (print) {
          cat ("\n The last CV bootstrapped model")
          s <- summary(redBootCross_S,2)
        }
        thebaglist <- CurModel_S$formula.list;
        bagg <- baggedModel(thebaglist,TrainSet,type,Outcome,timeOutcome,univariate=variableList,useFreq=loops);
        # cat ("Predict 1\n")
        p.AtOpt <- predict.fitFRESA(redfoldmodel.AtOpt,BlindSet, 'linear');
        p.forward <- predict.fitFRESA(forwardmodel,BlindSet, 'linear');
        baggedForwardPredict <- predict.fitFRESA(bagg$bagged.model,BlindSet,'linear');
        firstBSWIMSPredict <- predict.fitFRESA(redfoldmodel,BlindSet, 'linear');
        if (length(BSWiMS.models$formula.list)>1) {
          medianBSWIMSPredict <- ensemblePredict(BSWiMS.models$formula.list,TrainSet,BlindSet,predictType = "linear",type = type)$ensemblePredict;
        } else {
          medianBSWIMSPredict <- firstBSWIMSPredict;
        }
        # cat ("Predict 2\n")
        medianPred <- ensemblePredict(BSWiMS.models$forward.selection.list,TrainSet,BlindSet,predictType = "linear",type = type)$ensemblePredict;
        eq <- NULL;
        # cat ("End Predict\n")
        if (length(redfoldmodel$coefficients)> 1) {
          p <- predict.fitFRESA(baggedfoldmodel,BlindSet,'linear');
          if (equivalent) {
            collectFormulas <- BSWiMS.models$forward.selection.list;
            bagg2 <- baggedModel(collectFormulas,data,type,Outcome,timeOutcome,univariate=variableList,useFreq=loops);
            modelterms <- attr(terms(redfoldmodel),"term.labels");
            shortcan <- bagg2$frequencyTable;
            eshortlist <- unique(c(names(shortcan),stringr::str_replace_all(modelterms,":","\\*")));
            eshortlist <- eshortlist[!is.na(eshortlist)];
            if (length(eshortlist)>0) {
              nameslist <- c(all.vars(baggedfoldmodel$formula),as.character(variableList[eshortlist,2]));
              nameslist <- unique(nameslist[!is.na(nameslist)]);
              if (!is.null(unirank) && (unirank$categorizationType != "RawRaw")) {
                eqdata <- TrainSet[,nameslist];
              } else {
                eqdata <- TrainSet;
              }
              eq <- reportEquivalentVariables(redfoldmodel,pvalue = 0.25*pvalue,
                        data=eqdata,
                        variableList=cbind(eshortlist,eshortlist),
                        Outcome = Outcome,
                        timeOutcome=timeOutcome,
                        type = type,osize=featureSize,
                        method="BH",fitFRESA=TRUE);
              eqpredict <- ensemblePredict(eq$formula.list,TrainSet,BlindSet,predictType = "linear",type = type)$ensemblePredict;
              equiFormulas <- append(equiFormulas,eq$formula.list);
            }
          } else {
            eqpredict <- p;
          }
        } else {
          p <- firstBSWIMSPredict;
          eqpredict <- firstBSWIMSPredict;
        }
        # cat("B KNN\n")
        knnclass <- getKNNpredictionFromFormula(baggedfoldmodel$formula,KnnTrainSet,BlindSet,Outcome,nk);
        # cat("A KNN \n")
        palt <- NULL;
        if (!is.null(usrFitFun)) {
          fit <- usrFitFun(formula(abaseformula),TrainSet[,c(extvar,shortVarList)]);
          palt <- predict(fit,BlindSet);
          if (is.null(baggedfoldmodel)) {
            vset <- all.vars(bagg$formula);
          } else {
            vset <- all.vars(baggedfoldmodel$formula);
          }
          if (!is.null(eq)) {
            vset <- unique(append(vset,all.vars(eq$equivalentModel$formula)));
          }
          if (length(vset) == (1+1*(type=="COX"))) {
            vset <- all.vars(forwardmodel$formula);
          }
          if (length(vset) > (1+1*(type=="COX"))) {
            fit <- usrFitFun(formula(abaseformula),TrainSet[,vset]);
            palt <- cbind(palt,predict(fit,BlindSet));
          } else {
            palt <- cbind(palt,numeric(nrow(BlindSet)));
          }
        }
        inserted = inserted + 1
        cycleinsert = cycleinsert + 1
        tcor <- cor.test(predict.fitFRESA(redfoldmodel,TrainSet, 'linear'),
                         predict.fitFRESA(redBootCross$boot.model,TrainSet, 'linear'),
                         method = "spearman",na.action=na.omit,exact=FALSE)$estimate
        trainCorrelations <- append(trainCorrelations,tcor);
        trainAccuracy <- append(trainAccuracy,redBootCross_S$base.Accuracy);
        trainSensitivity <- append(trainSensitivity,redBootCross_S$base.Sensitivity);
        trainSpecificity <- append(trainSpecificity,redBootCross_S$base.Specificity);
        trainAUC <- append(trainAUC,mean(redBootCross_S$train.ROCAUC));
        bcor <- 0;
        if (framesize>5) {
          bcor <- cor.test(p, Full.p, method = "spearman",na.action=na.omit,exact=FALSE)$estimate;
          blindCorrelations <- append(blindCorrelations,bcor);
        }
        if (((sumca <- sum(BlindSet[,Outcome]>0)) > 1) && ((sumco <- sum(BlindSet[,Outcome]==0)) > 1)) {
          atRoc <- pROC::roc(as.vector(BlindSet[,Outcome]), p,plot=FALSE,ci=TRUE,auc=TRUE,of='se',
                             specificities=specificities,boot.n=100,smooth=FALSE,progress= 'none',quiet = TRUE)
          splitRoc <- atRoc$ci[,2];
          FullRocBlindAUC <- pROC::roc(as.vector(BlindSet[,Outcome]), Full.p,plot=FALSE,auc=TRUE,ci=FALSE,quiet = TRUE)$auc
          WholeFoldBlindAUC <- append(WholeFoldBlindAUC,FullRocBlindAUC);
          if (rocadded == 0) {
            split.blindSen <- splitRoc;
          } else {
            split.blindSen <- rbind(split.blindSen,splitRoc);
          }
          rocadded = rocadded + 1;
        }
        totsize <- totsize + framesize;
        scase <- sum(BlindSet[,Outcome] == 1);
        scotr <- sum(BlindSet[,Outcome] == 0);
        sizecases <- sizecases + scase;
        sizecontrol <- sizecontrol + scotr;
        psen <- sum( 1*((BlindSet[,Outcome] > 0)*( p >= 0.0 )) , na.rm = TRUE)
        pspe <- sum( 1*((BlindSet[,Outcome] == 0)*( p < 0.0 )) , na.rm = TRUE)
        acc <- acc + psen + pspe;
        sen <- sen + psen;
        spe <- spe + pspe;
        psen <- sum( 1*((BlindSet[,Outcome] > 0)*( Full.p >= 0.0 )) , na.rm = TRUE)
        pspe <- sum( 1*((BlindSet[,Outcome] == 0)*( Full.p < 0.0 )) , na.rm = TRUE)
        Full.acc <- Full.acc + psen + pspe;
        Full.sen <- Full.sen + psen;
        Full.spe <- Full.spe + pspe;
        paracc = acc/totsize;
        psen = 0;
        pspe = 0;
        if (sizecases>0) {
          psen = sen/sizecases;
        }
        if (sizecontrol>0) {
          pspe = spe/sizecontrol;
        }
        Full.paracc = Full.acc/totsize;
        Full.psen = 0;
        Full.pspe = 0;
        if (sizecases>0) {
          Full.psen = Full.sen/sizecases;
        }
        if (sizecontrol>0) {
          Full.pspe = Full.spe/sizecontrol;
        }
        WholeFoldBlindAccuracy <- append(WholeFoldBlindAccuracy,redBootCross$blind.accuracy);
        WholeFoldBlindSpecificity <- append(WholeFoldBlindSpecificity,redBootCross$blind.specificity);
        WholeFoldBlindSensitivity <- append(WholeFoldBlindSensitivity,redBootCross$blind.sensitivity);
        FoldBlindAccuracy <- append(FoldBlindAccuracy,redBootCross_S$blind.accuracy);
        FoldBlindSpecificity <- append(FoldBlindSpecificity,redBootCross_S$blind.specificity);
        FoldBlindSensitivity <- append(FoldBlindSensitivity,redBootCross_S$blind.sensitivity);
        Full.ptrain <- predict.fitFRESA(redBootCross$boot.model,TrainSet, 'linear');
        ptrain <- predict.fitFRESA(redfoldmodel,TrainSet, 'linear');
        if ( cycleinsert == 1) {
          cvcycle.predictions <- cbind(BlindSet[,Outcome],p.AtOpt,i);
        }
        px <- cbind(BlindSet[,Outcome],p,i,medianPred,baggedForwardPredict,p.forward,p.AtOpt,eqpredict,firstBSWIMSPredict,medianBSWIMSPredict);
        if (!is.null(usrFitFun)) {px <- cbind(px,palt);}
        rownames(px) <- rownames(BlindSet);
        totSamples <- rbind(totSamples,px);
        px <- cbind(BlindSet[,Outcome],p.AtOpt,i);
        rownames(px) <- rownames(BlindSet);
        cvcycle.predictions <- rbind(cvcycle.predictions,px);
        px <- cbind(BlindSet[,Outcome],Full.p,i);
        rownames(px) <- rownames(BlindSet);
        Full.totSamples <- rbind(Full.totSamples,px);
        px <- cbind(BlindSet[,Outcome],abs(knnclass$prob$prob-1*(knnclass$prediction=="0")),i);
        rownames(px) <- rownames(BlindSet);
        KNNSamples <- rbind(KNNSamples,px);
        px <- cbind(BlindSet[,Outcome],abs(Fullknnclass$prob$prob-1*(Fullknnclass$prediction=="0")),i);
        rownames(px) <- rownames(BlindSet);
        Full.KNNSamples <- rbind(Full.KNNSamples,px);
        px <- cbind(TrainSet[,Outcome],ptrain,i);
        rownames(px) <- rownames(TrainSet);
        totTrainSamples <- rbind(totTrainSamples,px);
        px <- cbind(TrainSet[,Outcome],Full.ptrain,i);
        rownames(px) <- rownames(TrainSet);
        Full.totTrainSamples <- rbind(Full.totTrainSamples,px);
        formulas <- append(formulas,BSWiMS.models$bagging$formula);
        AtOptFormulas <- append(AtOptFormulas,redCurmodel_S$at.Accuracy.formula);
        ForwardFormulas <- append(ForwardFormulas,UCurModel_S$formula);
        baggFormulas <- append(baggFormulas,bagg$formula);
        allBSWIMSFormulas <- append(allBSWIMSFormulas,BSWiMS.models$formula.list);
        knnACC <- sum(KNNSamples[,1] == (KNNSamples[,2]>0.5))/totsize;
        knnSEN <- sum((KNNSamples[,1]>0.5) & (KNNSamples[,2]>0.5))/sizecases;
        knnSPE <- sum((KNNSamples[,1]<0.5) & (KNNSamples[,2]<0.5))/sizecontrol;
        Full.knnACC <- sum(Full.KNNSamples[,1] == (Full.KNNSamples[,2]>0.5))/totsize;
        Full.knnSEN <- sum((Full.KNNSamples[,1]>0.5) & (Full.KNNSamples[,2]>0.5))/sizecases;
        Full.knnSPE <- sum((Full.KNNSamples[,1]<0.5) & (Full.KNNSamples[,2]<0.5))/sizecontrol;
        cat ("Loop :",i,"Blind Cases =",scase,"Blind Control =",scotr,"Total =",totsize,
             "Size Cases =",sizecases,"Size Control =",sizecontrol,"\n")
        cat ("Accumulated Models CV Accuracy =",paracc,"Sensitivity =",psen,"Specificity =",pspe,
             "Forw. Ensemble Accuracy=",mean(1.0*((totSamples[,4] > 0) == (totSamples[,1] > 0))),"\n")
        cat ("Initial Model Accumulated CV Accuracy =",Full.paracc,"Sensitivity =",Full.psen,"Specificity =",Full.pspe,"\n");
        cat ("Initial Model Bootstrapped Accuracy =",redBootCross$blind.accuracy,"Sensitivity =",redBootCross$blind.sensitivity,"Specificity =",redBootCross$blind.specificity,"\n")
        cat ("Current Model Bootstrapped Accuracy =",redBootCross_S$blind.accuracy,"Sensitivity =",redBootCross_S$blind.sensitivity,"Specificity =",redBootCross_S$blind.specificity,"\n")
        cat ("Current KNN Accuracy =",knnACC,"Sensitivity =",knnSEN,"Specificity =",knnSPE,"\n")
        cat ("Initial KNN Accuracy =",Full.knnACC,"Sensitivity =",Full.knnSEN,"Specificity =",Full.knnSPE,"\n")
        cat ("Train Correlation: ",tcor," Blind Correlation :",bcor,"\n KNN to Model Confusion Matrix: \n")
        print(table(KNNSamples[,2]>0.5,totSamples[,2]>0.0))
      }
    } else {
      cat ("Loop :",i,"No Model.\n")
    }
    uniEval <- getVar.Bin(UCurModel_Full$final.model,TrainSet,Outcome,type = type,testData=BlindSet);
    if (i==1) {
      uniTrainAccuracy <- rbind(uniEval$uniTrainAccuracy);
      TopUniTrainCor <- vector();
    } else {
      uniTrainAccuracy <- rbind(uniTrainAccuracy,uniEval$uniTrainAccuracy);
    }
    if ( j == 1) {
      cvcycle.uniAccuracies <- uniEval$uniTestAccuracy * framesize;
      totblindadded = framesize;
      topUniTestCor <- vector();
      totalUniCor = 0;
    } else {
      cvcycle.uniAccuracies <- rbind(cvcycle.uniAccuracies,uniEval$uniTestAccuracy * framesize);
      totblindadded = totblindadded + framesize;
    }
    if ((lastinserted<inserted)&&(length(redCurmodel_S$back.model$coefficients)>1)) {
      uniEvalCor <- getVar.Bin(redCurmodel_S$back.model,TrainSet,Outcome,type = type,testData=BlindSet);
      TopUniTrainCor <- append(TopUniTrainCor,uniEvalCor$uniTrainAccuracy[1]);
      topUniTestCor <- append(topUniTestCor,uniEvalCor$uniTestAccuracy[1] * framesize);
      totalUniCor <- totalUniCor + framesize
    }
    if ( j == K) {
      if (totalUniCor>0) TopUniCoherenceTest <- append(TopUniCoherenceTest,sum(topUniTestCor)/totalUniCor)
      if (i == K) {
        uniTestAccuracy <- rbind(colSums(cvcycle.uniAccuracies)/totblindadded);
      } else {
        uniTestAccuracy <- rbind(uniTestAccuracy,colSums(cvcycle.uniAccuracies)/totblindadded);
      }
    }
    if ( j == K) {
      nsamp <- nrow(cvcycle.predictions)
      if (nsamp>0) {
        atRocAUC <- pROC::roc(as.vector(cvcycle.predictions[,1]), cvcycle.predictions[,2],plot=FALSE,auc=TRUE,smooth=FALSE,quiet = TRUE)$auc;
        testAccuracy <- append(testAccuracy,sum(cvcycle.predictions[,1] == 1.0*(cvcycle.predictions[,2]>=0.0))/nsamp);
        testSensitivity <- append(testSensitivity,sum((cvcycle.predictions[,1] == 1) & (cvcycle.predictions[,2]>=0.0))/sum(cvcycle.predictions[,1] == 1));
        testSpecificity <- append(testSpecificity,sum((cvcycle.predictions[,1] == 0) & (cvcycle.predictions[,2] <0.0))/sum(cvcycle.predictions[,1] == 0));
        testAUC <- append(testAUC,atRocAUC);
      }
      # print(testAccuracy)
      # print(testAUC)
    }
  }
  if (length(formulas)==0) {
    stop("No Significant Models Found\n");
  }
  if (!is.null(usrFitFun)) {
    colnames(totSamples) <- c("Outcome","Prediction","Model","Ensemble.Forward","Forward.Selection.Bagged","Forward","Backwards","eB.SWiMS","first.B.SWiMS","Ensemble.B.SWiMS","usrFitFunction","usrFitFunction_Sel");
  } else {
    colnames(totSamples) <- c("Outcome","Prediction","Model","Ensemble.Forward","Forward.Selection.Bagged","Forward","Backwards","eB.SWiMS","first.B.SWiMS","Ensemble.B.SWiMS");
  }
  # totSamples <- as.data.frame(totSamples);
  colnames(Full.totSamples) <- c("Outcome","Prediction","Model");
  # Full.totSamples <- as.data.frame(Full.totSamples);
  colnames(totTrainSamples) <- c("Outcome","Prediction","Model");
  # totTrainSamples <- as.data.frame(totTrainSamples);
  colnames(Full.totTrainSamples) <- c("Outcome","Prediction","Model");
  # Full.totTrainSamples <- as.data.frame(Full.totTrainSamples);
  colnames(KNNSamples) <- c("Outcome","Prediction","Model");
  # KNNSamples <- as.data.frame(KNNSamples);
  colnames(Full.KNNSamples) <- c("Outcome","Prediction","Model");
  # Full.KNNSamples <- as.data.frame(Full.KNNSamples);
  BSWiMS.ensemble.prediction <- NULL
  bsta <- boxplot(totSamples[,"Prediction"]~rownames(totSamples),plot=FALSE)
  sta <- cbind(bsta$stats[3,])
  rownames(sta) <- bsta$names
  BSWiMS.ensemble.prediction <- cbind(data[rownames(sta),Outcome],sta)
  colnames(BSWiMS.ensemble.prediction) <- c("Outcome","Prediction");
  BSWiMS.ensemble.prediction <- as.data.frame(BSWiMS.ensemble.prediction);
  if (!is.null(enetSamples)) {
    colnames(enetSamples) <- c("Outcome","Prediction","Model");
    # enetSamples <- as.data.frame(enetSamples);
    colnames(enetTrainSamples) <- c("Outcome","Prediction","Model");
    # enetTrainSamples <- as.data.frame(enetTrainSamples);
  }
  sumSen = NA;
  if (plots) {
    plotModels.ROC(totSamples,theCVfolds=K,predictor="Prediction",main="B:SWiMS");
    par(mfrow=c(1,1))
    incBsen=0
    aucBlindTest <- pROC::roc(as.vector(totSamples[,1]),totSamples[,2],col="red",auc=TRUE,plot=TRUE,smooth=FALSE,lty=3,quiet = TRUE)$auc
    par(new=TRUE)
    aucCVBlind <- pROC::roc(as.vector(Full.totSamples[,1]),Full.totSamples[,2],col="blue",auc=TRUE,plot=TRUE,ci=FALSE,smooth=FALSE,quiet = TRUE)$auc
    aucBoot=0;
    aucTrain=0;
    if (!is.null(FullBootCross$testPrediction)) {
      par(new=TRUE)
      aucTrain <- pROC::roc( as.vector(FullBootCross$outcome), FullBootCross$boot.model$linear.predictors,col="green",plot=TRUE,auc=TRUE,smooth=FALSE,quiet = TRUE)$auc;
      par(new=TRUE)
      aucBoot <- pROC::roc( as.vector(FullBootCross$testOutcome), FullBootCross$testPrediction,col="black",auc=TRUE,plot=TRUE,smooth=FALSE,quiet = TRUE)$auc;
    }
    ley.names <- c(paste("Bootstrapped: Train Model ROC (",sprintf("%.3f",aucTrain),")"),
                   paste("Bootstrapped: Blind ROC (",sprintf("%.3f",aucBoot),")"),
                   paste("CV: Blind ROC (",sprintf("%.3f",aucCVBlind),")"),
                   paste("CV: Blind Fold Models Coherence (",sprintf("%.3f",aucBlindTest),")"))
    ley.colors <- c("green","black","blue","red")
    ley.lty <- c(1,1,1,3)
    if (rocadded>0) {
      boxplot(split.blindSen,add=TRUE, axes = FALSE,boxwex=0.04,at=specificities);
      sumSen <- colMeans(split.blindSen,na.rm = TRUE);
      sennames <- names(sumSen);
      sumSen <- append(0,sumSen);
      sumSen <- append(sumSen,1);
      sennames <- append("1",sennames);
      sennames <- append(sennames,"0");
      names(sumSen) <- sennames;
      spevalues <- as.numeric(names(sumSen));
      lines(spevalues,sumSen,col="red",lwd=2.0);
      auc = 0;
      for (i in 2:length(spevalues)) {
        auc = auc + (spevalues[i-1]-spevalues[i])*(sumSen[i-1]+(sumSen[i]-sumSen[i-1])/2)
      }
      ley.names <- append(ley.names,paste("CV Blind: Mean ROC of Models (",sprintf("%.3f",auc),")"));
      ley.colors <- append(ley.colors,"red");
      ley.lty <- append(ley.lty,1);
    } else {
      sumSen = NA;
    }
    legend(0.6,0.30, legend=ley.names,col = ley.colors, lty = ley.lty,bty="n")
  }
  if (!is.null(uniTrainAccuracy)) {
    uniTrainAccuracy <- as.data.frame(uniTrainAccuracy);
    uniTestAccuracy <- as.data.frame(uniTestAccuracy);
    colnames(uniTrainAccuracy) <- attr(terms(formula(UCurModel_Full$formula)),'term.labels');
    colnames(uniTestAccuracy) <- attr(terms(formula(UCurModel_Full$formula)),'term.labels');
  }
  result <- list(formula.list=formulas,
    Models.testPrediction=totSamples,
    FullBSWiMS.testPrediction=Full.totSamples,
    TestRetrained.blindPredictions=blindreboot,
    LastTrainBSWiMS.bootstrapped=redCurmodel_S$bootCV,
    Test.accuracy=paracc,
    Test.sensitivity=psen,
    Test.specificity=pspe,
    Train.correlationsToFull=trainCorrelations,
    Blind.correlationsToFull=blindCorrelations,
    FullModelAtFoldAccuracies=WholeFoldBlindAccuracy,
    FullModelAtFoldSpecificties=WholeFoldBlindSpecificity,
    FullModelAtFoldSensitivities=WholeFoldBlindSensitivity,
    FullModelAtFoldAUC=WholeFoldBlindAUC,
    CVTrain.Accuracies=trainAccuracy,
    CVTrain.Sensitivity=trainSensitivity,
    CVTrain.Specificity=trainSpecificity,
    CVTrain.AUCs=trainAUC,
    CVTest.Accuracies=testAccuracy,
    CVTest.Sensitivity=testSensitivity,
    CVTest.Specificity=testSpecificity,
    CVTest.AUCs=testAUC,
    AtCVFoldModelBlindAccuracies=FoldBlindAccuracy,
    AtCVFoldModelBlindSpecificities=FoldBlindSpecificity,
    AtCVFoldModelBlindSensitivities=FoldBlindSensitivity,
    forwardSelection = CurModel_Full,
    updateforwardSelection = UCurModel_Full,
    BSWiMS = redCurmodel_Full,
    FullBSWiMS.bootstrapped=FullBootCross,
    Models.testSensitivities = split.blindSen,
    FullKNN.testPrediction=Full.KNNSamples,
    KNN.testPrediction=KNNSamples,
    Fullenet=Fullenet,
    LASSO.testPredictions=enetSamples,
    LASSOVariables=LASSOVariables,
    uniTrain.Accuracies=uniTrainAccuracy,
    uniTest.Accuracies=uniTestAccuracy,
    uniTest.TopCoherence=TopUniCoherenceTest,
    uniTrain.TopCoherence=TopUniTrainCor,
    Models.trainPrediction=totTrainSamples,
    FullBSWiMS.trainPrediction=Full.totTrainSamples,
    LASSO.trainPredictions=enetTrainSamples,
    BSWiMS.ensemble.prediction = BSWiMS.ensemble.prediction,
    ForwardFormulas.list = ForwardFormulas,
    AtOptFormulas.list = AtOptFormulas,
    baggFormulas.list = baggFormulas,
    equiFormulas.list = equiFormulas,
    allBSWiMSFormulas.list = allBSWIMSFormulas,
    LassoFilterVarList = enetshortVarList,
    BSWiMS.models=FULLBSWiMS.models
  );
  return (result)
}
/fuzzedpackages/FRESA.CAD/R/crossValidationFeatureSelection.Bin.R
no_license
akhikolla/testpackages
R
false
false
29,169
r
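A hedged driver sketch for the routine above. The example data, the numeric-column filtering, and the two-column variableList layout are assumptions inferred from how the function indexes variableList; the FRESA.CAD helpers it calls (BSWiMS.model, bootstrapValidation_Bin, predict.fitFRESA, ...) are expected to be attached.

library(FRESA.CAD)                      # supplies the modeling helpers used internally
data(stagec, package = "rpart")         # illustrative binary-outcome data, not from this repo
ds <- stagec[complete.cases(stagec), ]
ds <- ds[, sapply(ds, is.numeric)]      # keep numeric columns so glmnet's as.matrix works
ds$pgstat <- 1L * (ds$pgstat > 0)
vnames <- setdiff(colnames(ds), c("pgstat", "pgtime"))
vlist <- data.frame(Name = vnames, Description = vnames)
cv <- crossValidationFeatureSelection_Bin(size = 5, pvalue = 0.05, loops = 20,
                                          Outcome = "pgstat", variableList = vlist,
                                          data = ds, type = "LOGIT",
                                          trainRepetition = 3, plots = FALSE)
cv$Test.accuracy                        # accumulated cross-validated accuracy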
#' Function to create stack for predictions
#' @param nxy Number of points in x and y directions.
#' @param mesh INLA mesh.
#' @param data Data frame with columns for coordinates, and others are covariates.
#' @param tag Name for tag for the stack (defaults to "pred").
#' @param coordnames Names of coordinates (defaults to X and Y)
#' @param boundary Boundary of region to project onto. Defaults to NULL, when the boundary of the mesh will be used. Either of class SpatialPolygons or two columns with the coordinates of the polygon
#' @param intercept Logical: should an intercept be added? Defaults to TRUE
#'
#' @return An INLA stack onto which new data can be projected
#'
#' @export
#' @import INLA
#' @importFrom sp over SpatialPoints
MakeProjectionGrid <- function(nxy, mesh, data, tag='pred',
                               coordnames = c("X", "Y"),
                               boundary=NULL, intercept=TRUE) {
  if("resp"%in%coordnames) stop("resp cannot be a coordinate name")
  if("e"%in%coordnames) stop("e cannot be a coordinate name")
  if(is.null(boundary)) boundary <- mesh$loc[mesh$segm$int$idx[,2],]

  if(inherits(boundary, "SpatialPolygons")) {  # class(x)=="..." is unsafe for multi-class objects
    # create grid based on the inla mesh and the number of cells specified by the nxy parameter
    projgrid <- inla.mesh.projector(mesh, xlim=boundary@bbox["x",],
                                    ylim=boundary@bbox["y",], dims=nxy)
    # get the index of points on the grid within the boundary
    xy.in <- !is.na(over(SpatialPoints(projgrid$lattice$loc,
                                       proj4string=boundary@proj4string), boundary))
  } else {
    if(ncol(boundary)<2) stop("Boundary should have at least 2 columns")
    # create grid based on the inla mesh
    projgrid <- inla.mesh.projector(mesh, xlim=range(boundary[,1]),
                                    ylim=range(boundary[,2]), dims=nxy)
    # get the index of points on the grid within the boundary
    xy.in <- splancs::inout(projgrid$lattice$loc, boundary)
  }

  # select only points on the grid that fall within the boundary
  predcoords <- projgrid$lattice$loc[which(xy.in),]
  colnames(predcoords) <- coordnames
  Apred <- projgrid$proj$A[which(xy.in), ]

  # Extract covariates for points, add intercept and coordinates
  NearestCovs=GetNearestCovariate(points=predcoords, covs=data)
  if(intercept) NearestCovs$Intercept=1
  NearestCovs@data[,colnames(NearestCovs@coords)] <- NearestCovs@coords

  # stack the predicted data
  stk <- inla.stack(list(resp=cbind(NA, rep(NA, nrow(NearestCovs))),
                         e=rep(0, nrow(NearestCovs))),
                    A=list(1,Apred), tag=tag,
                    effects=list(NearestCovs@data, list(i=1:mesh$n)))

  pred=list(stk=stk, xy.in=xy.in, predcoords=predcoords)
  pred
}
/Functions/MakeProjectionGrid.R
no_license
nmpeters/HMMandPPMcode
R
false
false
2,555
r
#These first few lines run only when the file is run in RStudio, !!NOT when an Rmd/Rnw file calls it!!
rm(list=ls(all=TRUE))  #Clear the variables from previous runs.

############################
## @knitr LoadPackages
require(RODBC)
require(grid)
require(lattice)
require(dplyr)
require(ggplot2)
require(gridExtra)
require(lme4)
require(reshape2)
require(testit)
require(png)

############################
## @knitr LoadSources
source("./Models/LCM/graphs/FERE graph.R")  #Load the `BuildFERE()` function
source("./Models/LCM/graphs/bar graph of fit.R")  #Load the `BuildBar()` function
source("./Models/LCM/graphs/line graph of trajectories.R")  #Load the `BuildLine()` function

############################
## @knitr loadRDS
modelName <- "m0R3"

# Read in the different RDS files and join them all together
pathDataDirectory <- file.path("./Models/LCM/models/datasets")
# filenamePattern <- ".+\\.rds"  #All RDS files
filenamePattern <- "m.{1,}Info\\.rds"  #Only the model-info RDS files
retrievedFilenames <- list.files(path=pathDataDirectory, pattern=filenamePattern)
filePaths <- file.path(pathDataDirectory, retrievedFilenames)

dsInfo <- readRDS(filePaths[1])
for( i in 2:length(filePaths) ) { #filePaths[1] seeded `dsInfo` above; to debug, shrink the range (eg, 2:6) to isolate a problem file.
#   message("About to read ", filePaths[i])
  dsInfoSingle <- readRDS(filePaths[i])
  dsInfo <- plyr::join(x=dsInfo, y=dsInfoSingle, by="Coefficient", type="left", match="all")
  rm(dsInfoSingle)
}
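# A minimal sketch of the join-accumulation pattern above, with hypothetical
# in-memory data frames standing in for the model-info RDS files:
#   m1 <- data.frame(Coefficient = c("int", "slope"), m1Est = c(1.2, 0.3))
#   m2 <- data.frame(Coefficient = c("int", "slope"), m2Est = c(1.1, 0.4))
#   plyr::join(m1, m2, by = "Coefficient", type = "left", match = "all")
#   #   Coefficient m1Est m2Est
#   # 1         int   1.2   1.1
#   # 2       slope   0.3   0.4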
/Models/LCM/fit/customFit.R
no_license
taoistgirl/Longitudinal_Models_of_Religiosity_NLSY97
R
false
false
1,473
r
#### Test INLA and MCMC on a toy example
library(INLA)
library(lattice)
library(fields)

#### 1 Test on the plane (example from JSS paper)

#### 1.1 Generate the true process and the data

## Simulate points -- uniform on [0,1] * [0,1]
m = 400
yloc <- matrix(runif(m*2), m, 2)
mesh <- inla.mesh.2d(loc = yloc, cutoff = 0.05, offset = c(0.1, 0.4), max.edge = c(0.05, 0.2))

## Given mean on a fine grid
xmu <- log(mesh$loc[,1]+5) - mesh$loc[,2]^2*5 + mesh$loc[,1]*mesh$loc[,2]*10
proj <- inla.mesh.projector(mesh, dims = c(100,100))
levelplot(row.values = proj$x, column.values = proj$y, x = inla.mesh.project(proj, xmu),
          contour = TRUE, aspect = "fill", labels = FALSE, xlab = "", ylab = "",
          xlim = c(0,1), ylim = c(0,1))

## simulate x ~ GP(0, Sigma(0.4, 4))
range0 <- 0.4
sigma0 <- 1
kappa0 <- sqrt(8)/range0
tau0 <- 1/(sqrt(4*pi)*kappa0*sigma0)
spde <- inla.spde2.matern(mesh, B.tau = cbind(log(tau0), -1, 1),
                          B.kappa = cbind(log(kappa0), 0, -1),
                          theta.prior.mean = c(0,0), theta.prior.prec = c(0.1, 1))
Q <- inla.spde.precision(spde, theta = c(log(8),0))
xSim <- as.numeric(inla.qsample(n=1, Q))

## Plot the simulated process
levelplot(row.values = proj$x, column.values = proj$y, x = inla.mesh.project(proj, xSim),
          contour = TRUE, aspect = "fill", labels = FALSE, xlab = "", ylab = "",
          xlim = c(0,1), ylim = c(0,1))

## The true physical process X = xmu + xSim
X <- xmu + xSim
## plot the true process
levelplot(row.values = proj$x, column.values = proj$y, x = inla.mesh.project(proj, X),
          contour = TRUE, aspect = "fill", labels = FALSE, xlab = "", ylab = "",
          xlim = c(0,1), ylim = c(0,1))

## Simulate the observations
## Use only a fraction of the locations
ff <- 0.7
indy <- sample(1:m, m*ff)
dataloc <- yloc[indy,]

## Find the projection matrix from mesh vertices to the locations
A <- inla.spde.make.A(mesh, loc = dataloc)

## Generate the data y = Ax + e
errors <- rep(0, m*ff)
errors[which(dataloc[,1] <= 0.25)] <- 0.02
errors[which(dataloc[,1] > 0.25 & dataloc[,1] <= 0.5)] <- 0.05
errors[which(dataloc[,1] > 0.5 & dataloc[,1] <= 0.75)] <- 0.1
errors[which(dataloc[,1] > 0.75)] <- 0.5
y <- as.vector(A %*% X) + rnorm(m*ff)*errors

#### 1.2 Now start the INLA analysis
ydata <- y - as.vector(A %*% xmu)
st.est <- inla.stack(data = list(y=ydata), A = list(A),
                     effects = list(GIA = 1:spde$n.spde), tag = "est")

## Predict at the data locations, a mesh grid, and the prediction-error locations
xg <- seq(0, 1, length.out = 10)
yg <- seq(0, 1, length.out = 10)
xyg <- as.matrix(expand.grid(x = xg, y = yg))
A_pred <- inla.spde.make.A(mesh = mesh, loc = xyg)
st.pred <- inla.stack(data = list(y=NA), A = list(A_pred),
                      effects = list(GIA=1:spde$n.spde), tag = "pred")
stGIA <- inla.stack(st.est, st.pred)

hyper <- list(prec = list(fixed = TRUE, initial = 0))
formula = y ~ -1 + f(GIA, model = spde)
prec_scale <- c(1/errors^2, rep(1, 100))
res_inla <- inla(formula, data = inla.stack.data(stGIA, spde = spde), family = "gaussian",
                 scale = prec_scale, control.family = list(hyper = hyper),
                 control.predictor=list(A=inla.stack.A(stGIA), compute = TRUE))

summary(res_inla)

## Plot the posteriors of the parameters
result <- inla.spde2.result(res_inla, "GIA", spde)
par(mfrow= c(1,2))
plot(result[["marginals.range.nominal"]][[1]], type = "l",
     main = "Nominal range, posterior density")
plot(result[["marginals.variance.nominal"]][[1]], type = "l",
     main = "Nominal variance, posterior density")

## Plot the predicted GIA field mean and variance
pidx <- inla.stack.index(stGIA, tag = "pred")
GIA_mpost <- res_inla$summary.random$GIA$mean + xmu
GIA_spost <- 
res_inla$summary.random$GIA$sd
xyg_mpost <- res_inla$summary.linear.predictor$mean[pidx$data]
xyg_spost <- res_inla$summary.linear.predictor$sd[pidx$data]
y_mpost <- res_inla$summary.linear.predictor$mean[1:(m*ff)] + A%*%xSim
y_spost <- res_inla$summary.linear.predictor$sd[1:(m*ff)]

## Plot the uncertainty (standard deviation) fields
par(mfrow = c(3,1))
theta_mode <- result$summary.theta$mode
Q2 <- inla.spde2.precision(spde, theta = theta_mode)
Q1 <- inla.spde.precision(spde, theta = c(0,0))
image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(sqrt(1/diag(Q1)))), col = topo.colors(7),
           xlim = c(0,1), ylim = c(0,1), breaks = c(0, 0.02, 0.05, 0.1, 0.5, 2, 4, 8),
           xlab = "Longitude", ylab = "Latitude", main = "Matern prior Error field")
points(dataloc[,1], dataloc[,2], cex = y_spost*2, pch = 1)

image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(sqrt(1/diag(Q2)))), col = topo.colors(7),
           xlim = c(0,1), ylim = c(0,1), breaks = c(0, 0.02, 0.05, 0.1, 0.5, 2, 4, 8),
           xlab = "Longitude", ylab = "Latitude", main = "Matern posterior Error field")
points(dataloc[,1], dataloc[,2], cex = y_spost*2, pch = 1)

## The standard error
image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(GIA_spost)), col = topo.colors(7),
           xlim = c(0,1), ylim = c(0,1), breaks = c(0, 0.02, 0.05, 0.1, 0.5, 2, 4, 8),
           xlab = "Longitude", ylab = "Latitude", main = "predicted error field")
points(dataloc[,1], dataloc[,2], cex = y_spost*2, pch = 1)
points(dataloc[,1], dataloc[,2], cex = errors*2, pch = 1, col = 2)

## Plot the posterior mean against the truth and the prior mean
par(mfrow = c(3,1))
image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(X)), col = topo.colors(40),
           breaks = seq(-10, 10, 0.5), xlab = "Longitude", ylab = "Latitude",
           main = "True Process", xlim = c(0, 1), ylim = c(0,1))

image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(xmu)), col = topo.colors(40),
           xlab = "Longitude", ylab = "Latitude", main = "The prior mean",
           breaks = seq(-10, 10, 0.5), xlim = c(0, 1), ylim = c(0,1))

image.plot(proj$x, proj$y, inla.mesh.project(proj, as.vector(GIA_mpost)), col = topo.colors(40),
           breaks = seq(-10, 10, 0.5), xlab = "Longitude", ylab = "Latitude",
           main = "Matern posterior mean field", xlim = c(0, 1), ylim = c(0,1))
points(xyg[,1], xyg[,2], cex = xyg_spost)
points(dataloc[,1], dataloc[,2], cex = y_spost, pch = 1, col = 2)

## Add some signals to the data
## 10% of the points, sampled near the southwest corner, are lowered
n1 = round(m*ff*0.1)
id1 <- sample(which(dataloc[,1] <= 0.5 & dataloc[,2] <= 0.5), n1)
y[id1] <- y[id1] - abs(rnorm(n1, sd = 3))
levelplot(row.values = proj$x, column.values = proj$y, x = inla.mesh.project(proj, xSim),
          panel=function(...){
            panel.levelplot(...)
            panel.points(dataloc[id1,], col = "red", pch = 19)
          },
          contour = TRUE, aspect = "fill", labels = FALSE, xlab = "", ylab = "",
          xlim = c(0,1), ylim = c(0,1))

## 10% of the points, sampled near the north edge, are raised
n2 <- round(m*ff*0.10)
id2 <- sample(which(dataloc[,2] > 0.7), n2)
y[id2] <- y[id2] + abs(rnorm(n2, sd = 2))
levelplot(row.values = proj$x, column.values = proj$y, x = inla.mesh.project(proj, xSim),
          panel=function(...){
            panel.levelplot(...)
            panel.points(dataloc[id1,], col = "red", pch = 19)
            panel.points(dataloc[id2,], col = "blue", pch = 19)
          },
          contour = TRUE, aspect = "fill", labels = FALSE, xlab = "", ylab = "",
          xlim = c(0,1), ylim = c(0,1))
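## Worked check of the Matern parameterisation used above (a sketch, not part
## of the original script): for alpha = 2 on R^2, range = sqrt(8)/kappa and
## sigma = 1/(sqrt(4*pi)*kappa*tau), so the kappa0 and tau0 defined earlier
## recover range0 and sigma0:
## kappa0 <- sqrt(8)/0.4               # ~7.07
## tau0   <- 1/(sqrt(4*pi)*kappa0*1)   # ~0.0399
## sqrt(8)/kappa0                      # 0.4  = range0
## 1/(sqrt(4*pi)*kappa0*tau0)          # 1    = sigma0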
/Experiment1a/Rscript/toytest.R
no_license
WhythiskolaveriD/GM_experiments_BDV
R
false
false
7,274
r
#' Subset pairs-function matrix by selected flow
#'
#' @param pairs_func_matrix binary matrix of pairs x functions
#' @param flow_df subset of input data by flow
#'
#' @return subset of the binary matrix
subsetFuncMatBYFlow <- function(pairs_func_matrix, flow_df){
    sub.mat <- pairs_func_matrix[rownames(pairs_func_matrix) %in% 
                                     unique(flow_df$int_pair),]
    #remove empty columns
    sub.mat <- sub.mat[, colSums(sub.mat) != 0]
    return(sub.mat)
}

#' Get dendrogram of int pair modules
#'
#' @param pairs_func_matrix binary matrix pairs x functions
#'
#' @return list with the distance matrix, the hclust object and the UMAP embedding
#' @importFrom umap umap
#' @importFrom stats hclust dist
dendroIntPairModules <- function(pairs_func_matrix){
    intPairs_umap <- umap(pairs_func_matrix, 
                          n_neighbors = ifelse(nrow(pairs_func_matrix) > 15, 
                                               15, nrow(pairs_func_matrix)-1), 
                          n_components = 2, metric = "cosine", input = "data", 
                          min_dist = 0.001)
    umap.embed <- data.frame(UMAP_1 = intPairs_umap$layout[,1],
                             UMAP_2 = intPairs_umap$layout[,2],
                             int_pair = dimnames(intPairs_umap$layout)[[1]])
    ## Hierarchical clustering
    d <- dist(umap.embed[, c("UMAP_1", "UMAP_2")], method = "euclidean")
    h_clust <- hclust(d, method = "ward.D2")
    return(list(d = d, h_clust = h_clust, umap = umap.embed))
}

#' Determine the elbow point on a curve (from package akmedoids)
#' @description Given a list of x, y coordinates on a curve, the function determines the elbow point of the curve.
#'
#' @param x vector of x coordinates of points on the curve
#' @param y vector of y coordinates of points on the curve
#'
#' @details Highlights the point of maximum curvature to identify the elbow point (credit: 'github.com/agentlans')
#' @return the x, y coordinates of the elbow point.
#' @importFrom stats approx approxfun optimize predict smooth.spline
#' @importFrom signal sgolayfilt
elbowPoint <- function(x, y) {
  # check for non-numeric or infinite values in the inputs
  is.invalid <- function(x) {
    any((!is.numeric(x)) | is.infinite(x))
  }
  if (is.invalid(x) || is.invalid(y)) {
    stop("x and y must be finite and numeric. Missing values are not allowed.")
  }
  if (length(x) != length(y)) {
    stop("x and y must be of equal length.")
  }
  # generate value of curve at equally-spaced points
  new.x <- seq(from = min(x), to = max(x), length.out = length(x))
  # Smooth out noise using a spline
  sp <- smooth.spline(x, y)
  new.y <- predict(sp, new.x)$y
  # Find largest odd number below the given number
  largest.odd.num.lte <- function(x) {
    x.int <- floor(x)
    if (x.int %% 2 == 0) {
      x.int - 1
    } else {
      x.int
    }
  }
  # Use a Savitzky-Golay filter to get derivatives;
  # p is the degree of the polynomial used to estimate the curve
  smoothen <- function(y, p = 3, filt.length = NULL, ...) {
    # Time scaling factor so that the derivatives are on the same scale as the original data
    ts <- (max(new.x) - min(new.x)) / length(new.x)
    # Set filter length to be a fraction of the length of the data
    # (must be an odd number)
    if (is.null(filt.length)) {
      filt.length <- min(largest.odd.num.lte(length(new.x)), 7)
    }
    if (filt.length <= p) {
      stop("Need more points to find cutoff.")
    }
    signal::sgolayfilt(y, p = p, n = filt.length, ts = ts, ...)
  }
  # Calculate first and second derivatives
  first.deriv <- smoothen(new.y, m = 1)
  second.deriv <- smoothen(new.y, m = 2)
  # Check the signs of the 2 derivatives to see whether to flip the curve
  # (Pick sign of the most extreme observation)
  pick.sign <- function(x) {
    most.extreme <- which(abs(x) == max(abs(x), na.rm = TRUE))[1]
    sign(x[most.extreme])
  }
  first.deriv.sign <- pick.sign(first.deriv)
  second.deriv.sign <- pick.sign(second.deriv)
  # The signs for which to flip the x and y axes
  x.sign <- 1
  y.sign <- 1
  if ((first.deriv.sign == -1) && (second.deriv.sign == -1)) {
    x.sign <- -1
  } else if ((first.deriv.sign == -1) && (second.deriv.sign == 1)) {
    y.sign <- -1
  } else if ((first.deriv.sign == 1) && (second.deriv.sign == 1)) {
    x.sign <- -1
    y.sign <- -1
  }
  # If curve needs flipping, then run the same routine on the flipped curve,
  # then flip the results back
  if ((x.sign == -1) || (y.sign == -1)) {
    results <- elbowPoint(x.sign * x, y.sign * y)
    return(list(x = x.sign * results$x, y = y.sign * results$y))
  }
  # Find cutoff point for x
  cutoff.x <- NA
  # Find x where curvature is maximum
  curvature <- abs(second.deriv) / (1 + first.deriv^2)^(3/2)
  if (!all(is.finite(curvature)) || max(curvature) == min(curvature)) {
    # flat or degenerate curvature: no distinct elbow
    cutoff.x = NA
  } else {
    # Interpolation function
    f <- approxfun(new.x, curvature, rule = 1)
    # Minimize |f(new.x) - max(curvature)| over range of new.x
    cutoff.x = optimize(function(new.x) abs(f(new.x) - max(curvature)),
                        range(new.x))$minimum
  }
  if (is.na(cutoff.x)) {
    warning("Cutoff point is beyond range. Returning NA.")
    list(x = NA, y = NA)
  } else {
    # Return cutoff point on curve
    approx(new.x, new.y, cutoff.x)
  }
}

#' Get UMAP for IP modules
#'
#' @param intPairs.dendro list output of dendrogram
#' @param gpModules_assign named vector of module assignment
#' @param ipm_colors for intpair modules
#'
#' @return plotly umap
#' @importFrom plotly plot_ly layout config
getUMAPipModules <- function(intPairs.dendro, gpModules_assign, ipm_colors){
    umap.embed <- intPairs.dendro$umap
    umap.embed$hclust <- as.factor(gpModules_assign[
        match(umap.embed$int_pair, names(gpModules_assign))])
    colors <- ipm_colors
    color_var <- "hclust"
    ax <- list(zeroline = FALSE)
    fig <- plot_ly(data = umap.embed, x = ~UMAP_1, y = ~UMAP_2,
                   type = 'scatter', mode = 'markers',
                   color = umap.embed[, color_var],
                   text = ~as.character(int_pair),
                   hoverinfo = 'text', colors = colors)
    fig <- fig %>% layout(xaxis = ax, yaxis = ax,
                          title = "<b>UMAP of Int-pairs</b>")
    fig <- fig %>% config(modeBarButtonsToRemove = c(
        'sendDataToCloud', 'autoScale2d', 'resetScale2d',
        'hoverClosestCartesian', 'hoverCompareCartesian',
        'zoom2d', 'pan2d', 'select2d', 'lasso2d'))
    return(fig)
}

#' Plot circle plot
#'
#' @param data subset of input data by flow / intpair module
#' @param cluster_colors global
#' @param ipm_color single color for chosen int-pair module
#' @param int_flow string specifying the flow
#' @param link.color string specifying variable by which to color links
#'
#' @return circle plot
#'
#' @importFrom circlize circos.par chordDiagram circos.trackPlotRegion
#' get.cell.meta.data circos.text highlight.sector circos.clear uh CELL_META
#' @importFrom ComplexHeatmap Legend
circlePlot <- function(data, cluster_colors, ipm_color, int_flow, link.color){
    cell_types <- unique(c(data$clustA, data$clustB))
    # Abbreviate long names for int-pairs
    data$int_pair <- gsub("beta", "B", data$int_pair)
    data$int_pair <- gsub("inhibitor", "inh", data$int_pair)
    data$int_pair <- gsub("receptor", "rec", data$int_pair)
    partnerA <- unlist(sapply(strsplit(data$int_pair, " & "), function(x) x[1]))
    partnerB <- 
unlist(sapply(strsplit(data$int_pair, " & "), function(x) x[2])) genes <- c(structure(partnerA, names = data$clustA), structure(partnerB, names = data$clustB)) genes <- genes[!duplicated(paste(names(genes), genes))] genes <- genes[order(names(genes))] if(length(cell_types)!=1){ gap.degree <- do.call("c", lapply(table(names(genes)), function(i) c(rep(1, i-1), 8))) }else{ gap.degree <- do.call("c", lapply(table(names(genes)), function(i) c(rep(1, i)))) } # parameters if(int_flow == "undirected"){ directional <- 0 direction.type <- "diffHeight" } else{ directional <- 1 direction.type <- c("diffHeight", "arrows") } track.height.genes <- ifelse(max(nchar(c(partnerA, partnerB))) >= 10, 0.25, 0.2) cex.genes <- 0.9 if(link.color == "ipm"){ col <- NULL } else { # scale avg scores between -2 and 2 scaled_scores <- scales::rescale(data$score, to = c(-2,2)) col_fun <- circlize::colorRamp2(c(-2,0,2), c("gray88", "gray70", "black")) col <- col_fun(scaled_scores) lgd_links <- ComplexHeatmap::Legend(at = c(-2, -1, 0, 1, 2), col_fun = col_fun, title_position = "topleft", title = "Scaled Int Score") } df <- data.frame(from = paste(data$clustA,partnerA), to = paste(data$clustB,partnerB), stringsAsFactors = FALSE) circos.par(gap.degree = gap.degree) chordDiagram(df, order=paste(names(genes),genes), grid.col = ipm_color, col = col, transparency = 0.2, directional = directional, direction.type = direction.type, link.arr.type = "big.arrow", annotationTrack = "grid", preAllocateTracks = list( list(track.height = uh(1.2,'mm')), list(track.height = track.height.genes)), annotationTrackHeight = c(0.01,0.01)) circos.trackPlotRegion(track.index = 2, panel.fun = function(x, y) { sector.index = genes[get.cell.meta.data("sector.numeric.index")] circos.text(CELL_META$xcenter, CELL_META$cell.ylim[1], sector.index, col = "black", cex = cex.genes, adj = c(0, 0.5), facing = 'clockwise', niceFacing = TRUE) }, bg.border = NA) for(c in unique(names(genes))) { gene = as.character(genes[names(genes) == c]) highlight.sector(sector.index = paste(c,gene), track.index = 1, col = ifelse(length(cluster_colors)==1, cluster_colors, cluster_colors[c]), text = c, text.vjust = '0.4cm', niceFacing = TRUE, lwd=1, facing = "bending.inside") } if(link.color != "ipm"){ ComplexHeatmap::draw(lgd_links, x = grid::unit(1, "cm"), y = grid::unit(1, "cm"), just = c("left", "bottom")) } circos.clear() } #' Subfunction to calculate significant functions by permutation test #' #' @param mat binary matrix of functional terms by int-pairs #' @param gpModules_assign assignment of intpairs to modules #' #' @return matrix with hits #' #' Example # mat <- t(as.matrix(data.frame(f_term1 = c(0,1,1,0,0), # f_term2 = c(1,1,1,0,0), # f_term3 = c(0,0,1,0,1), # row.names = paste0("ip", 1:5)))) # gpModules_assign <- c("cond1", "cond1", "cond2", "cond2", "cond2") # names(gpModules_assign) <- paste0("ip", 1:5) getHitsf <- function(mat, gpModules_assign){ hits <- matrix(0, nrow = nrow(mat), ncol = length(unique(gpModules_assign))) rownames(hits) <- rownames(mat) colnames(hits) <- unique(gpModules_assign) for(gi in unique(gpModules_assign)){ sub.mat <- mat[, names(gpModules_assign)[gpModules_assign == gi]] hits[, gi] <- rowSums(sub.mat)/ncol(sub.mat) } return(hits) } #' Calculate significant function per intpair module #' #' @param subGenePairs_func_mat subset of binary mat #' @param gpModules_assign assignment of intpairs to modules #' @param rank.terms table of ranked functions #' @param input_maxPval threshold of significance #' #' @return table with significant 
#'   functions
#' @importFrom tidyr gather
getSignificantFunctions <- function(subGenePairs_func_mat, 
                                    gpModules_assign,
                                    rank.terms,
                                    input_maxPval){
    permMat <- t(subGenePairs_func_mat)
    hits_true <- getHitsf(permMat, gpModules_assign)
    hits_perm <- list()
    for(np in seq_len(999)){
        # shuffle cols of original matrix (int-pairs, assigned to modules)
        shufMat <- permMat[,sample(colnames(permMat), ncol(permMat), 
                                   replace = FALSE)]
        colnames(shufMat) <- colnames(permMat)
        hits_perm[[np]] <- getHitsf(shufMat, gpModules_assign)
    }
    
    # calculate empirical pvalue
    emp_pvalue <- matrix(0, nrow = nrow(permMat), 
                         ncol = length(unique(gpModules_assign)))
    rownames(emp_pvalue) <- rownames(permMat)
    colnames(emp_pvalue) <- unique(gpModules_assign)
    for(gM in seq_len(ncol(hits_true))){
        for(fM in seq_len(nrow(hits_true))){
            hits_gm_fm <- unlist(lapply(hits_perm, function(x) x[fM, gM]))
            emp_pvalue[fM,gM] <- (1 + sum(hits_gm_fm >= hits_true[fM,gM]))/1000
        }
    }
    
    pvalue_df <- cbind(emp_pvalue, functionalTerm = rownames(emp_pvalue))
    pvalue_df <- tidyr::gather(as.data.frame(pvalue_df), key = "int_pairModule",
                               value = "p_value", unique(gpModules_assign), 
                               factor_key = FALSE)
    # cbind() above coerced the p-values to character; restore them to numeric
    # before filtering, otherwise the comparison below is lexicographic
    pvalue_df$p_value <- as.numeric(pvalue_df$p_value)
    
    signFun <- pvalue_df[pvalue_df$p_value <= input_maxPval,]
    
    ## Adding int_pairs from selected Module to each functional term
    if(nrow(signFun) > 0){
        for(r in seq_len(nrow(signFun))){
            int_pairs_all <- rownames(subGenePairs_func_mat)[
                subGenePairs_func_mat[, signFun$functionalTerm[r]] == 1]
            signFun[r, "int_pair_list"] <- paste(
                intersect(int_pairs_all, names(gpModules_assign)[
                    gpModules_assign == signFun$int_pairModule[r]]), 
                collapse = ",")
        }
        signFun$source <- rank.terms$source[
            match(tolower(signFun$functionalTerm), 
                  tolower(rank.terms$functional_term))]
    }
    return(signFun)
}
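# A small worked example of the permutation machinery above (a sketch using
# the toy matrix from the getHitsf() documentation; values computed by hand):
# mat <- t(as.matrix(data.frame(f_term1 = c(0,1,1,0,0),
#                               f_term2 = c(1,1,1,0,0),
#                               f_term3 = c(0,0,1,0,1),
#                               row.names = paste0("ip", 1:5))))
# gpModules_assign <- setNames(c("cond1","cond1","cond2","cond2","cond2"),
#                              paste0("ip", 1:5))
# getHitsf(mat, gpModules_assign)
# #         cond1     cond2
# # f_term1   0.5 0.3333333
# # f_term2   1.0 0.3333333
# # f_term3   0.0 0.6666667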
/R/fct_int_pair_modules.R
permissive
martaint/InterCellar
R
false
false
15,419
r
#Loading the dataset into R with '?' turning into NAs
electric_power_consumption<-read.table("./Downloads/household_power_consumption.txt",sep=";",na.strings=c("?",""),header=TRUE)

#Dates that we are interested in
dates=c("1/2/2007","2/2/2007")

#Subsetting the data to include just those dates
data<-electric_power_consumption[ electric_power_consumption$Date %in% dates, ]

#Combining the Date and Time columns to create a DateTime variable
data$DateTime <- as.POSIXct(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")

#Set up a 2x2 grid of plots. I played around with the margins but decided to keep
#them the same (good to keep the call here in case I refer to this program later
#and want to change them to see how things look)
par(mfrow=c(2,2), mar=c(4,4,2,2))

#top left plot
plot(data$DateTime,data$Global_active_power,
     type='l',
     ylab='Global Active Power',xlab=''
)

#top right plot
plot(data$DateTime,data$Voltage,
     type='l',
     ylab='Voltage',
     xlab='datetime'
)

#bottom left plot, where the legend size has been reduced and the box that
#surrounds the legend has been removed
plot(data$DateTime,data$Sub_metering_1, type='l', ylab='Energy sub metering',xlab='')
lines(data$DateTime,data$Sub_metering_2, type='l', col='red')
lines(data$DateTime,data$Sub_metering_3, type='l', col='blue')
legend("topright",
       pch='-',
       col=c('black','red','blue'),
       cex=0.5,
       bty='n',
       legend=c("Sub_metering_1 ",
                "Sub_metering_2 ",
                "Sub_metering_3 ")
)

#bottom right plot
plot(data$DateTime,data$Global_reactive_power, type='l',
     ylab='Global_reactive_power',
     xlab='datetime')

#Copying the plot to the file plot4.png at 480 by 480 size
dev.copy(png,file="./plot4.png", width=480, height=480)
dev.off()
/plot4.R
no_license
cmichaelski/ExData_Plotting1
R
false
false
1,769
r
# Reads in outcome data and finds
# the hospital with a given rank (either "best", "worst", or a number)
# in each state. Returns a data frame of the hospital name and the state
rankall = function(outcome, num = "best") {
    hospital = read.csv("outcome-of-care-measures.csv", colClasses="character")
    hospital[,11] = as.numeric(hospital[,11])
    hospital[,17] = as.numeric(hospital[,17])
    hospital[,23] = as.numeric(hospital[,23])
    outcomes = c("heart attack","heart failure","pneumonia")
    
    #Error testing
    if (!(outcome %in% outcomes)) {
        stop("invalid outcome")
    }
    
    if (outcome == "heart attack") {
        col = 11
    } else if (outcome == "heart failure") {
        col = 17
    } else {
        col = 23
    }
    
    #Remove NAs and sort from lowest to highest based on outcome,
    #breaking ties by hospital name
    one_state = subset(hospital,hospital[,col] != "Not Available")
    one_state = one_state[order(one_state[,col],one_state[,2]),]
    
    if (num == "best") {
        new_data = lapply(split(one_state,one_state$State), function(x) {
            return (c(x[1,2],x[1,7]))
        })
    } else if (num == "worst") {
        new_data = lapply(split(one_state,one_state$State), function(x) {
            return (c(tail(x[,2],n=1),tail(x[,7],n=1)))
        })
    } else {
        #Indexing past the last row of a state's subset yields NA,
        #which is the desired output for states with too few hospitals
        new_data = lapply(split(one_state,one_state$State), function(x) {
            return (c(x[as.numeric(num),2],x[as.numeric(num),7]))
        })
    }
    
    table = lapply(new_data,function(x){
        add = data.frame(hospital=c(x[[1]][1]),state=c(x[[2]][1]))
        return (add)
    })
    table = do.call("rbind",table)
    return (table)
}
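# A usage sketch (hypothetical calls; assumes outcome-of-care-measures.csv
# from the course assignment is in the working directory):
# r <- rankall("heart attack", 20)
# head(r)                          # one row per state; hospital is NA where a
#                                  # state has fewer than 20 ranked hospitals
# rankall("pneumonia", "worst")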
/A3/rankall.R
no_license
gawecoti/R-scripts
R
false
false
1,883
r
library(OpVaR) ### Name: buildPlainSevdist ### Title: Building a sevdist object with a plain distribution ### Aliases: buildPlainSevdist ### ** Examples # Log-gamma distributed severity with shape = 2.2 and rate = 1.2 sevdist1 = buildPlainSevdist("lgamma", c(2.2, 1.2)) plot(sevdist1)
/data/genthat_extracted_code/OpVaR/examples/buildPlainSevdist.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
301
r
library("partykit")
library("rpart")
library("party")

rm(list=ls())
setwd("~/workspace/github.com/ChrisLynch96/data-analytics/labs/lab4")

Data <- read.csv("RT.csv", header=TRUE, sep=";")
Data
attach(Data)

RTModel <- rpart(Weight ~ Height, method="anova", control=rpart.control(minsplit=5, minbucket=2, maxdepth=4))

Fig1 <- plot(as.party(RTModel))
print(RTModel)
summary(RTModel)
rsq.rpart(RTModel)

Data2 <- read.csv("Fuel.csv", header=TRUE, sep=";")
Data2
attach(Data2)
str(Data2)

# Convert the columns inside Data2 itself; converting the attached copies
# (Country <- as.factor(Country)) would leave the data frame passed to
# rpart() unchanged.
Data2$Country <- as.factor(Data2$Country)
Data2$Year <- as.factor(Data2$Year)

RTModel2 <- rpart(mpg ~., data=Data2, method="anova", control=rpart.control(minsplit=6, minbucket=5, maxdepth=3))

Fig2 <- plot(as.party(RTModel2))
print(RTModel2)
summary(RTModel2)
rsq.rpart(RTModel2)
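# A short sketch of using the fitted trees for prediction (not part of the
# original lab script; the Height values below are hypothetical):
# predict(RTModel, newdata = data.frame(Height = c(160, 180)))
# predict(RTModel2, newdata = Data2[1:3, ])   # fitted mpg for the first rows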
/labs/lab4/RT.R
no_license
ChrisLynch96/data-analytics
R
false
false
779
r
suppressMessages(library(colorspace)) suppressMessages(library(pheatmap)) suppressMessages(library(tidyverse)) suppressMessages(library(RColorBrewer)) suppressMessages(library(dplyr)) doheatmap <- function(dat, clus, clus2, ht, rn, cn, col,scale_range) { col_filter <- apply(as.data.frame(dat), 2, function(z) all(is.na(z))) row_filter <- apply(as.data.frame(dat), 1, function(z) length(z[is.na(z)]) > 10) dat <- dat[!row_filter, !col_filter] col.pal <- diverging_hcl(n=100, palette=col) if (FALSE) { col.pal = rev(col.pal) } # define metrics for clustering drows1 <- "correlation" dcols1 <- "correlation" minx = floor(min(dat)) maxx = ceiling(max(dat)) breaks = seq(-1*scale_range, scale_range, length=100) legbreaks = seq(-1*scale_range, scale_range, length=5) breaks = sapply(breaks, signif, 4) legbreaks = sapply(legbreaks, signif, 4) treeheight <- 25 hm.parameters <- list( dat, color=col.pal, legend_breaks=legbreaks, cellwidth=if(0 > 0){ 14 }else{ NA }, cellheight=if(0 > 0){ 14 }else{ NA }, scale="none", treeheight_col=treeheight, treeheight_row=treeheight, kmeans_k=NA, breaks=breaks, # height=80, fontsize=14, fontsize_row=if(0 > 0){ NA }else{ 14 }, fontsize_col=if(0 > 0){ NA }else{ 14 }, show_rownames=rn, show_colnames=cn, cluster_rows=FALSE, cluster_cols=FALSE, cutree_rows=1, annotation_col = annotation_col, annotation_colors = annot_col, clustering_distance_rows = "correlation", clustering_distance_cols = "correlation", labels_col = labels_col, annotation_names_col = FALSE, na_col = "#000000" ) # print('calculated mat') p <- do.call("pheatmap", c(hm.parameters)) return(p) } df <- read.csv("../results/naive_plot_counts.csv", header = TRUE, check.names = FALSE) samples_to_include = c("V002_d2-d1","V004_d2-d1","V005_d2-d1","V006_d2-d1","V007_d2-d1","V008_d2-d1","V009_d2-d1","V010_d2-d1","V011_d2-d1","V013_d2-d1","V015_d2-d1","V016_d2-d1","V017_d2-d1","V019_d2-d1","V020_d2-d1","V021_d2-d1","V022_d2-d1","V024_d2-d1","V025_d2-d1","V026_d2-d1","V027_d2-d1","V028_d2-d1","V029_d2-d1","V030_d2-d1","V031_d2-d1","V033_d2-d1","V034_d2-d1","V036_d2-d1","V037_d2-d1","V039_d2-d1","V041_d2-d1","V043_d2-d1","V045_d2-d1","V048_d2-d1","V049_d2-d1","V050_d2-d1","V051_d2-d1","V054_d2-d1","V056_d2-d1","V057_d2-d1","V058_d2-d1","V061_d2-d1","V063_d2-d1","V064_d2-d1","V066_d2-d1","V067_d2-d1","V068_d2-d1","V070_d2-d1","V071_d2-d1","V073_d2-d1","V077_d2-d1","V140_d2-d1","V141_d2-d1","V142_d2-d1","V143_d2-d1","V145_d2-d1","V147_d2-d1","V002_d23-d22","V004_d23-d22","V005_d23-d22","V006_d23-d22","V007_d23-d22","V008_d23-d22","V009_d23-d22","V011_d23-d22","V013_d23-d22","V015_d23-d22","V016_d23-d22","V017_d23-d22","V019_d23-d22","V020_d23-d22","V021_d23-d22","V022_d23-d22","V024_d23-d22","V025_d23-d22","V026_d23-d22","V027_d23-d22","V028_d23-d22","V029_d23-d22","V030_d23-d22","V031_d23-d22","V033_d23-d22","V034_d23-d22","V036_d23-d22","V037_d23-d22","V039_d23-d22","V041_d23-d22","V043_d23-d22","V045_d23-d22","V048_d23-d22","V049_d23-d22","V050_d23-d22","V051_d23-d22","V054_d23-d22","V056_d23-d22","V057_d23-d22","V058_d23-d22","V061_d23-d22","V063_d23-d22","V064_d23-d22","V066_d23-d22","V067_d23-d22","V068_d23-d22","V070_d23-d22","V071_d23-d22","V073_d23-d22","V077_d23-d22","V140_d23-d22","V141_d23-d22","V142_d23-d22","V143_d23-d22","V145_d23-d22","V147_d23-d22","V148_d23-d22","V002_d23-d22-d2-d1","V004_d23-d22-d2-d1","V005_d23-d22-d2-d1","V006_d23-d22-d2-d1","V007_d23-d22-d2-d1","V008_d23-d22-d2-d1","V009_d23-d22-d2-d1","V010_d23-d22-d2-d1","V011_d23-d22-d2-d1","V013_d23-d22-d2-d1","V015_d23-d22-d2-d1","V016_d23-d22-d2-d1",
"V017_d23-d22-d2-d1","V019_d23-d22-d2-d1","V020_d23-d22-d2-d1","V021_d23-d22-d2-d1","V022_d23-d22-d2-d1","V024_d23-d22-d2-d1","V025_d23-d22-d2-d1","V026_d23-d22-d2-d1","V027_d23-d22-d2-d1","V028_d23-d22-d2-d1","V029_d23-d22-d2-d1","V030_d23-d22-d2-d1","V031_d23-d22-d2-d1","V033_d23-d22-d2-d1","V034_d23-d22-d2-d1","V036_d23-d22-d2-d1","V037_d23-d22-d2-d1","V039_d23-d22-d2-d1","V041_d23-d22-d2-d1","V043_d23-d22-d2-d1","V045_d23-d22-d2-d1","V048_d23-d22-d2-d1","V049_d23-d22-d2-d1","V050_d23-d22-d2-d1","V051_d23-d22-d2-d1","V054_d23-d22-d2-d1","V056_d23-d22-d2-d1","V057_d23-d22-d2-d1","V058_d23-d22-d2-d1","V061_d23-d22-d2-d1","V063_d23-d22-d2-d1","V064_d23-d22-d2-d1") df.orig <- df %>% dplyr::select(one_of(c("Gene",samples_to_include))) df.mat = df.orig[ , (colnames(df.orig) != "Gene" )] %>% as.data.frame row.names(df.mat) <- df.orig$Gene df.mat <- as.data.frame(df.mat) annot <- read.csv("../results/naive_plot_metadata.csv", header = TRUE) annot %>% dplyr::filter(sample_id %in% samples_to_include) -> annot annot$contrast <- factor(annot$contrast) print(annot$contrast) groups = c("contrast") relevel_factors <- FALSE factor_relevel <- c("contrast:d2_d1,d23_d22,vac2_vac1") if(relevel_factors){ for(f in factor_relevel){ variable <- unlist(str_split(f,":"))[1] if(!variable %in% groups){ next } levels <- unlist(str_split(unlist(str_split(f,":"))[2],",")) annot[,variable] <- factor(as.character(annot[,variable]), levels = levels) } } if(TRUE){ annot %>% dplyr::arrange_(.dots=groups) -> annot df.mat <- df.mat[,match(annot$sample_id,colnames(df.mat))] } annot %>% dplyr::select(groups) -> annotation_col annotation_col = as.data.frame(unclass(annotation_col)) annotation_col[] <- lapply(annotation_col,factor) rownames(annotation_col) <- annot$sample_id annot_col = list() qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',] qual_col_pals = qual_col_pals[c(7,6,2,1,8,3,4,5),] colors = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals))) sample_color_palette <- FALSE set_color_seed = FALSE if(sample_color_palette || set_color_seed){ seed <- if(set_color_seed){ 1234 }else{ sample(1:2^15,1) } cat(paste0("Using color seed: ",seed,"\n")) set.seed(seed) colors <- sample(colors) } #override colors colors <- c("#66C2A5","#984EA3","#B3B3B3") #cat("Annotation color palette:\n") #print(colors) b=1 i=1 while (i <= length(groups)){ nam <- groups[i] grp <- as.factor(annotation_col[,i]) c <- b+length(levels(grp))-1 col = colors[b:c] names(col) <- levels(grp) assign(nam,col) annot_col = append(annot_col,mget(nam)) b = b+c i=i+1 } cat("Annotation color palette:\n") print(annot_col) print(paste0("The total number of genes in heatmap: ", nrow(df.mat))) labels_col <- colnames(df.mat) imageWidth = 3000 imageHeight = 1500 dpi = 300 p = doheatmap(dat=df.mat, clus=FALSE, clus2=TRUE, ht=50, rn=TRUE, cn=FALSE, col="Blue-Red 3",scale_range=2.5) png( filename="../plots/naive_heatmap_vaccination_II.png", width=imageWidth, height=imageHeight, units="px", pointsize=4, bg="white", res=dpi, type="cairo") p dev.off()
/code/naive_heatmap_vaccination_II.R
no_license
NCI-VB/felber_covid_vaccine
R
false
false
6,900
r
/Source/03_EX_02(array)_Datafile.R
no_license
Kim-DaeHo/R-for-Big-Data-Analysis
R
false
false
2,151
r
library("ggplot2") library("dplyr") library("reshape") library("reshape2") library("grid") # library("nnet") # library("boot") # library("scatterplot3d") # library(Rcmdr) #load in data on index. Index ticker symbol can be assigned to index variable #SnP 500 GSPC #STI STI #Hang Seng Index HSI #FTSE FTSE #Shanghai Composite SSEC #Korean Index KS11 #Swiss Index SSMI #Athen Index GD.AT #Nikkei N225 N225 index = "GSPC" stock = paste("http://real-chart.finance.yahoo.com/table.csv?s=%5E",index,"&a=00&b=1&c=1900&d=01&e=26&f=2017&g=d&ignore=.csv", sep = "") # stock = paste("http://real-chart.finance.yahoo.com/table.csv?s=GD.AT","&a=00&b=1&c=1900&d=00&e=26&f=2016&g=d&ignore=.csv", sep = "") yahoo.read <- function(url){ dat <- read.table(url,header=TRUE,sep=",") df <- dat[,c(1,7)] #7 is adj close df$Date <- as.Date(as.character(df$Date)) return(df)} stock_price <- yahoo.read(stock) #create a loop and lag x times lag_days = 260 # stk_price = function(lag_days){ for(i in 1:lag_days) { #lag_days = 260 # as.data.frame(lag(stock_price,i)) a = assign(paste("lag",i,sep=""),as.data.frame(lead(stock_price$Adj.Close,i))) names(a) = paste("lag",i) stock_price = cbind(stock_price,a) rm(list = ls()[grepl("lag", ls())]) #remove lags from each iteration, ls refers to list of dataframes } # return (stock_price)} # stk_price(260) #something wrong from creating a function nrows = dim(a)[1] #Create a % of 52 week high, 52 week low #% 52 week high max_52weeks = as.data.frame(apply(stock_price[,-1],1,max)) lower_52weekhigh = (stock_price$Adj.Close - max_52weeks)/max_52weeks names(lower_52weekhigh)[1] = "col1" hist(lower_52weekhigh[,1]) ggplot(lower_52weekhigh,aes(col1)) + geom_histogram() + ggtitle("Frequency of Change from 52 Week High, STI")+ xlab("Change from 52 Week High")+ylab("Counts")+ theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20))+ xlim(-0.6, 0) ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/lower_52weekhigh_hist.jpeg") ggplot(lower_52weekhigh,aes(col1)) + geom_freqpoly() + ggtitle("Frequency of Change from 52 Week High, STI")+ xlab("Change from 52 Week High")+ylab("Counts")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20))+ xlim(-0.6, 0) ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/lower_52weekhigh_kernel density.jpeg") #% off 52 week low min_52weeks = as.data.frame(apply(stock_price[,-1],1,min)) higher_52weeklow = (stock_price$Adj.Close - min_52weeks)/min_52weeks names(higher_52weeklow )[1] = "col1" hist(higher_52weeklow ) d= density(higher_52weeklow [-(nrows-260):-nrows,1], na.rm = FALSE) plot(d) #plot kernel density and histogram to see frequency quantile(lower_52weekhigh[-(nrows-260):-nrows,1], c(0.025,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) #Create a lag of 1,3,5 years ago leadprice = as.data.frame(lag(stock_price$Adj.Close,260)) names(leadprice)[1] = "lead1yr" leadprice$lead2yr = lag(stock_price$Adj.Close,520) leadprice$lead3yr = lag(stock_price$Adj.Close,780) #Compute returns returns= as.data.frame((leadprice$lead1yr - stock_price$Adj.Close)/stock_price$Adj.Close) names(returns)[1] = "returns_1yr" returns$returns_2yr = (leadprice$lead2yr - stock_price$Adj.Close)/stock_price$Adj.Close returns$returns_3yr = (leadprice$lead3yr - stock_price$Adj.Close)/stock_price$Adj.Close #Histogram on 1 year returns (creat a dummy for each 10 year period and use the stacked histogram code in 
http://docs.ggplot2.org/current/geom_histogram.html) ggplot(returns,aes(returns_1yr)) + geom_histogram() + ggtitle("No. of 1 Year Returns")+xlab("No. of 1 Year Returns")+ylab("Counts")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+theme(plot.title=element_text(face="bold", size=20)) ggplot(returns,aes(returns_1yr)) + geom_freqpoly() + ggtitle("Frequency of 1 Year Returns")+xlab("Frequency of 1 Year Returns")+ylab("Frequency")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+theme(plot.title=element_text(face="bold", size=20)) #Look at returns-%52 week high scatterplot dat = cbind(stock_price$Date,lower_52weekhigh,higher_52weeklow,returns) names(dat) = c("Date","lower_52weekhigh","higher_52weeklow", "returns_1yr","returns_2yr","returns_3yr") #Example using cars # myPlot <- ggplot(cars, aes(speed, dist, color=as.integer(dt) ) ) + # geom_point() +scale_colour_gradient(low="blue", high="red",breaks=myBreaks) + # labs(color="Date") myBreaks <- function(x){ breaks <- c(min(x),median(x),max(x)) breaks = quantile(x, c(0, 0.2,0.4,0.6,0.8,1)) attr(breaks,"labels") <- as.Date(breaks, origin="1970-01-01") names(breaks) <- attr(breaks,"labels") return(breaks) } yr1_date = ggplot(dat,aes(Date,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(5),breaks=myBreaks) + ggtitle("1yr returns vs. Date")+xlab("Date")+ylab("Returns 1 year later") yr1_date = yr1_date+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr1_date+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr1_high = ggplot(dat,aes(lower_52weekhigh,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(5), breaks=myBreaks) + ggtitle("1yr returns vs. Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 1 year later") yr1_high = yr1_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr1_high = yr1_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr1_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/1yr returns vs. Change from 52 week high_STI.jpeg") yr1 = lm(dat$returns_1yr~ dat$lower_52weekhigh) yr1$coefficients[2]*(-0.03) + yr1$coefficients[1] yr2_high = ggplot(dat,aes(lower_52weekhigh,returns_2yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(7), breaks=myBreaks) + ggtitle("2yr returns vs. Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 2 year later") yr2_high = yr2_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr2_high = yr2_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr2_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/2yr returns vs. Change from 52 week high_STI.jpeg") yr2 = lm(dat$returns_2yr~ dat$lower_52weekhigh) yr2$coefficients[2]*(-0.03) + yr2$coefficients[1] yr3_high = ggplot(dat,aes(lower_52weekhigh,returns_3yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(7), breaks=myBreaks) + ggtitle("3yr returns vs. 
Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 3 year later") yr3_high = yr3_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr3_high = yr3_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr3_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/3yr returns vs. Change from 52 week high_STI.jpeg") yr3 = lm(dat$returns_3yr~ dat$lower_52weekhigh) yr3$coefficients[2]*(-0.03) + yr3$coefficients[1] #function to part the plots above vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y) grid.newpage() pushViewport(viewport(layout = grid.layout(2, 2))) print(yr1_high, vp = vplayout(1, 1)) print(yr2_high, vp = vplayout(1, 2)) print(yr3_high, vp = vplayout(2, 1)) #Look at returns-%52 week low scatterplot yr1_low = ggplot(dat,aes(higher_52weeklow,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("1yr returns vs. % off 52 week low") yr1_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr2_low = ggplot(dat,aes(higher_52weeklow,returns_2yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("2yr returns vs. % off 52 week low") yr2_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr3_low = ggplot(dat,aes(higher_52weeklow,returns_3yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("3yr returns vs. % off 52 week low") yr3_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() #Compute returns wiht a 52 week low vs high matrix #Scatter3d #Use ggplot2 version # s3d_1yr = scatterplot3d(lower_52weekhigh[-(nrows-260):-nrows,1],higher_52weeklow[-(nrows-260):-nrows,1],returns[-(nrows-260):-nrows,1]) # fit <- lm(returns[-(nrows-260):-nrows,1] ~ lower_52weekhigh[-(nrows-260):-nrows,1]+higher_52weeklow[-(nrows-260):-nrows,1]) # s3d_1yr$plane3d(fit) # # s3d_2yr = scatterplot3d(lower_52weekhigh[-(nrows-520):-nrows,1],higher_52weeklow[-(nrows-520):-nrows,1],returns[-(nrows-520):-nrows,2]) # fit <- lm(returns[-(nrows-520):-nrows,2] ~ lower_52weekhigh[-(nrows-520):-nrows,1]+higher_52weeklow[-(nrows-520):-nrows,1]) # s3d_2yr$plane3d(fit)
/Index/Index Analysis.R
no_license
jironghuang/Index-analysis
R
false
false
9,601
r
library("ggplot2") library("dplyr") library("reshape") library("reshape2") library("grid") # library("nnet") # library("boot") # library("scatterplot3d") # library(Rcmdr) #load in data on index. Index ticker symbol can be assigned to index variable #SnP 500 GSPC #STI STI #Hang Seng Index HSI #FTSE FTSE #Shanghai Composite SSEC #Korean Index KS11 #Swiss Index SSMI #Athen Index GD.AT #Nikkei N225 N225 index = "GSPC" stock = paste("http://real-chart.finance.yahoo.com/table.csv?s=%5E",index,"&a=00&b=1&c=1900&d=01&e=26&f=2017&g=d&ignore=.csv", sep = "") # stock = paste("http://real-chart.finance.yahoo.com/table.csv?s=GD.AT","&a=00&b=1&c=1900&d=00&e=26&f=2016&g=d&ignore=.csv", sep = "") yahoo.read <- function(url){ dat <- read.table(url,header=TRUE,sep=",") df <- dat[,c(1,7)] #7 is adj close df$Date <- as.Date(as.character(df$Date)) return(df)} stock_price <- yahoo.read(stock) #create a loop and lag x times lag_days = 260 # stk_price = function(lag_days){ for(i in 1:lag_days) { #lag_days = 260 # as.data.frame(lag(stock_price,i)) a = assign(paste("lag",i,sep=""),as.data.frame(lead(stock_price$Adj.Close,i))) names(a) = paste("lag",i) stock_price = cbind(stock_price,a) rm(list = ls()[grepl("lag", ls())]) #remove lags from each iteration, ls refers to list of dataframes } # return (stock_price)} # stk_price(260) #something wrong from creating a function nrows = dim(a)[1] #Create a % of 52 week high, 52 week low #% 52 week high max_52weeks = as.data.frame(apply(stock_price[,-1],1,max)) lower_52weekhigh = (stock_price$Adj.Close - max_52weeks)/max_52weeks names(lower_52weekhigh)[1] = "col1" hist(lower_52weekhigh[,1]) ggplot(lower_52weekhigh,aes(col1)) + geom_histogram() + ggtitle("Frequency of Change from 52 Week High, STI")+ xlab("Change from 52 Week High")+ylab("Counts")+ theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20))+ xlim(-0.6, 0) ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/lower_52weekhigh_hist.jpeg") ggplot(lower_52weekhigh,aes(col1)) + geom_freqpoly() + ggtitle("Frequency of Change from 52 Week High, STI")+ xlab("Change from 52 Week High")+ylab("Counts")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20))+ xlim(-0.6, 0) ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/lower_52weekhigh_kernel density.jpeg") #% off 52 week low min_52weeks = as.data.frame(apply(stock_price[,-1],1,min)) higher_52weeklow = (stock_price$Adj.Close - min_52weeks)/min_52weeks names(higher_52weeklow )[1] = "col1" hist(higher_52weeklow ) d= density(higher_52weeklow [-(nrows-260):-nrows,1], na.rm = FALSE) plot(d) #plot kernel density and histogram to see frequency quantile(lower_52weekhigh[-(nrows-260):-nrows,1], c(0.025,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) #Create a lag of 1,3,5 years ago leadprice = as.data.frame(lag(stock_price$Adj.Close,260)) names(leadprice)[1] = "lead1yr" leadprice$lead2yr = lag(stock_price$Adj.Close,520) leadprice$lead3yr = lag(stock_price$Adj.Close,780) #Compute returns returns= as.data.frame((leadprice$lead1yr - stock_price$Adj.Close)/stock_price$Adj.Close) names(returns)[1] = "returns_1yr" returns$returns_2yr = (leadprice$lead2yr - stock_price$Adj.Close)/stock_price$Adj.Close returns$returns_3yr = (leadprice$lead3yr - stock_price$Adj.Close)/stock_price$Adj.Close #Histogram on 1 year returns (creat a dummy for each 10 year period and use the stacked histogram code in 
http://docs.ggplot2.org/current/geom_histogram.html) ggplot(returns,aes(returns_1yr)) + geom_histogram() + ggtitle("No. of 1 Year Returns")+xlab("No. of 1 Year Returns")+ylab("Counts")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+theme(plot.title=element_text(face="bold", size=20)) ggplot(returns,aes(returns_1yr)) + geom_freqpoly() + ggtitle("Frequency of 1 Year Returns")+xlab("Frequency of 1 Year Returns")+ylab("Frequency")+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+theme(plot.title=element_text(face="bold", size=20)) #Look at returns-%52 week high scatterplot dat = cbind(stock_price$Date,lower_52weekhigh,higher_52weeklow,returns) names(dat) = c("Date","lower_52weekhigh","higher_52weeklow", "returns_1yr","returns_2yr","returns_3yr") #Example using cars # myPlot <- ggplot(cars, aes(speed, dist, color=as.integer(dt) ) ) + # geom_point() +scale_colour_gradient(low="blue", high="red",breaks=myBreaks) + # labs(color="Date") myBreaks <- function(x){ breaks <- c(min(x),median(x),max(x)) breaks = quantile(x, c(0, 0.2,0.4,0.6,0.8,1)) attr(breaks,"labels") <- as.Date(breaks, origin="1970-01-01") names(breaks) <- attr(breaks,"labels") return(breaks) } yr1_date = ggplot(dat,aes(Date,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(5),breaks=myBreaks) + ggtitle("1yr returns vs. Date")+xlab("Date")+ylab("Returns 1 year later") yr1_date = yr1_date+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr1_date+theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr1_high = ggplot(dat,aes(lower_52weekhigh,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(5), breaks=myBreaks) + ggtitle("1yr returns vs. Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 1 year later") yr1_high = yr1_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr1_high = yr1_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr1_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/1yr returns vs. Change from 52 week high_STI.jpeg") yr1 = lm(dat$returns_1yr~ dat$lower_52weekhigh) yr1$coefficients[2]*(-0.03) + yr1$coefficients[1] yr2_high = ggplot(dat,aes(lower_52weekhigh,returns_2yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(7), breaks=myBreaks) + ggtitle("2yr returns vs. Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 2 year later") yr2_high = yr2_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr2_high = yr2_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr2_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/2yr returns vs. Change from 52 week high_STI.jpeg") yr2 = lm(dat$returns_2yr~ dat$lower_52weekhigh) yr2$coefficients[2]*(-0.03) + yr2$coefficients[1] yr3_high = ggplot(dat,aes(lower_52weekhigh,returns_3yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours = rainbow(7), breaks=myBreaks) + ggtitle("3yr returns vs. 
Change from 52 week high, STI")+xlab("Change from 52 week high")+ylab("Returns 3 year later") yr3_high = yr3_high+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr3_high = yr3_high +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"))+ theme(plot.title=element_text(face="bold", size=20)) + labs(color="date") yr3_high ggsave("C:/Users/Huang Jirong/Desktop/Index Analysis/3yr returns vs. Change from 52 week high_STI.jpeg") yr3 = lm(dat$returns_3yr~ dat$lower_52weekhigh) yr3$coefficients[2]*(-0.03) + yr3$coefficients[1] #function to part the plots above vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y) grid.newpage() pushViewport(viewport(layout = grid.layout(2, 2))) print(yr1_high, vp = vplayout(1, 1)) print(yr2_high, vp = vplayout(1, 2)) print(yr3_high, vp = vplayout(2, 1)) #Look at returns-%52 week low scatterplot yr1_low = ggplot(dat,aes(higher_52weeklow,returns_1yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("1yr returns vs. % off 52 week low") yr1_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr2_low = ggplot(dat,aes(higher_52weeklow,returns_2yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("2yr returns vs. % off 52 week low") yr2_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() yr3_low = ggplot(dat,aes(higher_52weeklow,returns_3yr,colour=as.integer(Date))) + geom_point(alpha = 0.6) + scale_colour_gradientn(colours=c('red','green','blue'), breaks=myBreaks) + ggtitle("3yr returns vs. % off 52 week low") yr3_low+geom_smooth(method = "lm", se = TRUE) + stat_smooth() #Compute returns wiht a 52 week low vs high matrix #Scatter3d #Use ggplot2 version # s3d_1yr = scatterplot3d(lower_52weekhigh[-(nrows-260):-nrows,1],higher_52weeklow[-(nrows-260):-nrows,1],returns[-(nrows-260):-nrows,1]) # fit <- lm(returns[-(nrows-260):-nrows,1] ~ lower_52weekhigh[-(nrows-260):-nrows,1]+higher_52weeklow[-(nrows-260):-nrows,1]) # s3d_1yr$plane3d(fit) # # s3d_2yr = scatterplot3d(lower_52weekhigh[-(nrows-520):-nrows,1],higher_52weeklow[-(nrows-520):-nrows,1],returns[-(nrows-520):-nrows,2]) # fit <- lm(returns[-(nrows-520):-nrows,2] ~ lower_52weekhigh[-(nrows-520):-nrows,1]+higher_52weeklow[-(nrows-520):-nrows,1]) # s3d_2yr$plane3d(fit)
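The 260-iteration assign/cbind loop above exists only so that apply() can take a row-wise max over the current price and the next 260 rows; a rolling maximum expresses the same computation directly. A sketch on a toy series; zoo is an assumption here, it is not loaded by the original script:

library(zoo)

prices <- 100 + cumsum(rnorm(600))       # toy stand-in for Adj.Close
# window of 261 = the row itself plus the 260 rows that follow it,
# matching apply(stock_price[,-1], 1, max) over the lead columns
max_52wk <- rollapply(prices, width = 261, FUN = max,
                      align = "left", fill = NA)
drawdown <- (prices - max_52wk)/max_52wk # change from 52-week high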
library(dplyr) library(devtools) library(httr) parse_file <- function(x) { base_url <- "http://www.ars.usda.gov/SP2UserFiles/Place/12354500/Data/SR26/asc/" resp <- GET(paste0(base_url, x)) text <- content(resp, "text", encoding = "ISO-8859-1") read.delim(text = text, sep = "^", quote = "~", na.strings = c("^^", "~~"), header = FALSE, stringsAsFactors = FALSE) %>% tbl_df() } food <- parse_file("FOOD_DES.txt") names(food) <- c("food_id", "grp_id", "food", "food_abbr", "common", "manufacturer", "survey", "refuse", "ref_pct", "scientific", "n_factor", "pro_factor", "fat_factor", "carb_factor") food$survey <- food$survey == "Y" use_data(food) food_group <- parse_file("FD_GROUP.txt") names(food_group) <- c("grp_id", "group") use_data(food_group) nutrient <- parse_file("NUT_DATA.txt") names(nutrient) <- c("food_id", "nutr_id", "value", "num_points", "se", "source_type_id", "deriv_id", "impute_id", "fortified", "num_studies", "min", "max", "df", "lwr", "upr", "comments", "modified", "cc") nutrient$fortified[nutrient$fortified == ""] <- NA use_data(nutrient) nutrient_def <- parse_file("NUTR_DEF.txt") names(nutrient_def) <- c("nutr_id", "unit", "nutr_abbr", "nutr", "precision", "seq") use_data(nutrient_def) source_type <- parse_file("SRC_CD.txt") names(source_type) <- c("source_type_id", "source_type") use_data(source_type) deriv <- parse_file("DERIV_CD.txt") names(deriv) <- c("deriv_id", "deriv") use_data(deriv) weight <- parse_file("WEIGHT.txt") names(weight) <- c("food_id", "seq", "amount", "desc", "weight", "num_points", "sd") use_data(weight) footnote <- parse_file("FOOTNOTE.txt") names(footnote) <- c("food_id", "seq", "type", "nutr_id", "footnote") use_data(footnote) reference <- parse_file("DATA_SRC.txt") names(reference) <- c("ref_id", "authors", "title", "year", "journal", "vol_city", "issue_state", "start_page", "end_page") use_data(reference) nutrient_source <- parse_file("DATSRCLN.txt") names(nutrient_source) <- c("food_id", "nutr_id", "ref_id") use_data(nutrient_source)
/data-raw/sr26.R
no_license
zachary-foster/usdanutrients28
R
false
false
2,044
r
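For reference, the SR flat files parsed above are caret-separated with tilde-quoted text fields, which is why parse_file passes sep = "^" and quote = "~". A quick in-memory check of that combination; the record shown is only illustrative of the layout:

rec <- "~01100~^~Vegetables and Vegetable Products~"
read.delim(text = rec, sep = "^", quote = "~",
           header = FALSE, stringsAsFactors = FALSE)
# the first field is type-converted to a number, the second to the
# unquoted description string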
jFileMPMP<-read.table("~/Documents/git/rupak/scripts/reports/rq34/j_file_mpmp.rpt",sep="|",header=T) jFileRPRP<-read.table("~/Documents/git/rupak/scripts/reports/rq34/j_file_rprp.rpt",sep="|",header=T) plot(jFileMPMP$release_pair, jFileMPMP$j_index, ylim=c(0,0.40), type="b", lwd=1.5, lty=1, col="red",xlab="release pairs", ylab="") lines(jFileMPMP$release_pair, jFileRPRP$j_index, type="b", lwd=1.5, lty=2, col="blue") legend("topright", lty=c(1,2), col = c("red","blue"), legend = c("J index for DP","J index for RP"))
/paper1/images/j_file_MPMPvRPRP.r
no_license
tajmilur-rahman/SOEN691D
R
false
false
522
r
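The same two-series comparison can be drawn with ggplot2, which avoids the manual lines()/legend() bookkeeping. A sketch assuming the two data frames read in above share the same release pairs; the DP/RP legend labels are carried over from the base-R version:

library(ggplot2)

long <- rbind(data.frame(release_pair = jFileMPMP$release_pair,
                         series = "J index for DP",
                         j_index = jFileMPMP$j_index),
              data.frame(release_pair = jFileRPRP$release_pair,
                         series = "J index for RP",
                         j_index = jFileRPRP$j_index))
ggplot(long, aes(release_pair, j_index, colour = series, group = series)) +
  geom_line() + geom_point() +
  ylim(0, 0.40) +
  labs(x = "release pairs", y = "J index", colour = NULL)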
####Testing for Missing Values is.na(x) # returns TRUE of x is missing y <- c(1,2,3,NA) is.na(y) # returns a vector (F F F T) ####Recoding Values to Missing # recode 99 to missing for variable v1 # select rows where v1 is 99 and recode column v1 mydata$v1[mydata$v1==99] <- NA ####Excluding Missing Values from Analyses ####Arithmetic functions on missing values yield missing values. x <- c(1,2,NA,3) mean(x) # returns NA mean(x, na.rm=TRUE) # returns 2 ####The function complete.cases() returns a logical vector indicating which cases are complete. # list rows of data that have missing values mydata[!complete.cases(mydata),] ####The function na.omit() returns the object with listwise deletion of missing values. # create new dataset without missing data aesis_data_core <- na.omit(aesis_data_core) aesis_data_core_bak <- aesis_data_core
/Script/AESIS/Missing Data.R
no_license
setite/beer-recommendations
R
false
false
852
r
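The idioms in the notes above, run end-to-end on a small throwaway data frame:

mydata <- data.frame(v1 = c(1, 99, 3), v2 = c(NA, 5, 6))
mydata$v1[mydata$v1 == 99] <- NA       # recode the 99 sentinel to missing
mydata[!complete.cases(mydata), ]      # rows that contain any NA
na.omit(mydata)                        # listwise deletion
mean(mydata$v1, na.rm = TRUE)          # arithmetic with NA removed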
library(e1071)
library(CVXR)
library(kernlab)
library(MASS)

rm(list = ls())
par(mfrow=c(1,1))
setwd("~/Desktop/Multiclass Classification/MSVM Code")
source("~/Desktop/Multiclass Classification/MSVM Code/primary form functions.R")
# source("~/Desktop/Multiclass Classification/MSVM Code/Dual form functions.R")
source("~/Desktop/Multiclass Classification/MSVM Code/SDCA functions.R")

PL_WW_Loss <- function(W, K, Y, lambda){
  s <- W%*%K
  PL_WW_V <- s - t(replicate(m, s[Y == 1])) + (1-Y)  # uses the global m (number of classes)
  PL_WW_v <- mean(apply(PL_WW_V, MARGIN = 2, FUN = sum))
  PL_WW_v <- PL_WW_v + lambda/2*sum(diag(W%*%K%*%t(W)))
  return(PL_WW_v)
}

DL_WW_Loss <- function(A, K, Y, lambda){
  # ncol(K) is the sample size n (the original used the global ncol(X), the same value)
  DL_WW_v <- -sum(A[Y==1])/ncol(K) - 1/(2*lambda*ncol(K)^2)*sum(diag(A%*%K%*%t(A)))
  return(DL_WW_v)
}

data_generate <- function(n,mu_list, sep = 1, v = 1.5^2, m){
  y <- sort(sample(seq(1,m), size = n-m, replace = T, prob = 1/rep(m,m)))
  n_list <- sapply(seq(1:m), function(t){sum(y==t)+1})
  X <- apply(cbind(mu_list, n_list), MARGIN = 1,
             FUN = function(mu){matrix(mvrnorm(mu[3], sep*mu[1:2], diag(v,nrow=2)), nrow = mu[3])})
  X <- do.call(rbind,X)
  y <- unlist(sapply(seq(1:m), function(t){rep(t,n_list[t])}))
  return(list(X = X,y = y))
}

set.seed(153)

p <- 2
m <- 4
mu_list <- matrix(rnorm(2*m, mean =0, sd = 10), m)
data <- data_generate(200, mu_list, sep = 3, v = 4.5^2, m)
X <- t(data$X)
y <- data$y

m <- length(unique(y))
n <- ncol(X)
Y <- t(sapply(sort(unique(y)), function(id){as.numeric(y==id)}))

start_time = Sys.time()
W <- matrix(0, m, n)
A <- matrix(0, m, n)
# A <- matrix(rnorm(m*n), m, n)
# K <- t(X)%*%X
# s <- t(W)%*%X
kernel <- rbfdot(sigma = 2^-4)
K <- as.matrix(kernelMatrix(kernel, t(X)))

lambda <- 1e-2
bt <- max_iter_num <- 10000
P_obj <- rep(0, max_iter_num)
D_obj <- rep(0, length(P_obj))
P_obj[1] <- (m-1)/m
D_obj[1] <- 0

# NOTE: `Weights` was undefined in the original script; it is reconstructed
# here on the assumption that the coordinate step projects onto the
# probability simplex with the max-threshold rule, so that
# Weights %*% sort(b, decreasing = TRUE) gives running means of the sorted
# coordinates. Row k of Weights holds 1/k in its first k columns.
Weights <- lower.tri(matrix(1, m, m), diag = TRUE)/seq_len(m)

############## Cyclic Update###################
for(t in 2:max_iter_num){
  perm_idx <- sample(n, replace = F)
  for(j in perm_idx){
    ##Option 6
    q <- c(A%*%K[,j] - K[j,j]*A[,j])
    # q2 <- Y[,j] - (lambda*n*Y[,j] + q)/K[j,j]
    q2 <- -(lambda*n*Y[,j] + q)/K[j,j]
    # `b` was also undefined in the original; per the commented-out q2 line
    # above, it is taken to be the unconstrained maximiser Y[,j] + q2,
    # which is then projected onto the simplex below
    b <- Y[,j] + q2
    values.cand <- c(Weights%*%sort(b, decreasing = T) - 1/seq(1,m))
    x_res6 <- b - max(values.cand)
    x_res6[x_res6 <= 0] <- 0
    A[y[j],j] <- x_res6[y[j]] - 1
    A[-y[j],j] <- x_res6[-y[j]]
  }
  W <- -A/lambda/n
  P_obj[t] <- PL_WW_Loss(W,K,Y,lambda)
  D_obj[t] <- DL_WW_Loss(A,K,Y,lambda)
  cat("P:", P_obj[t],"D:", D_obj[t],"Gap:", P_obj[t] - D_obj[t],"\n")
  if(P_obj[t]-D_obj[t] < 1e-2){
    bt <- t
    break
  }
}
/SDCA_WW_kernel.R
no_license
XinweiZhang/MSVM
R
false
false
3,040
r
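The coordinate step above hinges on Euclidean projection onto the probability simplex; the same max-threshold rule as a standalone function makes the reconstruction easy to sanity-check in isolation:

proj_simplex <- function(b) {
  u <- sort(b, decreasing = TRUE)
  tau <- max((cumsum(u) - 1)/seq_along(u))  # threshold (max-threshold form)
  pmax(b - tau, 0)
}

x <- proj_simplex(c(0.3, -0.2, 1.4))
sum(x)        # 1, up to floating point
all(x >= 0)   # TRUE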
#' @importFrom magrittr %>%
#' @title Future Probability of Failure for 6.6/11kV and 20kV Transformers
#' @description This function calculates the future
#' annual probability of failure for 6.6/11kV and 20kV transformers.
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @inheritParams pof_transformer_11_20kv
#' @param simulation_end_year Numeric. The last year of simulating probability
#' of failure. Default is 100.
#' @return Numeric array. Future probability of failure.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Future probability of a 6.6/11 kV transformer
#' future_pof_transformer <-
#' pof_future_transformer_11_20kv(hv_transformer_type = "6.6/11kV Transformer (GM)",
#' utilisation_pct = "Default",
#'placement = "Default",
#'altitude_m = "Default",
#'distance_from_coast_km = "Default",
#'corrosion_category_index = "Default",
#'age = 20,
#'partial_discharge = "Default",
#'oil_acidity = "Default",
#'temperature_reading = "Default",
#'observed_condition = "Default",
#'reliability_factor = "Default",
#'simulation_end_year = 100)
#' # Plot
#'plot(future_pof_transformer$PoF * 100,
#'type = "l", ylab = "%", xlab = "years",
#'main = "PoF")
pof_future_transformer_11_20kv <-
  function(hv_transformer_type = "6.6/11kV Transformer (GM)",
           utilisation_pct = "Default",
           placement = "Default",
           altitude_m = "Default",
           distance_from_coast_km = "Default",
           corrosion_category_index = "Default",
           age,
           partial_discharge = "Default",
           oil_acidity = "Default",
           temperature_reading = "Default",
           observed_condition = "Default",
           reliability_factor = "Default",
           simulation_end_year = 100) {

  `Asset Register Category` = `Health Index Asset Category` = `Generic Term...1` =
    `Generic Term...2` = `Functional Failure Category` = `K-Value (%)` =
    `C-Value` = `Asset Register Category` = NULL
  # due to NSE notes in R CMD check

  # Ref. table Categorisation of Assets and Generic Terms for Assets --

  asset_type <- hv_transformer_type

  asset_category <- gb_ref$categorisation_of_assets %>%
    dplyr::filter(`Asset Register Category` == asset_type) %>%
    dplyr::select(`Health Index Asset Category`) %>% dplyr::pull()

  generic_term_1 <- gb_ref$generic_terms_for_assets %>%
    dplyr::filter(`Health Index Asset Category` == asset_category) %>%
    dplyr::select(`Generic Term...1`) %>% dplyr::pull()

  generic_term_2 <- gb_ref$generic_terms_for_assets %>%
    dplyr::filter(`Health Index Asset Category` == asset_category) %>%
    dplyr::select(`Generic Term...2`) %>% dplyr::pull()

  # Normal expected life for 6.6/11 kV transformer ------------------------------
  normal_expected_life <- gb_ref$normal_expected_life %>%
    dplyr::filter(`Asset Register Category` == asset_type) %>%
    dplyr::pull()

  # Constants C and K for PoF function --------------------------------------
  k <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` == asset_category) %>%
    dplyr::select(`K-Value (%)`) %>% dplyr::pull()/100

  c <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` == asset_category) %>%
    dplyr::select(`C-Value`) %>% dplyr::pull()

  # Duty factor -------------------------------------------------------------
  duty_factor_tf_11kv <- duty_factor_transformer_11_20kv(utilisation_pct)

  # Location factor ----------------------------------------------------
  location_factor_transformer <- location_factor(placement,
                                                 altitude_m,
                                                 distance_from_coast_km,
                                                 corrosion_category_index,
                                                 asset_type)

  # Expected life for 6.6/11 kV transformer ------------------------------
  expected_life_years <- expected_life(normal_expected_life,
                                       duty_factor_tf_11kv,
                                       location_factor_transformer)

  # b1 (Initial Ageing Rate) ------------------------------------------------
  b1 <- beta_1(expected_life_years)

  # Initial health score ----------------------------------------------------
  initial_health_score <- initial_health(b1, age)

  ## NOTE
  # Typically, the Health Score Collar is 0.5 and
  # Health Score Cap is 10, implying no overriding
  # of the Health Score. However, in some instances
  # these parameters are set to other values in the
  # Health Score Modifier calibration tables.
  # These overriding values are shown in Table 34 to Table 195
  # and Table 200 in Appendix B.

  # Measured condition inputs ---------------------------------------------
  mcm_mmi_cal_df <-
    gb_ref$measured_cond_modifier_mmi_cal

  mcm_mmi_cal_df <-
    mcm_mmi_cal_df[which(mcm_mmi_cal_df$`Asset Category` == asset_category), ]

  factor_divider_1 <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Factor Divider 1`)

  factor_divider_2 <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Factor Divider 2`)

  # NB: the line break inside the long backticked name below appears to be
  # part of the Excel-derived column name in gb_ref, so it is left as-is
  max_no_combined_factors <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Max. No. 
of Combined Factors` ) # Partial discharge ------------------------------------------------------- mci_hv_tf_partial_discharge <- gb_ref$mci_hv_tf_partial_discharge ci_factor_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Factor`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] ci_cap_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Cap`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] ci_collar_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Collar`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] # Oil acidity ------------------------------------------------------------- mci_hv_tf_oil_acidity <- gb_ref$mci_hv_tf_oil_acidity ci_cap_oil_acidity <- 10 ci_collar_oil_acidity <- 0.5 if (oil_acidity == "Default") { ci_factor_oil_acidity <- 1 } else if (oil_acidity <= 0.15) { ci_factor_oil_acidity <- 0.9 } else if (0.15 < oil_acidity && oil_acidity <= 0.3) { ci_factor_oil_acidity <- 1 } else if (0.3 < oil_acidity && oil_acidity <= 0.5) { ci_factor_oil_acidity <- 1.15 } else { ci_factor_oil_acidity <- 1.4 } # Temperature readings ---------------------------------------------------- mci_hv_tf_temp_readings <- gb_ref$mci_hv_tf_temp_readings ci_factor_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Factor`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] ci_cap_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Cap`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] ci_collar_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Collar`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] # measured condition factor ----------------------------------------------- factors <- c(ci_factor_partial_discharge, ci_factor_oil_acidity, ci_factor_temp_reading) measured_condition_factor <- mmi(factors, factor_divider_1, factor_divider_2, max_no_combined_factors) # Measured condition cap -------------------------------------------------- caps <- c(ci_cap_partial_discharge, ci_cap_oil_acidity, ci_cap_temp_reading) measured_condition_cap <- min(caps) # Measured condition collar ----------------------------------------------- collars <- c(ci_collar_partial_discharge, ci_collar_oil_acidity, ci_collar_temp_reading) measured_condition_collar <- max(collars) # Measured condition modifier --------------------------------------------- measured_condition_modifier <- data.frame(measured_condition_factor, measured_condition_cap, measured_condition_collar) # Observed condition inputs --------------------------------------------- oci_mmi_cal_df <- gb_ref$observed_cond_modifier_mmi_cal oci_mmi_cal_df <- oci_mmi_cal_df[which(oci_mmi_cal_df$`Asset Category` == asset_category), ] factor_divider_1 <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 1`) factor_divider_2 <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 2`) max_no_combined_factors <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Max. No. 
of Combined Factors` ) oci_hv_tf_tf_ext_cond_df <- gb_ref$oci_hv_tf_tf_ext_cond ci_factor_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Factor`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] ci_cap_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Cap`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] ci_collar_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Collar`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] # Observed condition factor ----------------------------------------------- observed_condition_factor <- mmi(factors = ci_factor_ext_cond, factor_divider_1, factor_divider_2, max_no_combined_factors) # Observed condition cap --------------------------------------------- observed_condition_cap <- ci_cap_ext_cond # Observed condition collar --------------------------------------------- observed_condition_collar <- ci_collar_ext_cond # Observed condition modifier --------------------------------------------- observed_condition_modifier <- data.frame(observed_condition_factor, observed_condition_cap, observed_condition_collar) # Health score factor --------------------------------------------------- health_score_factor <- health_score_excl_ehv_132kv_tf(observed_condition_factor, measured_condition_factor) # Health score cap -------------------------------------------------------- health_score_cap <- min(observed_condition_cap, measured_condition_cap) # Health score collar ----------------------------------------------------- health_score_collar <- max(observed_condition_collar, measured_condition_collar) # Health score modifier --------------------------------------------------- health_score_modifier <- data.frame(health_score_factor, health_score_cap, health_score_collar) # Current health score ---------------------------------------------------- current_health_score <- current_health(initial_health_score, health_score_modifier$health_score_factor, health_score_modifier$health_score_cap, health_score_modifier$health_score_collar, reliability_factor = reliability_factor) # Probability of failure --------------------------------------------------- probability_of_failure <- k * (1 + (c * current_health_score) + (((c * current_health_score)^2) / factorial(2)) + (((c * current_health_score)^3) / factorial(3))) # Future probability of failure ------------------------------------------- # the Health Score of a new asset H_new <- 0.5 # the Health Score of the asset when it reaches its Expected Life b2 <- beta_2(current_health_score, age) if (b2 > 2*b1){ b2 <- b1 } else if (current_health_score == 0.5){ b2 <- b1 } if (current_health_score < 2) { ageing_reduction_factor <- 1 } else if (current_health_score <= 5.5) { ageing_reduction_factor <- ((current_health_score - 2)/7) + 1 } else { ageing_reduction_factor <- 1.5 } # Dynamic part pof_year <- list() year <- seq(from=0,to=simulation_end_year,by=1) for (y in 1:length(year)){ t <- year[y] future_health_Score <- current_health_score*exp((b2/ageing_reduction_factor) * t) H <- future_health_Score future_health_score_limit <- 15 if (H > future_health_score_limit){ H <- future_health_score_limit } pof_year[[paste(y)]] <- k * (1 + (c * H) + (((c * H)^2) / factorial(2)) + (((c * H)^3) / factorial(3))) } pof_future <- data.frame(year=year, PoF=as.numeric(unlist(pof_year))) pof_future$age <- NA pof_future$age[1] <- age for(i in 2:nrow(pof_future)) { pof_future$age[i] <- age + i -1 } 
return(pof_future) }
/R/pof_future_transformer_11_20kv.R
permissive
scoultersdcoe/CNAIM
R
false
false
14,570
r
#' @importFrom magrittr %>% #' @title Future Probability of Failure for 6.6/11kV and 20kV Transformers #' @description This function calculates the future #' annual probability of failure for 6.6/11kV and 20kV transformers. #' The function is a cubic curve that is based on #' the first three terms of the Taylor series for an #' exponential function. For more information about the #' probability of failure function see section 6 #' on page 30 in CNAIM (2017). #' @inheritParams pof_transformer_11_20kv #' @param simulation_end_year Numeric. The last year of simulating probability #' of failure. Default is 100. #' @return Numeric array. Future probability of failure. #' @source DNO Common Network Asset Indices Methodology (CNAIM), #' Health & Criticality - Version 1.1, 2017: #' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf} #' @export #' @examples #' # Future probability of a 6.6/11 kV transformer #' future_pof_transformer <- #' pof_future_transformer_11_20kv(hv_transformer_type = "6.6/11kV Transformer (GM)", #' utilisation_pct = "Default", #'placement = "Default", #'altitude_m = "Default", #'distance_from_coast_km = "Default", #'corrosion_category_index = "Default", #'age = 20, #'partial_discharge = "Default", #'oil_acidity = "Default", #'temperature_reading = "Default", #'observed_condition = "Default", #'reliability_factor = "Default", #'simulation_end_year = 100) # # Plot #'plot(future_pof_transformer$PoF * 100, #'type = "line", ylab = "%", xlab = "years", #'main = "PoF") pof_future_transformer_11_20kv <- function(hv_transformer_type = "6.6/11kV Transformer (GM)", utilisation_pct = "Default", placement = "Default", altitude_m = "Default", distance_from_coast_km = "Default", corrosion_category_index = "Default", age, partial_discharge = "Default", oil_acidity = "Default", temperature_reading = "Default", observed_condition = "Default", reliability_factor = "Default", simulation_end_year = 100) { `Asset Register Category` = `Health Index Asset Category` = `Generic Term...1` = `Generic Term...2` = `Functional Failure Category` = `K-Value (%)` = `C-Value` = `Asset Register Category` = NULL # due to NSE notes in R CMD check # Ref. 
table Categorisation of Assets and Generic Terms for Assets -- asset_type <- hv_transformer_type asset_category <- gb_ref$categorisation_of_assets %>% dplyr::filter(`Asset Register Category` == asset_type) %>% dplyr::select(`Health Index Asset Category`) %>% dplyr::pull() generic_term_1 <- gb_ref$generic_terms_for_assets %>% dplyr::filter(`Health Index Asset Category` == asset_category) %>% dplyr::select(`Generic Term...1`) %>% dplyr::pull() generic_term_2 <- gb_ref$generic_terms_for_assets %>% dplyr::filter(`Health Index Asset Category` == asset_category) %>% dplyr::select(`Generic Term...2`) %>% dplyr::pull() # Normal expected life for 6.6/11 kV transformer ------------------------------ normal_expected_life <- gb_ref$normal_expected_life %>% dplyr::filter(`Asset Register Category` == asset_type) %>% dplyr::pull() # Constants C and K for PoF function -------------------------------------- k <- gb_ref$pof_curve_parameters %>% dplyr::filter(`Functional Failure Category` == asset_category) %>% dplyr::select(`K-Value (%)`) %>% dplyr::pull()/100 c <- gb_ref$pof_curve_parameters %>% dplyr::filter(`Functional Failure Category` == asset_category) %>% dplyr::select(`C-Value`) %>% dplyr::pull() # Duty factor ------------------------------------------------------------- duty_factor_tf_11kv <- duty_factor_transformer_11_20kv(utilisation_pct) # Location factor ---------------------------------------------------- location_factor_transformer <- location_factor(placement, altitude_m, distance_from_coast_km, corrosion_category_index, asset_type) # Expected life for 6.6/11 kV transformer ------------------------------ expected_life_years <- expected_life(normal_expected_life, duty_factor_tf_11kv, location_factor_transformer) # b1 (Initial Ageing Rate) ------------------------------------------------ b1 <- beta_1(expected_life_years) # Initial health score ---------------------------------------------------- initial_health_score <- initial_health(b1, age) ## NOTE # Typically, the Health Score Collar is 0.5 and # Health Score Cap is 10, implying no overriding # of the Health Score. However, in some instances # these parameters are set to other values in the # Health Score Modifier calibration tables. # These overriding values are shown in Table 34 to Table 195 # and Table 200 in Appendix B. # Measured condition inputs --------------------------------------------- mcm_mmi_cal_df <- gb_ref$measured_cond_modifier_mmi_cal mcm_mmi_cal_df <- mcm_mmi_cal_df[which(mcm_mmi_cal_df$`Asset Category` == asset_category), ] factor_divider_1 <- as.numeric(mcm_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 1`) factor_divider_2 <- as.numeric(mcm_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 2`) max_no_combined_factors <- as.numeric(mcm_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Max. No. 
of Combined Factors` ) # Partial discharge ------------------------------------------------------- mci_hv_tf_partial_discharge <- gb_ref$mci_hv_tf_partial_discharge ci_factor_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Factor`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] ci_cap_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Cap`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] ci_collar_partial_discharge <- mci_hv_tf_partial_discharge$`Condition Input Collar`[which( mci_hv_tf_partial_discharge$ `Condition Criteria: Partial Discharge Test Result` == partial_discharge)] # Oil acidity ------------------------------------------------------------- mci_hv_tf_oil_acidity <- gb_ref$mci_hv_tf_oil_acidity ci_cap_oil_acidity <- 10 ci_collar_oil_acidity <- 0.5 if (oil_acidity == "Default") { ci_factor_oil_acidity <- 1 } else if (oil_acidity <= 0.15) { ci_factor_oil_acidity <- 0.9 } else if (0.15 < oil_acidity && oil_acidity <= 0.3) { ci_factor_oil_acidity <- 1 } else if (0.3 < oil_acidity && oil_acidity <= 0.5) { ci_factor_oil_acidity <- 1.15 } else { ci_factor_oil_acidity <- 1.4 } # Temperature readings ---------------------------------------------------- mci_hv_tf_temp_readings <- gb_ref$mci_hv_tf_temp_readings ci_factor_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Factor`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] ci_cap_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Cap`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] ci_collar_temp_reading <- mci_hv_tf_temp_readings$`Condition Input Collar`[which( mci_hv_tf_temp_readings$ `Condition Criteria: Temperature Reading` == temperature_reading)] # measured condition factor ----------------------------------------------- factors <- c(ci_factor_partial_discharge, ci_factor_oil_acidity, ci_factor_temp_reading) measured_condition_factor <- mmi(factors, factor_divider_1, factor_divider_2, max_no_combined_factors) # Measured condition cap -------------------------------------------------- caps <- c(ci_cap_partial_discharge, ci_cap_oil_acidity, ci_cap_temp_reading) measured_condition_cap <- min(caps) # Measured condition collar ----------------------------------------------- collars <- c(ci_collar_partial_discharge, ci_collar_oil_acidity, ci_collar_temp_reading) measured_condition_collar <- max(collars) # Measured condition modifier --------------------------------------------- measured_condition_modifier <- data.frame(measured_condition_factor, measured_condition_cap, measured_condition_collar) # Observed condition inputs --------------------------------------------- oci_mmi_cal_df <- gb_ref$observed_cond_modifier_mmi_cal oci_mmi_cal_df <- oci_mmi_cal_df[which(oci_mmi_cal_df$`Asset Category` == asset_category), ] factor_divider_1 <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 1`) factor_divider_2 <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Factor Divider 2`) max_no_combined_factors <- as.numeric(oci_mmi_cal_df$ `Parameters for Combination Using MMI Technique - Max. No. 
of Combined Factors` ) oci_hv_tf_tf_ext_cond_df <- gb_ref$oci_hv_tf_tf_ext_cond ci_factor_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Factor`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] ci_cap_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Cap`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] ci_collar_ext_cond <- oci_hv_tf_tf_ext_cond_df$`Condition Input Collar`[which( oci_hv_tf_tf_ext_cond_df$`Condition Criteria: Observed Condition` == observed_condition)] # Observed condition factor ----------------------------------------------- observed_condition_factor <- mmi(factors = ci_factor_ext_cond, factor_divider_1, factor_divider_2, max_no_combined_factors) # Observed condition cap --------------------------------------------- observed_condition_cap <- ci_cap_ext_cond # Observed condition collar --------------------------------------------- observed_condition_collar <- ci_collar_ext_cond # Observed condition modifier --------------------------------------------- observed_condition_modifier <- data.frame(observed_condition_factor, observed_condition_cap, observed_condition_collar) # Health score factor --------------------------------------------------- health_score_factor <- health_score_excl_ehv_132kv_tf(observed_condition_factor, measured_condition_factor) # Health score cap -------------------------------------------------------- health_score_cap <- min(observed_condition_cap, measured_condition_cap) # Health score collar ----------------------------------------------------- health_score_collar <- max(observed_condition_collar, measured_condition_collar) # Health score modifier --------------------------------------------------- health_score_modifier <- data.frame(health_score_factor, health_score_cap, health_score_collar) # Current health score ---------------------------------------------------- current_health_score <- current_health(initial_health_score, health_score_modifier$health_score_factor, health_score_modifier$health_score_cap, health_score_modifier$health_score_collar, reliability_factor = reliability_factor) # Probability of failure --------------------------------------------------- probability_of_failure <- k * (1 + (c * current_health_score) + (((c * current_health_score)^2) / factorial(2)) + (((c * current_health_score)^3) / factorial(3))) # Future probability of failure ------------------------------------------- # the Health Score of a new asset H_new <- 0.5 # the Health Score of the asset when it reaches its Expected Life b2 <- beta_2(current_health_score, age) if (b2 > 2*b1){ b2 <- b1 } else if (current_health_score == 0.5){ b2 <- b1 } if (current_health_score < 2) { ageing_reduction_factor <- 1 } else if (current_health_score <= 5.5) { ageing_reduction_factor <- ((current_health_score - 2)/7) + 1 } else { ageing_reduction_factor <- 1.5 } # Dynamic part pof_year <- list() year <- seq(from=0,to=simulation_end_year,by=1) for (y in 1:length(year)){ t <- year[y] future_health_Score <- current_health_score*exp((b2/ageing_reduction_factor) * t) H <- future_health_Score future_health_score_limit <- 15 if (H > future_health_score_limit){ H <- future_health_score_limit } pof_year[[paste(y)]] <- k * (1 + (c * H) + (((c * H)^2) / factorial(2)) + (((c * H)^3) / factorial(3))) } pof_future <- data.frame(year=year, PoF=as.numeric(unlist(pof_year))) pof_future$age <- NA pof_future$age[1] <- age for(i in 2:nrow(pof_future)) { pof_future$age[i] <- age + i -1 } 
return(pof_future) }
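# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original function): the PoF expression
# above is a truncated Taylor expansion of k * exp(c * H). With made-up k and
# c values, the same polynomial can be checked standalone:
# pof_poly <- function(h, k = 0.05, c = 1.1) {
#   k * (1 + (c * h) + ((c * h)^2) / factorial(2) + ((c * h)^3) / factorial(3))
# }
# pof_poly(c(0.5, 5.5, 10))  # PoF rises steeply with the health score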
setwd("./dataset/final/en_US") library(dplyr) library(tm) library(NLP) library(RWeka) library(stringi) library(ggplot2) library(RColorBrewer) # read the files con<- file("en_US.twitter.txt", "r") tfile<-readLines(con) close(con) stri_stats_general(tfile) con<- file("en_US.blogs.txt", "r") bfile<-readLines(con) close(con) stri_stats_general(bfile) con<- file("en_US.news.txt", "r") nfile<-readLines(con) close(con) stri_stats_general(nfile) ## read the files con<- file("en_US.twitter.txt", "r") twf<-readLines(con,500000) save(twf, file = "./sub/twf.txt") close(con) con<- file("en_US.blogs.txt", "r") bwf<-readLines(con,300000) save(bwf, file = "./sub/bwf.txt") close(con) con<- file("en_US.news.txt", "r") nwf<-readLines(con,100000) save(nwf, file = "./sub/nwf.txt") close(con) ## create a Corpus of 3 files - Twitter, Blogs and News filecr <- Corpus(DirSource("C:/Users/Manish/Documents/Analytics/Coursera/Assignment/Capstone/dataset/final/en_US/sub"), readerControl = list(language="en_US")) names(filecr) summary(filecr) ## clean up the files - convert to lowecase filecr<-tm_map(filecr, content_transformer(tolower)) ## remove punctuations, numbers, whitespaces and stopwords ## For stopwords - English language stopwords have been used ## for Profanity, manual list was created and the words were removed filecr<-tm_map(filecr, FUN = removePunctuation) filecr<-tm_map(filecr, FUN = removeNumbers) filecr<-tm_map(filecr, FUN = stripWhitespace) # mystopwords<-stopwords("english") # filecr<-tm_map(filecr, removeWords, mystopwords) prowords<-c("fuck", "ass", "asshole", "shit", "crap", "bitch", "cock", "cunt") filecr<-tm_map(filecr, removeWords, prowords) filecr <- tm_map(filecr, PlainTextDocument) ## termdoc matix was created for count based evaluation filedtm<-DocumentTermMatrix(filecr) filedtm frqt<-findFreqTerms(filedtm, lowfreq = 100, highfreq=Inf) frqt #assoc<-findAssocs(filedtm, c("one"), c(.95)) #assoc freq <- sort(colSums(as.matrix(filedtm)), decreasing=TRUE) wordcount <- data.frame(word=names(freq), freq=freq) # Plot Histogram # subset(wordcount, freq>75) %>% # ggplot(aes(word, freq)) + # geom_bar(stat="identity", fill="lightblue", colour="black") + # theme(axis.text.x=element_text(angle=45, hjust=1)) # library(wordcloud) # set.seed(111) # wordcloud(names(freq), freq, min.freq=50, colors=brewer.pal(8, "Dark2")) BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2)) dtmtoken <- DocumentTermMatrix(filecr, control = list(tokenize = BigramTokenizer)) freq <- sort(colSums(as.matrix(dtmtoken)), decreasing=TRUE) wof <- data.frame(word=names(freq), freq=freq) ## break prefixes in words bivar<-as.character(wof$word) freq2<-wof$freq bivar2<-as.data.frame(do.call(rbind,strsplit(bivar, " "))) bivar2final<-cbind(bivar2,freq2) names(bivar2final)<-c("prefix", "word", "frequency") # bivar<-as.character(wof$word) # bivar<-strsplit(bivar, " ") # bivar<-unlist(bivar) # # bivart<-c(bivar[[1]][1],bivar[[1]][2])) # # bivar <- lapply(bivar, function(x) c(paste(x[[1]][1], x[[1]][2]))) # bivar1<-tolower(bivar[seq(1, length(bivar), 2)]) # bivar2<-bivar[seq(2, length(bivar), 2)] # bivar <- unlist(bivar) pl <- ggplot(subset(wof, freq >10) ,aes(word, freq)) pl <- pl + geom_bar(stat="identity", fill="lightblue", colour="black") pl + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("2 - Gram Frequency") TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3)) dtmtoken3 <- DocumentTermMatrix(filecr, control = list(tokenize = TrigramTokenizer)) freq3 <- 
sort(colSums(as.matrix(dtmtoken3)), decreasing=TRUE) wof3 <- data.frame(word3=names(freq3), freq3=freq3) pl3 <- ggplot(subset(wof3, freq3 >10) ,aes(word3, freq3)) pl3 <- pl3 + geom_bar(stat="identity", fill="lightblue", colour="black") pl3 + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("3 - Gram Frequency") trivar<-as.character(wof3$word3) freq3<-wof3$freq3 trivar2<-as.data.frame(do.call(rbind,strsplit(trivar, " "))) trivar2m<-as.data.frame(paste(trivar2$V1,trivar2$V2,sep=" ")) trivar2final<-cbind(trivar2m, trivar2$V3, freq3) names(trivar2final)<-c("prefix", "word", "frequency") FourgramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4)) dtmtoken4 <- DocumentTermMatrix(filecr, control = list(tokenize = FourgramTokenizer)) freq4 <- sort(colSums(as.matrix(dtmtoken4)), decreasing=TRUE) wof4 <- data.frame(word4=names(freq4), freq4=freq4) pl3 <- ggplot(subset(wof3, freq3 >10) ,aes(word3, freq3)) pl3 <- pl3 + geom_bar(stat="identity", fill="lightblue", colour="black") pl3 + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("3 - Gram Frequency") fourvar<-as.character(wof4$word4) freq4<-wof4$freq4 fourvar2<-as.data.frame(do.call(rbind,strsplit(fourvar, " "))) fourvar2m<-as.data.frame(paste(fourvar2$V1,fourvar2$V2,fourvar2$V3,sep=" ")) fourvar2final<-cbind(fourvar2m, fourvar2$V4, freq4) names(fourvar2final)<-c("prefix", "word", "frequency") ## Cleaning of input data inputd<-c("good") inputdspl<-as.data.frame(strsplit(inputd, " ")) inputspl<-as.data.frame(do.call(rbind,strsplit(inputd, " "))) inputunigram<-as.data.frame(inputspl[,length(inputspl)]) inputbigram<-as.data.frame(paste(inputspl[,length(inputspl)-1], inputspl[,length(inputspl)],sep=" ")) inputtrigram<-as.data.frame(paste(inputspl[,length(inputspl)-2], inputspl[,length(inputspl)-1], inputspl[,length(inputspl)],sep=" ")) as.character(inputtrigram) ## compare with word grams and predict the next word ## start with 4-grams if(length(inputtrigram==3)){ patternm<-as.character(inputtrigram[1,]) match4g<-grep(patternm,fourvar2final$prefix) nextword<-as.character(fourvar2final[match4g,2]) nextword length(nextword) patternm } if(!length(nextword)){ if(length(inputbigram==2)){ patternm<-as.character(inputbigram[1,]) match4g<-grep(patternm,trivar2final$prefix) nextword<-as.character(trivar2final[match4g,2]) } } if(!length(nextword)){ # inputd<-c("dsfdf dfdsf dfsf dddd tool nicd") # inputdspl<-as.data.frame(strsplit(inputd, " ")) # # inputspl<-as.data.frame(do.call(rbind,strsplit(inputd, " "))) # inputunigram<-as.data.frame(inputspl[,length(inputspl)]) inputunigram patternm<-as.character(inputunigram[1,]) match4g<-grep(patternm,bivar2final$prefix) nextword<-bivar2final[match4g,2:3] } nextword patt<-c("good") match4g1<-grep(patt,bivar2final$prefix) inputunigram
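## Illustrative sketch (not in the original script): the backoff logic above,
## wrapped as one function over the n-gram tables built earlier. Column names
## ("prefix", "word", "frequency") match the *final data frames above.
predict_next <- function(phrase) {
  words <- unlist(strsplit(tolower(phrase), " "))
  for (tab in list(fourvar2final, trivar2final, bivar2final)) {
    n <- length(unlist(strsplit(as.character(tab$prefix[1]), " ")))  # prefix length
    if (length(words) >= n) {
      key <- paste(tail(words, n), collapse = " ")
      hits <- tab[as.character(tab$prefix) == key, ]
      if (nrow(hits) > 0) return(as.character(hits$word[which.max(hits$frequency)]))
    }
  }
  NA_character_   # no match at any n-gram order
}
# predict_next("thanks for the")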
/final assignmentv3.R
no_license
manishtomer/program
R
false
false
6,826
r
setwd("./dataset/final/en_US") library(dplyr) library(tm) library(NLP) library(RWeka) library(stringi) library(ggplot2) library(RColorBrewer) # read the files con<- file("en_US.twitter.txt", "r") tfile<-readLines(con) close(con) stri_stats_general(tfile) con<- file("en_US.blogs.txt", "r") bfile<-readLines(con) close(con) stri_stats_general(bfile) con<- file("en_US.news.txt", "r") nfile<-readLines(con) close(con) stri_stats_general(nfile) ## read the files con<- file("en_US.twitter.txt", "r") twf<-readLines(con,500000) save(twf, file = "./sub/twf.txt") close(con) con<- file("en_US.blogs.txt", "r") bwf<-readLines(con,300000) save(bwf, file = "./sub/bwf.txt") close(con) con<- file("en_US.news.txt", "r") nwf<-readLines(con,100000) save(nwf, file = "./sub/nwf.txt") close(con) ## create a Corpus of 3 files - Twitter, Blogs and News filecr <- Corpus(DirSource("C:/Users/Manish/Documents/Analytics/Coursera/Assignment/Capstone/dataset/final/en_US/sub"), readerControl = list(language="en_US")) names(filecr) summary(filecr) ## clean up the files - convert to lowecase filecr<-tm_map(filecr, content_transformer(tolower)) ## remove punctuations, numbers, whitespaces and stopwords ## For stopwords - English language stopwords have been used ## for Profanity, manual list was created and the words were removed filecr<-tm_map(filecr, FUN = removePunctuation) filecr<-tm_map(filecr, FUN = removeNumbers) filecr<-tm_map(filecr, FUN = stripWhitespace) # mystopwords<-stopwords("english") # filecr<-tm_map(filecr, removeWords, mystopwords) prowords<-c("fuck", "ass", "asshole", "shit", "crap", "bitch", "cock", "cunt") filecr<-tm_map(filecr, removeWords, prowords) filecr <- tm_map(filecr, PlainTextDocument) ## termdoc matix was created for count based evaluation filedtm<-DocumentTermMatrix(filecr) filedtm frqt<-findFreqTerms(filedtm, lowfreq = 100, highfreq=Inf) frqt #assoc<-findAssocs(filedtm, c("one"), c(.95)) #assoc freq <- sort(colSums(as.matrix(filedtm)), decreasing=TRUE) wordcount <- data.frame(word=names(freq), freq=freq) # Plot Histogram # subset(wordcount, freq>75) %>% # ggplot(aes(word, freq)) + # geom_bar(stat="identity", fill="lightblue", colour="black") + # theme(axis.text.x=element_text(angle=45, hjust=1)) # library(wordcloud) # set.seed(111) # wordcloud(names(freq), freq, min.freq=50, colors=brewer.pal(8, "Dark2")) BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2)) dtmtoken <- DocumentTermMatrix(filecr, control = list(tokenize = BigramTokenizer)) freq <- sort(colSums(as.matrix(dtmtoken)), decreasing=TRUE) wof <- data.frame(word=names(freq), freq=freq) ## break prefixes in words bivar<-as.character(wof$word) freq2<-wof$freq bivar2<-as.data.frame(do.call(rbind,strsplit(bivar, " "))) bivar2final<-cbind(bivar2,freq2) names(bivar2final)<-c("prefix", "word", "frequency") # bivar<-as.character(wof$word) # bivar<-strsplit(bivar, " ") # bivar<-unlist(bivar) # # bivart<-c(bivar[[1]][1],bivar[[1]][2])) # # bivar <- lapply(bivar, function(x) c(paste(x[[1]][1], x[[1]][2]))) # bivar1<-tolower(bivar[seq(1, length(bivar), 2)]) # bivar2<-bivar[seq(2, length(bivar), 2)] # bivar <- unlist(bivar) pl <- ggplot(subset(wof, freq >10) ,aes(word, freq)) pl <- pl + geom_bar(stat="identity", fill="lightblue", colour="black") pl + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("2 - Gram Frequency") TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3)) dtmtoken3 <- DocumentTermMatrix(filecr, control = list(tokenize = TrigramTokenizer)) freq3 <- 
sort(colSums(as.matrix(dtmtoken3)), decreasing=TRUE) wof3 <- data.frame(word3=names(freq3), freq3=freq3) pl3 <- ggplot(subset(wof3, freq3 >10) ,aes(word3, freq3)) pl3 <- pl3 + geom_bar(stat="identity", fill="lightblue", colour="black") pl3 + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("3 - Gram Frequency") trivar<-as.character(wof3$word3) freq3<-wof3$freq3 trivar2<-as.data.frame(do.call(rbind,strsplit(trivar, " "))) trivar2m<-as.data.frame(paste(trivar2$V1,trivar2$V2,sep=" ")) trivar2final<-cbind(trivar2m, trivar2$V3, freq3) names(trivar2final)<-c("prefix", "word", "frequency") FourgramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4)) dtmtoken4 <- DocumentTermMatrix(filecr, control = list(tokenize = FourgramTokenizer)) freq4 <- sort(colSums(as.matrix(dtmtoken4)), decreasing=TRUE) wof4 <- data.frame(word4=names(freq4), freq4=freq4) pl3 <- ggplot(subset(wof3, freq3 >10) ,aes(word3, freq3)) pl3 <- pl3 + geom_bar(stat="identity", fill="lightblue", colour="black") pl3 + theme(axis.text.x=element_text(angle=45, hjust=1)) + ggtitle("3 - Gram Frequency") fourvar<-as.character(wof4$word4) freq4<-wof4$freq4 fourvar2<-as.data.frame(do.call(rbind,strsplit(fourvar, " "))) fourvar2m<-as.data.frame(paste(fourvar2$V1,fourvar2$V2,fourvar2$V3,sep=" ")) fourvar2final<-cbind(fourvar2m, fourvar2$V4, freq4) names(fourvar2final)<-c("prefix", "word", "frequency") ## Cleaning of input data inputd<-c("good") inputdspl<-as.data.frame(strsplit(inputd, " ")) inputspl<-as.data.frame(do.call(rbind,strsplit(inputd, " "))) inputunigram<-as.data.frame(inputspl[,length(inputspl)]) inputbigram<-as.data.frame(paste(inputspl[,length(inputspl)-1], inputspl[,length(inputspl)],sep=" ")) inputtrigram<-as.data.frame(paste(inputspl[,length(inputspl)-2], inputspl[,length(inputspl)-1], inputspl[,length(inputspl)],sep=" ")) as.character(inputtrigram) ## compare with word grams and predict the next word ## start with 4-grams if(length(inputtrigram==3)){ patternm<-as.character(inputtrigram[1,]) match4g<-grep(patternm,fourvar2final$prefix) nextword<-as.character(fourvar2final[match4g,2]) nextword length(nextword) patternm } if(!length(nextword)){ if(length(inputbigram==2)){ patternm<-as.character(inputbigram[1,]) match4g<-grep(patternm,trivar2final$prefix) nextword<-as.character(trivar2final[match4g,2]) } } if(!length(nextword)){ # inputd<-c("dsfdf dfdsf dfsf dddd tool nicd") # inputdspl<-as.data.frame(strsplit(inputd, " ")) # # inputspl<-as.data.frame(do.call(rbind,strsplit(inputd, " "))) # inputunigram<-as.data.frame(inputspl[,length(inputspl)]) inputunigram patternm<-as.character(inputunigram[1,]) match4g<-grep(patternm,bivar2final$prefix) nextword<-bivar2final[match4g,2:3] } nextword patt<-c("good") match4g1<-grep(patt,bivar2final$prefix) inputunigram
## TODO: return clustering object instead of cluster$order
## TODO: provide examples for adjusting legend size / spacing


#' @title Visual Summary of Geomorphic Component Probability
#'
#' @description A unique display of geomorphic component probability.
#'
#' @param x \code{data.frame} as created by \code{soilDB::fetchOSD(..., extended=TRUE)}, see details
#'
#' @param s an optional soil series name, highlighted in the figure
#'
#' @param annotations logical, add number of records and normalized Shannon entropy values
#'
#' @param annotation.cex annotation label scaling factor
#'
#' @return a \code{list} with the following elements:
#'
#' \item{fig}{lattice object (the figure)}
#' \item{order}{ordering of soil series}
#'
#' @details See the \href{http://ncss-tech.github.io/AQP/soilDB/soil-series-query-functions.html}{Soil Series Query Functions} tutorial for more information.
#'
#' @author D.E. Beaudette
#'
#'
vizGeomorphicComponent <- function(x, s=NULL, annotations = TRUE, annotation.cex = 0.75) {

  # check for required packages
  if(!requireNamespace('dendextend', quietly=TRUE) | !requireNamespace('latticeExtra', quietly=TRUE))
    stop('please install the `dendextend` and `latticeExtra` packages', call.=FALSE)

  # CRAN CHECK hack
  geomcomp <- NULL

  # save row names as they are lost in the distance matrix calc
  row.names(x) <- x$series

  # save number of records
  n.records <- x$n

  # save normalized Shannon entropy
  H <- x$shannon_entropy

  # mask-out some columns we don't need
  x$n <- NULL
  x$shannon_entropy <- NULL

  ## convert proportions to long format for plotting
  x.long <- melt(x, id.vars = 'series')

  # fix names: second column contains labels
  names(x.long)[2] <- 'geomcomp'

  # make some colors, and set style
  cols <- brewer.pal(6, 'Spectral')
  tps <- list(superpose.polygon=list(col=cols, lwd=2, lend=2))

  # re-order labels based on sorting of proportions: "hydrologic" ordering
  hyd.order <- order(rowSums(sweep(x[, -1], 2, STATS=c(4, 2, 1, 1, -2, -4), FUN = '*')), decreasing = TRUE)

  # cluster proportions: results are not in "hydrologic" order, but close
  x.d <- as.hclust(diana(daisy(x[, -1])))

  # rotate clustering according to hydrologic ordering
  x.d.hydro <- dendextend::rotate(x.d, order = x$series[hyd.order])  # dendextend approach

  # re-order labels levels based on clustering
  x.long$series <- factor(x.long$series, levels=x.long$series[x.d.hydro$order])

  # hack to ensure that simpleKey works as expected
  suppressWarnings(trellis.par.set(tps))

  # must manually create a key, for some reason auto.key doesn't work with fancy dendrogram
  sk <- simpleKey(space='top', columns=6, text=levels(x.long$geomcomp), rectangles = TRUE, points=FALSE, between.columns=1, between=1, cex=0.75)

  leg <- list(right=list(fun=latticeExtra::dendrogramGrob,
                         args=list(x = as.dendrogram(x.d.hydro), side="right", size=10)))

  pp <- barchart(series ~ value, groups=geomcomp, data=x.long, horiz=TRUE, stack=TRUE,
                 xlab='Proportion', scales = list(cex=1), key = sk, legend = leg,
                 panel = function(...) {
    panel.barchart(...)
if(annotations) { # annotation coords x.pos.N <- unit(0.03, 'npc') x.pos.H <- unit(0.97, 'npc') y.pos <- unit((1:nrow(x)) - 0.25, 'native') y.pos.annotation <- unit(nrow(x) + 0.25, 'native') # annotate with number of records grid.text( as.character(n.records[x.d.hydro$order]), x = x.pos.N, y = y.pos, gp = gpar(cex = annotation.cex, font = 1) ) # annotate with H grid.text( as.character(round(H[x.d.hydro$order], 2)), x = x.pos.H, y = y.pos, gp = gpar(cex = annotation.cex, font = 3) ) # annotation labels grid.text( c('N', 'H'), x = c(x.pos.N, x.pos.H), y = y.pos.annotation, gp = gpar(cex = annotation.cex, font = c(2, 4)) ) } }, yscale.components=function(..., s.to.bold=s) { temp <- yscale.components.default(...) if(!is.null(s.to.bold)) { temp$left$labels$labels <- sapply( temp$left$labels$labels, function(x) { if(grepl(s.to.bold, x, ignore.case = TRUE)) { as.expression(bquote( bold(.(x)))) } else { as.expression(bquote(.(x))) } } ) } return(temp) }) # embed styling pp <- update(pp, par.settings = tps) # the figure and ordering are returned return(list(fig=pp, order=x.d.hydro$order)) }
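## Usage sketch (not in the original file; series names are examples only and
## the call requires network access via soilDB):
# s <- soilDB::fetchOSD(c("cecil", "appling", "bonneau"), extended = TRUE)
# res <- vizGeomorphicComponent(s$geomcomp, s = "cecil")
# print(res$fig)    # lattice figure; res$order gives the series ordering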
/R/vizGeomorphicComponent.R
no_license
trilnick/sharpshootR
R
false
false
5,527
r
## TODO: return clustering object instead of cluster$order
## TODO: provide examples for adjusting legend size / spacing


#' @title Visual Summary of Geomorphic Component Probability
#'
#' @description A unique display of geomorphic component probability.
#'
#' @param x \code{data.frame} as created by \code{soilDB::fetchOSD(..., extended=TRUE)}, see details
#'
#' @param s an optional soil series name, highlighted in the figure
#'
#' @param annotations logical, add number of records and normalized Shannon entropy values
#'
#' @param annotation.cex annotation label scaling factor
#'
#' @return a \code{list} with the following elements:
#'
#' \item{fig}{lattice object (the figure)}
#' \item{order}{ordering of soil series}
#'
#' @details See the \href{http://ncss-tech.github.io/AQP/soilDB/soil-series-query-functions.html}{Soil Series Query Functions} tutorial for more information.
#'
#' @author D.E. Beaudette
#'
#'
vizGeomorphicComponent <- function(x, s=NULL, annotations = TRUE, annotation.cex = 0.75) {

  # check for required packages
  if(!requireNamespace('dendextend', quietly=TRUE) | !requireNamespace('latticeExtra', quietly=TRUE))
    stop('please install the `dendextend` and `latticeExtra` packages', call.=FALSE)

  # CRAN CHECK hack
  geomcomp <- NULL

  # save row names as they are lost in the distance matrix calc
  row.names(x) <- x$series

  # save number of records
  n.records <- x$n

  # save normalized Shannon entropy
  H <- x$shannon_entropy

  # mask-out some columns we don't need
  x$n <- NULL
  x$shannon_entropy <- NULL

  ## convert proportions to long format for plotting
  x.long <- melt(x, id.vars = 'series')

  # fix names: second column contains labels
  names(x.long)[2] <- 'geomcomp'

  # make some colors, and set style
  cols <- brewer.pal(6, 'Spectral')
  tps <- list(superpose.polygon=list(col=cols, lwd=2, lend=2))

  # re-order labels based on sorting of proportions: "hydrologic" ordering
  hyd.order <- order(rowSums(sweep(x[, -1], 2, STATS=c(4, 2, 1, 1, -2, -4), FUN = '*')), decreasing = TRUE)

  # cluster proportions: results are not in "hydrologic" order, but close
  x.d <- as.hclust(diana(daisy(x[, -1])))

  # rotate clustering according to hydrologic ordering
  x.d.hydro <- dendextend::rotate(x.d, order = x$series[hyd.order])  # dendextend approach

  # re-order labels levels based on clustering
  x.long$series <- factor(x.long$series, levels=x.long$series[x.d.hydro$order])

  # hack to ensure that simpleKey works as expected
  suppressWarnings(trellis.par.set(tps))

  # must manually create a key, for some reason auto.key doesn't work with fancy dendrogram
  sk <- simpleKey(space='top', columns=6, text=levels(x.long$geomcomp), rectangles = TRUE, points=FALSE, between.columns=1, between=1, cex=0.75)

  leg <- list(right=list(fun=latticeExtra::dendrogramGrob,
                         args=list(x = as.dendrogram(x.d.hydro), side="right", size=10)))

  pp <- barchart(series ~ value, groups=geomcomp, data=x.long, horiz=TRUE, stack=TRUE,
                 xlab='Proportion', scales = list(cex=1), key = sk, legend = leg,
                 panel = function(...) {
    panel.barchart(...)
if(annotations) { # annotation coords x.pos.N <- unit(0.03, 'npc') x.pos.H <- unit(0.97, 'npc') y.pos <- unit((1:nrow(x)) - 0.25, 'native') y.pos.annotation <- unit(nrow(x) + 0.25, 'native') # annotate with number of records grid.text( as.character(n.records[x.d.hydro$order]), x = x.pos.N, y = y.pos, gp = gpar(cex = annotation.cex, font = 1) ) # annotate with H grid.text( as.character(round(H[x.d.hydro$order], 2)), x = x.pos.H, y = y.pos, gp = gpar(cex = annotation.cex, font = 3) ) # annotation labels grid.text( c('N', 'H'), x = c(x.pos.N, x.pos.H), y = y.pos.annotation, gp = gpar(cex = annotation.cex, font = c(2, 4)) ) } }, yscale.components=function(..., s.to.bold=s) { temp <- yscale.components.default(...) if(!is.null(s.to.bold)) { temp$left$labels$labels <- sapply( temp$left$labels$labels, function(x) { if(grepl(s.to.bold, x, ignore.case = TRUE)) { as.expression(bquote( bold(.(x)))) } else { as.expression(bquote(.(x))) } } ) } return(temp) }) # embed styling pp <- update(pp, par.settings = tps) # the figure and ordering are returned return(list(fig=pp, order=x.d.hydro$order)) }
pollutantmean <- function(directory,pollutant,id=1:332) {
        ## 'directory' is a character vector of length 1 indicating
        ## the location of the CSV files
        
        ## 'pollutant' is a character vector of length 1 indicating
        ## the name of the pollutant for which we will calculate the
        ## mean; either "sulfate" or "nitrate".
        
        ## 'id' is an integer vector indicating the monitor ID numbers
        ## to be used
        
        ## Return the mean of the pollutant across all monitors listed
        ## in the 'id' vector (ignoring NA values)
        
        Pollutiondf <- data.frame()
        Pfiles <- list.files(path=directory, pattern = "*.csv*", full.names = T)
        for (i in id) {
                if (pollutant == "sulfate") {
                        ## colClasses needs the string "NULL" to drop a column;
                        ## a bare NULL silently disappears inside c()
                        Pollutiondf <- rbind(Pollutiondf, read.csv(Pfiles[i], colClasses = c("NULL", NA, "NULL", NA)))
                        Pmean <- mean(Pollutiondf$sulfate, na.rm = T)
                }
                else if (pollutant == "nitrate") {
                        Pollutiondf <- rbind(Pollutiondf, read.csv(Pfiles[i], colClasses = c("NULL", "NULL", NA, NA)))
                        Pmean <- mean(Pollutiondf$nitrate, na.rm = T)
                }
        }
        Pmean
}
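## Usage sketch (assumes the course's "specdata" directory of monitor CSVs
## with columns Date, sulfate, nitrate, ID):
# pollutantmean("specdata", "sulfate", 1:10)
# pollutantmean("specdata", "nitrate", 70:72)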
/pollutantmean.R
no_license
harshagarwal87/Air-Pollution
R
false
false
1,270
r
pollutantmean <- function(directory,pollutant,id=1:332) {
        ## 'directory' is a character vector of length 1 indicating
        ## the location of the CSV files
        
        ## 'pollutant' is a character vector of length 1 indicating
        ## the name of the pollutant for which we will calculate the
        ## mean; either "sulfate" or "nitrate".
        
        ## 'id' is an integer vector indicating the monitor ID numbers
        ## to be used
        
        ## Return the mean of the pollutant across all monitors listed
        ## in the 'id' vector (ignoring NA values)
        
        Pollutiondf <- data.frame()
        Pfiles <- list.files(path=directory, pattern = "*.csv*", full.names = T)
        for (i in id) {
                if (pollutant == "sulfate") {
                        ## colClasses needs the string "NULL" to drop a column;
                        ## a bare NULL silently disappears inside c()
                        Pollutiondf <- rbind(Pollutiondf, read.csv(Pfiles[i], colClasses = c("NULL", NA, "NULL", NA)))
                        Pmean <- mean(Pollutiondf$sulfate, na.rm = T)
                }
                else if (pollutant == "nitrate") {
                        Pollutiondf <- rbind(Pollutiondf, read.csv(Pfiles[i], colClasses = c("NULL", "NULL", NA, NA)))
                        Pmean <- mean(Pollutiondf$nitrate, na.rm = T)
                }
        }
        Pmean
}
#' Repair a project
#'
#' Use `repair()` to recover from some common issues that can occur with
#' a project. Currently, two operations are performed:
#'
#' 1. Packages with broken symlinks into the cache will be re-installed.
#'
#' 2. Packages that were installed from sources, but appear to be from
#'    a remote source (e.g. GitHub), will have their `DESCRIPTION` files
#'    updated to record that remote source explicitly.
#'
#' @inheritParams renv-params
#'
#' @param lockfile The path to a lockfile (if any). When available, renv
#'   will use the lockfile when attempting to infer the remote associated
#'   with the inaccessible version of each missing package. When `NULL`
#'   (the default), the project lockfile will be used.
#'
#' @export
repair <- function(library = NULL, lockfile = NULL, project = NULL) {

  renv_consent_check()
  renv_scope_error_handler()

  project <- renv_project_resolve(project)
  renv_project_lock(project = project)

  libpaths <- renv_path_normalize(library %||% renv_libpaths_all())
  library <- libpaths[[1L]]

  writef(header("Library cache links"))
  renv_repair_links(library, lockfile, project)
  writef()

  writef(header("Package sources"))
  renv_repair_sources(library, lockfile, project)
  writef()

  invisible()

}

renv_repair_links <- function(library, lockfile, project) {

  # figure out which library paths (junction points?) appear to be broken
  paths <- list.files(library, full.names = TRUE)
  broken <- renv_file_broken(paths)
  packages <- basename(paths[broken])

  if (empty(packages)) {
    writef("- No issues found with the project library's cache links.")
    return(invisible(packages))
  }

  # try to find records for these packages in the lockfile
  # TODO: what if one of the requested packages isn't in the lockfile?
  lockfile <- lockfile %||% renv_lockfile_load(project = project)
  records <- renv_repair_records(packages, lockfile, project)

  # install these records
  install(
    packages = records,
    library = library,
    project = project
  )

}

renv_repair_records <- function(packages, lockfile, project) {

  map(packages, function(package) {
    lockfile$Packages[[package]] %||% package
  })

}

renv_repair_sources <- function(library, lockfile, project) {

  # get package description files
  db <- installed_packages(lib.loc = library, priority = NA_character_)
  descpaths <- with(db, file.path(LibPath, Package, "DESCRIPTION"))
  dcfs <- map(descpaths, renv_description_read)
  names(dcfs) <- map_chr(dcfs, `[[`, "Package")

  # try to infer sources as necessary
  inferred <- map(dcfs, renv_repair_sources_infer)
  inferred <- filter(inferred, Negate(is.null))
  if (length(inferred) == 0L) {
    writef("- All installed packages appear to be from a known source.")
    return(TRUE)
  }

  # ask user
  renv_scope_options(renv.verbose = TRUE)
  caution_bullets(
    c(
      "The following package(s) do not have an explicitly-declared remote source.",
      "However, renv was able to infer remote sources from their DESCRIPTION file."
    ),
    sprintf("%s  [%s]", format(names(inferred)), inferred),
    "`renv::restore()` may fail for packages without an explicitly-declared remote source."
  )

  choice <- menu(
    choices = c(
      update = "Let renv infer the remote sources for these packages.",
      cancel = "Do nothing and resolve the situation another way."
    ),
    title = "What would you like to do?"
) cancel_if(identical(choice, "cancel")) enumerate(inferred, function(package, remote) { record <- renv_remotes_resolve(remote) record[["RemoteSha"]] <- NULL renv_package_augment(file.path(library, package), record) }) n <- length(inferred) writef("- Updated %i package DESCRIPTION %s.", n, nplural("file", n)) TRUE } renv_repair_sources_infer <- function(dcf) { # if this package appears to have a declared remote, use as-is for (field in c("RemoteType", "Repository", "biocViews")) if (!is.null(dcf[[field]])) return(NULL) # ok, this is a package installed from sources that "looks" like # the development version of a package; try to guess its remote guess <- function(pattern, field) { urls <- strsplit(dcf[[field]] %||% "", "\\s*,\\s*")[[1L]] for (url in urls) { matches <- regmatches(url, regexec(pattern, url, perl = TRUE))[[1L]] if (length(matches) == 3L) return(paste(matches[[2L]], matches[[3L]], sep = "/")) } } # first, check bug reports remote <- guess("^https://(?:www\\.)?github\\.com/([^/]+)/([^/]+)/issues$", "BugReports") if (!is.null(remote)) return(remote) # next, check the URL field remote <- guess("^https://(?:www\\.)?github\\.com/([^/]+)/([^/]+)", "URL") if (!is.null(remote)) return(remote) }
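# Illustrative sketch (not part of renv): how the regex in `guess()` above
# extracts an "owner/repo" remote from a GitHub URL field.
# url <- "https://github.com/rstudio/renv/issues"
# m <- regmatches(url, regexec("^https://(?:www\\.)?github\\.com/([^/]+)/([^/]+)/issues$", url, perl = TRUE))[[1L]]
# paste(m[[2L]], m[[3L]], sep = "/")   # "rstudio/renv"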
/R/repair.R
permissive
rstudio/renv
R
false
false
4,807
r
#' Repair a project
#'
#' Use `repair()` to recover from some common issues that can occur with
#' a project. Currently, two operations are performed:
#'
#' 1. Packages with broken symlinks into the cache will be re-installed.
#'
#' 2. Packages that were installed from sources, but appear to be from
#'    a remote source (e.g. GitHub), will have their `DESCRIPTION` files
#'    updated to record that remote source explicitly.
#'
#' @inheritParams renv-params
#'
#' @param lockfile The path to a lockfile (if any). When available, renv
#'   will use the lockfile when attempting to infer the remote associated
#'   with the inaccessible version of each missing package. When `NULL`
#'   (the default), the project lockfile will be used.
#'
#' @export
repair <- function(library = NULL, lockfile = NULL, project = NULL) {

  renv_consent_check()
  renv_scope_error_handler()

  project <- renv_project_resolve(project)
  renv_project_lock(project = project)

  libpaths <- renv_path_normalize(library %||% renv_libpaths_all())
  library <- libpaths[[1L]]

  writef(header("Library cache links"))
  renv_repair_links(library, lockfile, project)
  writef()

  writef(header("Package sources"))
  renv_repair_sources(library, lockfile, project)
  writef()

  invisible()

}

renv_repair_links <- function(library, lockfile, project) {

  # figure out which library paths (junction points?) appear to be broken
  paths <- list.files(library, full.names = TRUE)
  broken <- renv_file_broken(paths)
  packages <- basename(paths[broken])

  if (empty(packages)) {
    writef("- No issues found with the project library's cache links.")
    return(invisible(packages))
  }

  # try to find records for these packages in the lockfile
  # TODO: what if one of the requested packages isn't in the lockfile?
  lockfile <- lockfile %||% renv_lockfile_load(project = project)
  records <- renv_repair_records(packages, lockfile, project)

  # install these records
  install(
    packages = records,
    library = library,
    project = project
  )

}

renv_repair_records <- function(packages, lockfile, project) {

  map(packages, function(package) {
    lockfile$Packages[[package]] %||% package
  })

}

renv_repair_sources <- function(library, lockfile, project) {

  # get package description files
  db <- installed_packages(lib.loc = library, priority = NA_character_)
  descpaths <- with(db, file.path(LibPath, Package, "DESCRIPTION"))
  dcfs <- map(descpaths, renv_description_read)
  names(dcfs) <- map_chr(dcfs, `[[`, "Package")

  # try to infer sources as necessary
  inferred <- map(dcfs, renv_repair_sources_infer)
  inferred <- filter(inferred, Negate(is.null))
  if (length(inferred) == 0L) {
    writef("- All installed packages appear to be from a known source.")
    return(TRUE)
  }

  # ask user
  renv_scope_options(renv.verbose = TRUE)
  caution_bullets(
    c(
      "The following package(s) do not have an explicitly-declared remote source.",
      "However, renv was able to infer remote sources from their DESCRIPTION file."
    ),
    sprintf("%s  [%s]", format(names(inferred)), inferred),
    "`renv::restore()` may fail for packages without an explicitly-declared remote source."
  )

  choice <- menu(
    choices = c(
      update = "Let renv infer the remote sources for these packages.",
      cancel = "Do nothing and resolve the situation another way."
    ),
    title = "What would you like to do?"
) cancel_if(identical(choice, "cancel")) enumerate(inferred, function(package, remote) { record <- renv_remotes_resolve(remote) record[["RemoteSha"]] <- NULL renv_package_augment(file.path(library, package), record) }) n <- length(inferred) writef("- Updated %i package DESCRIPTION %s.", n, nplural("file", n)) TRUE } renv_repair_sources_infer <- function(dcf) { # if this package appears to have a declared remote, use as-is for (field in c("RemoteType", "Repository", "biocViews")) if (!is.null(dcf[[field]])) return(NULL) # ok, this is a package installed from sources that "looks" like # the development version of a package; try to guess its remote guess <- function(pattern, field) { urls <- strsplit(dcf[[field]] %||% "", "\\s*,\\s*")[[1L]] for (url in urls) { matches <- regmatches(url, regexec(pattern, url, perl = TRUE))[[1L]] if (length(matches) == 3L) return(paste(matches[[2L]], matches[[3L]], sep = "/")) } } # first, check bug reports remote <- guess("^https://(?:www\\.)?github\\.com/([^/]+)/([^/]+)/issues$", "BugReports") if (!is.null(remote)) return(remote) # next, check the URL field remote <- guess("^https://(?:www\\.)?github\\.com/([^/]+)/([^/]+)", "URL") if (!is.null(remote)) return(remote) }
#########################################################
#####   Install packages needed for                 #####
#####   SeaLampreyRapture analyses                  #####
#########################################################

list.of.packages <- c("tidyverse", "vcfR", "ggrepel", "devtools", "adegenet", "hierfstat", "grid", "mmod", "gridExtra", "ggthemes")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)

#### Install Bioconductor packages (qvalue and friends)
if (!requireNamespace("BiocManager", quietly = TRUE))
    install.packages("BiocManager")
source("http://bioconductor.org/biocLite.R")
biocLite("qvalue")
biocLite("VariantAnnotation")
biocLite("snpStats")
biocLite("quantsmooth")

## install_github() lives in devtools, which is installed above but never
## attached, so call it with an explicit namespace
devtools::install_github("whitlock/outFLANK", force = TRUE)
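#### Optional sanity check (sketch, not in the original script): confirm that
#### everything can be attached after installation
# invisible(lapply(c(list.of.packages, "qvalue", "outFLANK"), require, character.only = TRUE))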
/analysis/installPackages.R
permissive
ScribnerLab/SeaLampreyRapture
R
false
false
1,030
r
#########################################################
#####   Install packages needed for                 #####
#####   SeaLampreyRapture analyses                  #####
#########################################################

list.of.packages <- c("tidyverse", "vcfR", "ggrepel", "devtools", "adegenet", "hierfstat", "grid", "mmod", "gridExtra", "ggthemes")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)

#### Install Bioconductor packages (qvalue and friends)
if (!requireNamespace("BiocManager", quietly = TRUE))
    install.packages("BiocManager")
source("http://bioconductor.org/biocLite.R")
biocLite("qvalue")
biocLite("VariantAnnotation")
biocLite("snpStats")
biocLite("quantsmooth")

## install_github() lives in devtools, which is installed above but never
## attached, so call it with an explicit namespace
devtools::install_github("whitlock/outFLANK", force = TRUE)
library(syuzhet)   # provides get_sentences(), get_sentiment() and get_dct_transform()

s_v <- get_sentences("I begin this story with a neutral statement. Now I add a statement about how much I despise cats. I am allergic to them. I hate them. Basically this is a very silly test. But I do love dogs!")
raw_values <- get_sentiment(s_v, method = "syuzhet")
dct_vals <- get_dct_transform(raw_values)
plot(dct_vals, type="l", ylim=c(-0.1,.1))
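# Sketch (not in the original): the DCT transform returns a fixed-length
# smoothed shape (100 points by default), so the raw six-sentence trajectory
# can be inspected separately for comparison:
# plot(raw_values, type = "h", main = "raw sentence-level sentiment")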
/git work/Code/trial.R
no_license
NAKAWUNGUZAHARAH/data-science-projects
R
false
false
381
r
library(syuzhet)   # provides get_sentences(), get_sentiment() and get_dct_transform()

s_v <- get_sentences("I begin this story with a neutral statement. Now I add a statement about how much I despise cats. I am allergic to them. I hate them. Basically this is a very silly test. But I do love dogs!")
raw_values <- get_sentiment(s_v, method = "syuzhet")
dct_vals <- get_dct_transform(raw_values)
plot(dct_vals, type="l", ylim=c(-0.1,.1))
library(broman) source("mtb_analysis.R") red <- brocolors("crayons")["Blush"] green <- brocolors("crayons")["Cornflower"] blue <- brocolors("crayons")["Apricot"] png("../../Figs/Mtb/mcmc01.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") #for(i in 1:75) # points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc02.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,49),2:50,col=red,pch=16) points(1,1) arrows(5,1,2,1,lwd=2,len=0.1) text(6,1,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc03.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,48),3:50,col=red,pch=16) points(1,1,col=c(red,green)[out$output[1,mywh[1]]+1],pch=16) points(1,2) arrows(5,2,2,2,lwd=2,len=0.1) text(6,2,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc04.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,47),4:50,col=red,pch=16) points(rep(1,2),1:2,col=c(red,green)[out$output[1,mywh[1:2]]+1],pch=16) points(1,3) arrows(5,3,2,3,lwd=2,len=0.1) text(6,3,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc05.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,46),5:50,col=red,pch=16) points(rep(1,3),1:3,col=c(red,green)[out$output[1,mywh[1:3]]+1],pch=16) points(1,4) arrows(5,4,2,4,lwd=2,len=0.1) text(6,4,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc06.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,45),6:50,col=red,pch=16) points(rep(1,4),1:4,col=c(red,green)[out$output[1,mywh[1:4]]+1],pch=16) points(1,5) arrows(5,5,2,5,lwd=2,len=0.1) text(6,5,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc07.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,44),7:50,col=red,pch=16) points(rep(1,5),1:5,col=c(red,green)[out$output[1,mywh[1:5]]+1],pch=16) points(1,6) arrows(5,6,2,6,lwd=2,len=0.1) text(6,6,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc08.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,50),1:50,col=c(red,green)[out$output[1,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc09.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:2) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() 
png("../../Figs/Mtb/mcmc10.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:3) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc11.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:4) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc12.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:75) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc13.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:75) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) rect(-0.6,21.5,75.6,22.5,lwd=2) arrows(-20,22,-1,22,len=0.1,lwd=2,col=blue) dev.off()
/R/Mtb/mcmc_example.R
permissive
kbroman/Talk_iGraphs4Teaching
R
false
false
5,010
r
library(broman) source("mtb_analysis.R") red <- brocolors("crayons")["Blush"] green <- brocolors("crayons")["Cornflower"] blue <- brocolors("crayons")["Apricot"] png("../../Figs/Mtb/mcmc01.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") #for(i in 1:75) # points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc02.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,49),2:50,col=red,pch=16) points(1,1) arrows(5,1,2,1,lwd=2,len=0.1) text(6,1,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc03.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,48),3:50,col=red,pch=16) points(1,1,col=c(red,green)[out$output[1,mywh[1]]+1],pch=16) points(1,2) arrows(5,2,2,2,lwd=2,len=0.1) text(6,2,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc04.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,47),4:50,col=red,pch=16) points(rep(1,2),1:2,col=c(red,green)[out$output[1,mywh[1:2]]+1],pch=16) points(1,3) arrows(5,3,2,3,lwd=2,len=0.1) text(6,3,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc05.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,46),5:50,col=red,pch=16) points(rep(1,3),1:3,col=c(red,green)[out$output[1,mywh[1:3]]+1],pch=16) points(1,4) arrows(5,4,2,4,lwd=2,len=0.1) text(6,4,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc06.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,45),6:50,col=red,pch=16) points(rep(1,4),1:4,col=c(red,green)[out$output[1,mywh[1:4]]+1],pch=16) points(1,5) arrows(5,5,2,5,lwd=2,len=0.1) text(6,5,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc07.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,44),7:50,col=red,pch=16) points(rep(1,5),1:5,col=c(red,green)[out$output[1,mywh[1:5]]+1],pch=16) points(1,6) arrows(5,6,2,6,lwd=2,len=0.1) text(6,6,"?",font=2,cex=1.6) dev.off() png("../../Figs/Mtb/mcmc08.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") points(rep(1,50),1:50,col=c(red,green)[out$output[1,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc09.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:2) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() 
png("../../Figs/Mtb/mcmc10.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:3) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc11.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:4) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc12.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:75) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) dev.off() png("../../Figs/Mtb/mcmc13.png",height=700,width=1032,res=108,pointsize=14,bg="transparent") par(fg="white",mar=c(0.1,0.1,0.1,0.1)) plot(rep(0,50),1:50,col=red,pch=16,xlim=c(-0.5,75.5),xlab="",ylab="", xaxt="n",yaxt="n",bty="n") for(i in 1:75) points(rep(i,50),1:50,col=c(red,green)[out$output[i,mywh]+1],pch=16) rect(-0.6,21.5,75.6,22.5,lwd=2) arrows(-20,22,-1,22,len=0.1,lwd=2,col=blue) dev.off()
library( "ape" ) library( "geiger" ) library( "expm" ) library( "nloptr" ) source( "masternegloglikeeps1.R" ) source( "Qmatrixwoodherb2.R" ) source("Pruning2.R") sim.tree<-read.tree("tree2500taxa10.txt") sim.chrom<-read.table("chrom2500taxa10.txt", header=FALSE) last.state=50 x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14)) p.0<-rep(1,2*(last.state+1))/(2*(last.state+1)) results<-rep(0,11) my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000) mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0) print(mle) results[1:10]<-mle$solution results[11]<-mle$objective write.table(results,file="globalmax2500taxa10.csv",sep=",")
/SImulations number of taxa/2500 taxa/optim2500taxa10.R
no_license
roszenil/Bichromdryad
R
false
false
827
r
library( "ape" ) library( "geiger" ) library( "expm" ) library( "nloptr" ) source( "masternegloglikeeps1.R" ) source( "Qmatrixwoodherb2.R" ) source("Pruning2.R") sim.tree<-read.tree("tree2500taxa10.txt") sim.chrom<-read.table("chrom2500taxa10.txt", header=FALSE) last.state=50 x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14)) p.0<-rep(1,2*(last.state+1))/(2*(last.state+1)) results<-rep(0,11) my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000) mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0) print(mle) results[1:10]<-mle$solution results[11]<-mle$objective write.table(results,file="globalmax2500taxa10.csv",sep=",")
# 1. [Essay question] Parallelizing deep learning models directly is very
# difficult, since TensorFlow already uses all of the computer's available
# resources. With that in mind, how would you implement a parallelized
# version of the rest of the code presented in class?
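# One possible direction, sketched in R (illustrative only; `configs` and
# `train_model()` are hypothetical stand-ins for the course code): keep
# TensorFlow's internal parallelism for each fit, and parallelize the
# surrounding work (folds, hyper-parameter configurations) instead.
# library(parallel)
# cl <- makeCluster(max(1, detectCores() - 1))
# results <- parLapply(cl, configs, function(cfg) train_model(cfg))
# stopCluster(cl)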
/exercicios/16-tjba.R
no_license
rgcardos/202106-web-scraping
R
false
false
292
r
# 1. [Essay question] Parallelizing deep learning models directly is very
# difficult, since TensorFlow already uses all of the computer's available
# resources. With that in mind, how would you implement a parallelized
# version of the rest of the code presented in class?
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_interface.R
\name{sedona_write_geojson}
\alias{sedona_write_geojson}
\title{Write SpatialRDD into a GeoJSON file.}
\usage{
sedona_write_geojson(x, output_location)
}
\arguments{
\item{x}{The SpatialRDD object.}

\item{output_location}{Location of the output file.}
}
\description{
Export serialized data from a Sedona SpatialRDD into a GeoJSON file.
}
\seealso{
Other Sedona data interface functions: 
\code{\link{sedona_read_dsv_to_typed_rdd}()},
\code{\link{sedona_read_geojson_to_typed_rdd}()},
\code{\link{sedona_read_geojson}()},
\code{\link{sedona_read_shapefile_to_typed_rdd}()},
\code{\link{sedona_read_shapefile}()},
\code{\link{sedona_read_wkb}()},
\code{\link{sedona_read_wkt}()},
\code{\link{sedona_save_spatial_rdd}()},
\code{\link{sedona_write_wkb}()},
\code{\link{sedona_write_wkt}()}
}
\concept{Sedona data interface functions}
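% Illustrative examples section (not generated from the original roxygen); the
% RDD object and output path below are hypothetical, following \usage above.
\examples{
\dontrun{
sedona_write_geojson(rdd, output_location = "/tmp/polygons.geojson")
}
}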
/man/sedona_write_geojson.Rd
permissive
lorenzwalthert/sparklyr.sedona
R
false
true
927
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_interface.R
\name{sedona_write_geojson}
\alias{sedona_write_geojson}
\title{Write SpatialRDD into a GeoJSON file.}
\usage{
sedona_write_geojson(x, output_location)
}
\arguments{
\item{x}{The SpatialRDD object.}

\item{output_location}{Location of the output file.}
}
\description{
Export serialized data from a Sedona SpatialRDD into a GeoJSON file.
}
\seealso{
Other Sedona data interface functions: 
\code{\link{sedona_read_dsv_to_typed_rdd}()},
\code{\link{sedona_read_geojson_to_typed_rdd}()},
\code{\link{sedona_read_geojson}()},
\code{\link{sedona_read_shapefile_to_typed_rdd}()},
\code{\link{sedona_read_shapefile}()},
\code{\link{sedona_read_wkb}()},
\code{\link{sedona_read_wkt}()},
\code{\link{sedona_save_spatial_rdd}()},
\code{\link{sedona_write_wkb}()},
\code{\link{sedona_write_wkt}()}
}
\concept{Sedona data interface functions}
# Author - anupama rajaram
# description - Using Shiny’s layout, HTML, and img functions to create an attractive user-interface.

# ui.R

shinyUI(fluidPage(
  titlePanel("Trial Shiny Web Interface"),
  
  sidebarLayout(position = "right",
                sidebarPanel( 
                  h2("Installation"),
                  p("Shiny is available on CRAN, so you can install it in the usual way from your R console:"),
                  code('install.packages("shiny")'),
                  br(),
                  br(),
                  img(src = "data-globe.png", height = 72, width = 72),
                  "shiny is a product of ", 
                  span("RStudio", style = "color:blue")
                  # you can also specify hex codes for the color
                  # so, alternatively you can write 
                  # span("RStudio", style = "color:#31E8F9")
                  ),
                mainPanel(
                  h1("Introducing Shiny"),
                  p("Shiny is a new package from RStudio that makes it ", 
                    em("incredibly easy"), 
                    " to build interactive web applications with R."),
                  br(),
                  p("For an introduction and live examples, visit the ",
                    a("Shiny homepage.", 
                      href = "http://www.rstudio.com/shiny")),
                  # by default, weblinks are skyblue in color, and open the link in a new window.
                  br(),
                  h2("Features"),
                  p("* Build useful web applications with only a few lines of code—no JavaScript required."),
                  p("* Shiny applications are automatically “live” in the same way that ", 
                    strong("spreadsheets"),
                    " are live. Outputs change instantly as users modify inputs, without requiring a reload of the browser.")
                )
  )
))
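# Companion sketch (not in the original file): this ui.R needs a matching
# server.R before shiny::runApp() will work; a minimal placeholder would be
# shinyServer(function(input, output) { })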
/ui_web_trial1.R
no_license
anurajaram/shiny-project
R
false
false
1,570
r
# Author - anupama rajaram
# description - Using Shiny’s layout, HTML, and img functions to create an attractive user-interface.

# ui.R

shinyUI(fluidPage(
  titlePanel("Trial Shiny Web Interface"),
  
  sidebarLayout(position = "right",
                sidebarPanel( 
                  h2("Installation"),
                  p("Shiny is available on CRAN, so you can install it in the usual way from your R console:"),
                  code('install.packages("shiny")'),
                  br(),
                  br(),
                  img(src = "data-globe.png", height = 72, width = 72),
                  "shiny is a product of ", 
                  span("RStudio", style = "color:blue")
                  # you can also specify hex codes for the color
                  # so, alternatively you can write 
                  # span("RStudio", style = "color:#31E8F9")
                  ),
                mainPanel(
                  h1("Introducing Shiny"),
                  p("Shiny is a new package from RStudio that makes it ", 
                    em("incredibly easy"), 
                    " to build interactive web applications with R."),
                  br(),
                  p("For an introduction and live examples, visit the ",
                    a("Shiny homepage.", 
                      href = "http://www.rstudio.com/shiny")),
                  # by default, weblinks are skyblue in color, and open the link in a new window.
                  br(),
                  h2("Features"),
                  p("* Build useful web applications with only a few lines of code—no JavaScript required."),
                  p("* Shiny applications are automatically “live” in the same way that ", 
                    strong("spreadsheets"),
                    " are live. Outputs change instantly as users modify inputs, without requiring a reload of the browser.")
                )
  )
))
### Libraries
library(sp)
library(raster)
library(agricolae)
library(nnet)
library(caret)

## Read the data
pclas <- shapefile(file.choose())
img2 <- stack(file.choose())
img3 <- as.data.frame(img2,xy=TRUE)
pclas <- cbind(pclas, extract(img2, pclas))
pclas2<-as.data.frame(pclas,xy=TRUE)

# Classification with decision trees
library(rpart)
class.rpart <- rpart(Type~RGB_res.1+RGB_res.2+RGB_res.3+RGB_res.4, data = pclas2)
plot(class.rpart, compress=TRUE,uniform=TRUE)
text(class.rpart,use.n=T,all=T,cex=.7,pretty=0,xpd=TRUE, col="red")

# Alternative plot
library(rattle)
drawTreeNodes(class.rpart,cex=.6,pch=11,size=4*.8,
              col=NULL,nodeinfo=TRUE,
              units="",cases="obs",digits=getOption("digits"),decimals=2,print.levels=TRUE,
              new=TRUE)

# Prediction with the tree
img3[,"clase"] <- predict(object = class.rpart,newdata = img3,type ="class")
img3[,"clase2"] <- as.numeric(img3$clase)
write.table(img3,"/home/quinua2/Documentos/Quinua_Smart_App/Drones/clasificacion.txt", sep="\t", row.names=F)

# Validation: compare predictions against the observed class column
confusionMatrix(data=predict(class.rpart, type="class"), reference=as.factor(pclas2$Type))
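# Sketch (not in the original script): overall accuracy can also be read off
# the confusion matrix directly
# cm <- table(predict(class.rpart, type = "class"), pclas2$Type)
# sum(diag(cm)) / sum(cm)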
/process_rgb.R
no_license
Rioshi/QuinuaSmartApp
R
false
false
1,099
r
### Libraries
library(sp)
library(raster)
library(agricolae)
library(nnet)
library(caret)

## Read the data
pclas <- shapefile(file.choose())
img2 <- stack(file.choose())
img3 <- as.data.frame(img2,xy=TRUE)
pclas <- cbind(pclas, extract(img2, pclas))
pclas2<-as.data.frame(pclas,xy=TRUE)

# Classification with decision trees
library(rpart)
class.rpart <- rpart(Type~RGB_res.1+RGB_res.2+RGB_res.3+RGB_res.4, data = pclas2)
plot(class.rpart, compress=TRUE,uniform=TRUE)
text(class.rpart,use.n=T,all=T,cex=.7,pretty=0,xpd=TRUE, col="red")

# Alternative plot
library(rattle)
drawTreeNodes(class.rpart,cex=.6,pch=11,size=4*.8,
              col=NULL,nodeinfo=TRUE,
              units="",cases="obs",digits=getOption("digits"),decimals=2,print.levels=TRUE,
              new=TRUE)

# Prediction with the tree
img3[,"clase"] <- predict(object = class.rpart,newdata = img3,type ="class")
img3[,"clase2"] <- as.numeric(img3$clase)
write.table(img3,"/home/quinua2/Documentos/Quinua_Smart_App/Drones/clasificacion.txt", sep="\t", row.names=F)

# Validation: compare predictions against the observed class column
confusionMatrix(data=predict(class.rpart, type="class"), reference=as.factor(pclas2$Type))
# TINST490A Healthcare Informatics II
# Data Analysis and Visualization
# 8/14/2013
# Si-Chi Chin
# Topic: Decision Tree & K Nearest-Neighbor

# Import data hepatitis.csv
# You would want to change the path of the file
data <- read.csv("~/Dropbox/UW-Tacoma/TINST490A_su13/Lecture_slides/Data_Visualization/hepatitis.csv", na.strings = "" )
attach(data)

# Construct a decision tree using "rpart" package
library(rpart)
?rpart

# Use all variables to predict the class label
formula <- class ~ .
fit.tree <- rpart(formula, data=data)
fit.tree

# Plot the decision tree
plot(fit.tree, uniform=TRUE, cex=0.8, main="Decision Tree Using rpart", compress =TRUE)
text(fit.tree, cex=0.8, use.n=TRUE, all=TRUE)

# Construct a decision tree using "party" package
library(party)
formula1 <- class ~ .
fit1.tree <- ctree(formula1, data=data)
print(fit1.tree)
plot(fit1.tree, cex=0.5)
plot(fit1.tree, type="simple")

# Training and Testing
# Train on 2/3 of the data, test on the remaining 1/3
set.seed(1234)
# Sample data
ind <- sample(2, nrow(data), replace=TRUE, prob=c(0.66, 0.34))
hepatitis.train <- data[ind==1,]
hepatitis.test <- data[ind==2,]

# Build a decision tree on training data
# (the second call below overwrites the first, dropping the minsplit control)
hepatitis.tree <- rpart(formula, method = "class", data=hepatitis.train, control = rpart.control(minsplit = 5))
hepatitis.tree <- rpart(formula, method = "class", data=hepatitis.train)
print(hepatitis.tree)
summary(hepatitis.tree)
plot(hepatitis.tree, uniform=TRUE, cex=0.8, main="Classification for Hepatitis Data", compress=TRUE)
text(hepatitis.tree, use.n=TRUE, all=TRUE, cex=0.8)

fit2.tree <- ctree(formula1, data=hepatitis.train)
print(fit2.tree)
plot(fit2.tree)
plot(fit2.tree, type = "simple")

# Make prediction using testing data
hepatitis.pred <- predict(hepatitis.tree, newdata=hepatitis.test, type="class")
hepatitis.pred

# View confusion matrix for the results from rpart
table(hepatitis.pred, hepatitis.test$class)

fit2.pred <- predict(fit2.tree, newdata=hepatitis.test)
# View confusion matrix for the results from party
table(fit2.pred, hepatitis.test$class)

# Compute accuracy
matrix <- table(fit2.pred, hepatitis.test$class)
sum <- length(fit2.pred)
accuracy <- (matrix[1,1] + matrix[2,2]) / sum
accuracy

# K Nearest Neighbor Classification
# knn() and knn.cv() come from the "class" package
library(class)

# Remove rows with missing values
# Select *only* the class column and numeric attributes
data.complete <- na.omit(data[,c(1,2,15,16,17,18)])
set.seed(1234)
# Sample data
ind <- sample(2, nrow(data.complete), replace=TRUE, prob=c(0.66, 0.34))
complete.train <- data.complete[ind==1,]
complete.test <- data.complete[ind==2,]
length(complete.train$class)
length(complete.test$class)

# Train on 2/3, test on 1/3
hepatitis.knn <- knn(complete.train[,-1], complete.test[,-1], complete.train[,1], k = 5, prob=TRUE)

# View the confusion matrix
table(hepatitis.knn, complete.test$class)
matrix <- table(hepatitis.knn, complete.test$class)
sum <- length(hepatitis.knn)
accuracy <- (matrix[1,1] + matrix[2,2]) / sum
accuracy

# Leave-one-out cross validation
cv.knn <- knn.cv(complete.train[,-1], complete.train[,1], k = 5, prob=TRUE)
# View the confusion matrix
table(cv.knn, complete.train$class)
# What is the accuracy for the cross validation results?
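# Editor's sketch (not in the original): the two-class accuracy computed above from
# matrix[1,1] and matrix[2,2] generalizes to any confusion matrix via its diagonal:
acc <- function(m) sum(diag(m)) / sum(m)
# e.g., acc(table(hepatitis.pred, hepatitis.test$class))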
/R_script081413.R
no_license
sichichin/ihme_sample
R
false
false
3,203
r
# TINST490A Healthcare Informatics II
# Data Analysis and Visualization
# 8/14/2013
# Si-Chi Chin
# Topic: Decision Tree & K Nearest-Neighbor

# Import data hepatitis.csv
# You would want to change the path of the file
data <- read.csv("~/Dropbox/UW-Tacoma/TINST490A_su13/Lecture_slides/Data_Visualization/hepatitis.csv", na.strings = "" )
attach(data)

# Construct a decision tree using "rpart" package
library(rpart)
?rpart

# Use all variables to predict the class label
formula <- class ~ .
fit.tree <- rpart(formula, data=data)
fit.tree

# Plot the decision tree
plot(fit.tree, uniform=TRUE, cex=0.8, main="Decision Tree Using rpart", compress =TRUE)
text(fit.tree, cex=0.8, use.n=TRUE, all=TRUE)

# Construct a decision tree using "party" package
library(party)
formula1 <- class ~ .
fit1.tree <- ctree(formula1, data=data)
print(fit1.tree)
plot(fit1.tree, cex=0.5)
plot(fit1.tree, type="simple")

# Training and Testing
# Train on 2/3 of the data, test on the remaining 1/3
set.seed(1234)
# Sample data
ind <- sample(2, nrow(data), replace=TRUE, prob=c(0.66, 0.34))
hepatitis.train <- data[ind==1,]
hepatitis.test <- data[ind==2,]

# Build a decision tree on training data
# (the second call below overwrites the first, dropping the minsplit control)
hepatitis.tree <- rpart(formula, method = "class", data=hepatitis.train, control = rpart.control(minsplit = 5))
hepatitis.tree <- rpart(formula, method = "class", data=hepatitis.train)
print(hepatitis.tree)
summary(hepatitis.tree)
plot(hepatitis.tree, uniform=TRUE, cex=0.8, main="Classification for Hepatitis Data", compress=TRUE)
text(hepatitis.tree, use.n=TRUE, all=TRUE, cex=0.8)

fit2.tree <- ctree(formula1, data=hepatitis.train)
print(fit2.tree)
plot(fit2.tree)
plot(fit2.tree, type = "simple")

# Make prediction using testing data
hepatitis.pred <- predict(hepatitis.tree, newdata=hepatitis.test, type="class")
hepatitis.pred

# View confusion matrix for the results from rpart
table(hepatitis.pred, hepatitis.test$class)

fit2.pred <- predict(fit2.tree, newdata=hepatitis.test)
# View confusion matrix for the results from party
table(fit2.pred, hepatitis.test$class)

# Compute accuracy
matrix <- table(fit2.pred, hepatitis.test$class)
sum <- length(fit2.pred)
accuracy <- (matrix[1,1] + matrix[2,2]) / sum
accuracy

# K Nearest Neighbor Classification
# knn() and knn.cv() come from the "class" package
library(class)

# Remove rows with missing values
# Select *only* the class column and numeric attributes
data.complete <- na.omit(data[,c(1,2,15,16,17,18)])
set.seed(1234)
# Sample data
ind <- sample(2, nrow(data.complete), replace=TRUE, prob=c(0.66, 0.34))
complete.train <- data.complete[ind==1,]
complete.test <- data.complete[ind==2,]
length(complete.train$class)
length(complete.test$class)

# Train on 2/3, test on 1/3
hepatitis.knn <- knn(complete.train[,-1], complete.test[,-1], complete.train[,1], k = 5, prob=TRUE)

# View the confusion matrix
table(hepatitis.knn, complete.test$class)
matrix <- table(hepatitis.knn, complete.test$class)
sum <- length(hepatitis.knn)
accuracy <- (matrix[1,1] + matrix[2,2]) / sum
accuracy

# Leave-one-out cross validation
cv.knn <- knn.cv(complete.train[,-1], complete.train[,1], k = 5, prob=TRUE)
# View the confusion matrix
table(cv.knn, complete.train$class)
# What is the accuracy for the cross validation results?
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acop7-data.R
\docType{data}
\name{iv.1cmt.mm}
\alias{iv.1cmt.mm}
\title{Bolus 1 compartment model with Michaelis-Menten elimination}
\format{An object of class \code{data.frame} with 8875 rows and 14 columns.}
\source{
AcOP7 nlmixr poster
}
\usage{
iv.1cmt.mm
}
\description{
This is a simulated dataset from Schoemaker2016 for comparing nlmixr to NONMEM.
}
\details{
\describe{
 \item{ID}{Simulated Subject ID}
 \item{Time}{Simulated Time}
 \item{DV}{Simulated Objective Function}
 \item{LNDV}{Log(DV)}
 \item{MDV}{Missing DV parameter}
 \item{EVID}{Event ID, transformed for nlmixr instead of NONMEM; includes infusion data}
 \item{DOSE}{Dose}
 \item{CMT}{Compartment}
}
}
\keyword{datasets}
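# Editor's note (sketch implied by the dataset title, not taken from the package):
# Michaelis-Menten elimination in a one-compartment IV-bolus model means
#   dC/dt = -Vmax * C / (Km + C)
# so elimination is roughly first-order (rate Vmax/Km) at low concentrations and
# saturates toward zero-order (rate Vmax) at high concentrations.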
/man/iv.1cmt.mm.Rd
no_license
mattfidler/nlmixr
R
false
true
780
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acop7-data.R
\docType{data}
\name{iv.1cmt.mm}
\alias{iv.1cmt.mm}
\title{Bolus 1 compartment model with Michaelis-Menten elimination}
\format{An object of class \code{data.frame} with 8875 rows and 14 columns.}
\source{
AcOP7 nlmixr poster
}
\usage{
iv.1cmt.mm
}
\description{
This is a simulated dataset from Schoemaker2016 for comparing nlmixr to NONMEM.
}
\details{
\describe{
 \item{ID}{Simulated Subject ID}
 \item{Time}{Simulated Time}
 \item{DV}{Simulated Objective Function}
 \item{LNDV}{Log(DV)}
 \item{MDV}{Missing DV parameter}
 \item{EVID}{Event ID, transformed for nlmixr instead of NONMEM; includes infusion data}
 \item{DOSE}{Dose}
 \item{CMT}{Compartment}
}
}
\keyword{datasets}
## Below are two functions that are used to create a special matrix
## object that stores a matrix and caches its inverse

## This function creates a special "matrix" object that
## can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinv <- function(solve) m <<- solve
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}

## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
  m <- x$getinv()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinv(m)
  m
}

mat <- matrix(1:4, nrow = 2, ncol = 2) # test 1 (matrix(1:2, 2, 2) would recycle into identical columns, i.e. a singular matrix)
mat <- matrix(c(1,3,9,7,5,3,9,5,4), nrow = 3, ncol = 3) # test 2
solve(mat) # check that it is invertible
matObj <- makeCacheMatrix(mat) # class(matObj) # check if it is a list
cacheSolve(matObj)
cacheSolve(matObj)
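# Editor's sketch (not in the original): the second cacheSolve() call hits the cache
# because setinv() wrote m into the closure's environment with <<-; a quick sanity
# check that the cached inverse really inverts the stored matrix:
all.equal(cacheSolve(matObj) %*% matObj$get(), diag(3))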
/cachematrix.R
no_license
tejafabjan/ProgrammingAssignment2
R
false
false
1,085
r
## Below are two functions that are used to create a special matrix
## object that stores a matrix and caches its inverse

## This function creates a special "matrix" object that
## can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinv <- function(solve) m <<- solve
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}

## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
  m <- x$getinv()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinv(m)
  m
}

mat <- matrix(1:4, nrow = 2, ncol = 2) # test 1 (matrix(1:2, 2, 2) would recycle into identical columns, i.e. a singular matrix)
mat <- matrix(c(1,3,9,7,5,3,9,5,4), nrow = 3, ncol = 3) # test 2
solve(mat) # check that it is invertible
matObj <- makeCacheMatrix(mat) # class(matObj) # check if it is a list
cacheSolve(matObj)
cacheSolve(matObj)
library(sqldf)
library(plyr)

query = 'select * from file where Date="1/2/2007" or Date="2/2/2007"'
power <- read.csv.sql('household_power_consumption.txt', sql = query, sep = ';')
power[power == '?'] = NA
power <- power[complete.cases(power),]
power$index <- as.numeric(rownames(power))
power$DateTime <- as.Date(strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S"))
power$Day <- strftime(power$DateTime, '%a')

xAxis <- ddply(power,.(Day), summarize, atValue=min(index))
atValue <- xAxis$atValue
labelValue <- xAxis$Day
atValue <- c(atValue, length(power$index) + 1)
labelValue <- c(labelValue, 'Sat')

png('plot4.png')
par('mfrow' = c(2,2))
par('mar' = c(4.1,4.1,2,2))

plot(power$index, power$Global_active_power, type='n', ylab = 'Global Active Power', xlab = '', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Global_active_power))

plot(power$index, power$Voltage, type='n', ylab = 'Voltage', xlab = 'datetime', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Voltage))

plot(power$index, power$Sub_metering_1, type='n', ylab = 'Energy sub metering', xlab = '', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Sub_metering_1))
with(power, lines(index, Sub_metering_2, col = 'red'))
with(power, lines(index, Sub_metering_3, col = 'blue'))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))

plot(power$index, power$Global_reactive_power, type='n', ylab = 'Global_reactive_power', xlab = 'datetime', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Global_reactive_power))

dev.off()
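# Editor's note (sketch, not in the original): the axis() calls above map row
# indices to weekday labels; the same base-R pattern draws any categorical x-axis:
# plot(1:10, xaxt = 'n'); axis(1, at = c(1, 5, 10), labels = c('Thu', 'Fri', 'Sat'))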
/plot4.R
no_license
ai2160/ExData_Plotting1
R
false
false
1,728
r
library(sqldf)
library(plyr)

query = 'select * from file where Date="1/2/2007" or Date="2/2/2007"'
power <- read.csv.sql('household_power_consumption.txt', sql = query, sep = ';')
power[power == '?'] = NA
power <- power[complete.cases(power),]
power$index <- as.numeric(rownames(power))
power$DateTime <- as.Date(strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S"))
power$Day <- strftime(power$DateTime, '%a')

xAxis <- ddply(power,.(Day), summarize, atValue=min(index))
atValue <- xAxis$atValue
labelValue <- xAxis$Day
atValue <- c(atValue, length(power$index) + 1)
labelValue <- c(labelValue, 'Sat')

png('plot4.png')
par('mfrow' = c(2,2))
par('mar' = c(4.1,4.1,2,2))

plot(power$index, power$Global_active_power, type='n', ylab = 'Global Active Power', xlab = '', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Global_active_power))

plot(power$index, power$Voltage, type='n', ylab = 'Voltage', xlab = 'datetime', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Voltage))

plot(power$index, power$Sub_metering_1, type='n', ylab = 'Energy sub metering', xlab = '', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Sub_metering_1))
with(power, lines(index, Sub_metering_2, col = 'red'))
with(power, lines(index, Sub_metering_3, col = 'blue'))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))

plot(power$index, power$Global_reactive_power, type='n', ylab = 'Global_reactive_power', xlab = 'datetime', xaxt='n')
axis(1, at = atValue, labels=labelValue)
with(power, lines(index, Global_reactive_power))

dev.off()
#endpoint <- 'http://statistics.gov.scot/sparql'
#sparql_endpoint <- 'http://nhs.publishmydata.com/sparql'
sparql_endpoint <- 'http://nhs.publishmydata.com/sparql'
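# Editor's sketch (assumed usage, not in this file): the endpoint defined above is
# typically queried with the SPARQL package:
# library(SPARQL)
# res <- SPARQL(sparql_endpoint, "SELECT * WHERE { ?s ?p ?o } LIMIT 10")
# head(res$results)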
/SparqlQueries/sparqlEndpoint.R
no_license
sinclr4/DataHoles
R
false
false
165
r
#endpoint <- 'http://statistics.gov.scot/sparql'
#sparql_endpoint <- 'http://nhs.publishmydata.com/sparql'
sparql_endpoint <- 'http://nhs.publishmydata.com/sparql'
library(igraph)

mysource <- function(p) source(file.path('nimble/R', p))

mysource('all_utils.R')
mysource('options.R')
nimbleOptions$notUsingPackage <- TRUE
mysource('distributions_inputList.R')
mysource('distributions_processInputList.R')
mysource('distributions_implementations.R')
mysource('BUGS_BUGSdecl.R')
mysource('BUGS_nodeInfo.R')
mysource('BUGS_contexts.R')
mysource('BUGS_modelDef.R')
mysource('BUGS_model.R')
mysource('BUGS_graphNodeMaps.R')
mysource('BUGS_readBUGS.R')
mysource('BUGS_testBUGS.R')
mysource('BUGS_getDependencies.R')
mysource('BUGS_utils.R')
mysource('BUGS_mathCompatibility.R')
mysource('genCpp_exprClass.R')
mysource('genCpp_operatorLists.R')
mysource('genCpp_RparseTree2exprClasses.R')
mysource('genCpp_initSizes.R')
mysource('genCpp_buildIntermediates.R')
mysource('genCpp_processSpecificCalls.R')
mysource('genCpp_sizeProcessing.R')
mysource('genCpp_insertAssertions.R')
mysource('genCpp_maps.R')
mysource('genCpp_liftMaps.R')
mysource('genCpp_eigenization.R')
mysource('genCpp_addDebugMarks.R')
mysource('genCpp_generateCpp.R')
mysource('RCfunction_core.R')
mysource('RCfunction_compile.R')
mysource('nimbleFunction_util.R')
mysource('nimbleFunction_core.R')
mysource('nimbleFunction_nodeFunction.R')
mysource('nimbleFunction_Rexecution.R')
mysource('nimbleFunction_compile.R')
mysource('types_util.R')
mysource('types_symbolTable.R')
mysource('types_modelValues.R')
mysource('types_modelValuesAccessor.R')
mysource('types_modelVariableAccessor.R')
mysource('types_nimbleFunctionList.R')
mysource('types_nodeFxnVector.R')
mysource('types_numericLists.R')
mysource('cppDefs_utils.R')
mysource('cppDefs_variables.R')
mysource('cppDefs_core.R')
mysource('cppDefs_namedObjects.R')
mysource('cppDefs_BUGSmodel.R')
mysource('cppDefs_RCfunction.R')
mysource('cppDefs_nimbleFunction.R')
mysource('cppDefs_modelValues.R')
mysource('cppDefs_cppProject.R')
mysource('cppDefs_outputCppFromRparseTree.R')
mysource('cppInterfaces_utils.R')
mysource('cppInterfaces_models.R')
mysource('cppInterfaces_modelValues.R')
mysource('cppInterfaces_nimbleFunctions.R')
mysource('cppInterfaces_otherTypes.R')
mysource('nimbleProject.R')
mysource('MCEM_build.R')
mysource('MCMC_utils.R')
mysource('MCMC_spec.R')
mysource('MCMC_build.R')
mysource('MCMC_samplers.R')
mysource('MCMC_conjugacy.R')
mysource('MCMC_suite.R')
mysource('NF_utils.R')
mysource('makevars.R')
.NimbleUseRegistration = FALSE
mysource('registration.R')
mysource('zzz.R')

NeedMakevarsFile = TRUE
IncludeCodeDir <- "nimble/inst/include/nimble"
NimbleCodeDir <- "nimble/inst/CppCode"

options(nimble.Makevars.file = if(.Platform$OS.type == "windows" && file.exists("Makevars.win")) "Makevars.win" else "MyMakevars")

if(Sys.getenv("NIMBLE_PKG_SRC_DIR") == "") {
    path = normalizePath("nimble/inst/CppCode")
    if(.Platform$OS.type == "windows") {
        # check for cygwin???
        # gsub("C:", "/cygdrive/c", path) ?
        path = gsub("\\\\", "/", shortPathName(path))
        # You need to adjust MyMakevars to have the full path
        # to the local directory .../nimble/packages
        # Copy MyMakevars_template to MyMakevars and then edit
        # You also need to make a copy of the directory Eigen_local to Eigen
        # in nimble/packages/nimble/
        # Do not put Eigen or MyMakevars under version control
        options(nimble.Makevars.file = "MyMakevars")
        Sys.setenv("NIMBLE_PKG_SRC_DIR" = path)
        NimbleCodeDir = path
    } else {
        Sys.setenv("NIMBLE_PKG_SRC_DIR" = normalizePath("nimble/inst"))
        NimbleCodeDir = path
    }
}
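# Editor's sketch (alternative, not in the original): the long mysource() list could
# be generated mechanically, at the cost of the explicit load order the original
# relies on:
# invisible(lapply(list.files("nimble/R", pattern = "\\.R$", full.names = TRUE), source))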
/packages/loadAllCode.R
no_license
peterasujan/nimble
R
false
false
3,568
r
library(igraph)

mysource <- function(p) source(file.path('nimble/R', p))

mysource('all_utils.R')
mysource('options.R')
nimbleOptions$notUsingPackage <- TRUE
mysource('distributions_inputList.R')
mysource('distributions_processInputList.R')
mysource('distributions_implementations.R')
mysource('BUGS_BUGSdecl.R')
mysource('BUGS_nodeInfo.R')
mysource('BUGS_contexts.R')
mysource('BUGS_modelDef.R')
mysource('BUGS_model.R')
mysource('BUGS_graphNodeMaps.R')
mysource('BUGS_readBUGS.R')
mysource('BUGS_testBUGS.R')
mysource('BUGS_getDependencies.R')
mysource('BUGS_utils.R')
mysource('BUGS_mathCompatibility.R')
mysource('genCpp_exprClass.R')
mysource('genCpp_operatorLists.R')
mysource('genCpp_RparseTree2exprClasses.R')
mysource('genCpp_initSizes.R')
mysource('genCpp_buildIntermediates.R')
mysource('genCpp_processSpecificCalls.R')
mysource('genCpp_sizeProcessing.R')
mysource('genCpp_insertAssertions.R')
mysource('genCpp_maps.R')
mysource('genCpp_liftMaps.R')
mysource('genCpp_eigenization.R')
mysource('genCpp_addDebugMarks.R')
mysource('genCpp_generateCpp.R')
mysource('RCfunction_core.R')
mysource('RCfunction_compile.R')
mysource('nimbleFunction_util.R')
mysource('nimbleFunction_core.R')
mysource('nimbleFunction_nodeFunction.R')
mysource('nimbleFunction_Rexecution.R')
mysource('nimbleFunction_compile.R')
mysource('types_util.R')
mysource('types_symbolTable.R')
mysource('types_modelValues.R')
mysource('types_modelValuesAccessor.R')
mysource('types_modelVariableAccessor.R')
mysource('types_nimbleFunctionList.R')
mysource('types_nodeFxnVector.R')
mysource('types_numericLists.R')
mysource('cppDefs_utils.R')
mysource('cppDefs_variables.R')
mysource('cppDefs_core.R')
mysource('cppDefs_namedObjects.R')
mysource('cppDefs_BUGSmodel.R')
mysource('cppDefs_RCfunction.R')
mysource('cppDefs_nimbleFunction.R')
mysource('cppDefs_modelValues.R')
mysource('cppDefs_cppProject.R')
mysource('cppDefs_outputCppFromRparseTree.R')
mysource('cppInterfaces_utils.R')
mysource('cppInterfaces_models.R')
mysource('cppInterfaces_modelValues.R')
mysource('cppInterfaces_nimbleFunctions.R')
mysource('cppInterfaces_otherTypes.R')
mysource('nimbleProject.R')
mysource('MCEM_build.R')
mysource('MCMC_utils.R')
mysource('MCMC_spec.R')
mysource('MCMC_build.R')
mysource('MCMC_samplers.R')
mysource('MCMC_conjugacy.R')
mysource('MCMC_suite.R')
mysource('NF_utils.R')
mysource('makevars.R')
.NimbleUseRegistration = FALSE
mysource('registration.R')
mysource('zzz.R')

NeedMakevarsFile = TRUE
IncludeCodeDir <- "nimble/inst/include/nimble"
NimbleCodeDir <- "nimble/inst/CppCode"

options(nimble.Makevars.file = if(.Platform$OS.type == "windows" && file.exists("Makevars.win")) "Makevars.win" else "MyMakevars")

if(Sys.getenv("NIMBLE_PKG_SRC_DIR") == "") {
    path = normalizePath("nimble/inst/CppCode")
    if(.Platform$OS.type == "windows") {
        # check for cygwin???
        # gsub("C:", "/cygdrive/c", path) ?
        path = gsub("\\\\", "/", shortPathName(path))
        # You need to adjust MyMakevars to have the full path
        # to the local directory .../nimble/packages
        # Copy MyMakevars_template to MyMakevars and then edit
        # You also need to make a copy of the directory Eigen_local to Eigen
        # in nimble/packages/nimble/
        # Do not put Eigen or MyMakevars under version control
        options(nimble.Makevars.file = "MyMakevars")
        Sys.setenv("NIMBLE_PKG_SRC_DIR" = path)
        NimbleCodeDir = path
    } else {
        Sys.setenv("NIMBLE_PKG_SRC_DIR" = normalizePath("nimble/inst"))
        NimbleCodeDir = path
    }
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/D1Object.R
\docType{methods}
\name{setPublicAccess,D1Object-method}
\alias{setPublicAccess,D1Object-method}
\title{Make the object publicly readable.}
\usage{
\S4method{setPublicAccess}{D1Object}(x)
}
\arguments{
\item{x}{D1Object}

\item{...}{(not yet used)}
}
\value{
D1Object with modified access rules
}
\description{
This method should be called prior to creating the object in DataONE. When called
before creating the object, adds a rule to the access policy that makes this object
publicly readable. If called after creation, it will only change the system
metadata locally, and will not have any effect on remotely uploaded copies of the
D1Object.
}
\seealso{
\code{\link[=DataObject-class]{DataObject}}{ class description.}
}
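# Editor's sketch (assumed from the \usage and \value sections above, not from the
# dataone sources): the method returns the modified object, so re-assign it before
# creating the object in DataONE:
# obj <- setPublicAccess(obj)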
/man/setPublicAccess.Rd
permissive
AnneMTreasure/rdataone
R
false
true
817
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/D1Object.R
\docType{methods}
\name{setPublicAccess,D1Object-method}
\alias{setPublicAccess,D1Object-method}
\title{Make the object publicly readable.}
\usage{
\S4method{setPublicAccess}{D1Object}(x)
}
\arguments{
\item{x}{D1Object}

\item{...}{(not yet used)}
}
\value{
D1Object with modified access rules
}
\description{
This method should be called prior to creating the object in DataONE. When called
before creating the object, adds a rule to the access policy that makes this object
publicly readable. If called after creation, it will only change the system
metadata locally, and will not have any effect on remotely uploaded copies of the
D1Object.
}
\seealso{
\code{\link[=DataObject-class]{DataObject}}{ class description.}
}
library(ape) testtree <- read.tree("4843_2.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="4843_2_unrooted.txt")
/codeml_files/newick_trees_processed/4843_2/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
library(ape) testtree <- read.tree("4843_2.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="4843_2_unrooted.txt")
#' Pipe graphics #' #' Like dplyr, ggvis also uses the pipe function, \code{\%>\%} to turn #' function composition into a series of imperative statements. #' #' @importFrom magrittr %>% #' @name %>% #' @rdname pipe #' @export #' @param lhs,rhs A visualisation and a function to apply to it NULL #' Ensure internval availability of magrittr to package #' #' @importFrom magrittr %<>% #' @name %<>% #' @importFrom magrittr %$% #' @name %$% NULL
/R/pipe.R
no_license
moj-analytical-services/costmodelr
R
false
false
446
r
#' Pipe graphics #' #' Like dplyr, ggvis also uses the pipe function, \code{\%>\%} to turn #' function composition into a series of imperative statements. #' #' @importFrom magrittr %>% #' @name %>% #' @rdname pipe #' @export #' @param lhs,rhs A visualisation and a function to apply to it NULL #' Ensure internval availability of magrittr to package #' #' @importFrom magrittr %<>% #' @name %<>% #' @importFrom magrittr %$% #' @name %$% NULL
\name{sominit.pca.default}
\alias{sominit.pca.default}
\title{Initialise the prototypes of a SOM with PCA}
\description{
Initialise the prototypes of a Self-Organising Map with Principal Component
Analysis. The prototypes are regularly positioned (according to the prior
structure) in the subspace spanned by the two first principal components.
}
\usage{
\method{sominit.pca}{default}(data, somgrid, weights, with.princomp=FALSE, \dots)
}
\arguments{
  \item{data}{the data to which the SOM will be fitted, a matrix or data frame of observations (which should be scaled)}
  \item{somgrid}{a \code{somgrid} object}
  \item{weights}{optional weights for the data points}
  \item{with.princomp}{switch specifying whether the \code{\link{princomp}} should be used instead of the \code{\link{prcomp}} method for computing the principal components when no weights are given (see details)}
  \item{\dots}{not used}
}
\details{
When the optional \code{weights} are specified, the weighted covariance of the
data is computed via \code{\link{cov.wt}}. Then \code{\link{princomp}} is used to
find the two first principal components of the data. When \code{weights} are
missing, the PCA is conducted via \code{\link{prcomp}}, except if the function is
told to use \code{\link{princomp}} via the \code{with.princomp} parameter. As a
consequence, if \code{with.princomp=FALSE}, the results of the function applied to
unweighted data points are likely to differ from the ones obtained on the same
data points with uniform weights.
}
\value{
  A list with the following components
  \item{prototype}{a matrix containing appropriate initial prototypes}
  \item{data.pca}{the results of the PCA conducted on the data via a call to \code{\link{prcomp}} or \code{\link{princomp}}}
}
\author{Fabrice Rossi}
\seealso{\code{\link{somgrid}} for specifying the prior structure and \code{\link{sominit.random}} for random based initialisations.}
\examples{
X <- cbind(rnorm(500),rnorm(500))

sg <- somgrid(xdim=7,ydim=7,topo="rect")

proto <- sominit.pca(X,sg)$prototypes

plot(X,pch="+",col="red",xlim=range(X[,1],proto[,1]),
     ylim=range(X[,2],proto[,2]))
points(proto,pch=20)
}
\keyword{cluster}
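# Editor's sketch (assumed from the \details section above, not from the package
# sources): the weighted-PCA path described there amounts to
# w <- runif(nrow(X))
# pc <- princomp(covmat = cov.wt(X, wt = w / sum(w)))
# pc$loadings[, 1:2]   # the two first principal components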
/man/sominit.pca.default.Rd
no_license
lorenalves/yasomi
R
false
false
2,198
rd
\name{sominit.pca.default}
\alias{sominit.pca.default}
\title{Initialise the prototypes of a SOM with PCA}
\description{
Initialise the prototypes of a Self-Organising Map with Principal Component
Analysis. The prototypes are regularly positioned (according to the prior
structure) in the subspace spanned by the two first principal components.
}
\usage{
\method{sominit.pca}{default}(data, somgrid, weights, with.princomp=FALSE, \dots)
}
\arguments{
  \item{data}{the data to which the SOM will be fitted, a matrix or data frame of observations (which should be scaled)}
  \item{somgrid}{a \code{somgrid} object}
  \item{weights}{optional weights for the data points}
  \item{with.princomp}{switch specifying whether the \code{\link{princomp}} should be used instead of the \code{\link{prcomp}} method for computing the principal components when no weights are given (see details)}
  \item{\dots}{not used}
}
\details{
When the optional \code{weights} are specified, the weighted covariance of the
data is computed via \code{\link{cov.wt}}. Then \code{\link{princomp}} is used to
find the two first principal components of the data. When \code{weights} are
missing, the PCA is conducted via \code{\link{prcomp}}, except if the function is
told to use \code{\link{princomp}} via the \code{with.princomp} parameter. As a
consequence, if \code{with.princomp=FALSE}, the results of the function applied to
unweighted data points are likely to differ from the ones obtained on the same
data points with uniform weights.
}
\value{
  A list with the following components
  \item{prototype}{a matrix containing appropriate initial prototypes}
  \item{data.pca}{the results of the PCA conducted on the data via a call to \code{\link{prcomp}} or \code{\link{princomp}}}
}
\author{Fabrice Rossi}
\seealso{\code{\link{somgrid}} for specifying the prior structure and \code{\link{sominit.random}} for random based initialisations.}
\examples{
X <- cbind(rnorm(500),rnorm(500))

sg <- somgrid(xdim=7,ydim=7,topo="rect")

proto <- sominit.pca(X,sg)$prototypes

plot(X,pch="+",col="red",xlim=range(X[,1],proto[,1]),
     ylim=range(X[,2],proto[,2]))
points(proto,pch=20)
}
\keyword{cluster}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{gcd}
\alias{gcd}
\title{Computes the greatest common divisor with several algorithms available. The
default algorithm is the recursive method, which is typically faster than the
others.}
\usage{
gcd(a, b, method = "recursive")
}
\arguments{
\item{a}{First integer}

\item{b}{Second integer}

\item{method}{Specifies the algorithm used to calculate the greatest common
divisor. Defaults to 'recursive', which is generally faster than other methods.
Other algorithms available include 'division' and 'subtraction'.}
}
\value{
The greatest common divisor
}
\description{
Computes the greatest common divisor with several algorithms available. The
default algorithm is the recursive method, which is typically faster than the
others.
}
\examples{
gcd(21, 28)
gcd(30, 10, 'subtraction')
gcd(21, 6, 'division')
}
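# Editor's sketch (illustrative, not the package's Rcpp source): the documented
# default "recursive" method is the classic Euclid recursion:
gcd_recursive <- function(a, b) if (b == 0) a else gcd_recursive(b, a %% b)
gcd_recursive(21, 28)  # 7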
/man/gcd.Rd
no_license
aschleg/numberr
R
false
true
906
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{gcd}
\alias{gcd}
\title{Computes the greatest common divisor with several algorithms available. The
default algorithm is the recursive method, which is typically faster than the
others.}
\usage{
gcd(a, b, method = "recursive")
}
\arguments{
\item{a}{First integer}

\item{b}{Second integer}

\item{method}{Specifies the algorithm used to calculate the greatest common
divisor. Defaults to 'recursive', which is generally faster than other methods.
Other algorithms available include 'division' and 'subtraction'.}
}
\value{
The greatest common divisor
}
\description{
Computes the greatest common divisor with several algorithms available. The
default algorithm is the recursive method, which is typically faster than the
others.
}
\examples{
gcd(21, 28)
gcd(30, 10, 'subtraction')
gcd(21, 6, 'division')
}
#!/usr/bin/R
# this script graphs the total absences, insertions, and references per transposon family
# USE: family_freq.R

library(dplyr)
library(tidyr)
library(stringr)
library(ggplot2)
library(grid)
library(stringr)

setwd("/Users/kristen/Documents/transposon_figure_data/data")
summarydata <- read.table("T_kin_C_matrix_full.txt",header=TRUE)

# remove ZERO_new traits
summarydata<-subset(summarydata,!grepl('^ZERO_new', summarydata$trait))

# clean trait names
summarydata$trait <- gsub("_C$" ,"",summarydata$trait)
summarydata$trait <- gsub("^ONE_new" ,"new",summarydata$trait)

classdata<- read.table("CtCp_all_nonredundant.txt")
names(classdata)<-c("chr","start","end","TE","orientation","method","strain","class")

# add TE class info to summarydata (new_TRANS_end_tes will be removed)
classdata$family<- stringr::str_split_fixed(classdata$TE, regex("_(non-)?reference"),2)[,1]
classdata$family<- paste(stringr::str_split_fixed(classdata$family, "_",4)[,3],stringr::str_split_fixed(classdata$family, "_",4)[,4],sep="_")
classdata$family <- gsub("_$" ,"",classdata$family)
classdata$family <- gsub("_non-reference(.*)$" ,"",classdata$family)
classdata<-mutate(classdata, trait=paste(method,"TRANS",family,sep="_"))
class_subset<-distinct(classdata, family,class,.keep_all=TRUE)
class_subset<-dplyr::select(class_subset,family,class)
#class_subset <- classdata %>% distinct(family,class) %>% select(family,class)
summarydata$family<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,2]
summarydata <-merge(summarydata, class_subset, by="family")
summarydata<-dplyr::select(summarydata, -family)

# revalue classes
summarydata$class <- factor(summarydata$class,
                            levels = c("dnatransposon", "retrotransposon","unknown"),
                            labels = c("DNA Transposon", "Retrotransposon", "Unknown"))

# double check that 'total' is removed, though it should have been removed in the merge
summarydata<-filter(summarydata,!grepl('total', trait))
summarydata <- filter(summarydata, trait != "coverage" )
no_cols<-ncol(summarydata)-1
print(summarydata[,no_cols])
summarydata<-mutate(summarydata, TOTAL=rowSums(summarydata[2:no_cols],na.rm = TRUE))

# new column that specifies what caller was used
summarydata$caller<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,1]
# new column that specifies TE family
summarydata$transposon<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,2]

# revalue
summarydata$caller<- factor(summarydata$caller,
                            levels = c("new","reference","absent"),
                            labels = c("Insertion","Reference","Absence"))
summarydata$transposon<-gsub("_CE$","",summarydata$transposon)
summarydata$transposon<-gsub("WBTransposon","WBT",summarydata$transposon)
#colnames(summarydata)
summarydata<-dplyr::select(summarydata,trait,class,TOTAL,caller,transposon)

########
summarydata<-summarydata
summarydata<-dplyr::select(summarydata,-trait)
summarydata<-spread(summarydata,caller,TOTAL)
summarydata$Reference[is.na(summarydata$Reference)] <- 0
summarydata<-mutate(summarydata,All=Insertion+Reference)
summarydata<-dplyr::select(summarydata,-Reference)
summarydata<-gather(summarydata, "caller","TOTAL",Insertion,Absence,All)
####

summarydata$caller = factor(summarydata$caller,
                            levels=c('Insertion','Absence','All'),
                            labels = c("Insertion Sites", "Active Reference Sites","All Transposon Sites"))

m <- ggplot(summarydata,aes(y=transposon,x=TOTAL))
m <- m + geom_point(size=1.25,aes(color=class))+
  facet_grid(.~caller, scale="free") +
  theme(strip.background = element_rect(fill="white"),
        strip.text.x = element_text(size = 9, colour = "black",face="bold"),
        panel.spacing = unit(.6, "lines"),
        panel.spacing.y=unit(.50,"cm"),
        plot.margin=unit(c(.1,.1,0,.1), "cm"),
        panel.border = element_rect(fill=NA,colour = "black"),
        panel.background = element_rect(fill = "white"),
        panel.grid.major = element_line(colour = "grey87"),
        panel.grid.minor = element_line(colour = "grey87"),
        axis.ticks =element_line(colour = "black"),
        axis.title=element_text(size=9),
        axis.title.x=element_text(face="bold"),
        axis.text.y = element_text(colour = "black",size=5),
        axis.text.x = element_text(color="black",size=8),
        axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "black"),
        legend.title=element_blank(),
        legend.background = element_rect(fill=FALSE),
        legend.key=element_rect(fill=NA),
        legend.position="none",
        legend.text=element_text(size=9))+
  scale_color_manual(values = c("DNA Transposon" = "navy", "Retrotransposon"="brown3","Unknown"="darkgoldenrod2"))+
  labs(y="", x="Total Sites")
m

setwd("/Users/kristen/Documents/transposon_figure_data/figures")
ggsave(filename="Family_Frequency.tiff", dpi=300, width=7.5, height=10, units="in")
ggsave(filename="Family_Frequency.png", dpi=300, width=7.5, height=10, units="in")

setwd("/Users/kristen/Documents/transposon_figure_data/data")
total_means<-summarydata %>% group_by(caller,class) %>% summarise(mean=mean(TOTAL,na.rm=TRUE),SD=sd(TOTAL, na.rm=TRUE))
total_means<- mutate(total_means, id = paste(class,caller,sep="_"))
summarydata<- mutate(summarydata, id = paste(class,caller,sep="_"))
merged<-merge(summarydata, total_means, by="id")
merged<-mutate(merged, outL=ifelse(abs(TOTAL-mean)>SD, "OUTLIER", "n"))
outliers<-filter(merged,outL=="OUTLIER")
save(outliers,file="outlier_total_events_per_family.Rda")

setwd("/Users/kristen/Documents/transposon_figure_data/figures")
outlier_table<-dplyr::select(outliers,transposon, caller.x,class.x,TOTAL,mean,SD)
outlier_table$mean<-signif(outlier_table$mean,4)
outlier_table$SD<-signif(outlier_table$SD,4)
outlier_table<-arrange(outlier_table,transposon,TOTAL,caller.x)
colnames(outlier_table)<-c("Transposon", "Site Type","Class","Total Number","Mean","SD")
write.table(outlier_table, file="Outlier_Table.txt",sep="\t",quote=FALSE,row.names=FALSE)

names(summarydata)
test<-distinct(summarydata,caller, transposon)
RR<-filter(summarydata,caller=="Reference")
AA<-filter(summarydata,caller=="Active References")
NN<-filter(summarydata,caller=="Insertions")
CC<-filter(summarydata,caller=="Reference"|caller=="Active References")
length(unique(summarydata$transposon))
(unique(summarydata$caller))
length(unique(RR$transposon))
length(unique(AA$transposon))
length(unique(NN$transposon))
length(unique(CC$transposon))
test<-distinct(CC,caller, transposon)
length(unique(CC$trait))
test<-filter(merged,transposon=="MARINER2")
test<-filter(merged,caller.y=="Insertions")
/family_freq.R
permissive
klaricch/TransposonFigures
R
false
false
6,700
r
#!/usr/bin/R
# this script graphs the total absences, insertions, and references per transposon family
# USE: family_freq.R

library(dplyr)
library(tidyr)
library(stringr)
library(ggplot2)
library(grid)
library(stringr)

setwd("/Users/kristen/Documents/transposon_figure_data/data")
summarydata <- read.table("T_kin_C_matrix_full.txt",header=TRUE)

# remove ZERO_new traits
summarydata<-subset(summarydata,!grepl('^ZERO_new', summarydata$trait))

# clean trait names
summarydata$trait <- gsub("_C$" ,"",summarydata$trait)
summarydata$trait <- gsub("^ONE_new" ,"new",summarydata$trait)

classdata<- read.table("CtCp_all_nonredundant.txt")
names(classdata)<-c("chr","start","end","TE","orientation","method","strain","class")

# add TE class info to summarydata (new_TRANS_end_tes will be removed)
classdata$family<- stringr::str_split_fixed(classdata$TE, regex("_(non-)?reference"),2)[,1]
classdata$family<- paste(stringr::str_split_fixed(classdata$family, "_",4)[,3],stringr::str_split_fixed(classdata$family, "_",4)[,4],sep="_")
classdata$family <- gsub("_$" ,"",classdata$family)
classdata$family <- gsub("_non-reference(.*)$" ,"",classdata$family)
classdata<-mutate(classdata, trait=paste(method,"TRANS",family,sep="_"))
class_subset<-distinct(classdata, family,class,.keep_all=TRUE)
class_subset<-dplyr::select(class_subset,family,class)
#class_subset <- classdata %>% distinct(family,class) %>% select(family,class)
summarydata$family<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,2]
summarydata <-merge(summarydata, class_subset, by="family")
summarydata<-dplyr::select(summarydata, -family)

# revalue classes
summarydata$class <- factor(summarydata$class,
                            levels = c("dnatransposon", "retrotransposon","unknown"),
                            labels = c("DNA Transposon", "Retrotransposon", "Unknown"))

# double check that 'total' is removed, though it should have been removed in the merge
summarydata<-filter(summarydata,!grepl('total', trait))
summarydata <- filter(summarydata, trait != "coverage" )
no_cols<-ncol(summarydata)-1
print(summarydata[,no_cols])
summarydata<-mutate(summarydata, TOTAL=rowSums(summarydata[2:no_cols],na.rm = TRUE))

# new column that specifies what caller was used
summarydata$caller<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,1]
# new column that specifies TE family
summarydata$transposon<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,2]

# revalue
summarydata$caller<- factor(summarydata$caller,
                            levels = c("new","reference","absent"),
                            labels = c("Insertion","Reference","Absence"))
summarydata$transposon<-gsub("_CE$","",summarydata$transposon)
summarydata$transposon<-gsub("WBTransposon","WBT",summarydata$transposon)
#colnames(summarydata)
summarydata<-dplyr::select(summarydata,trait,class,TOTAL,caller,transposon)

########
summarydata<-summarydata
summarydata<-dplyr::select(summarydata,-trait)
summarydata<-spread(summarydata,caller,TOTAL)
summarydata$Reference[is.na(summarydata$Reference)] <- 0
summarydata<-mutate(summarydata,All=Insertion+Reference)
summarydata<-dplyr::select(summarydata,-Reference)
summarydata<-gather(summarydata, "caller","TOTAL",Insertion,Absence,All)
####

summarydata$caller = factor(summarydata$caller,
                            levels=c('Insertion','Absence','All'),
                            labels = c("Insertion Sites", "Active Reference Sites","All Transposon Sites"))

m <- ggplot(summarydata,aes(y=transposon,x=TOTAL))
m <- m + geom_point(size=1.25,aes(color=class))+
  facet_grid(.~caller, scale="free") +
  theme(strip.background = element_rect(fill="white"),
        strip.text.x = element_text(size = 9, colour = "black",face="bold"),
        panel.spacing = unit(.6, "lines"),
        panel.spacing.y=unit(.50,"cm"),
        plot.margin=unit(c(.1,.1,0,.1), "cm"),
        panel.border = element_rect(fill=NA,colour = "black"),
        panel.background = element_rect(fill = "white"),
        panel.grid.major = element_line(colour = "grey87"),
        panel.grid.minor = element_line(colour = "grey87"),
        axis.ticks =element_line(colour = "black"),
        axis.title=element_text(size=9),
        axis.title.x=element_text(face="bold"),
        axis.text.y = element_text(colour = "black",size=5),
        axis.text.x = element_text(color="black",size=8),
        axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "black"),
        legend.title=element_blank(),
        legend.background = element_rect(fill=FALSE),
        legend.key=element_rect(fill=NA),
        legend.position="none",
        legend.text=element_text(size=9))+
  scale_color_manual(values = c("DNA Transposon" = "navy", "Retrotransposon"="brown3","Unknown"="darkgoldenrod2"))+
  labs(y="", x="Total Sites")
m

setwd("/Users/kristen/Documents/transposon_figure_data/figures")
ggsave(filename="Family_Frequency.tiff", dpi=300, width=7.5, height=10, units="in")
ggsave(filename="Family_Frequency.png", dpi=300, width=7.5, height=10, units="in")

setwd("/Users/kristen/Documents/transposon_figure_data/data")
total_means<-summarydata %>% group_by(caller,class) %>% summarise(mean=mean(TOTAL,na.rm=TRUE),SD=sd(TOTAL, na.rm=TRUE))
total_means<- mutate(total_means, id = paste(class,caller,sep="_"))
summarydata<- mutate(summarydata, id = paste(class,caller,sep="_"))
merged<-merge(summarydata, total_means, by="id")
merged<-mutate(merged, outL=ifelse(abs(TOTAL-mean)>SD, "OUTLIER", "n"))
outliers<-filter(merged,outL=="OUTLIER")
save(outliers,file="outlier_total_events_per_family.Rda")

setwd("/Users/kristen/Documents/transposon_figure_data/figures")
outlier_table<-dplyr::select(outliers,transposon, caller.x,class.x,TOTAL,mean,SD)
outlier_table$mean<-signif(outlier_table$mean,4)
outlier_table$SD<-signif(outlier_table$SD,4)
outlier_table<-arrange(outlier_table,transposon,TOTAL,caller.x)
colnames(outlier_table)<-c("Transposon", "Site Type","Class","Total Number","Mean","SD")
write.table(outlier_table, file="Outlier_Table.txt",sep="\t",quote=FALSE,row.names=FALSE)

names(summarydata)
test<-distinct(summarydata,caller, transposon)
RR<-filter(summarydata,caller=="Reference")
AA<-filter(summarydata,caller=="Active References")
NN<-filter(summarydata,caller=="Insertions")
CC<-filter(summarydata,caller=="Reference"|caller=="Active References")
length(unique(summarydata$transposon))
(unique(summarydata$caller))
length(unique(RR$transposon))
length(unique(AA$transposon))
length(unique(NN$transposon))
length(unique(CC$transposon))
test<-distinct(CC,caller, transposon)
length(unique(CC$trait))
test<-filter(merged,transposon=="MARINER2")
test<-filter(merged,caller.y=="Insertions")
/겨울방학데이터_기계학습특강/R_script(3주차).R
no_license
jeognah0304/Lecture
R
false
false
6,322
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatMixHMMR.R
\docType{class}
\name{StatMixHMMR-class}
\alias{StatMixHMMR-class}
\alias{StatMixHMMR}
\title{A Reference Class which contains statistics of a mixture of HMMR models.}
\description{
StatMixHMMR contains all the statistics associated with a
\link[=ParamMixHMMR]{MixHMMR} model, in particular the E-Step of the EM algorithm.
}
\section{Fields}{

\describe{
\item{\code{tau_ik}}{Matrix of size \eqn{(n, K)} giving the posterior probabilities
that the curve \eqn{\boldsymbol{y}_{i}}{y_{i}} originates from the \eqn{k}-th
HMMR model.}

\item{\code{gamma_ikjr}}{Array of size \eqn{(nm, R, K)} giving the posterior
probabilities that the observation \eqn{\boldsymbol{y}_{ij}}{y_{ij}} originates
from the \eqn{r}-th regime of the \eqn{k}-th HMM model.}

\item{\code{loglik}}{Numeric. Log-likelihood of the MixHMMR model.}

\item{\code{stored_loglik}}{Numeric vector. Stored values of the log-likelihood at
each iteration of the EM algorithm.}

\item{\code{klas}}{Row matrix of the labels issued from \code{tau_ik}. Its elements
are \eqn{klas[i] = z\_i}{klas[i] = z_i}, \eqn{i = 1,\dots,n}.}

\item{\code{z_ik}}{Hard segmentation logical matrix of dimension \eqn{(n, K)}
obtained by the Maximum a posteriori (MAP) rule:
\eqn{z\_ik = 1 \ \textrm{if} \ z\_i = \textrm{arg} \ \textrm{max}_{k} \
P(z_{ik} = 1 | \boldsymbol{y}_{i}; \boldsymbol{\Psi}) = tau\_ik;\ 0 \
\textrm{otherwise}}{z_ik = 1 if z_i = arg max_k P(z_{ik} = 1 | y_{i}; \Psi) =
tau_ik; 0 otherwise}.}

\item{\code{smoothed}}{Matrix of size \eqn{(m, K)} giving the smoothed time series.
The smoothed time series are computed by combining the polynomial regression
components with both the estimated posterior regime probabilities
\code{gamma_ikjr} and the corresponding estimated posterior cluster probability
\code{tau_ik}. The k-th column gives the estimated mean series of cluster k.}

\item{\code{BIC}}{Numeric. Value of BIC (Bayesian Information Criterion).}

\item{\code{AIC}}{Numeric. Value of AIC (Akaike Information Criterion).}

\item{\code{ICL1}}{Numeric. Value of ICL (Integrated Completed Likelihood
Criterion).}

\item{\code{log_alpha_k_fyi}}{Private. Only defined for calculations.}

\item{\code{exp_num_trans}}{Private. Only defined for calculations.}

\item{\code{exp_num_trans_from_l}}{Private. Only defined for calculations.}
}}

\section{Methods}{

\describe{
\item{\code{computeStats(paramMixHMMR)}}{Method used in the EM algorithm to compute
statistics based on parameters provided by the object \code{paramMixHMMR} of
class \link{ParamMixHMMR}.}

\item{\code{EStep(paramMixHMMR)}}{Method used in the EM algorithm to update
statistics based on parameters provided by the object \code{paramMixHMMR} of
class \link{ParamMixHMMR} (prior and posterior probabilities).}

\item{\code{MAP()}}{MAP calculates values of the fields \code{z_ik} and \code{klas}
by applying the Maximum A Posteriori Bayes allocation rule.
\eqn{z\_ik = 1 \ \textrm{if} \ z\_i = \textrm{arg} \ \textrm{max}_{k} \
P(z_{ik} = 1 | \boldsymbol{y}_{i}; \boldsymbol{\Psi}) = tau\_ik;\ 0 \
\textrm{otherwise}}{z_ik = 1 if z_i = arg max_k P(z_{ik} = 1 | y_{i}; \Psi) =
tau_ik; 0 otherwise}.}
}}

\seealso{
\link{ParamMixHMMR}
}
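# Editor's sketch (not from the package sources): the MAP() rule documented above,
# applied to a generic posterior matrix tau_ik:
# klas <- apply(tau_ik, 1, which.max)          # hard labels z_i
# z_ik <- diag(ncol(tau_ik))[klas, ] == 1      # one-hot logical matrix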
/fuzzedpackages/flamingos/man/StatMixHMMR-class.Rd
no_license
akhikolla/testpackages
R
false
true
3,292
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatMixHMMR.R
\docType{class}
\name{StatMixHMMR-class}
\alias{StatMixHMMR-class}
\alias{StatMixHMMR}
\title{A Reference Class which contains statistics of a mixture of HMMR models.}
\description{
StatMixHMMR contains all the statistics associated with a
\link[=ParamMixHMMR]{MixHMMR} model, in particular the E-Step of the EM algorithm.
}
\section{Fields}{

\describe{
\item{\code{tau_ik}}{Matrix of size \eqn{(n, K)} giving the posterior probabilities
that the curve \eqn{\boldsymbol{y}_{i}}{y_{i}} originates from the \eqn{k}-th
HMMR model.}

\item{\code{gamma_ikjr}}{Array of size \eqn{(nm, R, K)} giving the posterior
probabilities that the observation \eqn{\boldsymbol{y}_{ij}}{y_{ij}} originates
from the \eqn{r}-th regime of the \eqn{k}-th HMM model.}

\item{\code{loglik}}{Numeric. Log-likelihood of the MixHMMR model.}

\item{\code{stored_loglik}}{Numeric vector. Stored values of the log-likelihood at
each iteration of the EM algorithm.}

\item{\code{klas}}{Row matrix of the labels issued from \code{tau_ik}. Its elements
are \eqn{klas[i] = z\_i}{klas[i] = z_i}, \eqn{i = 1,\dots,n}.}

\item{\code{z_ik}}{Hard segmentation logical matrix of dimension \eqn{(n, K)}
obtained by the Maximum a posteriori (MAP) rule:
\eqn{z\_ik = 1 \ \textrm{if} \ z\_i = \textrm{arg} \ \textrm{max}_{k} \
P(z_{ik} = 1 | \boldsymbol{y}_{i}; \boldsymbol{\Psi}) = tau\_ik;\ 0 \
\textrm{otherwise}}{z_ik = 1 if z_i = arg max_k P(z_{ik} = 1 | y_{i}; \Psi) =
tau_ik; 0 otherwise}.}

\item{\code{smoothed}}{Matrix of size \eqn{(m, K)} giving the smoothed time series.
The smoothed time series are computed by combining the polynomial regression
components with both the estimated posterior regime probabilities
\code{gamma_ikjr} and the corresponding estimated posterior cluster probability
\code{tau_ik}. The k-th column gives the estimated mean series of cluster k.}

\item{\code{BIC}}{Numeric. Value of BIC (Bayesian Information Criterion).}

\item{\code{AIC}}{Numeric. Value of AIC (Akaike Information Criterion).}

\item{\code{ICL1}}{Numeric. Value of ICL (Integrated Completed Likelihood
Criterion).}

\item{\code{log_alpha_k_fyi}}{Private. Only defined for calculations.}

\item{\code{exp_num_trans}}{Private. Only defined for calculations.}

\item{\code{exp_num_trans_from_l}}{Private. Only defined for calculations.}
}}

\section{Methods}{

\describe{
\item{\code{computeStats(paramMixHMMR)}}{Method used in the EM algorithm to compute
statistics based on parameters provided by the object \code{paramMixHMMR} of
class \link{ParamMixHMMR}.}

\item{\code{EStep(paramMixHMMR)}}{Method used in the EM algorithm to update
statistics based on parameters provided by the object \code{paramMixHMMR} of
class \link{ParamMixHMMR} (prior and posterior probabilities).}

\item{\code{MAP()}}{MAP calculates values of the fields \code{z_ik} and \code{klas}
by applying the Maximum A Posteriori Bayes allocation rule.
\eqn{z\_ik = 1 \ \textrm{if} \ z\_i = \textrm{arg} \ \textrm{max}_{k} \
P(z_{ik} = 1 | \boldsymbol{y}_{i}; \boldsymbol{\Psi}) = tau\_ik;\ 0 \
\textrm{otherwise}}{z_ik = 1 if z_i = arg max_k P(z_{ik} = 1 | y_{i}; \Psi) =
tau_ik; 0 otherwise}.}
}}

\seealso{
\link{ParamMixHMMR}
}
library(caret); library(ggplot2); library(randomForest)

## Load data
fileURL1<- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
fileURL2<- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
if(!file.exists("training")|!file.exists("testing")){
  # create the placeholder file
  td = tempdir()
  # download into the placeholder file
  tf1 = tempfile(tmpdir=td)
  download.file(fileURL1, tf1)
  pmltraining<- read.csv(tf1)
  tf2 = tempfile(tmpdir=td)
  download.file(fileURL2, tf2)
  pmltesting<- read.csv(tf2)
}
dim(pmltraining)
dim(pmltesting)

## Data pre-processing
nzv<- nearZeroVar(pmltraining)
pmltrainingtemp<- pmltraining[,-nzv]
threshold<- dim(pmltraining)[1]*0.9
badcolumns<- which(apply(is.na(pmltrainingtemp), 2, sum) > threshold)
pmltrainingtidy<- pmltrainingtemp[,-badcolumns]
pmltestingtemp<- pmltesting[,-nzv]
pmltestingtidy<- pmltestingtemp[, -badcolumns]

## Removing non-measurement data
RemoveInx1 <- grepl("X|timestamp|user_name|problem_id", names(pmltrainingtidy))
RemoveInx2 <- grepl("X|timestamp|user_name|problem_id", names(pmltestingtidy))
pmltrainingtidy<- pmltrainingtidy[, which(RemoveInx1==FALSE)]
pmltestingtidy<- pmltestingtidy[, which(RemoveInx2==FALSE)]

## Data splitting for resampling
set.seed(123)
inTrain<- createDataPartition(y=pmltrainingtidy$classe, p=0.7, list = FALSE)
training<- pmltrainingtidy[inTrain,]
testing<- pmltrainingtidy[-inTrain,]

## random forest
modfit1<- randomForest(classe~., data = training)
prediction1<- predict(modfit1, testing)
confusionMatrix(testing$classe, prediction1)

## decision tree
modfit2<- train(classe~., method="rpart", data = training)
prediction2<- predict(modfit2, testing)
confusionMatrix(testing$classe, prediction2)

## LDA
modfit3<- train(classe~., method="lda", data = training)
prediction3<- predict(modfit3, testing)
confusionMatrix(testing$classe, prediction3)

## gbm
modfit4<- train(classe~., method="gbm", data = training)
prediction4<- predict(modfit4, testing)
confusionMatrix(testing$classe, prediction4)

## important variables
varImpPlot(modfit1, n.var = 20, main = "Top20 important variables in random forest modeling")

predanswer<- predict(modfit2, pmltestingtidy)
predanswer
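# Editor's sketch (not in the original): the out-of-sample accuracy compared across
# the four models can be read straight off caret's confusionMatrix object:
# cm <- confusionMatrix(testing$classe, prediction1)
# cm$overall["Accuracy"]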
/Prediction Assignment Writeup.R
no_license
NingGuo1982/Machine-Learning-Course-Assignment
R
false
false
2,286
r
library(caret); library(ggplot2); library(randomForest)

## Load data
fileURL1<- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
fileURL2<- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
if(!file.exists("training")|!file.exists("testing")){
  # create the placeholder file
  td = tempdir()
  # download into the placeholder file
  tf1 = tempfile(tmpdir=td)
  download.file(fileURL1, tf1)
  pmltraining<- read.csv(tf1)
  tf2 = tempfile(tmpdir=td)
  download.file(fileURL2, tf2)
  pmltesting<- read.csv(tf2)
}
dim(pmltraining)
dim(pmltesting)

## Data pre-processing
nzv<- nearZeroVar(pmltraining)
pmltrainingtemp<- pmltraining[,-nzv]
threshold<- dim(pmltraining)[1]*0.9
badcolumns<- which(apply(is.na(pmltrainingtemp), 2, sum) > threshold)
pmltrainingtidy<- pmltrainingtemp[,-badcolumns]
pmltestingtemp<- pmltesting[,-nzv]
pmltestingtidy<- pmltestingtemp[, -badcolumns]

## Removing non-measurement data
RemoveInx1 <- grepl("X|timestamp|user_name|problem_id", names(pmltrainingtidy))
RemoveInx2 <- grepl("X|timestamp|user_name|problem_id", names(pmltestingtidy))
pmltrainingtidy<- pmltrainingtidy[, which(RemoveInx1==FALSE)]
pmltestingtidy<- pmltestingtidy[, which(RemoveInx2==FALSE)]

## Data splitting for resampling
set.seed(123)
inTrain<- createDataPartition(y=pmltrainingtidy$classe, p=0.7, list = FALSE)
training<- pmltrainingtidy[inTrain,]
testing<- pmltrainingtidy[-inTrain,]

## random forest
modfit1<- randomForest(classe~., data = training)
prediction1<- predict(modfit1, testing)
confusionMatrix(testing$classe, prediction1)

## decision tree
modfit2<- train(classe~., method="rpart", data = training)
prediction2<- predict(modfit2, testing)
confusionMatrix(testing$classe, prediction2)

## LDA
modfit3<- train(classe~., method="lda", data = training)
prediction3<- predict(modfit3, testing)
confusionMatrix(testing$classe, prediction3)

## gbm
modfit4<- train(classe~., method="gbm", data = training)
prediction4<- predict(modfit4, testing)
confusionMatrix(testing$classe, prediction4)

## important variables
varImpPlot(modfit1, n.var = 20, main = "Top20 important variables in random forest modeling")

predanswer<- predict(modfit2, pmltestingtidy)
predanswer
#' focus_20200323_picco UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_0323_picco_ui <- function(id) {
  ns <- NS(id)

  obs_t <- dpc_covid19_ita_andamento_nazionale[["data"]]
  obs_y <- dpc_covid19_ita_andamento_nazionale[["totale_casi"]]

  pred_val <- growthcurver::SummarizeGrowth(
    data_t = seq_along(obs_t),
    data_n = obs_y
  )$vals

  fluidPage(
    box(
      width = 12,
      title = "Informazioni sulla lettura e uso dei grafici",
      p("Nuovi casi giornalieri positivi italiani e regionali (punti in colore) e stima previsiva ipotizzando un andamento logistico (punti in nero)."),
      p("\u00C8 possibile visualizzare le variazioni di previsione in funzione dei parametri selezionati, a partire da quelli di migliore approssimazione."),
      p("Variando i parametri nazionali rispetto a quelli di migliore approssimazione (escursione ammessa entro l'intervallo di confidenza al 99%), verranno modificati, in proporzione, i corrispondenti parametri per le stime regionali."),
      actionButton(ns("reset"), "Ripristino parametri iniziali")
    ),
    fluidRow(
      box(
        width = 4,
        footer = "Capacit\u00E0 portante popolazione: massimo numero di casi positivi che possono essere presenti per un tempo indefinito.",
        sliderInput(ns("k"), "Parametro k",
          min = round(pred_val$k - 2.576 * pred_val$k_se),
          max = round(pred_val$k + 2.576 * pred_val$k_se),
          value = round(pred_val$k),
          step = round(pred_val$k_se / 10)
        )
      ),
      box(
        width = 4,
        footer = "Casi iniziali.",
        sliderInput(ns("n0"), "Parametro N0",
          min = round(pred_val$n0 - 2.576 * pred_val$n0_se),
          max = round(pred_val$n0 + 2.576 * pred_val$n0_se),
          value = round(pred_val$n0),
          step = round(pred_val$n0_se / 10)
        )
      ),
      box(
        width = 4,
        footer = "Tasso esponenziale di crescita.",
        sliderInput(ns("r"), "Parametro r",
          min = round(pred_val$r - 2.576 * pred_val$r_se, 4),
          max = round(pred_val$r + 2.576 * pred_val$r_se, 4),
          value = round(pred_val$r, 4),
          step = round(pred_val$r_se / 10, 4)
        )
      )
    ),
    plotlyOutput(ns("picco")),
    shiny::selectInput(ns("whichRegion"),
      "Selezionare le regioni da visualizzare",
      choices = regions(), selectize = TRUE,
      selected = c("Veneto", "Lombardia", "Sicilia"),
      multiple = TRUE, width = "100%"
    ),
    plotlyOutput(ns("picco_reg"))
  )
}

#' focus_20200323_picco Server Function
#'
#' @noRd
mod_0323_picco_server <- function(id) {
  # national setup #
  obs_t <- dpc_covid19_ita_andamento_nazionale[["data"]]
  obs_y <- dpc_covid19_ita_andamento_nazionale[["totale_casi"]]

  pred_val_origin <- growthcurver::SummarizeGrowth(
    data_t = seq_along(obs_t),
    data_n = obs_y
  )$vals

  obs_db <- tibble::tibble(
    t = as.Date(obs_t),
    y = (obs_y - dplyr::lag(obs_y, default = 0))
  )

  pred_t <- c(obs_t, obs_t[[length(obs_t)]] + lubridate::days(1:28))

  # regional setup #
  obs_reg <- dpc_covid19_ita_regioni %>%
    dplyr::transmute(
      t = as.Date(.data$data),
      regione = .data$denominazione_regione,
      totale_casi = .data$totale_casi
    ) %>%
    dplyr::group_by(.data$regione) %>%
    dplyr::arrange(.data$t) %>%
    dplyr::mutate(
      y = (.data$totale_casi - dplyr::lag(.data$totale_casi, default = 0))
    ) %>%
    dplyr::ungroup()

  obs_reg_plate <- obs_reg %>%
    dplyr::select(-.data$y) %>%
    tidyr::pivot_wider(
      names_from = .data$regione,
      values_from = .data$totale_casi
    ) %>%
    dplyr::mutate(time = seq_along(.data$t)) %>%
    dplyr::select(-.data$t)

  pred_db_reg <- obs_reg_plate %>%
    growthcurver::SummarizeGrowthByPlate() %>%
    dplyr::rename(regione = .data$sample) %>%
    dplyr::select(.data$regione, .data$k, .data$n0, .data$r)

  callModule(id = id, function(input, output, session) {
    ns <- session$ns

    observeEvent(input$reset, {
      updateNumericInput(session, "k", value = pred_val_origin[["k"]])
      updateNumericInput(session, "n0", value = pred_val_origin[["n0"]])
      updateNumericInput(session, "r", value = pred_val_origin[["r"]])
    })

    # national plot #
    n0 <- reactive({
      req(input$n0)
    })
    k <- reactive({
      req(input$k)
    })
    r <- reactive({
      req(input$r)
    })

    pred_n <- reactive({
      res <- growthcurver::NAtT(
        k = k(), n0 = n0(), r = r(),
        t = seq_along(pred_t)
      )
      res - dplyr::lag(res, default = 0)
    })

    output$picco <- renderPlotly({
      gg_ita <- tibble::tibble(t = as.Date(pred_t), y = pred_n()) %>%
        ggplot(aes(x = .data$t, y = .data$y)) +
        geom_point() +
        geom_line() +
        geom_point(data = obs_db, colour = "red") +
        ylab("Numero di nuovi casi") +
        xlab("") +
        scale_x_date(date_breaks = "1 day", date_labels = "%b %d") +
        theme(
          axis.text.x = element_text(angle = 60, hjust = 1, vjust = 0.5)
        )
      ggplotly(gg_ita)
    })

    # regional plot #
    pred_val_reg <- reactive({
      k_ita <- req(input$k)
      n0_ita <- req(input$n0)
      r_ita <- req(input$r)

      pred_db_reg %>%
        dplyr::mutate(
          k = (.data$k * k_ita) / pred_val_origin[["k"]],
          n0 = (.data$n0 * n0_ita) / pred_val_origin[["n0"]],
          r = (.data$r * r_ita) / pred_val_origin[["r"]],
          natt = purrr::pmap(
            list(.data$k, .data$n0, .data$r),
            function(k, n0, r) {
              tibble::tibble(
                t = as.Date(pred_t),
                y = growthcurver::NAtT(k, n0, r, t = seq_along(pred_t))
              )
            }
          )
        ) %>%
        tidyr::unnest(cols = .data$natt) %>%
        dplyr::group_by(.data$regione) %>%
        dplyr::arrange(.data$t) %>%
        dplyr::mutate(y = .data$y - dplyr::lag(.data$y, default = 0)) %>%
        dplyr::ungroup()
    })

    output$picco_reg <- renderPlotly({
      reg <- req(input$whichRegion)
      gg_reg <- pred_val_reg() %>%
        dplyr::filter(.data$regione %in% reg) %>%
        ggplot(aes(x = .data$t, y = .data$y, colour = .data$regione)) +
        geom_point(colour = "black") +
        geom_line(colour = "black") +
        geom_point(
          data = dplyr::filter(obs_reg, .data$regione %in% reg)
        ) +
        facet_wrap(~ .data$regione, scales = "free_y") +
        ylab("Numero di nuovi casi") +
        xlab("") +
        scale_x_date(date_breaks = "1 day", date_labels = "%b %d") +
        theme(
          axis.text.x = element_text(
            angle = 60, hjust = 1, vjust = 0.5
          ),
          panel.spacing.y = unit(2, "lines"),
          legend.position = "none"
        )
      ggplotly(gg_reg)
    })
  })
}

## To be copied in the UI
#> mod_0323_picco_ui("focus_20200323_picco_ui_1")

## To be copied in the server
#> mod_0323_picco_server("focus_20200323_picco_ui_1")
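# --- Added illustration (not part of the original module) -----------------
# A minimal sketch of the logistic growth curve that the k/n0/r sliders
# above manipulate. This assumes growthcurver's standard parameterization
# N(t) = k / (1 + ((k - n0) / n0) * exp(-r * t)); the function name below
# is made up for illustration.
logistic_n_at_t <- function(k, n0, r, t) {
  k / (1 + ((k - n0) / n0) * exp(-r * t))
}
# New daily cases, as plotted by the module, are first differences of the
# cumulative curve, e.g.:
# diff(logistic_n_at_t(k = 100000, n0 = 50, r = 0.2, t = 0:60))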
/R/mod_focus_20200323_picco.R
permissive
UBESP-DCTV/covid19ita
R
false
false
7,146
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OurLogo.R
\name{OurLogo}
\alias{OurLogo}
\title{OurLogo}
\usage{
OurLogo(logo = "bb", rmd = TRUE, width = "200px")
}
\arguments{
\item{logo}{which logo to print. Use "bb" for the Brown and Brown logo, "arrowhead" for the Arrowhead General Insurance logo, or "kittens" for the kittens package logo}

\item{rmd}{default is TRUE, which prints the logo with the htmltools package for use in an Rmd. rmd = FALSE prints a regular PNG picture on the screen instead.}

\item{width}{default is "200px"; specify a different width in pixels if desired}
}
\value{
the requested logo
}
\description{
Returns a logo: the Brown & Brown Insurance company logo, the Arrowhead logo, or the kittens package logo.

Follow this link to see the Brown and Brown logo style guide: (https://bbins365.sharepoint.com/sites/intranet/departments/communications)
}
\examples{
# Brown and Brown logo, rendered with htmltools in an Rmd
\dontrun{
OurLogo(logo = "bb")
}
# Arrowhead logo, printed as a plain picture
\dontrun{
OurLogo(logo = "arrowhead", rmd = FALSE)
}
}
/OurLogo.Rd
no_license
preethinarayanan/R-Package-Development
R
false
true
1,089
rd
library(treemap)

# pages_per_user.txt is read without headers, so the columns are V1 and V2
# (presumably the user and their page count).
ts <- read.table('pages_per_user.txt')
# A random character label per row; note it is not actually used below,
# since the treemap is coloured by V1.
ts$V3 <- as.character(runif(length(ts$V2), max = 11) + 1)
#ts$V3<-as.character(seq(1,length(ts$V2)))
pdf(file = "pages_per_user.pdf", width = 64, height = 48, pointsize = 50)
treemap(ts, index = 'V1', vSize = 'V2', vColor = 'V1', type = 'categorical',
        fontsize.labels = 0, lowerbound.cex.labels = 0, title = '',
        position.legend = 'none')
dev.off()  # close the PDF device so the plot file is actually written
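# --- Added illustration (not part of the original script) -----------------
# A hedged sketch: if the intent of the random V3 column was to vary tile
# colours, treemap() can colour by a numeric vColor with type = 'value'
# instead of the categorical V1 colouring above. This keeps V3 numeric;
# argument names follow the call above.
ts$V3 <- runif(length(ts$V2), max = 11) + 1
treemap(ts, index = 'V1', vSize = 'V2', vColor = 'V3', type = 'value',
        title = '', position.legend = 'none')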
/monitoring/pages_per_user/plot_pages_per_user.R
no_license
oldweather/oldWeather5
R
false
false
351
r
#' @title Import detected circRNAs
#'
#' @description The function getBackSplicedJunctions() reads the
#' circRNAs_X.txt with the detected circRNAs, adapts the content and generates
#' a unique data frame with all circRNAs identified by each circRNA detection
#' tool and the occurrences found in each sample (named as reported in the
#' column label in experiment.txt).
#'
#' @param gtf A data frame containing the annotation information. It can be
#' generated with \code{\link{formatGTF}}.
#'
#' @param pathToExperiment A string containing the path to the experiment.txt
#' file. The file experiment.txt contains the experiment design information.
#' It must have at least 3 columns with headers:
#' \describe{
#' \item{label:}{(1st column) - unique names of the samples (short but informative).}
#' \item{fileName:}{(2nd column) - name of the input files - e.g. circRNAs_X.txt, where
#' X can be 001, 002, etc.}
#' \item{group:}{ (3rd column) - biological conditions - e.g. A or B; healthy or diseased
#' if you have only 2 conditions.}
#' }
#'
#' By default pathToExperiment is set to NULL and the file is searched for in
#' the working directory. If experiment.txt is located in a different directory
#' then the path needs to be specified.
#'
#' @return A data frame.
#'
#' @examples
#' check <- checkProjectFolder()
#'
#' if(check == 0){
#' # Create gtf object
#' gtf <- formatGTF(pathToGTF)
#'
#' # Read and adapt detected circRNAs
#' backSplicedJunctions<- getBackSplicedJunctions(gtf)}
#'
#' @seealso
#' \code{\link{backSplicedJunctions}} for a description of the data frame
#' containing back-spliced junctions coordinates.
#'
#' @import dplyr
#' @importFrom magrittr %>%
#' @importFrom utils read.table
#' @importFrom rlang .data
#' @export
getBackSplicedJunctions <- function(gtf, pathToExperiment = NULL) {
    # Read experiment.txt
    experiment <- .readExperiment(pathToExperiment)
    if (nrow(experiment)) {
        fileNames <- list.files()
        # Retrieve the code for each circRNA prediction tool
        detectionTools <- getDetectionTools()
        # Keep tools that have been used for circRNA detection
        detectionToolsUsed <- detectionTools %>%
            dplyr::filter(.data$name %in% fileNames)

        if (nrow(detectionToolsUsed) > 0) {
            # Create backSplicedJunctions data frame
            backSplicedJunctions <-
                .createBackSplicedJunctionsDF(addColNames = "tool")

            for (j in seq_along(detectionToolsUsed$name)) {
                # data frame to store circRNA predictions
                backSplicedJunctionsTool <- .createBackSplicedJunctionsDF()

                for (i in seq_along(experiment$fileName)) {
                    # Read the files containing the predictions one at a time
                    pathToFile <- file.path(detectionToolsUsed$name[j],
                        experiment$fileName[i])
                    nameTool <- detectionToolsUsed$name[j]
                    # A specific import function is called
                    adaptedPatientBSJunctions <-
                        .getAdaptedPatientBSJunctions(nameTool, pathToFile, gtf)

                    # Check validity of adaptedPatientBSJunctions.
                    adaptedPatientBSJunctions <-
                        .checkBSJsDF(adaptedPatientBSJunctions,
                            addColNames = "coverage")

                    patientBSJunctions <- adaptedPatientBSJunctions
                    indexCoverage <-
                        which(colnames(patientBSJunctions) == "coverage")
                    colnames(patientBSJunctions)[indexCoverage] <-
                        experiment$label[i]

                    # Merge circRNAs
                    basicColumns <- .getBasicColNames()
                    backSplicedJunctionsTool <- base::merge(
                        backSplicedJunctionsTool,
                        patientBSJunctions,
                        by = basicColumns,
                        all = TRUE,
                        sort = FALSE
                    )
                }
                tool <-
                    rep(detectionToolsUsed$code[j],
                        nrow(backSplicedJunctionsTool))

                # Replace NA values with 0 (zero)
                backSplicedJunctionsTool <- backSplicedJunctionsTool %>%
                    dplyr::mutate_at(experiment$label,
                        ~ replace(., is.na(.), 0)) %>%
                    dplyr::mutate(tool = tool)

                # rbind the data frames containing the circRNA predictions
                backSplicedJunctions <-
                    dplyr::bind_rows(backSplicedJunctions,
                        backSplicedJunctionsTool)
            }
            # Round
            backSplicedJunctions[, experiment$label] <-
                round(backSplicedJunctions[, experiment$label], 0)

        } else{
            backSplicedJunctions <- data.frame()
            cat(
                "Missing folders with circRNAs_X.txt files. Check working directory or type getDetectionTools() to see the name of the circRNA detection tools."
            )
        }
    } else{
        backSplicedJunctions <- data.frame()
        cat("experiment.txt not found (or empty). The analysis cannot start. Type ?getBackSplicedJunctions and see pathToExperiment param.\n")
    }
    return(backSplicedJunctions)
}

#' @title Create data frame with circRNA detection codes
#'
#' @description The function getDetectionTools() creates a data frame
#' containing the codes corresponding to each circRNA detection tool for which
#' a specific import function has been developed.
#'
#' @return A data frame
#'
#' @examples
#' getDetectionTools()
#'
#' @export
getDetectionTools <- function() {
    # Create a data frame
    detectionTools <- data.frame(matrix(nrow = 7, ncol = 2))
    colnames(detectionTools) <- c("name", "code")
    detectionTools$name[1] <- "mapsplice"
    detectionTools$code[1] <- "ms"
    detectionTools$name[2] <- "nclscan"
    detectionTools$code[2] <- "ns"
    detectionTools$name[3] <- "circexplorer2"
    detectionTools$code[3] <- "ce"
    detectionTools$name[4] <- "knife"
    detectionTools$code[4] <- "kn"
    detectionTools$name[5] <- "other"
    detectionTools$code[5] <- "ot"
    detectionTools$name[6] <- "circmarker"
    detectionTools$code[6] <- "cm"
    detectionTools$name[7] <- "uroborus"
    detectionTools$code[7] <- "ur"
    return(detectionTools)
}

#' @title Group circRNAs identified by multiple prediction tools
#'
#' @description The function mergeBSJunctions() shrinks the data frame by
#' grouping back-spliced junctions commonly identified by multiple
#' detection tools. The read counts of the samples reported in the final
#' data frame will be the ones of the tool that detected the highest total mean
#' across all samples. All the tools that detected the back-spliced junctions
#' are then listed in the column "tool" of the final data frame.
#' See \code{\link{getDetectionTools}} for more detail about the code
#' corresponding to each circRNA detection tool.
#'
#' NOTE: Since different detection tools can report slightly different
#' coordinates, before grouping the back-spliced junctions it is possible to
#' fix the latter using the gtf file. In this way the back-spliced junction
#' coordinates will correspond to the exon coordinates reported in the gtf
#' file. A difference of at most 2 nucleotides is allowed between the bsj and
#' exon coordinates. See param fixBSJsWithGTF.
#'
#' @param backSplicedJunctions A data frame containing back-spliced junction
#' coordinates and counts generated with \code{\link{getBackSplicedJunctions}}.
#'
#' @param gtf A data frame containing genome annotation information,
#' generated with \code{\link{formatGTF}}.
#'
#' @param pathToExperiment A string containing the path to the experiment.txt
#' file. The file experiment.txt contains the experiment design information.
#' It must have at least 3 columns with headers:
#' - label (1st column): unique names of the samples (short but informative).
#' - fileName (2nd column): name of the input files - e.g. circRNAs_X.txt, where
#' X can be 001, 002, etc.
#' - group (3rd column): biological conditions - e.g. A or B; healthy or
#' diseased if you have only 2 conditions.
#'
#' By default pathToExperiment is set to NULL and the file is searched for in
#' the working directory. If experiment.txt is located in a different directory
#' then the path needs to be specified.
#'
#' @param exportAntisense A logical specifying whether to export the identified
#' antisense circRNAs in a file named antisenseCircRNAs.txt. Default value is
#' FALSE. A circRNA is defined antisense if the strand reported in the prediction
#' results is different from the strand reported in the genome annotation file.
#' The antisense circRNAs are removed from the returned data frame.
#'
#' @param fixBSJsWithGTF A logical specifying whether to fix the back-spliced
#' junctions coordinates using the GTF file. Default value is FALSE.
#'
#' @return A data frame.
#'
#' @examples
#' # Load detected back-spliced junctions
#' data("backSplicedJunctions")
#'
#' # Load short version of the gencode v19 annotation file
#' data("gtf")
#'
#' pathToExperiment <- system.file("extdata", "experiment.txt",
#'     package ="circRNAprofiler")
#'
#' # Merge commonly identified circRNAs
#' mergedBSJunctions <- mergeBSJunctions(backSplicedJunctions, gtf,
#'     pathToExperiment)
#'
#' @importFrom magrittr %>%
#' @importFrom utils read.table
#' @importFrom utils write.table
#' @importFrom rlang .data
#' @import dplyr
#' @export
mergeBSJunctions <-
    function(backSplicedJunctions, gtf, pathToExperiment = NULL,
        exportAntisense = FALSE, fixBSJsWithGTF = FALSE) {
        # Read experiment.txt
        experiment <- .readExperiment(pathToExperiment)
        if (nrow(experiment) > 0) {
            if (fixBSJsWithGTF) {
                # Fix coordinates with GTF
                backSplicedJunctionsFixed <-
                    .fixCoordsWithGTF(backSplicedJunctions, gtf)
                id <- .getID(backSplicedJunctionsFixed)
                backSplicedJunctionsFixed$id <- id
            } else {
                backSplicedJunctionsFixed <- backSplicedJunctions
            }

            # Find and merge commonly identified back-spliced junctions
            mergedBSJunctions <- backSplicedJunctionsFixed %>%
                dplyr::mutate(mean = rowMeans(.[, experiment$label])) %>%
                dplyr::group_by(.data$strand, .data$chrom,
                    .data$startUpBSE, .data$endDownBSE) %>%
                dplyr::arrange(desc(mean)) %>%
                dplyr::mutate(mergedTools = paste(sort(unique(.data$tool)),
                    collapse = ",")) %>%
                dplyr::filter(row_number() == 1) %>%
                dplyr::ungroup() %>%
                dplyr::select(-c(.data$tool, .data$mean)) %>%
                dplyr::rename(tool = .data$mergedTools) %>%
                dplyr::select(
                    .data$id,
                    .data$gene,
                    .data$strand,
                    .data$chrom,
                    .data$startUpBSE,
                    .data$endDownBSE,
                    .data$tool,
                    everything()
                ) %>%
                as.data.frame()

            # Identify antisense circRNAs
            antisenseCircRNAs <-
                .getAntisenseCircRNAs(mergedBSJunctions, gtf, exportAntisense)

            # Remove the antisense circRNAs from the data frame
            mergedBSJunctionsCleaned <- mergedBSJunctions %>%
                dplyr::filter(!(mergedBSJunctions$id %in% antisenseCircRNAs$id))

        } else{
            mergedBSJunctionsCleaned <- backSplicedJunctions
            cat("experiment.txt not found in wd (or empty), data frame cannot be merged. Type ?mergeBSJunctions and see pathToExperiment param.\n")
        }
        return(mergedBSJunctionsCleaned)
    }

# Create backSplicedJunctions data frame
.createBackSplicedJunctionsDF <- function(addColNames = NULL) {
    # Get basic column names
    basicColumns <- .getBasicColNames()

    # Create the data frame that will be filled with the circRNA predictions
    # performed by the prediction tools used.
    backSplicedJunctions <- data.frame(matrix(nrow = 0, ncol = length(c(
        basicColumns, addColNames
    ))))
    colnames(backSplicedJunctions) <- c(basicColumns, addColNames)
    backSplicedJunctions$id <- as.character(backSplicedJunctions$id)
    backSplicedJunctions$gene <- as.character(backSplicedJunctions$gene)
    backSplicedJunctions$strand <- as.character(backSplicedJunctions$strand)
    backSplicedJunctions$chrom <- as.character(backSplicedJunctions$chrom)
    backSplicedJunctions$startUpBSE <-
        as.numeric(backSplicedJunctions$startUpBSE)
    backSplicedJunctions$endDownBSE <-
        as.numeric(backSplicedJunctions$endDownBSE)
    if (!is.null(addColNames)) {
        backSplicedJunctions[, 7] <- as.character(backSplicedJunctions[, 7])
    }
    return(backSplicedJunctions)
}

# Get the adaptedPatientBSJunctions data frame with circRNA predictions
.getAdaptedPatientBSJunctions <- function(nameTool, pathToFile, gtf) {
    # A specific import function is called
    adaptedPatientBSJunctions <- switch(
        nameTool,
        mapsplice = importMapSplice(pathToFile),
        nclscan = importNCLscan(pathToFile),
        knife = importKnife(pathToFile),
        circexplorer2 = importCircExplorer2(pathToFile),
        other = importOther(pathToFile),
        circmarker = importCircMarker(pathToFile, gtf),
        uroborus = importUroborus(pathToFile)
    )
    return(adaptedPatientBSJunctions)
}

# For some circRNAs the strand reported in prediction results is
# sometimes different from the strand reported in the gtf file.
# With this function we identify the antisense circRNAs
.getAntisenseCircRNAs <-
    function(mergedBSJunctions, gtf, exportAntisense = FALSE) {
        shrinkedGTF <- gtf %>%
            dplyr::select(.data$gene_name, .data$strand) %>%
            dplyr::group_by(.data$gene_name) %>%
            dplyr::filter(row_number() == 1)
        colnames(shrinkedGTF) <- paste0(colnames(shrinkedGTF), '1')
        mt <- match(mergedBSJunctions$gene, shrinkedGTF$gene_name1)

        antisenseCircRNAs <-
            dplyr::bind_cols(mergedBSJunctions, shrinkedGTF[mt, ]) %>%
            dplyr::filter(.data$strand != .data$strand1) %>%
            dplyr::select(-c(.data$gene_name1, .data$strand1))

        if (exportAntisense) {
            utils::write.table(
                antisenseCircRNAs,
                "antisenseCircRNAs.txt",
                quote = FALSE,
                row.names = FALSE,
                col.names = TRUE,
                sep = "\t"
            )
        }
        return(antisenseCircRNAs)
    }

# If the function you are looking for is not here check supportFunction.R
# Functions in supportFunction.R are used by multiple functions.
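# --- Added illustration (not part of the original package code) -----------
# A minimal sketch of the experiment.txt layout documented above (columns:
# label, fileName, group). The sample labels and file names are invented
# for illustration, and the tab separator is an assumption.
experiment <- data.frame(
    label = c("ctrl_1", "ctrl_2", "case_1", "case_2"),
    fileName = c("circRNAs_001.txt", "circRNAs_002.txt",
        "circRNAs_003.txt", "circRNAs_004.txt"),
    group = c("A", "A", "B", "B")
)
utils::write.table(experiment, "experiment.txt", quote = FALSE,
    row.names = FALSE, col.names = TRUE, sep = "\t")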
/R/getBackSplicedJunctions.R
no_license
Aufiero/circRNAprofiler
R
false
false
15,087
r
#Today we want to start to look at some canonical plots. We will
#go relatively fast through these canonical plots while trying to cover many plots
#and will try to focus on thinking through some of the issues with
#plots in terms of communication about things like variation etc.

#Today we will do so as an exploration of the salaries dataset, in leading
#up to a solution of the daily assignment problem for today.

#One thing about today is that you will see many a pipeline.

#read_csv(), %>%, and ggplot() below all come from the tidyverse
library(tidyverse)

salaries <- read_csv('Salaries.csv')
batting <- read_csv('Batting.csv')

head(salaries)

#First we play around with our dataset. Get to know it. We:
#ask questions of our data. we learn from the answers.
#if anything doesn't conform to your intuition of the dataset
#find out why! When you plot data, if something looks odd
#go figure it out!

#Here's an example of a way we can get to know our data
#number of entries by player (as we will see, not the same as number of years played!)
salaries %>%
  group_by(playerID) %>%
  summarize(count=n())

#arranged by largest number
salaries %>%
  group_by(playerID) %>%
  summarize(count=n()) %>%
  arrange(desc(count))

#look at one player
salaries %>%
  filter(playerID=="moyerja01") %>%
  tail() #tail here looks at the end of his career

#do any players play with more than one team in a single year?
#first check the actual number of years played
#this is not just the number of lines in a player's individual database
salaries %>%
  group_by(playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID)),
            count=n())

#create a variable that says whether numYearsPlayed is not the same as
#the number of lines in the dataset. If true, a player played
#for more than one team in a year.
salaries %>%
  group_by(playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID)),
            count=n(),
            multTeamsInYear=numYearsPlayed!=count)

#summarize...
salaries %>%
  group_by(playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID)),
            count=n(),
            multTeamsInYear=numYearsPlayed!=count) %>%
  summary()

#we see in the summary that there are indeed players such as these.

#do people skip years?
#a lag function works as follows:
#lag(x(t)) = x(t-1)
#in other words, the lag at time t of function x() is equal to the function
#x() at time t-1. You can also look at lag(x, 2) which is lag(x(t))=x(t-2)
#in tibbles, the rows are ordered, so lag(yearID) says that the value of
#lag(yearID) is equal to the value of yearID in the previous row.
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  filter(yearID>1985) #this step bc 1985 is just NA (first year of dataset)

#anywhere you see an NA in this table it means they haven't shown up
#in this dataset prior to that year.

#get summaries of the lag
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  summary()

salaries %>%
  filter(playerID=="venabma01")

#############################################
############ Now we start plotting ##########
#############################################

#############################################
######## 1-D Categorical Variables ##########
#############################################

#let's plot these gaps in play with a bar plot
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  ggplot(aes(x=factor(yearContinuity))) +
  geom_bar()

#vast majority of the time, players play consecutive years.
#scale in bar plots (and many others) is an issue. SO MANY
#entries in yearContinuity are 1 that it (possibly) obscures
#the other entries.

#look closer at gaps less than 10 years
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  filter(yearContinuity<=10) %>%
  ggplot(aes(x=as.factor(yearContinuity))) +
  geom_bar()

#if these are factors, why do 6, 7, 8, and 10 show up? 9 didn't...
#again, 1 is dominating the conversation here. The plot is more
#worried about accommodating 1 than making sure that the other numbers speak.

#look at 6 to 10
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  filter(yearContinuity<=10 & yearContinuity>=6) %>%
  ggplot(aes(x=factor(yearContinuity))) +
  geom_bar()

#moral of the story: sometimes big values (counts here) can
#make small ones (or smaller differences) disappear.

#look at plot of number of years played (number of unique salary years for each player).
#unique(yearID) returns the same vector, but with any duplicates removed.
#length(unique(yearID)) just asks how many unique years there are in a player's tibble.
#this is just the years the player was in the MLB
salaries %>%
  group_by(playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar()

#look at number of years played for a given team (remember some players play for multiple teams)
#before we had players' years in MLB. Now we want them for individual teams. So our tibbles now
#need to consist of one player and one team.
salaries %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar()

#exponential distribution -ish. I wouldn't call it exponential
#but if you were squinting, or if you were just glancing
#it wouldn't be a bad guess.

#bar chart filtered; JUST Bos, Was, and Phi
#we only filter. Otherwise we do everything the same.
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar()

###################################################
############ 2 Categorical Variables ##############
###################################################

#let's start to deal with 2 categorical variables in our plots

#stacked bar chart
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar(aes(fill=teamID))

#there are various bits of info contained in this chart
#what's easy to see? What's hard to see?

#relative proportions,
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar(aes(fill=teamID), position="fill")

#side-by-side (by Sondheim!) proportions
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar(aes(fill=teamID), position="dodge")

##### polar coordinates (think pie chart) ####
############ use with extreme caution!!!
#stacked bar polar, now make teamID the "x" axis salaries %>% filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>% group_by(teamID, playerID) %>% summarize(numYearsPlayed = length(unique(yearID))) %>% filter(numYearsPlayed <= 5) %>% ggplot(aes(x=teamID)) + geom_bar(width=1, aes(fill=factor(numYearsPlayed))) + coord_polar() #relative polar, back to numYearsPlayed as x salaries %>% filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>% group_by(teamID, playerID) %>% summarize(numYearsPlayed = length(unique(yearID))) %>% filter(numYearsPlayed <= 5) %>% ggplot(aes(x=numYearsPlayed)) + geom_bar(width=1, aes(fill=teamID), position="fill") + coord_polar() #side-by-side polar salaries %>% filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>% group_by(teamID, playerID) %>% summarize(numYearsPlayed = length(unique(yearID))) %>% filter(numYearsPlayed <= 5) %>% ggplot(aes(x=numYearsPlayed)) + geom_bar(width=1, aes(fill=teamID), position="dodge") + coord_polar() #all the angles are equal in the above- what about a more traditional pie chart? salaries %>% filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>% group_by(teamID, playerID) %>% summarize(numYearsPlayed = length(unique(yearID))) %>% filter(numYearsPlayed <= 5) %>% ggplot(aes(x=factor(1), fill=teamID)) + geom_bar(width=1) + coord_polar(theta="y") #or a 'target' chart salaries %>% filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>% group_by(teamID, playerID) %>% summarize(numYearsPlayed = length(unique(yearID))) %>% filter(numYearsPlayed <= 5) %>% ggplot(aes(x=factor(1), fill=teamID)) + geom_bar(width=1) + coord_polar() #point is, ggplot2 is happy to do things with you. It will create many #pie-flavored charts. But at the end of the day you need to be really #cautious with pie-esque charts. As you can see, they often require us to #make area comparisons. Very bad! #stick to eating pies. not plotting them. ################################################# ######## One more plot, then to the hw ########## ################################################# #let's look at batting batting %>% filter(yearID >= 2000 & yearID <=2010) #let's plot number of games played batting %>% filter(yearID >= 2000 & yearID <=2010) %>% ggplot(aes(x=G)) + geom_histogram(binwidth=2) #why the spike? This is another example of where #asking questions of your data yields some insight #into the dataset, and can help you check intuitions. ################################################### ############### HW Solution ####################### ################################################### #and now for the homework solution inner_join(salaries, batting) #note what this joins by # filter years inner_join(salaries, batting) %>% filter(yearID >=2000 & yearID <= 2010) # let's get rid of a bunch of variables inner_join(salaries, batting) %>% filter(yearID >=2000 & yearID <= 2010) %>% select(yearID, teamID, playerID, salary, G) # calculate salary per game inner_join(salaries, batting) %>% filter(yearID >=2000 & yearID <= 2010) %>% select(yearID, teamID, playerID, salary, G) %>% mutate(salaryPerGame=salary/G) #notice here each year for each player (etc) has a salary per game. 
# each player has (possibly) multiple years
# can calculate a median salary/G per player (across years)
# group by playerID and teamID (takes care of multiple teams)
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID)

# calculate median per player per team
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame))

#note that now years are gone (we aggregated them out through
#the median). Players have multiple teams, though

# First attempt at calculating the per-team mean of the medians
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  summarize(mean=mean(median))

#but this didn't do what we wanted! It aggregated over teams
#we want to aggregate over players

# second attempt: first specify group_by(teamID) so now the
# grouping over playerID is broken
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID) %>%
  summarize(mean=mean(median))

#this does what we want

# plot it
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID) %>%
  summarize(mean=mean(median)) %>%
  ggplot() +
  geom_bar(stat="identity", aes(x=teamID, y=mean))

# plot it, ordered, with colors by league! (notice how we had to incorporate lgID)
inner_join(salaries, batting) %>%
  filter(yearID >=2000 & yearID <= 2010)%>%
  select(yearID, teamID, lgID, playerID, salary, G)%>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID, lgID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID, lgID) %>%
  summarize(mean=mean(median))%>%
  ggplot() +
  geom_bar(stat="identity", aes(x=reorder(teamID, -mean), y=mean, fill=lgID))
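# --- Added illustration (not part of the original lesson) -----------------
# Two toy demos of ideas used above.
# 1) lag(): each value shifted down one position, with NA padding the front,
#    so x - lag(x) gives the gaps between consecutive entries.
x <- c(1985, 1986, 1988, 1995)
x - lag(x)   # NA 1 2 7
# 2) summarize() peels off one grouping level: after group_by(playerID,
#    teamID), the first summarize() leaves the data grouped by playerID
#    only. That is why the first attempt averaged over teams within each
#    player, and why the solution re-groups by teamID before taking the
#    mean over players.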
/3_Canonical Plots/Canonical.R
no_license
youbeen-shim/data-visualization-and-presentation
R
false
false
12,934
r
#Today we want to start to look at some canonical plots. We will #go relatively fast through these canonical plots while trying to cover many plots #and will try to focus on thinking through some of the issues with #plots in terms of communication about things like variation etc. #Today we will do so as an exploration of the salaries dataset, in leading #up to a solution of the daily assignment problem for today. #One thing about today is that you will see many a pipeline. salaries <- read_csv('Salaries.csv') batting <- read_csv('Batting.csv') head(salaries) #First we play around with our dataset. Get to know it. We: #ask questions of our data. we learn from the answers. #if anything doesn't conform to your intuition of the dataset #find out why! When you plot data, if something looks odd #go figure it out! #Here's an example of a way we can get to know our data #number of entries by player (as we will see, not the same as number of years played!) salaries %>% group_by(playerID) %>% summarize(count=n()) #arranged by largest number salaries %>% group_by(playerID) %>% summarize(count=n()) %>% arrange(desc(count)) #look at one player salaries %>% filter(playerID=="moyerja01") %>% tail() #tail here looks at the end of his career #do any players play with more than one team in a single year? #first check the actual number of years played #this is not just the number of lines in a players individual database salaries %>% group_by(playerID) %>% summarize(numYearsPlayed = length(unique(yearID)), count=n()) #create a variable that says if numYearsPlayed not the same as #the number of lines in the dataset. If true, a player played #for more than one team in a year. salaries %>% group_by(playerID) %>% summarize(numYearsPlayed = length(unique(yearID)), count=n(), multTeamsInYear=numYearsPlayed!=count) #summarize... salaries %>% group_by(playerID) %>% summarize(numYearsPlayed = length(unique(yearID)), count=n(), multTeamsInYear=numYearsPlayed!=count) %>% summary() #we see in the summary that there are indeed players such as these. #do people skip years? #a lag function works as follows: #lag(x(t)) = x(t-1) #in other words, the lag at time t of function x() is equal to the function #x() at time t-1. You can also look at lag(x, 2) which is lag(x(t))=x(t-2) #in tibbles, the rows are ordered, so lag(yearID) says the the value of #lag(yearID) is equal to the value of yearID in the previous row. salaries %>% group_by(playerID) %>% mutate(yearContinuity = yearID - lag(yearID)) %>% filter(yearID>1985) #this step bc 1985 is just NA (first year of dataset) #anywhere you see an NA in this table it means they haven't shown up #in this dataset prior to that year. #get summaries of the lag salaries %>% group_by(playerID) %>% mutate(yearContinuity = yearID - lag(yearID)) %>% summary() salaries %>% filter(playerID=="venabma01") ############################################# ############ Now we start plotting ########## ############################################# ############################################# ######## 1-D Categorical Variables ########## ############################################# #let's plot these gaps in play with a bar plot salaries %>% group_by(playerID) %>% mutate(yearContinuity = yearID - lag(yearID)) %>% ggplot(aes(x=factor(yearContinuity))) + geom_bar() #vast majority of the time, players play consecutive years. #scale in bar plots (and many others) is an issue. SO MANY #entries in yearContinuity are 1, that it (possibly) obscures #the other entries. 
#look closer at gaps less than 10 years
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  filter(yearContinuity<=10) %>%
  ggplot(aes(x=as.factor(yearContinuity))) + geom_bar()

#if these are factors, why do 6, 7, 8, and 10 show up? 9 didn't...
#again, 1 is dominating the conversation here. The plot is more
#worried about accommodating 1 than making sure that the other numbers speak.

#look at 6 to 10
salaries %>%
  group_by(playerID) %>%
  mutate(yearContinuity = yearID - lag(yearID)) %>%
  filter(yearContinuity<=10 & yearContinuity>=6) %>%
  ggplot(aes(x=factor(yearContinuity))) + geom_bar()

#moral of the story: sometimes big values (counts here) can
#make small ones (or smaller differences) disappear.

#look at plot of number of years played (number of unique salary years for each player).
#unique(yearID) returns the same vector, but with any duplicates removed.
#length(unique(yearID)) just asks how many unique years there are in a player's tibble.
#this is just the years the player was in the MLB
salaries %>%
  group_by(playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar()

#look at number of years played for a given team (remember some players play for multiple teams)
#before we had players' years in MLB. Now we want them for individual teams. So our tibbles now
#need to comprise one player and one team.
salaries %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar()

#exponential distribution -ish. I wouldn't call it exponential
#but if you were squinting, or if you were just glancing
#it wouldn't be a bad guess.

#bar chart filtered; JUST Bos, Was, and Phi
#we only filter. Otherwise we do everything the same.
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar()

###################################################
############ 2 Categorical Variables ##############
###################################################

#let's start to deal with 2 categorical variables in our plots

#stacked bar chart
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar(aes(fill=teamID))

#there are various bits of info contained in this chart
#what's easy to see? What's hard to see?

#relative proportions
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar(aes(fill=teamID), position="fill")

#side-by-side (by Sondheim!) proportions
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  ggplot(aes(x=numYearsPlayed)) + geom_bar(aes(fill=teamID), position="dodge")

##### polar coordinates (think pie chart) #####
##### use with extreme caution!!! #############

#stacked bar polar, now make teamID the "x" axis
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  filter(numYearsPlayed <= 5) %>%
  ggplot(aes(x=teamID)) +
  geom_bar(width=1, aes(fill=factor(numYearsPlayed))) +
  coord_polar()

#relative polar, back to numYearsPlayed as x
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  filter(numYearsPlayed <= 5) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar(width=1, aes(fill=teamID), position="fill") +
  coord_polar()

#side-by-side polar
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  filter(numYearsPlayed <= 5) %>%
  ggplot(aes(x=numYearsPlayed)) +
  geom_bar(width=1, aes(fill=teamID), position="dodge") +
  coord_polar()

#all the angles are equal in the above - what about a more traditional pie chart?
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  filter(numYearsPlayed <= 5) %>%
  ggplot(aes(x=factor(1), fill=teamID)) +
  geom_bar(width=1) +
  coord_polar(theta="y")

#or a 'target' chart
salaries %>%
  filter(teamID %in% c('BOS', 'WAS', 'PHI')) %>%
  group_by(teamID, playerID) %>%
  summarize(numYearsPlayed = length(unique(yearID))) %>%
  filter(numYearsPlayed <= 5) %>%
  ggplot(aes(x=factor(1), fill=teamID)) +
  geom_bar(width=1) +
  coord_polar()

#point is, ggplot2 is happy to do things with you. It will create many
#pie-flavored charts. But at the end of the day you need to be really
#cautious with pie-esque charts. As you can see, they often require us to
#make area comparisons. Very bad!
#stick to eating pies, not plotting them.

#################################################
######## One more plot, then to the hw ##########
#################################################

#let's look at batting
batting %>% filter(yearID >= 2000 & yearID <= 2010)

#let's plot number of games played
batting %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  ggplot(aes(x=G)) + geom_histogram(binwidth=2)

#why the spike? This is another example of where
#asking questions of your data yields some insight
#into the dataset, and can help you check intuitions.

###################################################
############### HW Solution #######################
###################################################

#and now for the homework solution
inner_join(salaries, batting)
#note what this joins by

# filter years
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010)

# let's get rid of a bunch of variables
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G)

# calculate salary per game
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G)
#notice here each year for each player (etc) has a salary per game.

# each player has (possibly) multiple years
# can calculate a median salary/G per player (across years)
# group by playerID and teamID (takes care of multiple teams)
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID)

# calculate median per player per team
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame))
#note that now years are gone (we aggregated them out through
#the median). Players have multiple teams, though

# First attempt at calculating the per-team mean of the medians
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  summarize(mean=mean(median))
#but this didn't do what we wanted! It aggregated over teams
#we want to aggregate over players

# second attempt: first specify group_by(teamID) so now the
# grouping over playerID is broken
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID) %>%
  summarize(mean=mean(median))
#this does what we want

# plot it
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID) %>%
  summarize(mean=mean(median)) %>%
  ggplot() + geom_bar(stat="identity", aes(x=teamID, y=mean))

# plot it, ordered, with colors by league! (notice how we had to incorporate lgID)
inner_join(salaries, batting) %>%
  filter(yearID >= 2000 & yearID <= 2010) %>%
  select(yearID, teamID, lgID, playerID, salary, G) %>%
  mutate(salaryPerGame=salary/G) %>%
  group_by(playerID, teamID, lgID) %>%
  summarize(median=median(salaryPerGame)) %>%
  group_by(teamID, lgID) %>%
  summarize(mean=mean(median)) %>%
  ggplot() + geom_bar(stat="identity", aes(x=reorder(teamID, -mean), y=mean, fill=lgID))
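#--- added sketch (not from the original lecture) ---------------------------
#why the first attempt failed: summarize() peels off only the LAST grouping
#variable. After group_by(playerID, teamID) %>% summarize(...), the result is
#still grouped by playerID, so the next summarize() collapses over teams
#within each player. A tiny toy tibble (all names here are made up) shows it:
toy <- tibble(playerID = c("p1", "p1", "p2"),
              teamID   = c("BOS", "PHI", "BOS"),
              salaryPerGame = 1:3)
toy %>%
  group_by(playerID, teamID) %>%
  summarize(median = median(salaryPerGame)) %>%
  group_vars()
#returns "playerID" - which is why the second attempt re-groups by teamID
#before taking the mean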
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7798
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7644
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7644
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf06.01X-QBF.BB1-01X.BB2-Zi.BB3-Zi.with-IOC.unfold-003.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4724
c no.of clauses 7798
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7644
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf06.01X-QBF.BB1-01X.BB2-Zi.BB3-Zi.with-IOC.unfold-003.qdimacs 4724 7798 E1 [1030 1031 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1578 1579 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 2126 2127 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2159 2161 2163 2165 2167 2169 2171 2173 2175 2177 2179 2181 2183 2185 2189 2199 2201 2203 2205 2207 2209 2211 2213 2215 2217 2219 2221 2223 2225 2227 2229 2231] 0 79 2884 7644 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf06.01X-QBF.BB1-01X.BB2-Zi.BB3-Zi.with-IOC.unfold-003/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf06.01X-QBF.BB1-01X.BB2-Zi.BB3-Zi.with-IOC.unfold-003.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
1,546
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_pixels}
\alias{get_pixels}
\title{Gets Pixels}
\usage{
get_pixels(pixels = NULL, grid = c(28, 28), size = c(250, 250),
  brush = matrix(c(0, 0.5, 0.8, 0.5, 0, 0.5, 1, 1, 1, 0.5, 0.8, 1, 1, 1,
  0.8, 0.5, 1, 1, 1, 0.5, 0, 0.5, 0.8, 0.5, 0), 5, 5),
  params = list(fill = list(color = "#555555"), grid = list(color =
  "#EEEEEE")))
}
\arguments{
\item{pixels}{The pixels to render as a 1-dimensional vector, row-first order expected.}

\item{grid}{The grid dimensions specified as a vector.}

\item{size}{The canvas dimensions specified as a vector.}

\item{brush}{The brush specified as a matrix.}

\item{params}{A set of parameters to customize the visual appearance.}
}
\description{
Creates a Shiny Gadget to retrieve pixels.
}
\examples{
library(pixels)

if (interactive()) {
  get_pixels()
}
}
/man/get_pixels.Rd
no_license
javierluraschi/pixels
R
false
true
892
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.r
\docType{data}
\name{ms.tst}
\alias{ms.tst}
\title{Mass testing data}
\description{
Data on attendance and number of positives by county and mass testing
round in Slovakia.
}
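% Added sketch (not part of the original help page): minimal usage, assuming
% the dataset is lazy-loaded with the package in the usual way.
\examples{
library(covid19.slovakia.mass.testing)
head(ms.tst)
}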
/man/ms.tst.Rd
permissive
epiforecasts/covid19.slovakia.mass.testing
R
false
true
263
rd
library(sdmpredictors)

### Name: layer_citations
### Title: Generate citations for all layers
### Aliases: layer_citations

### ** Examples

# print the citation for the Bio-ORACLE salinity layer
print(layer_citations("BO_salinity"))

# print the citation for a MARSPEC paleo layer
print(layer_citations("MS_biogeo02_aspect_NS_21kya"))

# print all citations as Bibtex
print(lapply(layer_citations(astext = FALSE), toBibtex))
/data/genthat_extracted_code/sdmpredictors/examples/layer_citations.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
431
r
#' Attach shinyWidgets dependencies
#'
#' @param tag An object which has (or should have) HTML dependencies.
#' @param widget Name of a widget for particular dependencies
#'
#' @noRd
#' @importFrom utils packageVersion
#' @importFrom htmltools htmlDependency attachDependencies findDependencies
#' @importFrom shiny icon
#'
attachShinyWidgetsDep <- function(tag, widget = NULL) {
  version <- as.character(packageVersion("shinyWidgets")[[1]])
  dep <- htmltools::htmlDependency(
    name = "shinyWidgets", version = version,
    src = c(href = "shinyWidgets"),
    script = "shinyWidgets-bindings.min.js",
    stylesheet = "shinyWidgets.css"
  )
  if (!is.null(widget)) {
    if (widget == "picker") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "selectPicker", version = "1.12.4",
          src = c(href = "shinyWidgets/selectPicker"),
          script = "js/bootstrap-select.min.js",
          stylesheet = "css/bootstrap-select.min.css"
        )
      )
    } else if (widget == "awesome") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "awesome-bootstrap", version = "0.2.0",
          src = c(href = "shinyWidgets/awesomeRadioCheckbox"),
          stylesheet = "css/awesome-bootstrap-checkbox-shiny.css"
        ),
        htmltools::findDependencies(shiny::icon("rebel"))[[1]]
      )
    } else if (widget == "bsswitch") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "bootstrap-switch", version = "3.3.4",
          src = c(href = "shinyWidgets/switchInput/bootstrap-switch-3.3.4"),
          script = "bootstrap-switch.min.js",
          stylesheet = "bootstrap-switch.min.css"
        )
      )
    } else if (widget == "sweetalert") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "sweetAlert", version = "0.2.0",
          src = c(href = "shinyWidgets/sweetAlert"),
          script = c("sweetalert.min.js", "sweetalert-bindings.js")
        )
      )
    } else if (widget == "multi") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "multi", version = "0.3.0",
          src = c(href = "shinyWidgets/multi"),
          script = "multi.min.js",
          stylesheet = c("multi.min.css")
        )
      )
    } else if (widget == "jquery-knob") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "jquery-knob", version = "1.2.13",
          src = c(href = "shinyWidgets/jquery-knob"),
          script = c("jquery.knob.min.js", "knob-input-binding.js")
        )
      )
    } else if (widget == "dropdown") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "dropdown-patch", version = version,
          src = c(href = "shinyWidgets/dropdown"),
          script = "dropdown-click.js"
        )
      )
    } else if (widget == "sw-dropdown") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "sw-dropdown", version = version,
          src = c(href = "shinyWidgets/sw-dropdown"),
          script = "sw-dropdown.js",
          stylesheet = "sw-dropdown.css"
        )
      )
    } else if (widget == "animate") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "animate", version = version,
          src = c(href = "shinyWidgets/animate"),
          stylesheet = "animate.min.css"
        )
      )
    } else if (widget == "bttn") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "bttn", version = version,
          src = c(href = "shinyWidgets/bttn"),
          stylesheet = "bttn.min.css"
        )
      )
    } else if (widget == "spectrum") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "spectrum", version = version,
          src = c(href = "shinyWidgets/spectrum"),
          script = c("spectrum.min.js"),
          stylesheet = c("spectrum.min.css", "sw-spectrum.css")
        )
      )
    } else if (widget == "pretty") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "pretty", version = version,
          src = c(href = "shinyWidgets/pretty-checkbox"),
          stylesheet = "pretty-checkbox.min.css"
        )
      )
    } else if (widget == "nouislider") {
      dep <- list(
        dep,
        htmltools::htmlDependency(
          name = "nouislider", version = "11.0.3",
          src = c(href = "shinyWidgets/nouislider"),
          script = c("nouislider.min.js", "wNumb.js"),
          stylesheet = "nouislider.min.css"
        )
      )
    }
  }
  htmltools::attachDependencies(tag, dep, append = TRUE)
}
/R/attachShinyWidgetsDep.R
permissive
statnmap/shinyWidgets
R
false
false
4,822
r
file <- "household_power_consumption.txt" data <- read.table(file, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,] #str(subSetData) GAP <- as.numeric(subSetData$Global_active_power) png("plot1.png", width=480, height=480) hist(GAP, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") dev.off()
/plot1.R
no_license
maad2011/ExData_Plotting1
R
false
false
390
r
file <- "household_power_consumption.txt" data <- read.table(file, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,] #str(subSetData) GAP <- as.numeric(subSetData$Global_active_power) png("plot1.png", width=480, height=480) hist(GAP, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/focus.R
\name{focus}
\alias{focus}
\alias{locate}
\title{Show features and regions of interest}
\usage{
focus(
  x,
  ...,
  .track_id = 2,
  .max_dist = 10000,
  .expand = 5000,
  .overhang = c("drop", "trim", "keep"),
  .locus_id = str_glue("{seq_id}_lc{row_number()}"),
  .locus_id_group = seq_id,
  .locus_bin = c("bin", "seq", "locus"),
  .locus_score = n(),
  .locus_filter = TRUE,
  .loci = NULL
)

locate(
  x,
  ...,
  .track_id = 2,
  .max_dist = 10000,
  .expand = 5000,
  .locus_id = str_glue("{seq_id}_lc{row_number()}"),
  .locus_id_group = seq_id,
  .locus_bin = c("bin", "seq", "locus"),
  .locus_score = n(),
  .locus_filter = TRUE,
  .locus_track = "loci"
)
}
\arguments{
\item{x}{A gggenomes object}

\item{...}{Logical predicates defined in terms of the variables in the track
given by \code{.track_id}. Multiple conditions are combined with ‘&’. Only
rows where the condition evaluates to ‘TRUE’ are kept. The arguments in ‘...’
are automatically quoted and evaluated in the context of the data frame.
They support unquoting and splicing. See ‘vignette("programming")’ for an
introduction to these concepts.}

\item{.track_id}{the track to filter from - defaults to the first feature
track, usually "genes". Can be a quoted or unquoted string or a positional
argument giving the index of a track among all tracks (seqs, feats & links).}

\item{.max_dist}{Maximum distance between adjacent features to be included
into the same locus, default 10kb.}

\item{.expand}{The amount of nucleotides to expand the focus around the
target features. Default 5kb. Give two values for different up- and
downstream expansions.}

\item{.overhang}{How to handle features overlapping the locus boundaries
(including expand). Options are to "keep" them, "trim" them exactly at the
boundaries, or "drop" all features not fully included within the
boundaries.}

\item{.locus_id, .locus_id_group}{How to generate the ids for the new loci
which will eventually become their new \code{seq_id}s.}

\item{.locus_bin}{What bin to assign the new locus to. Defaults to keeping
the original binning, but can be set to "seq" to bin all loci originating
from the same parent sequence, or to "locus" to separate all loci into
individual bins.}

\item{.locus_score}{An expression evaluated in the context of all features
that are combined into a new locus. Results are stored in the column
\code{locus_score}. Defaults to \code{n()}, i.e. the number of features per
locus. Set, for example, to \code{sum(bitscore)} to sum over all blast hit
bitscores per locus. Usually used in conjunction with \code{.locus_filter}.}

\item{.locus_filter}{A predicate expression used to post-filter identified
loci. Set \code{.locus_filter=locus_score >= 3} to only return loci
comprising at least 3 target features.}

\item{.loci}{A data.frame specifying loci directly. Required columns are
\verb{seq_id,start,end}. Supersedes \code{...}.}

\item{.locus_track}{The name of the new track containing the identified
loci.}
}
\description{
Show loci containing features of interest. Loci can either be provided as
predefined regions directly (\verb{loci=}), or are constructed automatically
based on pre-selected features (via \code{...}). Features within
\code{max_dist} are greedily combined into the same locus. \code{locate()}
adds these loci as a new track so that they can be easily visualized.
\code{focus()} extracts those loci from their parent sequences, making them
the new sequence set. These sequences will have their \code{locus_id} as
their new \code{seq_id}.
}
\section{Functions}{
\itemize{
\item \code{focus()}: Identify regions of interest and zoom in on them

\item \code{locate()}: Identify regions of interest and add them as a new feature track
}}
\examples{
# Let's hunt some defense systems in marine SAGs
# read the genomes
s0 <- read_seqs(ex("gorg/gorg.fna"))
s1 <- s0 \%>\%
  # strip trailing number from contigs to get bins
  mutate(bin_id = str_remove(seq_id, "_\\\\d+$"))

# gene annotations from prokka
g0 <- read_feats(ex("gorg/gorg.gff"))

# best hits to the PADS Arsenal database of prokaryotic defense-system genes
# $ mmseqs easy-search gorg.fna pads-arsenal-v1-prf gorg-pads-defense.o6 /tmp \
#     --greedy-best-hits
f0 <- read_feats(ex("gorg/gorg-pads-defense.o6"))
f1 <- f0 \%>\%
  # parse system/gene info
  separate(seq_id2, into=qc(seq_id2, system, gene), sep=",") \%>\%
  filter(
    evalue < 1e-10, # get rid of some spurious hits
    # and let's focus just on a few systems for this example
    system \%in\% c("CRISPR-CAS", "DISARM", "GABIJA", "LAMASSU", "THOERIS"))

# plot the distribution of hits across full genomes
gggenomes(g0, s1, f1, wrap=2e5) +
  geom_seq() + geom_bin_label() +
  scale_color_brewer(palette="Dark2") +
  geom_point(aes(x=x,y=y, color=system), data=feats())

# highlight the regions containing hits
gggenomes(g0, s1, f1, wrap=2e5) \%>\%
  locate(.track_id = feats) \%>\%
  identity() +
  geom_seq() + geom_bin_label() +
  scale_color_brewer(palette="Dark2") +
  geom_feat(data=feats(loci), color="plum3") +
  geom_point(aes(x=x,y=y, color=system), data=feats())

# zoom in on loci
gggenomes(g0, s1, f1, wrap=5e4) \%>\%
  focus(.track_id = feats) +
  geom_seq() + geom_bin_label() +
  geom_gene() +
  geom_feat(aes(color=system)) +
  geom_feat_tag(aes(label=gene)) +
  scale_color_brewer(palette="Dark2")
}
/man/focus.Rd
permissive
thackl/gggenomes
R
false
true
5,442
rd
\name{utilities}
\alias{utilities}
\alias{listResources}
\alias{listResources,AnnotationHub-method}
\alias{loadResources}
\alias{loadResources,AnnotationHub-method}

\title{
Utility functions for discovering package-specific Hub resources.
}

\description{
List and load resources from AnnotationHub or ExperimentHub, filtered by
package name and optional search terms.
}

\usage{
listResources(hub, package, filterBy = character())
loadResources(hub, package, filterBy = character())
}

\arguments{
  \item{hub}{
    A \code{Hub} object, e.g., AnnotationHub or ExperimentHub.
  }
  \item{package}{
    A \code{character(1)} name of a package with resources hosted in the Hub.
  }
  \item{filterBy}{
    A \code{character()} vector of search terms for additional filtering.
    Can be any terms found in the metadata (mcols()) of the resources.
    When not provided, there is no additional filtering and all resources
    associated with the given package are returned.
  }
}

\value{
  \code{listResources} returns a character vector;
  \code{loadResources} returns a list of data objects.
}

\examples{
\dontrun{
## Packages with resources hosted in AnnotationHub:
hub <- AnnotationHub()
unique(package(hub))

## Packages with resources hosted in ExperimentHub:
require(ExperimentHub)
eh <- ExperimentHub()
unique(package(eh))

## All resources associated with the 'GSE62944' package:
listResources(eh, "GSE62944")

## Resources associated with the 'curatedMetagenomicData' package
## filtered by 'plaque.abundance':
listResources(eh, "curatedMetagenomicData", "plaque.abundance")

## 'loadResources()' returns a list of the data objects:
loadResources(eh, "curatedMetagenomicData", "plaque.abundance")
}
}
\keyword{utilities}
/man/listResources.Rd
no_license
alenzhao/AnnotationHub
R
false
false
1,737
rd
\name{TlsParameter-class}
\Rdversion{1.1}
\docType{class}
\alias{TlsParameter-class}

\title{Class \code{"TlsParameter"}}
\description{The parameter of a location scale t distribution, used by the
Tlsd class.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("TlsParameter", ...)}.
Usually an object of this class is not needed on its own; it is generated
automatically when an object of the class \code{Tlsd} is instantiated.
}
\section{Slots}{
  \describe{
    \item{\code{df}:}{Object of class \code{"numeric"}; the degrees of freedom of the t distribution.}
    \item{\code{location}:}{Object of class \code{"numeric"}; the location parameter.}
    \item{\code{scale}:}{Object of class \code{"numeric"}; the scale parameter.}
    \item{\code{name}:}{Object of class \code{"character"}; the name of the parameter object.}
  }
}
\section{Extends}{
Class \code{"\linkS4class{Parameter}"}, directly.
Class \code{"\linkS4class{OptionalParameter}"}, by class "Parameter", distance 2.
}
\section{Methods}{
No methods defined with class "TlsParameter" in the signature.
}
\author{
Florian P. Breitwieser, based on the original TParameter class.
}

\seealso{
\code{\linkS4class{Tlsd}}
}
\examples{
showClass("TlsParameter")
}
\keyword{classes}
/man/TlsParameter-class.Rd
no_license
fbreitwieser/isobar
R
false
false
1,159
rd
\name{read.acs}
\alias{read.acs}
\title{
Reads a comma-delimited file from the American Community Survey and creates
an acs object with estimates, standard errors, and associated metadata.
}
\description{
When passed a comma-delimited file from the U.S. Census American Community
Survey (typically downloaded via the FactFinder website and unzipped),
read.acs returns an acs object with estimates, standard errors, and
associated metadata.

Most users will prefer to start with \code{\link{acs.fetch}} to import data;
\code{read.acs} is maintained as a "legacy" function, primarily for use in
situations where data is not available via the Census API.
}
\usage{
read.acs(filename, endyear = "auto", span = "auto", col.names = "auto",
  acs.units = "auto", geocols = "auto", skip = "auto")
}
\arguments{
  \item{filename}{
    the name of the \code{.csv}, \code{.zip}, or \code{.txt} file to be input
  }
  \item{endyear}{
    an integer (or "auto") indicating the latest year of the data in the
    survey (e.g., for data from the 2005-2009 5-year ACS data, endyear would
    be 2009)
  }
  \item{span}{
    an integer (should be 1, 3, or 5), or "auto" to have read.acs guess the
    span from the filename (e.g., for data from the 2005-2009 5-year ACS
    data, span would be 5)
  }
  \item{col.names}{a vector of column names to be used as
    \code{acs.colnames} for the object; defaults to "auto", which will
    result in auto-generated names from the header lines of the input file}
  \item{acs.units}{
    a vector of factors indicating what sort of data is contained within
    each column of data ("count", "dollars", "proportion", "ratio",
    "other")}
  \item{geocols}{
    a vector of integers indicating which columns contain the geographic
    header information; defaults to "auto", which is the same as 3:1, which
    seems to be the standard for FactFinder-2 downloads}
  \item{skip}{an integer indicating how many rows to skip before processing
    the csv file; defaults to "auto", which will try to guess the proper
    value}
}
\details{
After executing a query on the U.S. Census American FactFinder site
(\url{http://factfinder2.census.gov}), users can download their results as
a zip file containing data in comma-delimited file format (for example,
"ACS_10_5YR_B19013_with_ann.csv"). \code{read.acs} simplifies the creation
of new acs objects from these files.

The function uses some rudimentary algorithms to guess intelligently about
values for metadata (such as \code{endyear} and \code{geography}), based on
the current file format used by the Census "AmericanFactFinder 2" download
site.

The specified \code{filename} can be an actual \code{.csv} file, or can be
the name of a \code{.zip} file downloaded from the FactFinder site. If the
latter, \code{read.acs} will extract the necessary data and leave the
compressed zipfile in place.

As a default, \code{read.acs} assumes the first three columns will contain
geographic header information, which seems to be the standard for the new
Census American FactFinder download site. Users can also set different
values for \code{geocols=} to specify other columns for this geographic
information. The function will use the first of these columns for
geographic rownames to label estimates. (By default, then, this would be
the third column of the actual file, since \code{geocols=3:1}.) For files
downloaded via the Census "legacy" version of FactFinder prior to 2012,
users will probably want to specify \code{geocols=4:1}.

As for column names, by default \code{read.acs} will scan the file to
determine how many of the initial rows contain "header" information, and
will generate new \code{acs.colnames} by concatenating information found in
these rows. Note that this can result in \emph{very long} variable names,
and users may want to modify the contents of \code{acs.colnames} after
creation. Alternatively, users can inspect downloaded csv files prior to
import and specify the \code{skip=} option explicitly, as with
\code{read.csv} and other \code{read.XXX} functions (i.e., the value of
skip is equal to the number of rows prior to the last header row).
Regardless of whether \code{skip=} is set or "auto", however, the column
names will be created using all of the rows at the top of the file,
\emph{even the "skipped" ones}.

Finally, these new \code{acs.colnames} are used to guess intelligently
about values for \code{acs.units}, but currently all this includes is a
check for the word "dollars" in the names; if this is not found, the
columns are assumed to be "counts".

When no other values are provided, \code{read.acs} will attempt to
determine \code{endyear} and \code{span} from the filename.
}
\value{
Returns a new acs-class object with estimates, standard errors (derived
from the census 90\% margins of error), and metadata associated with the
survey.
}
\author{
Ezra Haber Glenn \email{eglenn@mit.edu}
}
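% Added sketch (not part of the original help page): a minimal call, using
% the example filename from the details section above; the argument values
% and object name are illustrative.
\examples{
\dontrun{
income <- read.acs("ACS_10_5YR_B19013_with_ann.csv", endyear = 2010, span = 5)
}
}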
/man/read.acs.Rd
no_license
cran/acs
R
false
false
5,001
rd
# UI
# DTG STUDY UI for the Dashboard
# Created on 24/09/2020
# Developed by Wafula Erick
# This dashboard has been adapted from what we currently have from Search Youth. Thanks to James Peng.

library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(lubridate)
library(googleAuthR)
library(plotly)

ui <- dashboardPage(
  dashboardHeader(title = "DTG Study Dashboard", titleWidth = 250),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Dashboard", tabName = "first", icon = icon("dashboard"),
               menuSubItem("Enrollment", tabName = "enrollment"),
               menuSubItem("Follow-up", tabName = "follow_up")
      ),
      menuItem("Endpoint", tabName = "endpoint", icon = icon("hourglass-end"),
               menuSubItem("Status Overview", tabName = "ep_overview"),
               menuSubItem("Preliminary Results", tabName = "ep_results")),
      menuItem("Data QC", tabName = "data_qc", icon = icon("database"),
               menuSubItem("QC Reports Baseline", tabName = "qc_report"),
               menuSubItem("QC Reports Followup 1", tabName = "qc_report1"),
               menuSubItem("QC Reports Followup 3", tabName = "qc_report3"),
               menuSubItem("QC Reports Followup 6", tabName = "qc_report6")),
      menuItem("Reports", tabName = "reports", icon = icon("th"),
               menuSubItem("Scheduled/Missed Visits", tabName = "missed_visit"),
               menuSubItem("Withdrawals/Move", tabName = "withdrawal"),
               menuSubItem("Retention", tabName = "retention")),
      menuItem("Download", tabName = "raw_data_download", icon = icon("download")),
      hr(),
      " Find Study Id",
      sidebarSearchForm(textId = "searchText", buttonId = "searchButton",
                        label = "Enter Study Id..."),
      uiOutput("logininfo"),
      uiOutput("last_updated")
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(tabName = "enrollment",
              h2("Enrollment Summary"),
              # Screening and Enrollment Numbers
              fluidRow(
                # Dynamic valueBoxes
                #valueBoxOutput("screened"),
                valueBoxOutput("enrolled"),
                valueBoxOutput("male"),
                valueBoxOutput("bmi")
              ),
              fluidRow(
                # Dynamic valueBoxes
                valueBoxOutput("hypertensive"),
                valueBoxOutput("diabetic"),
                valueBoxOutput("cholesterol")
              ),
              fluidRow(
                box(status = 'primary', solidHeader = TRUE, title = 'Breakdowns',
                    selectInput("breakdown", label = "Breakdown: ",
                                choices = c('All','Gender', 'Age-group','Pre-conditions'))
                ),
                box(status = 'primary', solidHeader = TRUE, title = 'Sub-Breakdowns',
                    selectInput("subcategory", label = "Sub Category: ",
                                choices = c('All'))
                )
              ),
              fluidRow(
                box(title="Enrollment Summary by Gender",
                    htmlOutput("enroll_text_summary"),
                    plotlyOutput("plot1_enrollment")
                ),
                box(title="Monthly Enrollment Progress",
                    htmlOutput("enroll_sub"),
                    plotlyOutput("plot2_enrollment"),
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('enrollment_list')),
                    downloadButton("download1","Download csv")
                )
              )
      ),
      tabItem(tabName = "missed_visit",
              fluidRow(
                box(status = 'primary', solidHeader = TRUE, title = 'Scheduled Visits',
                    dateInput('svisit_since', 'From:', value = Sys.Date()),
                    dateInput('svisit_to', 'To:', value = Sys.Date()),
                    HTML('<br><br>'),
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('sch_visits')),
                    downloadButton("sch_download","Download csv")
                ),
                box(status = 'primary', solidHeader = TRUE, title = 'Missed Visits',
                    dateInput('mvisit_since', 'From:', value = '2019-03-01'),
                    dateInput('mvisit_to', 'To:', value = Sys.Date() - 1),
                    HTML('<br><br>'),
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('m_visits')),
                    downloadButton("m_download","Download csv")
                )
              )
      ),
      tabItem(tabName = "follow_up",
              # Add various value boxes to show follow-up visit status
              h2("Follow-up Visits Summary"),
              fluidRow(
                # Dynamic valueBoxes
                valueBoxOutput("weight_1"),
                valueBoxOutput("month_3"),
                valueBoxOutput("month_6")
              ),
              fluidRow(
                # Dynamic valueBoxes
                valueBoxOutput("new_hypertenstion"),
                valueBoxOutput("new_diabetic"),
                valueBoxOutput("new_cholesterol")
              ),
              fluidRow(
                box(status = 'primary', solidHeader = TRUE, title = 'Follow-up Visits',
                    selectInput("fu_visit_selection", label = "Breakdown: ",
                                choices = c('All','Month 1', 'Month 3','Month 6'))
                ),
                box(status = 'primary', solidHeader = TRUE, title = 'Incidence',
                    selectInput("fu_incidence_selection", label = "Sub Category: ",
                                choices = c('All','Diabetic','Hypertensive','High Cholesterol',
                                            'Overweight','Obesity', 'Weight Change'))
                )
              ),
              fluidRow(
                box(
                  div(style = 'overflow-x: scroll', DT::dataTableOutput('fu_visits')),
                  downloadButton("fu_download","Download csv")
                ),
                box(
                  div(style = 'overflow-x: scroll', DT::dataTableOutput('fu_visits_incidence')),
                  downloadButton("fu_incidence_download","Download csv")
                )
              )
      ),
      tabItem(tabName = "retention",
              h2("Retention Summary"),
              fluidRow(
                # Dynamic valueBoxes
                valueBoxOutput("retention_Summary_prop"),
                valueBoxOutput("retention_gender"),
                valueBoxOutput("retention_hyp")
              ),
              fluidRow(
                box(title="Retention Summary",
                    plotlyOutput("plot3_retention"),
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('retention_list')),
                    downloadButton("download1_ret","Download csv")
                ),
                box(title="Retention Summary",
                    plotlyOutput("plot4_retention")
                    #div(style = 'overflow-x: scroll', DT::dataTableOutput('enrollment_list')),
                    #downloadButton("download1","Download csv")
                )
              )
      ),
      tabItem(tabName = "raw_data_download",
              # Add various buttons to download the raw data
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Download raw Screening Data",
                    downloadButton("download_screening", label = "Download Screening csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Download raw Enrollment Data",
                    downloadButton("download_enrollment", label = "Download Enrollment csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Download raw Tracking Data",
                    downloadButton("download_tracking", label = "Download Tracking csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Download raw Follow-up Data",
                    downloadButton("download_fup", label = "Download follow-up csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Download raw Withdrawal Data",
                    downloadButton("download_withdrawal", label = "Download Withdrawal csv")
                )
              )
      ),
      tabItem(tabName = 'qc_report',
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in hemoglobin A1C",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hemoglobin')),
                    downloadButton("download_hgb", label = "hemoglobinA1C csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs')),
                    downloadButton("download_fbs", label = "fasting blood sugar csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting total cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('tchol')),
                    downloadButton("download_ftchol", label = "Fasting total cholesterol csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting HDL cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hdlchol')),
                    downloadButton("download_fhdlchol", label = "Fasting HDL cholesterol csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Triglycerides",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('ftrig')),
                    downloadButton("download_trigchol", label = "Fasting Triglycerides csv")
                )
              )
      ),
      tabItem(tabName = 'qc_report1',
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in hemoglobin A1C",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hemoglobin1')),
                    downloadButton("download_hgb1", label = "hemoglobinA1C csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs1')),
                    downloadButton("download_fbs1", label = "fasting blood sugar csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting total cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('tchol1')),
                    downloadButton("download_ftchol1", label = "Fasting total cholesterol csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting HDL cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hdlchol1')),
                    downloadButton("download_fhdlchol1", label = "Fasting HDL cholesterol csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Triglycerides",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('ftrig1')),
                    downloadButton("download_trigchol1", label = "Fasting Triglycerides csv")
                )
              )
      ),
      tabItem(tabName = 'qc_report3',
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs3')),
                    downloadButton("download_fbs3", label = "fasting blood sugar csv")
                )
              )
      ),
      tabItem(tabName = 'qc_report6',
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in hemoglobin A1C",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hemoglobin6')),
                    downloadButton("download_hgb6", label = "hemoglobinA1C csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs6')),
                    downloadButton("download_fbs6", label = "fasting blood sugar csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting total cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('tchol6')),
                    downloadButton("download_ftchol6", label = "Fasting total cholesterol csv")
                ),
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting HDL cholesterol",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('hdlchol6')),
                    downloadButton("download_fhdlchol6", label = "Fasting HDL cholesterol csv")
                )
              ),
              fluidRow(
                box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Triglycerides",
                    div(style = 'overflow-x: scroll', DT::dataTableOutput('ftrig6')),
                    downloadButton("download_trigchol6", label = "Fasting Triglycerides csv")
                )
              )
      ),
      tabItem(tabName = 'withdrawal',
              fluidRow(
                h2("Withdrawn Participants"),
                div(style = 'overflow-x: scroll', DT::dataTableOutput('withraw_list')),
                downloadButton("download_withdrawn","Download csv")
              )
      ),
      tabItem(tabName = 'ep_overview',
              fluidRow(
                box(status = 'primary', solidHeader = TRUE, title = "DTG Month 6 Endpoint Overview",
                    htmlOutput("in_window"),
                    radioGroupButtons(
                      inputId = "in_window_type",
                      choices = c("Completed" = "endpoint_complete",
                                  "started" = "endpoint_started")
                    ),
                    htmlOutput('in_window_progress')),
                box(status = "primary", solidHeader = TRUE,
                    title = "Summary Statistics at Month 6 (Started or Completed)",
                    htmlOutput("in_window_weight_change"),
                    htmlOutput("in_window_diabetic"),
                    htmlOutput("in_window_hypertensive"),
                    htmlOutput("in_window_high_chol")
                )
              ),
              fluidRow(box(
                HTML("This line list only includes those who are in their endpoint window.<br><br>"),
                checkboxGroupInput(inputId="ep_vars_to_include", label="Variables to include",
                                   choices=c("Endpoint Ascertained", "Endpoint Started", "Withdrawn"),
                                   selected = c("Endpoint Ascertained"), inline = TRUE),
                div(style = 'overflow-x: scroll', DT::dataTableOutput("ep_ind_line_list")),
                downloadButton('download_ep_line_list', 'Download CSV'),
                tags$script("$(document).on('click', '#ep_ind_line_list button', function () { Shiny.onInputChange('lastClickId',this.id); Shiny.onInputChange('lastClick', Math.random()) });"),
                title = 'Individual Line List', width = 12))
      ),
      tabItem('ep_results',
              fluidRow(
                box(
                  status = 'primary',
                  title = 'Summary Spreadsheet',
                  div(style = 'overflow-x: scroll', tableOutput('ep_summary_table')),
                  downloadButton('download_ep_summary_ss', 'Download CSV'),
                  width = 12
                )
              )
      )
    )
  )
)
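# Added sketch (not part of this ui.R): the matching server-side pieces for
# one value box and one table, to show how these output IDs get filled in.
# `dtg_data` is a made-up data frame name used only for illustration.
server <- function(input, output, session) {
  output$enrolled <- renderValueBox({
    valueBox(nrow(dtg_data), "Enrolled", icon = icon("users"), color = "green")
  })
  output$enrollment_list <- DT::renderDataTable({
    DT::datatable(dtg_data)
  })
}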
/ui.R
no_license
Werick/DTG_study_Dashboard
R
false
false
16,529
r
fluidRow(
  box(status = "primary", solidHeader = TRUE, title = "Error in hemoglobin A1C",
      div(style = 'overflow-x: scroll', DT::dataTableOutput('hemoglobin1')),
      downloadButton("download_hgb1", label = "hemoglobinA1C csv")
  ),
  box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
      div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs1')),
      downloadButton("download_fbs1", label = "fasting blood sugar csv")
  )
),
fluidRow(
  box(status = "primary", solidHeader = TRUE, title = "Error in Fasting total cholesterol ",
      div(style = 'overflow-x: scroll', DT::dataTableOutput('tchol1')),
      downloadButton("download_ftchol1", label = "Fasting total cholesterol csv")
  ),
  box(status = "primary", solidHeader = TRUE, title = "Error in Fasting HDL cholesterol ",
      div(style = 'overflow-x: scroll', DT::dataTableOutput('hdlchol1')),
      downloadButton("download_fhdlchol1", label = "Fasting HDL cholesterol csv")
  )
),
fluidRow(
  box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Triglycerides ",
      div(style = 'overflow-x: scroll', DT::dataTableOutput('ftrig1')),
      downloadButton("download_trigchol1", label = "Fasting Triglycerides csv")
  )
)
),
tabItem(tabName = 'qc_report3',
        fluidRow(
          box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs3')),
              downloadButton("download_fbs3", label = "fasting blood sugar csv")
          )
        )
),
tabItem(tabName = 'qc_report6',
        fluidRow(
          box(status = "primary", solidHeader = TRUE, title = "Error in hemoglobin A1C",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('hemoglobin6')),
              downloadButton("download_hgb6", label = "hemoglobinA1C csv")
          ),
          box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Blood Sugar",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('fbs6')),
              downloadButton("download_fbs6", label = "fasting blood sugar csv")
          )
        ),
        fluidRow(
          box(status = "primary", solidHeader = TRUE, title = "Error in Fasting total cholesterol ",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('tchol6')),
              downloadButton("download_ftchol6", label = "Fasting total cholesterol csv")
          ),
          box(status = "primary", solidHeader = TRUE, title = "Error in Fasting HDL cholesterol ",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('hdlchol6')),
              downloadButton("download_fhdlchol6", label = "Fasting HDL cholesterol csv")
          )
        ),
        fluidRow(
          box(status = "primary", solidHeader = TRUE, title = "Error in Fasting Triglycerides ",
              div(style = 'overflow-x: scroll', DT::dataTableOutput('ftrig6')),
              downloadButton("download_trigchol6", label = "Fasting Triglycerides csv")
          )
        )
),
tabItem(tabName = 'withdrawal',
        fluidRow(
          h2("Withdrawn Participants"),
          div(style = 'overflow-x: scroll', DT::dataTableOutput('withraw_list')),
          downloadButton("download_withdrawn","Download csv")
        )
),
tabItem(tabName = 'ep_overview',
        fluidRow(
          box(status = 'primary', solidHeader = TRUE, title = "DTG Month 6 Endpoint Overview",
              htmlOutput("in_window"),
              radioGroupButtons(
                inputId = "in_window_type",
                choices = c("Completed" = "endpoint_complete",
                            "Started" = "endpoint_started")
              ),
              htmlOutput('in_window_progress')),
          box(status = "primary", solidHeader = TRUE, title = "Summary Statistics at Month 6 (Started or Completed)",
              htmlOutput("in_window_weight_change"),
              htmlOutput("in_window_diabetic"),
              htmlOutput("in_window_hypertensive"),
              htmlOutput("in_window_high_chol")
          )
        ),
        fluidRow(box(
          HTML("This line list only includes those who are in their endpoint window.<br><br>"),
checkboxGroupInput(inputId="ep_vars_to_include", label="Variables to include", choices=c("Endpoint Ascertained", "Endpoint Started", "Withdrawn"), selected = c("Endpoint Ascertained"), inline = TRUE), div(style = 'overflow-x: scroll', DT::dataTableOutput("ep_ind_line_list")), downloadButton('download_ep_line_list', 'Download CSV'), tags$script("$(document).on('click', '#ep_ind_line_list button', function () { Shiny.onInputChange('lastClickId',this.id); Shiny.onInputChange('lastClick', Math.random()) });"), title = 'Individual Line List', width = 12)) ), tabItem('ep_results', fluidRow( box( status = 'primary', title = 'Summary Spreadsheet', div(style = 'overflow-x: scroll', tableOutput('ep_summary_table')), downloadButton('download_ep_summary_ss', 'Download CSV'), width = 12 ) ) ) ) ) )
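A minimal, hypothetical server-side sketch (not part of the original app) showing how one valueBoxOutput() and one plotlyOutput() placeholder declared above would be filled in; the data frame `enroll_df` and its `gender` column are invented for illustration.

# Hypothetical server fragment; output IDs match the UI placeholders above.
server <- function(input, output, session) {
  output$enrolled <- renderValueBox({
    valueBox(nrow(enroll_df),            # total participants enrolled
             subtitle = "Enrolled",
             icon = icon("users"),
             color = "green")
  })
  output$plot1_enrollment <- renderPlotly({
    plot_ly(enroll_df, x = ~gender, type = "histogram")
  })
}
shinyApp(ui, server)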
#' Interactions
#'
#' Add brush, slider, zoom, and drag interactions.
#'
#' @inheritParams geoms
#'
#' @examples
#' g2(cars, asp(speed, dist)) %>%
#'  fig_point() %>%
#'  interact_brush()
#'
#' @name interact
#' @export
interact_brush <- function(g2) {
  check_g2(g2)
  g2$x$brush <- TRUE
  g2
}

#' @rdname interact
#' @export
interact_slider <- function(g2, ...) {
  check_g2(g2)
  g2$x$slider <- list(...)
  g2
}

#' @rdname interact
#' @export
interact_zoom <- function(g2, ...) {
  check_g2(g2)
  g2$x$zoom <- list(...)
  g2
}

#' @rdname interact
#' @export
interact_drag <- function(g2, ...) {
  check_g2(g2)
  g2$x$drag <- list(...)
  g2
}
/R/interact.R
permissive
JohnCoene/g2r
R
false
false
658
r
#' Interactions
#'
#' Add brush, slider, zoom, and drag interactions.
#'
#' @inheritParams geoms
#'
#' @examples
#' g2(cars, asp(speed, dist)) %>%
#'  fig_point() %>%
#'  interact_brush()
#'
#' @name interact
#' @export
interact_brush <- function(g2) {
  check_g2(g2)
  g2$x$brush <- TRUE
  g2
}

#' @rdname interact
#' @export
interact_slider <- function(g2, ...) {
  check_g2(g2)
  g2$x$slider <- list(...)
  g2
}

#' @rdname interact
#' @export
interact_zoom <- function(g2, ...) {
  check_g2(g2)
  g2$x$zoom <- list(...)
  g2
}

#' @rdname interact
#' @export
interact_drag <- function(g2, ...) {
  check_g2(g2)
  g2$x$drag <- list(...)
  g2
}
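A hedged combination sketch: since each helper above only records a flag or option list on the chart object, the interactions compose freely. The figure helpers g2(), asp(), and fig_point() are assumed to come from the same package, as in the file's own roxygen example.

library(g2r)
library(magrittr)

# Brush and zoom on the same scatter chart.
g2(cars, asp(speed, dist)) %>%
  fig_point() %>%
  interact_brush() %>%
  interact_zoom()   # extra options, if any, are passed through ...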
## ---- echo = FALSE------------------------------------------------------------ knitr::opts_chunk$set(echo = TRUE, comment = "") knitr::knit_engines$set(list( styler = function(options) { options$comment <- "" knitr::engine_output( options, c("# Before", options$code), c("# After", styler::style_text(options$code)) ) } )) ## ----------------------------------------------------------------------------- library("styler") library("magrittr") style_text("a=3; 2", scope = "spaces") ## ----------------------------------------------------------------------------- style_text("a=3; 2", scope = "tokens") ## ----------------------------------------------------------------------------- style_text("if(x) {66 } else {a=3}", scope = "line_breaks") ## ----------------------------------------------------------------------------- code <- c( "a <- function() { ", " a=3", "}" ) style_text(code, scope = "spaces") ## ----------------------------------------------------------------------------- style_text(code, scope = "indention") ## ----------------------------------------------------------------------------- style_text( "data_frame( small = 2 , medium = 4,#comment without space large = 6 )", strict = FALSE ) ## ----------------------------------------------------------------------------- style_text( "a <- 'one' #just one abc <- 'three' # three", strict = FALSE ) ## ----------------------------------------------------------------------------- style_text( "1++1/2*2^2", math_token_spacing = specify_math_token_spacing(zero = c("'/'", "'*'", "'^'")) ) ## ----------------------------------------------------------------------------- style_text( c( "a <- function() {", "### not to be indented", "# indent normally", "33", "}" ), reindention = specify_reindention(regex_pattern = "###", indention = 0) )
/win-library/4.0/styler/doc/introducing_styler.R
no_license
mspalione/R
R
false
false
1,955
r
## ---- echo = FALSE------------------------------------------------------------ knitr::opts_chunk$set(echo = TRUE, comment = "") knitr::knit_engines$set(list( styler = function(options) { options$comment <- "" knitr::engine_output( options, c("# Before", options$code), c("# After", styler::style_text(options$code)) ) } )) ## ----------------------------------------------------------------------------- library("styler") library("magrittr") style_text("a=3; 2", scope = "spaces") ## ----------------------------------------------------------------------------- style_text("a=3; 2", scope = "tokens") ## ----------------------------------------------------------------------------- style_text("if(x) {66 } else {a=3}", scope = "line_breaks") ## ----------------------------------------------------------------------------- code <- c( "a <- function() { ", " a=3", "}" ) style_text(code, scope = "spaces") ## ----------------------------------------------------------------------------- style_text(code, scope = "indention") ## ----------------------------------------------------------------------------- style_text( "data_frame( small = 2 , medium = 4,#comment without space large = 6 )", strict = FALSE ) ## ----------------------------------------------------------------------------- style_text( "a <- 'one' #just one abc <- 'three' # three", strict = FALSE ) ## ----------------------------------------------------------------------------- style_text( "1++1/2*2^2", math_token_spacing = specify_math_token_spacing(zero = c("'/'", "'*'", "'^'")) ) ## ----------------------------------------------------------------------------- style_text( c( "a <- function() {", "### not to be indented", "# indent normally", "33", "}" ), reindention = specify_reindention(regex_pattern = "###", indention = 0) )
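The vignette code above exercises style_text(); styler exposes the same machinery at file level. A one-line sketch (the path is a placeholder):

# Apply the default (tidyverse) style guide to a whole script in place.
styler::style_file("analysis.R")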
## This program follows the same structure as the example code "Caching the
## Mean of a Vector", but instead of caching the mean of a vector, this
## code caches the inverse of a matrix.

## Following the same idea as makeVector, this function creates a special
## matrix, which is really a list containing functions to:
## 1 - set the value of the matrix
## 2 - get the value of the matrix
## 3 - set the value of the inverse of the matrix
## 4 - get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
    ## It is best practice to validate input parameters.
    if (nrow(x) != ncol(x)) {
        stop("The matrix must be a square matrix.")
    }
    s <- NULL
    set <- function(y) {
        x <<- y
        s <<- NULL
    }
    get <- function() x
    setsolve <- function(solve) s <<- solve
    getsolve <- function() s
    list (set = set, get = get,
          setsolve = setsolve,
          getsolve = getsolve)
}

## cacheSolve returns the inverse of the special matrix created by
## makeCacheMatrix: it computes the inverse with solve() on the first call
## and returns the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    s <- x$getsolve()
    if (!is.null(s)) {
        message("Getting cache data")
        return (s)
    }
    dataMatrix <- x$get()
    s <- solve(dataMatrix)
    x$setsolve(s)
    s
}
/cachematrix.R
no_license
ctakamiya/ProgrammingAssignment2
R
false
false
1,370
r
## This program follows the same structure as the example code "Caching the
## Mean of a Vector", but instead of caching the mean of a vector, this
## code caches the inverse of a matrix.

## Following the same idea as makeVector, this function creates a special
## matrix, which is really a list containing functions to:
## 1 - set the value of the matrix
## 2 - get the value of the matrix
## 3 - set the value of the inverse of the matrix
## 4 - get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
    ## It is best practice to validate input parameters.
    if (nrow(x) != ncol(x)) {
        stop("The matrix must be a square matrix.")
    }
    s <- NULL
    set <- function(y) {
        x <<- y
        s <<- NULL
    }
    get <- function() x
    setsolve <- function(solve) s <<- solve
    getsolve <- function() s
    list (set = set, get = get,
          setsolve = setsolve,
          getsolve = getsolve)
}

## cacheSolve returns the inverse of the special matrix created by
## makeCacheMatrix: it computes the inverse with solve() on the first call
## and returns the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    s <- x$getsolve()
    if (!is.null(s)) {
        message("Getting cache data")
        return (s)
    }
    dataMatrix <- x$get()
    s <- solve(dataMatrix)
    x$setsolve(s)
    s
}
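A short usage sketch (not in the original assignment file): the first cacheSolve() call computes the inverse, the second returns the cached copy.

m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)  # computes solve() and caches the result
cacheSolve(m)  # prints "Getting cache data" and returns the cache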
#Source: #Kirill Eremenko #www.superdatascience.com #Notes and Corrections to the data: #Kevin Durant: 2006 - College Data Used #Kevin Durant: 2005 - Proxied With 2006 Data #Derrick Rose: 2012 - Did Not Play #Derrick Rose: 2007 - College Data Used #Derrick Rose: 2006 - Proxied With 2007 Data #Derrick Rose: 2005 - Proxied With 2007 Data #Seasons Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014") #Players Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade") #Free Throws KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196) JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141) LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375) CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189) DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143) ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179) ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289) KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146) DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152) DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284) #Matrix FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT) rm(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT) colnames(FreeThrows) <- Seasons rownames(FreeThrows) <- Players #Free Throw Attempts KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241) JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176) LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528) CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237) DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271) ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232) ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321) KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171) DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187) DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370) #Matrix # FreeThrowsFTA <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA) rm(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA) colnames(FreeThrowsFTA) <- Seasons rownames(FreeThrowsFTA) <- Players #
/Basketball_Freethrows.R
no_license
shahriya07/Basketball_Trends
R
false
false
2,700
r
#Source: #Kirill Eremenko #www.superdatascience.com #Notes and Corrections to the data: #Kevin Durant: 2006 - College Data Used #Kevin Durant: 2005 - Proxied With 2006 Data #Derrick Rose: 2012 - Did Not Play #Derrick Rose: 2007 - College Data Used #Derrick Rose: 2006 - Proxied With 2007 Data #Derrick Rose: 2005 - Proxied With 2007 Data #Seasons Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014") #Players Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade") #Free Throws KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196) JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141) LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375) CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189) DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143) ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179) ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289) KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146) DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152) DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284) #Matrix FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT) rm(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT) colnames(FreeThrows) <- Seasons rownames(FreeThrows) <- Players #Free Throw Attempts KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241) JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176) LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528) CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237) DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271) ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232) ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321) KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171) DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187) DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370) #Matrix # FreeThrowsFTA <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA) rm(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA) colnames(FreeThrowsFTA) <- Seasons rownames(FreeThrowsFTA) <- Players #
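Because the two matrices share the same player-by-season layout, derived statistics are element-wise; a small sketch not in the original script:

# Free-throw percentage per player per season.
# Note: Derrick Rose 2012 is 0/0 and yields NaN (did not play).
FreeThrowPct <- round(FreeThrows / FreeThrowsFTA, 3)
FreeThrowPct["KobeBryant", ]  # Kobe's FT% across 2005-2014
matplot(t(FreeThrowPct), type = "l", xlab = "Season index", ylab = "FT%")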
########################################################################################## # Designed and developed by Tinniam V Ganesh # Date : 26 Mar 2016 # Function: bowlerWicketPlot # This function plots the average wickets taken by bowler versus number of overs bowled # ########################################################################################### #' @title #' Average wickets versus of overs bowled #' #' @description #' This function computes and plots the average wickets taken by the bowler versus the #' number of overs bowled #' @usage #' bowlerWicketPlot(df, name) #' #' @param df #' Data frame #' #' @param name #' Name of bowler #' #' @return None #' @references #' \url{http://cricsheet.org/}\cr #' \url{https://gigadom.wordpress.com/}\cr #' \url{https://github.com/tvganesh/yorkrData} #' @author #' Tinniam V Ganesh #' @note #' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com} #' #' @examples #' \dontrun{ #' # Get the data frame for RA Jadeja #' jadeja <- getBowlerWicketDetails(team="India",name="Jadeja",dir=pathToFile) #' bowlerWicketPlot(jadeja,"RA Jadeja") #' } #' @seealso #' \code{\link{bowlerMeanEconomyRate}}\cr #' \code{\link{bowlerWicketsVenue}}\cr #' \code{\link{bowlerMeanRunsConceded}}\cr #' #' @export #' bowlerWicketPlot <- function(df,name){ overs = runs = maidens = meanRuns = wickets = bowler = meanWickets = NULL c <- summarise(group_by(df,overs),meanRuns=mean(runs),meanMaidens=mean(maidens), meanWickets=mean(wickets)) plot.title <- paste(name,"- Average wickets vs overs") ggplot(c,aes(x=overs, y=meanWickets,fill=overs)) + geom_bar(data=c,stat="identity" ) + xlab("Overs") + ylab("Mean Wickets") + ggtitle(bquote(atop(.(plot.title), atop(italic("Data source:http://cricsheet.org/"),"")))) }
/R/bowlerWicketPlot.R
no_license
bcdunbar/yorkr
R
false
false
1,853
r
########################################################################################## # Designed and developed by Tinniam V Ganesh # Date : 26 Mar 2016 # Function: bowlerWicketPlot # This function plots the average wickets taken by bowler versus number of overs bowled # ########################################################################################### #' @title #' Average wickets versus of overs bowled #' #' @description #' This function computes and plots the average wickets taken by the bowler versus the #' number of overs bowled #' @usage #' bowlerWicketPlot(df, name) #' #' @param df #' Data frame #' #' @param name #' Name of bowler #' #' @return None #' @references #' \url{http://cricsheet.org/}\cr #' \url{https://gigadom.wordpress.com/}\cr #' \url{https://github.com/tvganesh/yorkrData} #' @author #' Tinniam V Ganesh #' @note #' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com} #' #' @examples #' \dontrun{ #' # Get the data frame for RA Jadeja #' jadeja <- getBowlerWicketDetails(team="India",name="Jadeja",dir=pathToFile) #' bowlerWicketPlot(jadeja,"RA Jadeja") #' } #' @seealso #' \code{\link{bowlerMeanEconomyRate}}\cr #' \code{\link{bowlerWicketsVenue}}\cr #' \code{\link{bowlerMeanRunsConceded}}\cr #' #' @export #' bowlerWicketPlot <- function(df,name){ overs = runs = maidens = meanRuns = wickets = bowler = meanWickets = NULL c <- summarise(group_by(df,overs),meanRuns=mean(runs),meanMaidens=mean(maidens), meanWickets=mean(wickets)) plot.title <- paste(name,"- Average wickets vs overs") ggplot(c,aes(x=overs, y=meanWickets,fill=overs)) + geom_bar(data=c,stat="identity" ) + xlab("Overs") + ylab("Mean Wickets") + ggtitle(bquote(atop(.(plot.title), atop(italic("Data source:http://cricsheet.org/"),"")))) }
#load libraries library(plyr) library(dplyr) library(stringr) #load data soraw <- read.csv("./so-make-raw.csv") #extract SO data from notes column and put in its own column; do this in a new dataframe soclean <- mutate(soraw, Make = str_extract_all(soraw$Co.Op.Notes, "SO \\([^()]+\\)")) soclean[,4] <- gsub("SO \\(","",soclean[,4]) soclean[,4] <- gsub("\\)","",soclean[,4]) soclean[,4] <- gsub("character\\(0","",soclean[,4]) #rename columns names(soclean) <- c("Account Name","Customer ID","Notes","Make") #export data write.table(soclean,file="./so-make-clean.csv")
/so-munging.R
no_license
simitpatel/Data-Wrangling-in-R
R
false
false
578
r
#load libraries library(plyr) library(dplyr) library(stringr) #load data soraw <- read.csv("./so-make-raw.csv") #extract SO data from notes column and put in its own column; do this in a new dataframe soclean <- mutate(soraw, Make = str_extract_all(soraw$Co.Op.Notes, "SO \\([^()]+\\)")) soclean[,4] <- gsub("SO \\(","",soclean[,4]) soclean[,4] <- gsub("\\)","",soclean[,4]) soclean[,4] <- gsub("character\\(0","",soclean[,4]) #rename columns names(soclean) <- c("Account Name","Customer ID","Notes","Make") #export data write.table(soclean,file="./so-make-clean.csv")
# Simple model using the top 6 predictors according to the correlation matrix # Analysis done on original data # deed-recode train$deed_simple <- as.character(train$deed) train$deed_simple[-which(train$deed_type %in% c('WD','SJ','SW','RD'))] <- 'Other' train$deed_simple <- as.factor(train$deed_simple) model3 <- lm(totalActualVal ~ TotalFinishedSF + mainfloorSF + nbrFullBaths + nbrBedRoom + carStorageSF + PCT_WHITE + designCodeDscr + range + nbrThreeQtrBaths + nbrHalfBaths + carStorageTypeDscr + deed_simple + township + TotalFinishedSF:township + carStorageSF:township + pct_own, data=train) summary(model3) # deed-recode test test$deed_simple <- as.character(test$deed) test$deed_simple[-which(test$deed_type %in% c('WD','SJ','SW','RD'))] <- 'Other' test$deed_simple <- as.factor(test$deed_simple) mod3 <- predict(model3, test) mod3 <- as.data.frame(cbind(1:10000,mod3)) names(mod3) <- c('id','totalActualVal') head(mod3) write.csv(mod3,file='mod3.csv',row.names=F)
/Models/Model3.R
no_license
kycolton/cloaked-micro-tribble
R
false
false
977
r
# Simple model using the top 6 predictors according to the correlation matrix # Analysis done on original data # deed-recode train$deed_simple <- as.character(train$deed) train$deed_simple[-which(train$deed_type %in% c('WD','SJ','SW','RD'))] <- 'Other' train$deed_simple <- as.factor(train$deed_simple) model3 <- lm(totalActualVal ~ TotalFinishedSF + mainfloorSF + nbrFullBaths + nbrBedRoom + carStorageSF + PCT_WHITE + designCodeDscr + range + nbrThreeQtrBaths + nbrHalfBaths + carStorageTypeDscr + deed_simple + township + TotalFinishedSF:township + carStorageSF:township + pct_own, data=train) summary(model3) # deed-recode test test$deed_simple <- as.character(test$deed) test$deed_simple[-which(test$deed_type %in% c('WD','SJ','SW','RD'))] <- 'Other' test$deed_simple <- as.factor(test$deed_simple) mod3 <- predict(model3, test) mod3 <- as.data.frame(cbind(1:10000,mod3)) names(mod3) <- c('id','totalActualVal') head(mod3) write.csv(mod3,file='mod3.csv',row.names=F)
hr_df <-read.csv("D:/Rlearn/big data/new edited.csv") View(hr_df) ##addition model dat$x5=relevel(dat$x5,ref="1") fitlm=lm(number~yearag+MA,data=dat) summary(fitlm) ##multiplication model dat$x5=relevel(dat$x5,ref="m") fitlm2=lm(number~yearag+yearag:MA,data=dat) summary(fitlm2) ##mixed model fitlm3=lm(number~yearag+MA+yearag:MA,data=dat) summary(fitlm3) ##downsize(grad year >2005) narrow <- subset(hr_df, ugrad_year>2005) ##keep the donation relevented and divided into LTW and annual giving campaign LTW <- subset(narrow, campaign == "Light the World") Annualgiving <- subset(narrow, campaign == "Annual Giving Campaign") ##downsize(grad year >2005) since2006 <- subset(hrdf, ugrad_year > 2005) View(since2006) ##how many students from each state table(since2006$home_state) ##how many LTW sent emails were sent to each state table(LTW$home_state) ##how many LTW sent emails were opened in each state table(LTW$home_state, LTW$email_open == 1) ##how many LTW sent emails were clicked in each state table(LTW$home_state, LTW$click == 1) ##how many Annualgiving sent emails were sent to each state table(Annualgiving$home_state) ##how many Annualgiving sent emails were opened in each state table(Annualgiving$home_state, Annualgiving$email_open == 1) ##how many Annualgiving sent emails were clicked in each state table(Annualgiving$home_state, Annualgiving$click == 1) ##people who received LTW emails don't donate NonDLTW <- subset(LTW, donate == 0) table(NonDLTW$home_state) table(NonDLTW$home_state,NonDLTW$email_open == 1) table(NonDLTW$home_state,NonDLTW$click == 1) ##people who received Annualgiving emails don't donate NonDAnnualgiving <- subset(Annualgiving, donate == 0) table(NonDAnnualgiving$home_state) table(NonDAnnualgiving$home_state, NonDAnnualgiving$email_open == 1) table(NonDAnnualgiving$home_state, NonDAnnualgiving$click == 1)
/R code for pre.R
no_license
BostonCollegeBigData/BILL
R
false
false
1,958
r
hr_df <-read.csv("D:/Rlearn/big data/new edited.csv") View(hr_df) ##addition model dat$x5=relevel(dat$x5,ref="1") fitlm=lm(number~yearag+MA,data=dat) summary(fitlm) ##multiplication model dat$x5=relevel(dat$x5,ref="m") fitlm2=lm(number~yearag+yearag:MA,data=dat) summary(fitlm2) ##mixed model fitlm3=lm(number~yearag+MA+yearag:MA,data=dat) summary(fitlm3) ##downsize(grad year >2005) narrow <- subset(hr_df, ugrad_year>2005) ##keep the donation relevented and divided into LTW and annual giving campaign LTW <- subset(narrow, campaign == "Light the World") Annualgiving <- subset(narrow, campaign == "Annual Giving Campaign") ##downsize(grad year >2005) since2006 <- subset(hrdf, ugrad_year > 2005) View(since2006) ##how many students from each state table(since2006$home_state) ##how many LTW sent emails were sent to each state table(LTW$home_state) ##how many LTW sent emails were opened in each state table(LTW$home_state, LTW$email_open == 1) ##how many LTW sent emails were clicked in each state table(LTW$home_state, LTW$click == 1) ##how many Annualgiving sent emails were sent to each state table(Annualgiving$home_state) ##how many Annualgiving sent emails were opened in each state table(Annualgiving$home_state, Annualgiving$email_open == 1) ##how many Annualgiving sent emails were clicked in each state table(Annualgiving$home_state, Annualgiving$click == 1) ##people who received LTW emails don't donate NonDLTW <- subset(LTW, donate == 0) table(NonDLTW$home_state) table(NonDLTW$home_state,NonDLTW$email_open == 1) table(NonDLTW$home_state,NonDLTW$click == 1) ##people who received Annualgiving emails don't donate NonDAnnualgiving <- subset(Annualgiving, donate == 0) table(NonDAnnualgiving$home_state) table(NonDAnnualgiving$home_state, NonDAnnualgiving$email_open == 1) table(NonDAnnualgiving$home_state, NonDAnnualgiving$click == 1)
#!/usr/local/bin/Rscript

library(gbm)     # Load the gbm package
library(Metrics) # Load Metrics for the mae method
library(R.utils) # for countLines
library(ggplot2)
library(scales)

trainDataFileName = "train.csv"
testDataFileName = "test.csv"

# Load training set
DataTrain = read.csv(trainDataFileName, header = T, sep = ",", dec = ".")
# Load testing set
DataTest = read.table(testDataFileName, header = T, sep = ",", dec = ".")
#summary(DataTest)

#number of iterations
k = 2

# For error-rate testing, hold out part of the training set
cnt <- countLines(trainDataFileName)
trainCnt = round(cnt*0.7)
testCnt = cnt - trainCnt
#trainCnt #Debug
#testCnt # Debug

testHeader = colnames(DataTest)
trainHeader = colnames(DataTrain)
#testHeader # Debug
#trainHeader# Debug

DataTrainLocal = read.csv(trainDataFileName, header = T, nrows = trainCnt, sep = ",", dec = ".")
#DataTrainLocal #Debug

# another way to store only testHeader columns
DataTestLocal = read.csv(trainDataFileName, header = F, skip = trainCnt, sep = ",", dec = ".", col.names = trainHeader)
testTarget = DataTestLocal[, "target"]
DataTestLocal = DataTestLocal[, testHeader]
#DataTestLocal #Debug
#testTarget #Debug
#names(DataTestLocal)#Debug

# Time-of-day feature
DataTrainLocal$time <- strftime(DataTrainLocal$datetime, format="%H:%M:%S")
DataTrainLocal$time <- as.POSIXct(DataTrainLocal$time, format="%H:%M:%S")
DataTrainLocal$time <- as.numeric(DataTrainLocal$time)
#DataTrainLocal$time

# Date column
DataTrainLocal$date <- strftime(DataTrainLocal$datetime, format="%d")
DataTrainLocal$date <- as.POSIXct(DataTrainLocal$date, format="%d")
DataTrainLocal$date <- as.numeric(DataTrainLocal$date)
#DataTrainLocal$date #Debug
DataTrainLocal$datetime <- NULL
#DataTrainLocal$datetime #Debug

# Train the model
gbm1 <- gbm(target ~ . ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
#gbm2 <- gbm(target ~ date ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
#gbm3 <- gbm(target ~ temp + u + v + prmsl +rh,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)

### Predict the test set: apply the same feature engineering first
DataTestLocal$time <- strftime(DataTestLocal$datetime, format="%H:%M:%S")
DataTestLocal$time <- as.POSIXct(DataTestLocal$time, format="%H:%M:%S")
DataTestLocal$time <- as.numeric(DataTestLocal$time)

DataTestLocal$date <- strftime(DataTestLocal$datetime, format="%d")
DataTestLocal$date <- as.POSIXct(DataTestLocal$date, format="%d")
DataTestLocal$date <- as.numeric(DataTestLocal$date)
DataTestLocal$datetime <- NULL

Prediction <- predict.gbm(gbm1, DataTestLocal, n.trees=3000)
#Prediction2 <- predict.gbm(gbm2, DataTestLocal, n.trees=3000)
#Prediction3 <- predict.gbm(gbm3, DataTestLocal, n.trees=3000)
#Prediction <- cbind(Prediction, Prediction3)
Prediction

mymae <- function(par) {
  # `par` is currently unused: each call refits the model and returns the
  # mean absolute error on the held-out split
  gbm1 <- gbm(target ~ . ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
  Prediction <- predict.gbm(gbm1, DataTestLocal, n.trees=3000)
  err <- mae(testTarget, Prediction)
  err
}
result <- optim(par = c(0, 1), fn = mymae)  # minimise MAE (no-op until mymae uses par)

pred <- round(Prediction, digits = 2)

write.table(pred, file = "submission.csv", sep = ",", qmethod = "double", row.names=FALSE)

err <- mae(testTarget, Prediction)
err
/crossValid.R~
no_license
nkarapetyan/sepp
R
false
false
3,587
#!/usr/local/bin/Rscript

library(gbm)     # Load the gbm package
library(Metrics) # Load Metrics for the mae method
library(R.utils) # for countLines
library(ggplot2)
library(scales)

trainDataFileName = "train.csv"
testDataFileName = "test.csv"

# Load training set
DataTrain = read.csv(trainDataFileName, header = T, sep = ",", dec = ".")
# Load testing set
DataTest = read.table(testDataFileName, header = T, sep = ",", dec = ".")
#summary(DataTest)

#number of iterations
k = 2

# For error-rate testing, hold out part of the training set
cnt <- countLines(trainDataFileName)
trainCnt = round(cnt*0.7)
testCnt = cnt - trainCnt
#trainCnt #Debug
#testCnt # Debug

testHeader = colnames(DataTest)
trainHeader = colnames(DataTrain)
#testHeader # Debug
#trainHeader# Debug

DataTrainLocal = read.csv(trainDataFileName, header = T, nrows = trainCnt, sep = ",", dec = ".")
#DataTrainLocal #Debug

# another way to store only testHeader columns
DataTestLocal = read.csv(trainDataFileName, header = F, skip = trainCnt, sep = ",", dec = ".", col.names = trainHeader)
testTarget = DataTestLocal[, "target"]
DataTestLocal = DataTestLocal[, testHeader]
#DataTestLocal #Debug
#testTarget #Debug
#names(DataTestLocal)#Debug

# Time-of-day feature
DataTrainLocal$time <- strftime(DataTrainLocal$datetime, format="%H:%M:%S")
DataTrainLocal$time <- as.POSIXct(DataTrainLocal$time, format="%H:%M:%S")
DataTrainLocal$time <- as.numeric(DataTrainLocal$time)
#DataTrainLocal$time

# Date column
DataTrainLocal$date <- strftime(DataTrainLocal$datetime, format="%d")
DataTrainLocal$date <- as.POSIXct(DataTrainLocal$date, format="%d")
DataTrainLocal$date <- as.numeric(DataTrainLocal$date)
#DataTrainLocal$date #Debug
DataTrainLocal$datetime <- NULL
#DataTrainLocal$datetime #Debug

# Train the model
gbm1 <- gbm(target ~ . ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
#gbm2 <- gbm(target ~ date ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
#gbm3 <- gbm(target ~ temp + u + v + prmsl +rh,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)

### Predict the test set: apply the same feature engineering first
DataTestLocal$time <- strftime(DataTestLocal$datetime, format="%H:%M:%S")
DataTestLocal$time <- as.POSIXct(DataTestLocal$time, format="%H:%M:%S")
DataTestLocal$time <- as.numeric(DataTestLocal$time)

DataTestLocal$date <- strftime(DataTestLocal$datetime, format="%d")
DataTestLocal$date <- as.POSIXct(DataTestLocal$date, format="%d")
DataTestLocal$date <- as.numeric(DataTestLocal$date)
DataTestLocal$datetime <- NULL

Prediction <- predict.gbm(gbm1, DataTestLocal, n.trees=3000)
#Prediction2 <- predict.gbm(gbm2, DataTestLocal, n.trees=3000)
#Prediction3 <- predict.gbm(gbm3, DataTestLocal, n.trees=3000)
#Prediction <- cbind(Prediction, Prediction3)
Prediction

mymae <- function(par) {
  # `par` is currently unused: each call refits the model and returns the
  # mean absolute error on the held-out split
  gbm1 <- gbm(target ~ . ,distribution="laplace", data=DataTrainLocal, n.trees=3000, interaction.depth =6, shrinkage=0.05)
  Prediction <- predict.gbm(gbm1, DataTestLocal, n.trees=3000)
  err <- mae(testTarget, Prediction)
  err
}
result <- optim(par = c(0, 1), fn = mymae)  # minimise MAE (no-op until mymae uses par)

pred <- round(Prediction, digits = 2)

write.table(pred, file = "submission.csv", sep = ",", qmethod = "double", row.names=FALSE)

err <- mae(testTarget, Prediction)
err
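The hand-rolled 70/30 split above can also be replaced by gbm's built-in cross-validation; a hedged sketch under the same column names:

# Let gbm run k-fold CV and pick the tree count that minimizes CV error.
gbm_cv <- gbm(target ~ ., distribution = "laplace", data = DataTrainLocal,
              n.trees = 3000, interaction.depth = 6, shrinkage = 0.05,
              cv.folds = 5)
best_iter <- gbm.perf(gbm_cv, method = "cv")  # optimal number of trees
pred_cv <- predict(gbm_cv, DataTestLocal, n.trees = best_iter)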
################################################################################
# PL Files
################################################################################

#' Read a set of PL Files
#'
#' PL files come in one of four types and are pipe-delimited with no header row.
#' This function speedily reads in the files and assigns the appropriate column
#' names and types.
#'
#' @param path a path to a folder containing PL files. Can also be a path or URL
#' for a ZIP file, which will be downloaded and unzipped.
#' @param ... passed on to [readr::read_delim()]
#'
#' @return A list of data frames containing the four PL files.
#'
#' @examples
#' pl_ex_path <- system.file('extdata/ri2018_2020Style.pl', package = 'PL94171')
#' pl <- pl_read(pl_ex_path)
#' # or try `pl_read(pl_url("RI", 2010))`
#'
#' @export
pl_read = function(path, ...) {
    if (length(path) > 1) {
        if (all(stringr::str_detect(path, "^(http://|https://|ftp://|ftps://)"))) {
            zip_dir = withr::local_tempdir(pattern="pl")
            for (p in path) {
                zip_path = withr::local_tempfile(pattern="pl", fileext=".zip")
                download_census(p, zip_path)
                utils::unzip(zip_path, exdir=zip_dir)
            }
            path = zip_dir
        } else {
            stop("Provide a single path to the directory containing the PL files.")
        }
    } else if (stringr::str_detect(path, "^(http://|https://|ftp://|ftps://)")) {
        zip_path = withr::local_tempfile(pattern="pl", fileext=".zip")
        download_census(path, zip_path)
        zip_dir = file.path(dirname(zip_path), "PL-unzip")
        utils::unzip(zip_path, exdir=zip_dir)
        path = zip_dir
    } else if (!dir.exists(path)) { # compressed file
        zip_dir = withr::local_tempdir()
        utils::unzip(path, exdir=zip_dir)
        path = zip_dir
    }

    files = list.files(path, pattern="\\.u?pl$", ignore.case=TRUE)
    if (length(files) == 0)
        stop("No P.L. 94-171 files found in the provided directory.")

    out = list()
    ftypes = c("00001", "00002", "00003", "geo")
    for (fname in files) {
        file = file.path(path, fname)
        delim = str_match(readr::read_lines(file, n_max=1), "\\W")[,1]

        if (delim != " ") {
            file_type = ftypes[stringr::str_detect(fname, ftypes)]
            row1 = suppressMessages(readr::read_delim(file, delim=delim, col_names=FALSE, n_max=1))
            out[[file_type]] = readr::read_delim(file, delim=delim,
                                                 col_names=pl_headers[[file_type]][1:ncol(row1)],
                                                 col_types=str_sub(pl_spec[[file_type]], 1, ncol(row1)),
                                                 progress=interactive(), ...)
        } else { # Legacy geo file
            col_length = nchar(readr::read_lines(file, n_max=1))
            spec = if (col_length == 500) fwf_2010 else fwf_2000
            types = rep("c", nrow(spec))
            names(types) = spec$col_names
            types["LOGRECNO"] = "i"
            out$geo = readr::read_fwf(file, spec, col_types=types, progress=interactive())
            out$geo$GEOID = with(out$geo, case_when(!is.na(BLOCK) ~ str_c("7500000US", STATE, COUNTY, TRACT, BLOCK),
                                                    !is.na(BLKGRP) ~ str_c("1500000US", STATE, COUNTY, TRACT, BLKGRP),
                                                    !is.na(TRACT) ~ str_c("1400000US", STATE, COUNTY, TRACT),
                                                    !is.na(COUNTY) ~ str_c("0500000US", STATE, COUNTY),
                                                    TRUE ~ NA_character_))
        }
    }

    withr::deferred_clear()
    out
}

#' @rdname pl_read
#' @export
read_pl = pl_read

#' Get the URL for PL files for a particular state and year
#'
#' @param abbr The state to download the PL files for
#' @param year The year of PL file to download. Supported years: 2000, 2010,
#' 2020 (after release). 2000 files are in a different format.
#' Earlier years available on tape or CD-ROM only.
#'
#' @return A character vector containing the URL to a ZIP containing the PL files.
#' #' @examples #' pl_url("RI", 2010) #' #' @export pl_url = function(abbr, year=2010) { name = tigris::fips_codes$state_name[match(abbr, tigris::fips_codes$state)] name = stringr::str_replace_all(name, " ", "_") if (year == 2000) { url = str_glue("https://www2.census.gov/census_2000/datasets/redistricting_file--pl_94-171/", "{name}/{tolower(abbr)}{c('00001', '00002', 'geo')}.upl.zip") } else if (year == 2010) { url = str_glue("https://www2.census.gov/census_2010/01-Redistricting_File--PL_94-171/", "{name}/{tolower(abbr)}2010.pl.zip") } else if (year == 2020) { warning("2020 P.L. 94-171 files have not been released yet.\n", "Download Rhode Island prototype data at\n", "<https://www2.census.gov/programs-surveys/decennial/rdo/datasets/2018/ri2018_2020Style.pl.zip>") url = str_glue("https://www2.census.gov/programs-surveys/decennial/2020/data/", "01-Redistricting_File--PL_94-171/{name}/{tolower(abbr)}2020.pl.zip") } as.character(url) }
/R/pl_read.R
permissive
jimhester/PL94171
R
false
false
5,317
r
################################################################################
# PL Files
################################################################################

#' Read a set of PL Files
#'
#' PL files come in one of four types and are pipe-delimited with no header row.
#' This function speedily reads in the files and assigns the appropriate column
#' names and types.
#'
#' @param path a path to a folder containing PL files. Can also be a path or URL
#' for a ZIP file, which will be downloaded and unzipped.
#' @param ... passed on to [readr::read_delim()]
#'
#' @return A list of data frames containing the four PL files.
#'
#' @examples
#' pl_ex_path <- system.file('extdata/ri2018_2020Style.pl', package = 'PL94171')
#' pl <- pl_read(pl_ex_path)
#' # or try `pl_read(pl_url("RI", 2010))`
#'
#' @export
pl_read = function(path, ...) {
    if (length(path) > 1) {
        if (all(stringr::str_detect(path, "^(http://|https://|ftp://|ftps://)"))) {
            zip_dir = withr::local_tempdir(pattern="pl")
            for (p in path) {
                zip_path = withr::local_tempfile(pattern="pl", fileext=".zip")
                download_census(p, zip_path)
                utils::unzip(zip_path, exdir=zip_dir)
            }
            path = zip_dir
        } else {
            stop("Provide a single path to the directory containing the PL files.")
        }
    } else if (stringr::str_detect(path, "^(http://|https://|ftp://|ftps://)")) {
        zip_path = withr::local_tempfile(pattern="pl", fileext=".zip")
        download_census(path, zip_path)
        zip_dir = file.path(dirname(zip_path), "PL-unzip")
        utils::unzip(zip_path, exdir=zip_dir)
        path = zip_dir
    } else if (!dir.exists(path)) { # compressed file
        zip_dir = withr::local_tempdir()
        utils::unzip(path, exdir=zip_dir)
        path = zip_dir
    }

    files = list.files(path, pattern="\\.u?pl$", ignore.case=TRUE)
    if (length(files) == 0)
        stop("No P.L. 94-171 files found in the provided directory.")

    out = list()
    ftypes = c("00001", "00002", "00003", "geo")
    for (fname in files) {
        file = file.path(path, fname)
        delim = str_match(readr::read_lines(file, n_max=1), "\\W")[,1]

        if (delim != " ") {
            file_type = ftypes[stringr::str_detect(fname, ftypes)]
            row1 = suppressMessages(readr::read_delim(file, delim=delim, col_names=FALSE, n_max=1))
            out[[file_type]] = readr::read_delim(file, delim=delim,
                                                 col_names=pl_headers[[file_type]][1:ncol(row1)],
                                                 col_types=str_sub(pl_spec[[file_type]], 1, ncol(row1)),
                                                 progress=interactive(), ...)
        } else { # Legacy geo file
            col_length = nchar(readr::read_lines(file, n_max=1))
            spec = if (col_length == 500) fwf_2010 else fwf_2000
            types = rep("c", nrow(spec))
            names(types) = spec$col_names
            types["LOGRECNO"] = "i"
            out$geo = readr::read_fwf(file, spec, col_types=types, progress=interactive())
            out$geo$GEOID = with(out$geo, case_when(!is.na(BLOCK) ~ str_c("7500000US", STATE, COUNTY, TRACT, BLOCK),
                                                    !is.na(BLKGRP) ~ str_c("1500000US", STATE, COUNTY, TRACT, BLKGRP),
                                                    !is.na(TRACT) ~ str_c("1400000US", STATE, COUNTY, TRACT),
                                                    !is.na(COUNTY) ~ str_c("0500000US", STATE, COUNTY),
                                                    TRUE ~ NA_character_))
        }
    }

    withr::deferred_clear()
    out
}

#' @rdname pl_read
#' @export
read_pl = pl_read

#' Get the URL for PL files for a particular state and year
#'
#' @param abbr The state to download the PL files for
#' @param year The year of PL file to download. Supported years: 2000, 2010,
#' 2020 (after release). 2000 files are in a different format.
#' Earlier years available on tape or CD-ROM only.
#'
#' @return A character vector containing the URL to a ZIP containing the PL files.
#' #' @examples #' pl_url("RI", 2010) #' #' @export pl_url = function(abbr, year=2010) { name = tigris::fips_codes$state_name[match(abbr, tigris::fips_codes$state)] name = stringr::str_replace_all(name, " ", "_") if (year == 2000) { url = str_glue("https://www2.census.gov/census_2000/datasets/redistricting_file--pl_94-171/", "{name}/{tolower(abbr)}{c('00001', '00002', 'geo')}.upl.zip") } else if (year == 2010) { url = str_glue("https://www2.census.gov/census_2010/01-Redistricting_File--PL_94-171/", "{name}/{tolower(abbr)}2010.pl.zip") } else if (year == 2020) { warning("2020 P.L. 94-171 files have not been released yet.\n", "Download Rhode Island prototype data at\n", "<https://www2.census.gov/programs-surveys/decennial/rdo/datasets/2018/ri2018_2020Style.pl.zip>") url = str_glue("https://www2.census.gov/programs-surveys/decennial/2020/data/", "01-Redistricting_File--PL_94-171/{name}/{tolower(abbr)}2020.pl.zip") } as.character(url) }
#----------------------- PLOT A MAP ------------------------- #----------------------- LOAD PACKAGES ---------------------- library(rgdal) library(rgeos) library(raster) # if you don't have any of these, you can install them with this command: # install.packages("raster") # Especially for rgdal, if prompted, you want to install the binary, NOT from source. #----------------------- GET YE SOME DATA ------------------------- # "GSHHG: A Global Self-consistent, Hierarchical, High-resolution Geography Database" # http://www.soest.hawaii.edu/pwessel/gshhg/ # also see https://www.ngdc.noaa.gov/mgg/shorelines/gshhs.html # It's kinda large (~150MB on 20160111), but for many purposes is probably the only data you need. # download the shape files: # download.file(url = "http://www.soest.hawaii.edu/pwessel/gshhg/gshhg-shp-2.3.4.zip", destfile = file.path(".", "gshhg-shp-2.3.4.zip")) # You'll then need to unzip it; you could do it from R, but the destination path isn't always the same, and it could overwrite something. # system(unzip "gshhg-shp-2.3.4.zip" -d "gshhg-shp-2.3.4") # The geography data come in five resolutions: # full resolution (f): Original (full) data resolution. # high resolution (h): About 80 % reduction in size and quality. # intermediate resolution (i): Another ~80 % reduction. # low resolution (l): Another ~80 % reduction. # crude resolution (c): Another ~80 % reduction. resolution <- "f" # give the path to the directory containing the resolution you'd like to work with GSHHG_dir <- "/Users/threeprime/Documents/Data/GIS/gshhg-shp-2.3.4/GSHHS_shp/" GSHHG_dir <- paste(GSHHG_dir, resolution, sep = "") # what is the filename (without extension) of the layer you want to load? # layer_of_interest <- "GSHHS_l_L1" layer_of_interest <- paste("GSHHS_", resolution, "_L1", sep = "") # read in the data: remember, this is worldwide data GSHHG_obj <- readOGR(dsn = GSHHG_dir, layer = layer_of_interest) # specify some info about the area of interest. REGION <- "PUGET_SOUND" # PUGET_SOUND , SAN_JUAN_ISLAND if(REGION == "PUGET_SOUND"){ LAT <- c(47,49); LON <- c(-124, -122) } else if(REGION == "SAN_JUAN_ISLAND"){ LAT <- c(48.4, 48.65) ; LON <- c(-123.2, -122.95) } region_lat <- LAT # min, max latitude region_lon <- LON # min, max longitude # rather than clip right at the min and max points, you might want the extent to be a bit larger. Set the following line to the percent increase you'd like expansion_percent_lon <- 10 expansion_percent_lat <- 10 lon_exp <- diff(region_lon) * expansion_percent_lon/100 plot_lon <- region_lon + c(-lon_exp, lon_exp) lat_exp <- diff(region_lat) * expansion_percent_lat/100 plot_lat <- region_lat + c(-lat_exp, lat_exp) bb_region <- rbind(x = plot_lon, y = plot_lat) bb_dim <- apply(bb_region, 1, diff) # subset the worldwide data to the area you specified above PS_map <- crop(GSHHG_obj, extent(c(bb_region[1,], bb_region[2,]))) # presumably, you did the subsetting to reduce the size of the gigantic shapefile and make operations on it (like plotting) faster, so you should probably remove the huge shapefile/thingy to free up some memory. rm(GSHHG_obj) # If you'd like to add some features named_features <- rbind( # c("Puget Sound", -122.5, 48), c("Seattle", -122.333056, 47.609722) ) colnames(named_features) <- c("name", "lon", "lat") #----------------------- PLOT THE FRIGGIN MAP ALREADY! ------------------------- # how wide should the map be? map_width_inches <- 5 # Then that means the height should be... 
map_height_inches <- round(bb_dim[2] * map_width_inches / bb_dim[1]) # OUTPUT FILE NAME AND DIMENSIONS pdf( file = "site_map.pdf", width = map_width_inches, height = map_height_inches ) # SET BORDERS par(mar = c(4,4,1,1)) # BASE MAP plot( x = PS_map, # axes = TRUE, col = "darkseagreen", border = "grey", bg = "aliceblue", ylim = region_lat, xlim = region_lon ) # POINTS points( x = sites$Lon, y = sites$Lat, col = "black", bg = c("red", "yellow", "orange")[as.numeric(sites$team)], pch = c(21, 24)[as.numeric(sites$Dataset)], cex = 1.5 ) # X Axis (longitude) axis( side = 1, # at = seq(from = region_lon[1], to = region_lon[2]), cex.axis = 0.8, line = 0 ) # Y Axis (latitude) axis( side = 2, # at = seq(from = region_lat[1], to = region_lat[2]), line = 0, cex.axis = 0.8, las = 2 ) # ADD BORDER box() # LABELS title( # main = "Puget Sound plotted after subset", sub = "", xlab = "Longitude", ylab = "Latitude" ) make.italic <- function(x) as.expression(lapply(x, function(y) bquote(italic(.(y))))) # OTHER TEXT text( x = as.numeric(named_features[,"lon"]), y = as.numeric(named_features[,"lat"]), labels = make.italic(named_features[,"name"]), col = c("darkgreen", "lightblue4"), pos = c(4, NULL) ) dev.off() # colors # library(RColorBrewer) # par(mar = c(0, 4, 0, 0)) # display.brewer.all() # plot(1:100, col = brewer.pal(11, "Spectral"))
/R/plot_map.R
no_license
jimmyodonnell/Computips
R
false
false
4,946
r
#----------------------- PLOT A MAP ------------------------- #----------------------- LOAD PACKAGES ---------------------- library(rgdal) library(rgeos) library(raster) # if you don't have any of these, you can install them with this command: # install.packages("raster") # Especially for rgdal, if prompted, you want to install the binary, NOT from source. #----------------------- GET YE SOME DATA ------------------------- # "GSHHG: A Global Self-consistent, Hierarchical, High-resolution Geography Database" # http://www.soest.hawaii.edu/pwessel/gshhg/ # also see https://www.ngdc.noaa.gov/mgg/shorelines/gshhs.html # It's kinda large (~150MB on 20160111), but for many purposes is probably the only data you need. # download the shape files: # download.file(url = "http://www.soest.hawaii.edu/pwessel/gshhg/gshhg-shp-2.3.4.zip", destfile = file.path(".", "gshhg-shp-2.3.4.zip")) # You'll then need to unzip it; you could do it from R, but the destination path isn't always the same, and it could overwrite something. # system(unzip "gshhg-shp-2.3.4.zip" -d "gshhg-shp-2.3.4") # The geography data come in five resolutions: # full resolution (f): Original (full) data resolution. # high resolution (h): About 80 % reduction in size and quality. # intermediate resolution (i): Another ~80 % reduction. # low resolution (l): Another ~80 % reduction. # crude resolution (c): Another ~80 % reduction. resolution <- "f" # give the path to the directory containing the resolution you'd like to work with GSHHG_dir <- "/Users/threeprime/Documents/Data/GIS/gshhg-shp-2.3.4/GSHHS_shp/" GSHHG_dir <- paste(GSHHG_dir, resolution, sep = "") # what is the filename (without extension) of the layer you want to load? # layer_of_interest <- "GSHHS_l_L1" layer_of_interest <- paste("GSHHS_", resolution, "_L1", sep = "") # read in the data: remember, this is worldwide data GSHHG_obj <- readOGR(dsn = GSHHG_dir, layer = layer_of_interest) # specify some info about the area of interest. REGION <- "PUGET_SOUND" # PUGET_SOUND , SAN_JUAN_ISLAND if(REGION == "PUGET_SOUND"){ LAT <- c(47,49); LON <- c(-124, -122) } else if(REGION == "SAN_JUAN_ISLAND"){ LAT <- c(48.4, 48.65) ; LON <- c(-123.2, -122.95) } region_lat <- LAT # min, max latitude region_lon <- LON # min, max longitude # rather than clip right at the min and max points, you might want the extent to be a bit larger. Set the following line to the percent increase you'd like expansion_percent_lon <- 10 expansion_percent_lat <- 10 lon_exp <- diff(region_lon) * expansion_percent_lon/100 plot_lon <- region_lon + c(-lon_exp, lon_exp) lat_exp <- diff(region_lat) * expansion_percent_lat/100 plot_lat <- region_lat + c(-lat_exp, lat_exp) bb_region <- rbind(x = plot_lon, y = plot_lat) bb_dim <- apply(bb_region, 1, diff) # subset the worldwide data to the area you specified above PS_map <- crop(GSHHG_obj, extent(c(bb_region[1,], bb_region[2,]))) # presumably, you did the subsetting to reduce the size of the gigantic shapefile and make operations on it (like plotting) faster, so you should probably remove the huge shapefile/thingy to free up some memory. rm(GSHHG_obj) # If you'd like to add some features named_features <- rbind( # c("Puget Sound", -122.5, 48), c("Seattle", -122.333056, 47.609722) ) colnames(named_features) <- c("name", "lon", "lat") #----------------------- PLOT THE FRIGGIN MAP ALREADY! ------------------------- # how wide should the map be? map_width_inches <- 5 # Then that means the height should be... 
map_height_inches <- round(bb_dim[2] * map_width_inches / bb_dim[1]) # OUTPUT FILE NAME AND DIMENSIONS pdf( file = "site_map.pdf", width = map_width_inches, height = map_height_inches ) # SET BORDERS par(mar = c(4,4,1,1)) # BASE MAP plot( x = PS_map, # axes = TRUE, col = "darkseagreen", border = "grey", bg = "aliceblue", ylim = region_lat, xlim = region_lon ) # POINTS points( x = sites$Lon, y = sites$Lat, col = "black", bg = c("red", "yellow", "orange")[as.numeric(sites$team)], pch = c(21, 24)[as.numeric(sites$Dataset)], cex = 1.5 ) # X Axis (longitude) axis( side = 1, # at = seq(from = region_lon[1], to = region_lon[2]), cex.axis = 0.8, line = 0 ) # Y Axis (latitude) axis( side = 2, # at = seq(from = region_lat[1], to = region_lat[2]), line = 0, cex.axis = 0.8, las = 2 ) # ADD BORDER box() # LABELS title( # main = "Puget Sound plotted after subset", sub = "", xlab = "Longitude", ylab = "Latitude" ) make.italic <- function(x) as.expression(lapply(x, function(y) bquote(italic(.(y))))) # OTHER TEXT text( x = as.numeric(named_features[,"lon"]), y = as.numeric(named_features[,"lat"]), labels = make.italic(named_features[,"name"]), col = c("darkgreen", "lightblue4"), pos = c(4, NULL) ) dev.off() # colors # library(RColorBrewer) # par(mar = c(0, 4, 0, 0)) # display.brewer.all() # plot(1:100, col = brewer.pal(11, "Spectral"))
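The points() call above assumes a `sites` data frame that the script never defines; a minimal, invented placeholder (define it before the plotting section) so the script runs end-to-end. Coordinates and factor levels are illustrative only.

# Hypothetical sampling-site table matching the columns used in points():
# Lon, Lat, and factor columns `team` (3 levels -> 3 fill colors) and
# `Dataset` (2 levels -> 2 plotting symbols).
sites <- data.frame(
  Lon     = c(-122.45, -122.90, -123.05),
  Lat     = c(47.60, 48.10, 48.50),
  team    = factor(c("A", "B", "C")),
  Dataset = factor(c("eDNA", "trawl", "eDNA"))
)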
# Collapse an expression matrix m to one row per gene symbol g, keeping the row
# with the largest weight w and dropping empty or placeholder symbols.
.getUGM=function (m, g, w)
{
    if (nrow(m) != length(g) || nrow(m) != length(w)) {
        stop("nrow of m should be equal to length of g and w")
    }
    i = order(w, decreasing = T)
    oki = i[which(!duplicated(g[i]) & !g[i] %in% c("---", " ", "", NA))]
    okm = m[oki, ]
    rownames(okm) = g[oki]
    okm
}

# Project scaled expression onto a molecular gradient system via the pseudo-inverse
# of its gene-weight matrix; returns NA if too few genes overlap with the system.
.internalProjection=function(expg,sys,nlim=4000,center=T,scale=T){
  comg=intersect(rownames(sys$gw),rownames(expg))
  if(length(comg)<nlim){
    return(NA)
  }else{
    invs=MASS::ginv(as.matrix(sys$gw[comg,]))
    scexp=scale(expg[comg,],center=center,scale=scale)
    return( (t( scexp )%*% t(invs))[,sys$k] * sys$dir)
  }
}

#' projectMolGrad
#'
#' @description Project a transcriptomic dataset on the Pancreatic Adenocarcinoma Molecular Gradient.
#' Will throw an error if there is only one sample (one column).
#'
#' @param newexp gene expression matrix or dataframe with genes in row(names) and samples in column(names).
#' @param geneSymbols vector of gene symbols for the newexp dataset (simply set to rownames if newexp already has a single value per gene symbol)
#'
#' @param normalize Normalization (i.e. calibration) of the molecular gradient systems
#'
#' @return data frame of four projections based on the molecular gradients computed from four different types of expression datasets
#'
#' @export
#'
#'
#' @examples
#' g=rownames(pdacmolgrad:::.molGradSys$PDX$gw)
#' projectMolGrad(matrix(rnorm(length(g)*10),ncol=10),g)
projectMolGrad=function(newexp,geneSymbols,normalize=c("newRef","sameAsRef","raw")){
  normalize=match.arg(normalize)
  if(nrow(newexp)!= length(geneSymbols)){
    stop("geneSymbols should be a vector of gene symbols exactly corresponding to each row of the newexp dataset")
  }
  expg=pdacmolgrad:::.getUGM(newexp,geneSymbols,matrixStats::rowSds(as.matrix(newexp)))

  projsL=lapply(.molGradSys,function(mg){
    proj=.internalProjection(expg, mg,nlim=4000,center=T,scale=T)
    if(is.na(proj[1])){
      return(rep(NA,ncol(expg)))
    }
    switch(normalize,
           newRef={
             fproj=( (proj -mean(proj))/(3*sd(proj)) )
           },
           sameAsRef={
             fproj=( (proj -mg$avg)/(mg$sd) )
           },
           raw={
             fproj=proj
           },
           {
             fproj=proj
           }
    )
    return(fproj)
  })
  if(all(sapply(lapply(projsL,is.na),all))){
    stop("geneSymbols should be a vector of Hugo gene symbols")
  }
  data.frame(do.call(cbind,projsL))
}
/R/project.R
no_license
RemyNicolle/pdacmolgrad
R
false
false
2,528
r
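A short usage sketch for the three normalization modes of projectMolGrad() above, assuming the pdacmolgrad package is installed and loaded; the random matrix stands in for a real expression dataset, following the package's own roxygen example:

# Sketch only: random values in place of a real expression matrix.
g <- rownames(pdacmolgrad:::.molGradSys$PDX$gw)
expr <- matrix(rnorm(length(g) * 10), ncol = 10)
projectMolGrad(expr, g)                           # "newRef": rescaled against the new data
projectMolGrad(expr, g, normalize = "sameAsRef")  # calibrated against the reference system
projectMolGrad(expr, g, normalize = "raw")        # unscaled projection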
\name{cnDot-method}
\alias{cnDot}
\alias{cnDot,catNetwork-method}
\alias{cnDot,catNetwork,character-method}
\alias{cnDot,catNetwork,character-method,character-method}
\alias{cnDot,matrix-method}
\alias{cnDot,matrix,character-method}
\alias{cnDot,matrix,character-method,character-method}
\alias{cnDot,list-method}
\alias{cnDot,list,character-method}
\alias{cnDot,list,character-method,character-method}

\title{Network Description File}

\description{The function generates a dot-file, the native storage format for the \code{Graphviz} software package, that describes the graph structure of a \code{catNetwork} object.}

\usage{
cnDot(object, file=NULL, format="ps", nodestyle=NULL, edgestyle=NULL)
}

\arguments{
\item{object}{a \code{catNetwork}, a list of \code{catNetwork}s or a parent matrix}
\item{file}{a \code{character}, an optional output file name}
\item{format}{a \code{character}, an optional output file format, "ps" or "pdf"}
\item{nodestyle}{a \code{list} of triplets, nodes' shape, color and edge-color}
\item{edgestyle}{a \code{list} of triplets, nodes' shape, color and edge-color}
}

\details{
The function generates a \code{dot}-text file as supported by the \code{Graphviz} library.
In order to draw a graph the user needs a \code{dot}-file converter and a \code{pdf}/postscript viewer.
The environment variables \code{R_DOTVIEWER} and \code{R_PDFVIEWER} specify the corresponding executable routines.
If \code{Graphviz} is installed and the variable \code{R_DOTVIEWER} is set to the full path of the \code{dot} executable file (the routine that converts a \code{dot}-text file to a postscript or \code{pdf}), a \code{pdf} or postscript file is created depending on the value of the \code{format} parameter.

If the \code{file} variable is not specified, the function just prints out the resulting string that would otherwise be written into a \code{dot} file.
Next, if a \code{pdf}-viewer is available, the created postscript or \code{pdf} file is shown.
}

\value{A \code{character} or a \code{dot}-file}

\author{N. Balov}
\examples{
cnet <- cnRandomCatnet(numnodes=10, maxpars=3, numcats=2)
cnDot(object=cnet, file="cnet")
}

\seealso{\code{\link{sdnet-package}}, \code{\link{cnPlot}}
}

\keyword{methods}
\keyword{graphs}
\keyword{aplot}
/man/cnDot.Rd
no_license
cran/sdnet
R
false
false
2,286
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ParameterSet_S3methods.R \name{as.ParameterSet} \alias{as.ParameterSet} \alias{as.ParameterSet.data.table} \alias{as.ParameterSet.prm} \alias{as.ParameterSet.list} \title{Coercions to ParameterSet} \usage{ as.ParameterSet(x, ...) \method{as.ParameterSet}{data.table}(x, ...) \method{as.ParameterSet}{prm}(x, ...) \method{as.ParameterSet}{list}(x, ...) } \arguments{ \item{x}{(\code{ANY}) \cr Object to coerce.} \item{...}{(\code{ANY}) \cr Other arguments passed to \link{ParameterSet}, such as \code{tag_properties}.} } \description{ Coercions to ParameterSet }
/man/as.ParameterSet.Rd
permissive
hadley/param6
R
false
true
644
rd
library(AnaCoDa)
library(profmem)
library(argparse)

rm(list=ls())
parser <- ArgumentParser()
parser$add_argument("-i","--input",type="character",default="./")
parser$add_argument("-o","--output",type="character",default="./")
parser$add_argument("-d","--div",type="integer",default=4)
parser$add_argument("-s","--samp",type="integer",default=1000)
parser$add_argument("-a","--adapt",type="integer",default=100)
parser$add_argument("-t","--thin",type="integer",default=10)
parser$add_argument("-n","--threads",type="integer",default=1)
args <- parser$parse_args()

div <- args$div
input <- args$input
directory <- args$output
thin <- args$thin
adapt <- args$adapt
samp <- args$samp
num_threads <- args$threads

# Writes the codon-specific parameter estimates for each mixture; called after every MCMC run below.
createParameterOutput <- function(parameter, numMixtures, samples, mixture.labels, samples.percent.keep=1, relative.to.optimal.codon=F, report.original.ref=T)
{
  for (i in 1:numMixtures)
  {
    getCSPEstimates(parameter,paste(dir_name,"Parameter_est",mixture.labels[i],sep="/"),i,samples*samples.percent.keep,relative.to.optimal.codon=relative.to.optimal.codon,report.original.ref = report.original.ref)
  }
}

createTracePlots <- function(trace, model,genome,numMixtures,samples,mixture.labels,samples.percent.keep=1)
{
  for (i in 1:numMixtures)
  {
    plot(trace, what = "Mutation", mixture = i)
    plot(trace, what = "Selection", mixture = i)
    plot(model, genome, samples = samples*samples.percent.keep, mixture = i,main = mixture.labels[i])
  }
}

fasta.folders <- input  #, "../data/cds/sampled/", "../data/cds/sampled/", "../data/cds/filtered/")
fasta.files <- list.files(path=fasta.folders,pattern="*.fasta",full.names = F)
print(fasta.files)
mixture.labels <- unlist(strsplit(fasta.files,split=".fasta"))
fasta.paths <- paste0(fasta.folders, fasta.files)
numMixtures <- length(fasta.files)
mixture.sizes <- rep(0, numMixtures)

## Note: writing a for loop to deal with all mixtures (1 - n.mixtures) is tricky.
## Part of the issue is the appending of the object defined in the command and the assignment of the output
mixture.index <- 1;
genome <- initializeGenomeObject(file=fasta.paths[mixture.index],match.expression.by.id = FALSE,append = FALSE)
mixture.sizes[mixture.index] <- length(genome)
if(numMixtures > 1){
  for(mixture.index in 2:numMixtures)
  {
    tmp.length <- length(genome)
    genome <- initializeGenomeObject(file=fasta.paths[mixture.index],genome=genome,match.expression.by.id = FALSE,append = TRUE,positional = T)
    mixture.sizes[mixture.index] <- length(genome) - tmp.length
  }
}
if(length(genome) != sum(mixture.sizes)){
  stop("length(genomeObj) != sum(mixture.sizes), but it should.")
}else{
  print("FASTA files successfully loaded:");
  print(fasta.files[1:numMixtures])
}
cat("Genome loaded\n")

#initialize parameter object
sphi_init <- rep(28,numMixtures)
with.phi <- F
mixDef <- "allUnique"
percent.to.keep <- 1
size <- length(genome)
cat(size,"\n")
index <- c(1:size)
#geneAssignment <- c(rep(1,size.tmp),rep(2,size.tmp.2-size.tmp),rep(3,size-size.tmp.2))
geneAssignment <- rep(1:numMixtures, mixture.sizes)

# init_phi <- c()
# for (i in phi.path)
# {
#   segment_exp <- read.table(file=i,sep=",",header=TRUE)
#   init_phi <- c(init_phi,segment_exp[,2])
# }
# if(length(genome) != length(init_phi)){
#   stop("length(genomeObj) != length(init_phi), but it should.")
# }else{
#   print("Initial Phi values successfully files loaded:");
# }

parameter <- initializeParameterObject(genome,model="ROC",sphi_init,numMixtures, geneAssignment, split.serine = TRUE, mixture.definition = mixDef)
# parameter$initMutationCategories(c(mut),1)
# parameter$initSelectionCategories(c(sel),1)

#initialize MCMC object
samples <-samp
thinning <- thin
adaptiveWidth <-adapt
mcmc <- initializeMCMCObject(samples=samples, thinning=thinning, adaptive.width=adaptiveWidth, est.expression=T, est.csp=TRUE, est.hyper=F,est.mix = FALSE)

#this part sets the adaptation steps
adaptiveRatio=0.5
adaptiveSamples=samples*thinning*adaptiveRatio
mcmc$setStepsToAdapt(adaptiveSamples)

# get model object
model <- initializeModelObject(parameter, "ROC", with.phi)

run_number <- 1
dir.create(directory)
dir_name <- paste0(directory,"/run_",run_number)
dir.create(dir_name)
dir.create(paste(dir_name,"Graphs",sep="/"))
dir.create(paste(dir_name,"Restart_files",sep="/"))
dir.create(paste(dir_name,"Parameter_est",sep="/"))
dir.create(paste(dir_name,"R_objects",sep="/"))

setRestartSettings(mcmc, paste(dir_name,"Restart_files/rstartFile.rst",sep="/"), adaptiveWidth, F)

#run mcmc on genome with parameter using model
sys.runtime<-system.time(
  runMCMC(mcmc, genome, model, num_threads,divergence.iteration = div)
)
sys.runtime <- data.frame(Value=names(sys.runtime),Time=as.vector(sys.runtime))
write.table(sys.runtime,file=paste(dir_name,"mcmc_runtime.csv",sep="/"),sep=",",col.names = T,row.names = T,quote=F)

createParameterOutput(parameter = parameter,numMixtures = numMixtures,mixture.labels = mixture.labels,samples = samples,samples.percent.keep = percent.to.keep,relative.to.optimal.codon = F,report.original.ref = T)

expressionValues <- getExpressionEstimates(parameter,c(1:size),samples*percent.to.keep)

## Plotting Routines ##
write.table(expressionValues,file=paste(dir_name,"Parameter_est/gene_expression.txt",sep="/"),sep=",",col.names = T,quote = F,row.names = F)
expressionValues <- read.csv(file=paste(dir_name,"Parameter_est/gene_expression.txt",sep="/"))

pdf(paste(dir_name,"Graphs/Parameter_comparisons.pdf",sep="/"), width = 11, height = 12)
plot(parameter,what="Mutation",samples=samples*percent.to.keep,mixture.name=mixture.labels) plot(parameter,what="Selection",samples=samples*percent.to.keep,mixture.name=mixture.labels) dev.off() trace <- parameter$getTraceObject() pdf(paste(dir_name,"Graphs/CSP_traces_CUB_plot.pdf",sep="/"), width = 11, height = 12) createTracePlots(trace=trace,model=model,genome=genome,numMixtures=numMixtures,samples=samples,samples.percent.keep = 1,mixture.labels = mixture.labels) dev.off() #plots different aspects of trace pdf(paste(dir_name,"Graphs/mcmc_traces.pdf",sep="/")) plot(mcmc,what = "LogPosterior") plot(trace, what = "ExpectedPhi") aa <- aminoAcids() done.adapt <- TRUE for(a in aa) { if (a=="M"||a=="X"||a=="W") next accept.trace <- trace$getCodonSpecificAcceptanceRateTraceForAA(a) len <- length(accept.trace) mean.acceptance <- mean(accept.trace[(len-len*0.5):len]) if (mean.acceptance < 0.1 || mean.acceptance > 0.44) done.adapt <- FALSE plot(accept.trace,main=paste0("Acceptace Rate for ",a),xlab="Samples",ylab="Acceptance Rate",type="l") } acfCSP(parameter,csp="Selection",numMixtures = numMixtures,samples=samples*percent.to.keep) acfCSP(parameter,csp="Mutation",numMixtures = numMixtures,samples=samples*percent.to.keep) dev.off() for (i in 1:numMixtures) { param.diag<-convergence.test(trace,samples=samples*percent.to.keep,thin = thinning,what="Selection",mixture=i,frac1=0.1) z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)] if (length(z.scores) > 0) { param.conv <- FALSE } write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_eta_",i,".txt"),ncolumns = 1) } for (i in 1:numMixtures) { param.diag<-convergence.test(trace,samples=samples*percent.to.keep,thin = thinning,what="Mutation",mixture=i,frac1=0.1) z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)] if (length(z.scores) > 0) { param.conv <- FALSE } write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_m_",i,".txt"),ncolumns = 1) } writeParameterObject(parameter,paste(dir_name,"R_objects/parameter.Rda",sep="/")) writeMCMCObject(mcmc,file=paste(dir_name,"R_objects/mcmc.Rda",sep="/")) diag <- convergence.test(mcmc,samples = samples*percent.to.keep,thin=thinning,frac1=0.2) z<-abs(diag$z) done <- (z > 1.96) && param.conv rm(parameter) rm(trace) rm(model) while((!done) && (run_number <= 3)) { parameter<-initializeParameterObject(init.with.restart.file = paste(dir_name,"Restart_files/rstartFile.rst_final",sep="/"),model="ROC") run_number <- run_number + 1 dir_name <- paste0(directory,"/run_",run_number) dir.create(dir_name) dir.create(paste(dir_name,"Graphs",sep="/")) dir.create(paste(dir_name,"Restart_files",sep="/")) dir.create(paste(dir_name,"Parameter_est",sep="/")) dir.create(paste(dir_name,"R_objects",sep="/")) mcmc <- initializeMCMCObject(samples=samples, thinning=thinning, adaptive.width=adaptiveWidth, est.expression=T, est.csp=TRUE, est.hyper=F,est.mix=FALSE) model <- initializeModelObject(parameter, "ROC", with.phi) setRestartSettings(mcmc, paste(dir_name,"Restart_files/rstartFile.rst",sep="/"), adaptiveWidth, F) sys.runtime <- system.time( runMCMC(mcmc, genome, model, num_threads,div=0) ) sys.runtime <- data.frame(Value=names(sys.runtime),Time=as.vector(sys.runtime)) write.table(sys.runtime,file=paste(dir_name,"mcmc_runtime.csv",sep="/"),sep=",",col.names = T,row.names = T,quote=F) createParameterOutput(parameter = parameter,numMixtures = numMixtures,samples = samples,mixture.labels = mixture.labels,samples.percent.keep = percent.to.keep,relative.to.optimal.codon = F,report.original.ref = T) 
  expressionValues <- getExpressionEstimates(parameter,c(1:size),samples*percent.to.keep)
  write.table(expressionValues,file=paste(dir_name,"Parameter_est/gene_expression.txt",sep="/"),sep=",",col.names = T,quote = F,row.names = F)

  # #plots different aspects of trace
  trace <- parameter$getTraceObject()
  pdf(paste(dir_name,"Graphs/mcmc_traces.pdf",sep="/"))
  plot(mcmc,what = "LogPosterior")
  plot(trace, what = "ExpectedPhi")
  aa <- aminoAcids()
  done.adapt <- TRUE
  for(a in aa)
  {
    if (a=="M"||a=="X"||a=="W") next
    accept.trace <- trace$getCodonSpecificAcceptanceRateTraceForAA(a)
    len <- length(accept.trace)
    mean.acceptance <- mean(accept.trace[(len-len*0.5):len])
    if (mean.acceptance < 0.1 || mean.acceptance > 0.44) done.adapt <- FALSE
    plot(accept.trace,main=paste0("Acceptance Rate for ",a),xlab="Samples",ylab="Acceptance Rate",type="l")
  }
  acfCSP(parameter,csp="Selection",numMixtures = numMixtures,samples=samples*percent.to.keep)
  acfCSP(parameter,csp="Mutation",numMixtures = numMixtures,samples=samples*percent.to.keep)
  dev.off()

  param.conv <- TRUE
  for (i in 1:numMixtures)
  {
    param.diag<-convergence.test(trace,samples=samples*percent.to.keep,thin = thinning,what="Selection",mixture=i,frac1=0.1)
    z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)]
    if (length(z.scores) > 0)
    {
      param.conv <- FALSE
    }
    write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_eta_",i,".txt"),ncolumns = 1)
  }
  for (i in 1:numMixtures)
  {
    param.diag<-convergence.test(trace,samples=samples*percent.to.keep,thin = thinning,what="Mutation",mixture=i,frac1=0.1)
    z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)]
    if (length(z.scores) > 0)
    {
      param.conv <- FALSE
    }
    write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_m_",i,".txt"),ncolumns = 1)
  }

  pdf(paste(dir_name,"Graphs/Parameter_comparisons.pdf",sep="/"), width = 11, height = 12)
  plot(parameter,what="Mutation",samples=samples*percent.to.keep,mixture.name=mixture.labels)
  plot(parameter,what="Selection",samples=samples*percent.to.keep,mixture.name=mixture.labels)
  dev.off()

  pdf(paste(dir_name,"Graphs/CSP_traces_CUB_plot.pdf",sep="/"), width = 11, height = 12)
  createTracePlots(trace=trace,model=model,genome=genome,numMixtures=numMixtures,samples=samples,samples.percent.keep = percent.to.keep,mixture.labels = mixture.labels)
  dev.off()

  writeParameterObject(parameter,paste(dir_name,"R_objects/parameter.Rda",sep="/"))
  writeMCMCObject(mcmc,file=paste(dir_name,"R_objects/mcmc.Rda",sep="/"))

  diag <- convergence.test(mcmc,samples = samples*percent.to.keep,thin=thinning,frac1=0.1)
  z<-abs(diag$z)
  done <- (z < 1.96) && param.conv

  rm(parameter)
  rm(trace)
  rm(model)
}

samples <- 10000
thinning <- 5
parameter<-initializeParameterObject(init.with.restart.file = paste(dir_name,"Restart_files/rstartFile.rst_final",sep="/"),model="ROC")
run_number <- run_number + 1
dir_name <- paste0(directory,"/final_run")
dir.create(dir_name)
dir.create(paste(dir_name,"Graphs",sep="/"))
dir.create(paste(dir_name,"Restart_files",sep="/"))
dir.create(paste(dir_name,"Parameter_est",sep="/"))
dir.create(paste(dir_name,"R_objects",sep="/"))
mcmc <- initializeMCMCObject(samples=samples, thinning=thinning, adaptive.width=adaptiveWidth, est.expression=TRUE, est.csp=TRUE, est.hyper=F,est.mix=FALSE)
mcmc$setStepsToAdapt(0)
model <- initializeModelObject(parameter, "ROC", with.phi)
setRestartSettings(mcmc, paste(dir_name,"Restart_files/rstartFile.rst",sep="/"), adaptiveWidth, F)

#run mcmc on genome with parameter using model
#p<-profmem({
sys.runtime <- system.time(
  runMCMC(mcmc, genome, model, num_threads)
)
sys.runtime <- data.frame(Value=names(sys.runtime),Time=as.vector(sys.runtime))
write.table(sys.runtime,file=paste(dir_name,"mcmc_runtime.csv",sep="/"),sep=",",col.names = T,row.names = T,quote=F)

createParameterOutput(parameter = parameter,numMixtures = numMixtures,samples = samples,mixture.labels = mixture.labels,samples.percent.keep = 1,relative.to.optimal.codon = F,report.original.ref = T)
# mixtureAssignment <- getMixtureAssignmentEstimate(parameter,c(1:size),samples*0.5)
expressionValues <- getExpressionEstimates(parameter,c(1:size),samples)
write.table(expressionValues,file=paste(dir_name,"Parameter_est/gene_expression.txt",sep="/"),sep=",",col.names = T,quote = F,row.names = F)

writeParameterObject(parameter,paste(dir_name,"R_objects/parameter.Rda",sep="/"))
writeMCMCObject(mcmc,file=paste(dir_name,"R_objects/mcmc.Rda",sep="/"))

#plots different aspects of trace
trace <- parameter$getTraceObject()
pdf(paste(dir_name,"Graphs/mcmc_traces.pdf",sep="/"))
plot(mcmc,what = "LogPosterior")
plot(trace, what = "ExpectedPhi")
acfCSP(parameter,csp="Selection",numMixtures = numMixtures,samples=samples)
acfCSP(parameter,csp="Mutation",numMixtures = numMixtures,samples=samples)
dev.off()

pdf(paste(dir_name,"Graphs/Parameter_comparisons.pdf",sep="/"), width = 11, height = 12)
plot(parameter,what="Mutation",samples=samples,mixture.name=mixture.labels)
plot(parameter,what="Selection",samples=samples,mixture.name=mixture.labels)
dev.off()

pdf(paste(dir_name,"Graphs/CSP_traces_CUB_plot.pdf",sep="/"), width = 11, height = 12)
createTracePlots(trace=trace,model=model,genome=genome,numMixtures=numMixtures,samples=samples,samples.percent.keep = 1,mixture.labels = mixture.labels)
dev.off()

param.conv <- TRUE
for (i in 1:numMixtures)
{
  param.diag<-convergence.test(trace,samples=samples,thin = thinning,what="Selection",mixture=i,frac1=0.1)
  z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)]
  if (length(z.scores) > 0)
  {
    param.conv <- FALSE
  }
  write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_eta_",i,".txt"),ncolumns = 1)
}
for (i in 1:numMixtures)
{
  param.diag<-convergence.test(trace,samples=samples,thin = thinning,what="Mutation",mixture=i,frac1=0.1)
  z.scores <- param.diag$z[which(abs(param.diag$z) > 1.96)]
  if (length(z.scores) > 0)
  {
    param.conv <- FALSE
  }
  write(param.diag$z,paste0(dir_name,"/Parameter_est/convergence_delta_m_",i,".txt"),ncolumns = 1)
}

rm(trace)
rm(model)
/MikesWork/2020/07/28_Plot.Output.from.Fit.ROC.to.Sequences/plotROC.R
no_license
mikegilchrist/AcrossTissue
R
false
false
15,603
r
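A hypothetical command line for the argparse interface defined at the top of the script; the paths and values are illustrative only:

# Hypothetical invocation; adjust the paths to your FASTA folder and output directory.
# Rscript plotROC.R --input ./fasta/ --output ./results --div 4 --samp 1000 --adapt 100 --thin 10 --threads 4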
#Quandl Data YAHOO/INDEX_GSPC
library("Quandl")
library("xts")  # needed for the xts() call below
setwd("C:/Users/Rex/Documents/Quant Trading/GRAT")
spx <- Quandl("YAHOO/INDEX_GSPC", authcode='UrhC6e98rWr8wGppq2LF')
saveRDS(spx,file="spx.rds")
spx.xts <- xts(spx$Close,order.by=spx$Date)
saveRDS(spx.xts,file="spx_xts.rds")
/get_spx_data.r
no_license
rexmacey/GRAT
R
false
false
273
r
#' Generate SQL to CREATE TABLE
#'
#' This function uses the output from \code{set_pgfields()} to generate a SQL \code{CREATE TABLE} statement
#' that can be used to create the skeleton table in Postgres. The result can be copied and pasted for use.
#'
#' NOTE: \code{write_pg()} does not use the SQL statement to write to PostgreSQL, but solely uses the result
#' from \code{set_pgfields()}
#'
#' @param pg_fields a named \code{character} vector or a named \code{list} of named \code{character} vectors
#' @param schema an optional argument to specify the desired schema for \code{CREATE TABLE}
#' @param pkey a \code{character} string specifying the primary key for the Postgres table (used in the PRIMARY KEY CONSTRAINT)
#' @param tbl_name a required option if the \code{pg_fields} argument is a single \code{character} vector
#' @param export a \code{logical} option to export the result as a binary file (currently unused)
#' @param path a \code{file path} option to specify the write location of the binary file (currently unused)
#' @param ... other arguments passed to \code{\link{glue_sql}()}
#'
#' @return results in a SQL statement to \code{CREATE TABLE}. See \code{DBI::SQL}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' nchar_df <- get_nchar(iris)
#'
#' my_pgfields <- set_pgfields(nchar_df, conn = local_con_test)
#'
#' get_sql_create(my_pgfields, pkey = "Species", tbl_name = "iris")
#' }
get_sql_create <- function(
  pg_fields,
  schema = "public",
  pkey = NULL,
  tbl_name = NULL,
  export = FALSE,
  path = NULL,
  ...) {

  if (missing(pg_fields)) stop("requires input to be provided")

  if (inherits(pg_fields, "list")) {
    if (!any(pkey %in% unlist(purrr::map(pg_fields, names)))) stop("requires pkey to be provided")

    out <- purrr::map(names(pg_fields), function(nombres) {
      glue::glue_sql("CREATE TABLE ", schema, ".", nombres, " (",
                     paste0(names(pg_fields[[nombres]]), " ", pg_fields[[nombres]], ", ", collapse = " "),
                     " CONSTRAINT ", paste0(nombres, "_pkey"),
                     " PRIMARY KEY (", pkey, ")",
                     ");", ...)
    })
    names(out) <- names(pg_fields)
  }

  if (inherits(pg_fields, "character")) {
    if (!any(pkey %in% names(pg_fields))) stop("requires pkey to be provided")
    if (missing(tbl_name)) stop("requires table name to be provided")

    out <- glue::glue_sql("CREATE TABLE ", schema, ".", tbl_name, " (",
                          paste0(names(pg_fields), " ", paste0(pg_fields, ", "), collapse = " "),
                          " CONSTRAINT ", paste0(tbl_name, "_pkey"),
                          " PRIMARY KEY (", pkey, ")",
                          ");", ...)
  }

  return(out)
}
/R/get_sql_create.R
no_license
eugejoh/pgtools
R
false
false
2,579
r
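A sketch of get_sql_create() on a single named vector; the field types here are invented for illustration (set_pgfields() would normally produce them), and 'con' is a hypothetical DBI connection passed through the dots because glue_sql() requires a .con argument:

# Hypothetical field definitions; normally the output of set_pgfields().
fields <- c(id = "integer", name = "varchar(50)", measured_at = "date")
get_sql_create(fields, schema = "public", pkey = "id", tbl_name = "samples", .con = con)
# Expected shape of the result (one SQL string):
# CREATE TABLE public.samples (id integer, name varchar(50), measured_at date,
#   CONSTRAINT samples_pkey PRIMARY KEY (id));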
structure(list(url = "https://play.dhis2.org/2.33.4/api/organisationUnitGroupSets.json?paging=false&fields=organisationUnitGroups[name]", status_code = 200L, headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 11:08:14 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"0f390a2465eb2e4c56987a6414d802740\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")), all_headers = list(list(status = 302L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 11:08:14 GMT", `content-type` = "text/html", `content-length` = "145", connection = "keep-alive", location = "https://play.dhis2.org/2.33.4/api/organisationUnitGroupSets.json?paging=false&fields=organisationUnitGroups[name]", `strict-transport-security` = "max-age=15768000"), class = c("insensitive", "list"))), list(status = 200L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 11:08:14 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"0f390a2465eb2e4c56987a6414d802740\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")))), cookies = structure(list(domain = "#HttpOnly_play.dhis2.org", flag = FALSE, path = "/2.33.4", secure = TRUE, expiration = structure(Inf, class = c("POSIXct", "POSIXt")), name = "JSESSIONID", value = "REDACTED"), row.names = c(NA, -1L), class = "data.frame"), content = charToRaw("{\"organisationUnitGroupSets\":[{\"organisationUnitGroups\":[{\"name\":\"Eastern Area\"},{\"name\":\"Northern Area\"},{\"name\":\"Southern Area\"},{\"name\":\"Western Area\"}]},{\"organisationUnitGroups\":[{\"name\":\"NGO\"},{\"name\":\"Public facilities\"},{\"name\":\"Private Clinic\"},{\"name\":\"Mission\"}]},{\"organisationUnitGroups\":[{\"name\":\"CHP\"},{\"name\":\"CHC\"},{\"name\":\"MCHP\"},{\"name\":\"Clinic\"},{\"name\":\"Hospital\"}]},{\"organisationUnitGroups\":[{\"name\":\"Urban\"},{\"name\":\"Rural\"}]}]}"), date = structure(1591960094, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0.126347, namelookup = 2.7e-05, connect = 3e-05, pretransfer = 7e-05, starttransfer = 0.053291, total = 0.179707)), class = "response")
/tests/testthat/play.dhis2.org/2.33/api/organisationUnitGroupSets.json-4a9e62.R
permissive
pepfar-datim/datimutils
R
false
false
2,994
r
# Requires the rgl package for the 3D calls (clear3d, box3d, the rgl plot methods).
# kms(), pt.col(), hsct.rgl(), DrawLineKanji() / DrawLineKanjiWithCorners(), as well as
# the filtered.* objects and kanji.line.data, are assumed to be defined elsewhere in this project.
library(rgl)

filtered.matrix.kms <- kms(x = filtered.matrix, verbose = T)

xlab <- "x"
ylab <- "y"
zlab <- "z"
xlim <- c(0, max(filtered.matrix[, 1]))
ylim <- c(0, max(filtered.matrix[, 2]))
zlim <- c(0, max(filtered.matrix[, 3]))

plot(filtered.matrix.kms)

clear3d()
plot(filtered.matrix.kms, col=pt.col((1:filtered.matrix.kms$nclust)*2), splom=FALSE, size=8,
     axes=FALSE, alpha=(filtered.matrix.kms$label+1)^1.5/40, asp=1,
     xlim=xlim, ylim=ylim, zlim=zlim, xlab=xlab, ylab=ylab, zlab="")
hsct.rgl(zlab=zlab)
box3d()

filtered.lines
filtered.matrix

kanji.number = filtered.lines[1, 6]
kanji.indices = which(filtered.lines[ , 6] == kanji.number)
cluster.labels = filtered.matrix.kms$label[kanji.indices]

filtered.matrix.kms$label
filtered.lines[kanji.indices, ]

# DrawLineKanji(kanji.number, kanji.line.data)
DrawLineKanjiWithCorners(kanji.number, kanji.line.data, cbind(cluster.labels, filtered.lines[kanji.indices , 7:8]))
/visualizing_kms_clustering.R
no_license
torebre/kanjiR
R
false
false
919
r
null.from_template = function(w, template, reps){
  # Subset the template web to the species present in w and rescale to [0, 1]
  temp = template[rownames(w), colnames(w)]
  temp = temp / max(temp)
  nulls_mw = null.metaweb(temp, reps)
  nulls_de = null.degrees(temp, reps)
  nulls_co = null.connectance(temp, reps)
  # return() takes a single value, so wrap the three sets of null webs in a list
  return(list(I = nulls_co, II = nulls_de, MW = nulls_mw))
}
/R/null.from_template.r
no_license
ibartomeus/betalink
R
false
false
280
r
# The functions that we want to use are in the `dplyr` and `tidyr` packages for
# the most part. But since all of these packages work together, it is simplest
# just to load the `tidyverse`.

# ---
# title: "data_manipulation"
# author: "Dennis Kelly"
# ---

library(tidyverse)
library(historydata)

# First an excursus about the pipe %>%, which lets us write function calls like g(f(x)), which are read from inside out,
# as calls like x %>% f() %>% g(), which are read from left to right.
num <- 1:10
mean(num)
round(mean(num))

num %>% mean()
num %>% mean() %>% round()

# dplyr gives us verbs to deal with data. Here is the data we are going to use.
data("paulist_missions")
paulist_missions

# 1. select(): lets us pick the columns we want.
paulist_missions %>%
  select(city, state, confessions, converts)

paulist_missions %>%
  select(church, date_start, date_end)

paulist_missions %>%
  select(-mission_number, -volume, -page)

paulist_missions %>%
  select(city, state, starts_with("date_"))

# 2. filter(): lets us select the rows we want according to some conditional
# expression. Remember, a conditional expression returns TRUE/FALSE values.
paulist_missions %>%
  filter(confessions > 10000)

paulist_missions %>%
  select(city, state, date_start) %>%
  filter(state == "VA")

paulist_missions %>%
  filter(!is.na(mission_number))

# 3. arrange(): lets us sort according to a column
paulist_missions %>%
  select(church, date_start, confessions, converts) %>%
  arrange(confessions)

paulist_missions %>%
  select(church, date_start, confessions, converts) %>%
  arrange(desc(converts))

# 4. mutate(): lets us add new columns based on existing columns
paulist_missions %>%
  select(church, date_start, confessions, duration_days) %>%
  mutate(confessions_per_day = confessions / duration_days) %>%
  filter(confessions_per_day < Inf) %>%
  arrange(desc(confessions_per_day))

# Getting the year from a date
library(lubridate)
paulist_missions %>%
  select(church, date_start) %>%
  mutate(year = year(date_start),
         month = month(date_start),
         day = day(date_start))

# 5. summarize() and group_by(): lets us boil data down to one row
paulist_missions %>%
  summarize(confessions_total = sum(confessions, na.rm = TRUE))

# But often we want to get one row per category. Group by lets us do that
paulist_missions %>%
  group_by(state) %>%
  summarize(confessions_total = sum(confessions, na.rm = TRUE))

# We can also count the number of rows using the `n()` function.
paulist_missions %>%
  group_by(state) %>%
  summarize(missions = n(),
            confessions = sum(confessions, na.rm = TRUE))

# Grouping and counting is done so frequently that there is a shortcut
paulist_missions %>%
  count(state)

# 6. pivot_longer(): Go from wide data to long (i.e., tidy) data
data("dijon_prices_wide")
dijon_prices_wide

dijon_prices_wide %>%
  pivot_longer(c(-commodity, -measure), names_to = "year", values_to = "price")

# 7. pivot_wider(): Go from long (i.e., tidy) data to wide data
data("dijon_prices")
dijon_prices %>%
  select(-citation, -citation_date) %>%
  filter(year <= 1572) %>%
  filter(str_detect(commodity, "wine")) %>%
  pivot_wider(names_from = year, values_from = price)

# 8. left_join(): Join two different tables together via a key
library(europop)
data("europop")
data("city_coords")
europop
city_coords

europop %>%
  filter(year == 1500, population > 0, region == "Spain") %>%
  left_join(city_coords)

# More explicitly
europop %>%
  filter(year == 1500, population > 0, region == "Spain") %>%
  left_join(city_coords, by = c("city" = "city"))
/04-data-manipulation.R
no_license
dpkcmc/R-project
R
false
false
3,732
r
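# A self-contained sketch of the group_by()/summarize() pattern from the
# tutorial above, on a toy tibble (hypothetical data) so it runs without
# the historydata package:
library(dplyr)
sales <- tibble(region = c("east", "east", "west", "west", "west"),
                amount = c(10, 20, 5, 15, 25))
sales %>%
  group_by(region) %>%
  summarize(n_rows = n(), total = sum(amount))
# expected result: east has 2 rows totalling 30; west has 3 rows totalling 45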
transition_hmMDP <- function(file_example) {
  # inputs: file name
  # returns: the transition matrix of the hmMDP
  source(file_example)
  data_mean_parameters <- read.csv(file_mean_params, sep = ",")
  Num_S <- nrow(reward)  # number of fully observable states
  Num_a <- ncol(reward)  # number of actions
  mod <- data_mean_parameters$opt
  Num_mod <- length(mod)
  for (act_id in seq(Num_a)) {
    assign(paste0("tr", act_id),
           matrix(0, ncol = Num_mod * Num_S, nrow = Num_mod * Num_S))
  }
  for (i in seq(Num_mod)) {
    mod_id <- mod[i]
    params <- unlist(c(data_mean_parameters[which(data_mean_parameters$opt == mod_id), -1]))
    for (act_id in c(1:Num_a)) {
      tr <- get(paste0("tr", act_id))
      # 2x2 column-wise transition matrix for this model/action pair
      # (assumes Num_S == 2, matching the two parameters read per action)
      mat <- matrix(c(params[(act_id - 1) * 2 + 1], params[(act_id - 1) * 2 + 2],
                      1 - params[(act_id - 1) * 2 + 1], 1 - params[(act_id - 1) * 2 + 2]),
                    nrow = 2)
      # place it in the diagonal block belonging to model i
      tr[seq((i - 1) * Num_S + 1, i * Num_S), seq((i - 1) * Num_S + 1, i * Num_S)] <- mat
      assign(paste0("tr", act_id), tr)
    }
  }
  all_values_tr <- c()
  for (act_id in seq(Num_a)) {
    tr <- get(paste0("tr", act_id))
    all_values_tr <- c(all_values_tr, tr)
  }
  tr_momdp <- array(all_values_tr, dim = c(Num_mod * Num_S, Num_mod * Num_S, Num_a))
  return(tr_momdp)
}

reward_hmMDP <- function(file_example) {
  # inputs: file name
  # returns: the reward matrix of the hmMDP
  source(file_example)
  data_mean_parameters <- read.csv(file_mean_params, sep = ",")
  Num_S <- nrow(reward)  # number of fully observable states
  Num_a <- ncol(reward)  # number of actions
  mod <- data_mean_parameters$opt
  Num_mod <- length(mod)
  # nrow must be passed as a named argument (the original `nrow <- ...`
  # only worked by accidental positional matching)
  rew_momdp <- matrix(0, ncol = Num_a, nrow = Num_mod * Num_S)
  for (act_id in seq(Num_a)) {
    rew_momdp[, act_id] <- rep(reward[, act_id], Num_mod)
  }
  return(rew_momdp)
}

obs_hmMDP <- function(file_example) {
  # inputs: file name
  # returns: the observation matrix of the hmMDP
  source(file_example)
  data_mean_parameters <- read.csv(file_mean_params, sep = ",")
  Num_S <- nrow(reward)  # number of fully observable states
  Num_a <- ncol(reward)  # number of actions
  mod <- data_mean_parameters$opt
  Num_mod <- length(mod)
  # fully observable part: the agent observes the state exactly, so the
  # observation matrix stacks identity blocks, one per hidden model
  obs_momdp <- matrix(rep(c(diag(Num_S)), Num_mod), ncol = Num_S, byrow = T)
  obs_momdp <- array(rep(c(obs_momdp), Num_a), dim = c(Num_mod * Num_S, Num_S, Num_a))
  return(obs_momdp)
}
/src/simulations/build_matrices_hmMDP.R
no_license
conservation-decisions/Universal-Adaptive-Management-Solver
R
false
false
2,415
r
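# A toy sketch of the block-diagonal structure transition_hmMDP() assembles,
# for Num_mod = 2 hidden models and Num_S = 2 observable states (the
# probabilities below are hypothetical):
Num_S <- 2; Num_mod <- 2
tr <- matrix(0, Num_mod * Num_S, Num_mod * Num_S)
tr[1:2, 1:2] <- matrix(c(0.9, 0.3, 0.1, 0.7), nrow = 2)  # model 1 dynamics
tr[3:4, 3:4] <- matrix(c(0.6, 0.2, 0.4, 0.8), nrow = 2)  # model 2 dynamics
tr  # off-diagonal blocks stay zero: the hidden model index never changes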
#' Data related to the Reinhart and Rogoff study of debt and GDP growth
#'
#' The data is taken from Cosma Shalizi's uADA course: \url{http://www.stat.cmu.edu/~cshalizi/uADA/13/hw/11/solutions-11.pdf}
#'
#' @details \itemize{
#'   \item Country. country name
#'   \item Year. year of observation
#'   \item growth. real GDP growth rate
#'   \item ratio. ratio of debt to GDP
#' }
#'
#' @docType data
#' @keywords datasets
#' @name debt
#' @usage debt
#' @format data frame with 1171 rows and 4 variables
#' @references
#' Carmen M. Reinhart and Kenneth S. Rogoff, "Growth in a Time
#' of Debt", American Economic Review 100 (2010): 573-578.
#'
#' Thomas Herndon, Michael Ash and Robert Pollin, "Does High
#' Public Debt Consistently Stifle Economic Growth? A Critique
#' of Reinhart and Rogoff", University of Massachusetts-Amherst,
#' Working Paper 332, 2013.
#' \url{http://www.peri.umass.edu/236/hash/31e2ff374b6377b2ddec04deaa6388b1/publication/566/}
NULL
/R/data-debt.r
no_license
briatte/psData
R
false
false
956
r
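# A hedged usage sketch for the documented dataset, assuming the psData
# package that ships it is installed; the 90% threshold mirrors the
# Reinhart-Rogoff cutoff debated in the references:
library(psData)
data(debt)
debt$high_debt <- debt$ratio >= 90
aggregate(growth ~ high_debt, data = debt, FUN = mean, na.rm = TRUE)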
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions_implementations.R
\name{nimEigen}
\alias{nimEigen}
\alias{eigen}
\title{Spectral Decomposition of a Matrix}
\usage{
nimEigen(x, symmetric = FALSE, only.values = FALSE)
}
\arguments{
\item{x}{a numeric matrix (double or integer) whose spectral decomposition is to be computed.}

\item{symmetric}{if \code{TRUE}, the matrix is guaranteed to be symmetric, and only its lower triangle (diagonal included) is used. Otherwise, the matrix is checked for symmetry. Default is \code{FALSE}.}

\item{only.values}{if \code{TRUE}, only the eigenvalues are computed; otherwise both eigenvalues and eigenvectors are computed. Setting \code{only.values = TRUE} can speed up eigendecompositions, especially for large matrices. Default is \code{FALSE}.}
}
\value{
The spectral decomposition of \code{x} is returned as a \code{nCompilerList} with elements:
\itemize{
\item values. vector containing the eigenvalues of \code{x}, sorted in decreasing order. If \code{x} is symmetric, all eigenvalues will be real numbers.
\item vectors. matrix with columns containing the eigenvectors of \code{x}, or an empty matrix if \code{only.values} is \code{TRUE}.
}
}
\description{
Computes eigenvalues and eigenvectors of a numeric matrix.
}
\details{
Computes the spectral decomposition of a numeric matrix using the Eigen C++ template library. In a nFunction, \code{eigen} is identical to \code{nimEigen}. If the matrix is symmetric, a faster and more accurate algorithm will be used to compute the eigendecomposition. Note that non-symmetric matrices can have complex eigenvalues, which are not supported by nCompiler. If a complex eigenvalue or a complex element of an eigenvector is detected, a warning will be issued and that element will be returned as \code{NaN}.

Additionally, \code{returnType(eigenNimbleList())} can be used within a \link{nFunction} to specify that the function will return a \code{nCompilerList} generated by the \code{nimEigen} function. \code{eigenNimbleList()} can also be used to define a nested \code{nimbleList} element. See the User Manual for usage examples.
}
\examples{
eigenvaluesDemoFunction <- nFunction(
  setup = function() {
    demoMatrix <- diag(4) + 2
  },
  run = function() {
    eigenvalues <- eigen(demoMatrix, symmetric = TRUE)$values
    returnType(double(1))
    return(eigenvalues)
  })
}
\seealso{
\link{nimSvd} for singular value decompositions in nCompiler.
}
\author{
nCompiler development team
}
/nCompiler/man/nimEigen.Rd
permissive
nimble-dev/nCompiler
R
false
true
2,556
rd
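# A base-R analogue of the documented behaviour (a sketch; nimEigen itself
# needs the nCompiler toolchain): a symmetric matrix yields real eigenvalues,
# returned in decreasing order.
m <- diag(4) + 2                     # the matrix from the example above
e <- eigen(m, symmetric = TRUE)
e$values                             # 9 1 1 1 -- all real, sorted decreasing
stopifnot(is.numeric(e$values), !is.unsorted(rev(e$values)))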