content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
\name{setEnvir}
\alias{setEnvir}
\title{
Set Job's environment.
}
\description{
Set a Job's environment. To see the contents of this environment, apply
as.list() to the environment.
If appendAll is set to TRUE, any new variable assignments bearing the same
names as old ones will overwrite the old ones.
}
\usage{
setEnvir(job, env, appendAll=FALSE)
}
\arguments{
\item{job}{A Job object.}
\item{env}{An environment or list of assignments}
\item{appendAll}{Should the variable assignments be appended to those already
in the Job's environment?}
}
| /man/setEnvir.Rd | no_license | lmoryl/jobQueue | R | false | false | 561 | rd | \name{setEnvir}
\alias{setEnvir}
\title{
Set Job's environment.
}
\description{
Set a Job's environment. To see the contents of this environment, apply
as.list() to the environment.
If appendAll is set to TRUE, any new variable assignments bearing the same
names as old ones will overwrite the old ones.
}
\usage{
setEnvir(job, env, appendAll=FALSE)
}
\arguments{
\item{job}{A Job object.}
\item{env}{An environment or list of assignments}
\item{appendAll}{Should the variable assignments be appended to those already
in the Job's environment?}
}
|
# Scatter plot of two paired 2-d estimates (B1, B2) per simulation
# replication, with the true parameter value marked at (1, 1) and red
# arrows drawn from each B1 point to its paired B2 point.
library(ggplot2)
library(grid)

# Columns 3:6 of the simulation-output CSV hold the two 2-d estimates.
d0 <- read.csv("BB_n_100_m_123_Rep_50_C1_0.5_seed_667BB.csv", sep = ",", header = T)
Rep <- nrow(d0)
d0 <- d0[1:Rep,3:6]
names(d0) <- c("B1.x", "B1.y", "B2.x", "B2.y")
R = nrow(d0)
# alpha.v = 1/R^(0.25)
alpha.v = 1      # point transparency
size.v = 6       # point size
center = c(1,1)  # true parameter value, marked by the reference lines below
# d0 = d0[ sample(1:R, 15), ]
#########################
if (exists("p")) rm(p)  # fix: bare rm(p) warned on the first run, before p existed
p <- ggplot(data = d0) + geom_point(aes(x = B1.x, y = B1.y), size = size.v, alpha = alpha.v )
p <- p + geom_point(aes(x = B2.x, y = B2.y), col = "red", size = size.v, alpha = alpha.v)
p <- p + geom_vline(xintercept = 1) + geom_hline(yintercept = 1)
# fix: plotmath subscripts are written beta[1]; expression(beta_1) rendered
# the literal name "beta_1" instead of a subscripted Greek beta
p <- p + labs(x = expression(beta[1]), y = expression(beta[2]))
# a = 0.02
#
# x.lim = max( abs( quantile(c(d0$B1.x, d0$B2.x), c(a, 1-a) ) - center[1] ) )
# y.lim = max( abs( quantile(c(d0$B1.y, d0$B2.y), c(a, 1-a) ) - center[2] ) )
# xy.lim = max(x.lim, y.lim)
# p <- p + xlim(c(-xy.lim, xy.lim) + center[1]) + ylim( c(-xy.lim, xy.lim) + center[2])
p
#############################333
p <- p + geom_segment(data = d0, aes(x = B1.x, y = B1.y, xend = B2.x, yend = B2.y ), arrow = arrow(), col = "red")
p
| /sensitivity_IV/BB.R | no_license | zhentaoshi/REL | R | false | false | 1,109 | r |
# Scatter plot of two paired 2-d estimates (B1, B2) per simulation
# replication, with reference lines at the true value (1, 1) and red arrows
# from each B1 point to its paired B2 point.
library(ggplot2)
library(grid)
# columns 3:6 of the simulation-output CSV hold the two 2-d estimates
d0 <- read.csv("BB_n_100_m_123_Rep_50_C1_0.5_seed_667BB.csv", sep = ",", header = T)
Rep <- nrow(d0)
d0 <- d0[1:Rep,3:6]
names(d0) <- c("B1.x", "B1.y", "B2.x", "B2.y")
R = nrow(d0)
# alpha.v = 1/R^(0.25)
alpha.v = 1
size.v = 6
center = c(1,1)
# d0 = d0[ sample(1:R, 15), ]
#########################
# NOTE(review): rm(p) warns on the first run, before any `p` exists
rm(p)
p <- ggplot(data = d0) + geom_point(aes(x = B1.x, y = B1.y), size = size.v, alpha = alpha.v )
p <- p + geom_point(aes(x = B2.x, y = B2.y), col = "red", size = size.v, alpha = alpha.v)
p <- p + geom_vline(xintercept = 1) + geom_hline(yintercept = 1)
# NOTE(review): plotmath subscripts are written beta[1]; expression(beta_1)
# renders the literal name "beta_1" -- confirm the intended axis labels
p <- p + labs(x = expression(beta_1), y = expression(beta_2))
# a = 0.02
#
# x.lim = max( abs( quantile(c(d0$B1.x, d0$B2.x), c(a, 1-a) ) - center[1] ) )
# y.lim = max( abs( quantile(c(d0$B1.y, d0$B2.y), c(a, 1-a) ) - center[2] ) )
# xy.lim = max(x.lim, y.lim)
# p <- p + xlim(c(-xy.lim, xy.lim) + center[1]) + ylim( c(-xy.lim, xy.lim) + center[2])
p
#############################333
p <- p + geom_segment(data = d0, aes(x = B1.x, y = B1.y, xend = B2.x, yend = B2.y ), arrow = arrow(), col = "red")
p
|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Create a cache-aware matrix wrapper: a list of closures sharing an
# environment that holds the matrix `x` and its (lazily computed) inverse.
#   set(y)        -- replace the stored matrix and invalidate the cache
#   get()         -- return the stored matrix
#   setinverse(i) -- store a computed inverse
#   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y       # rebind the matrix in the enclosing environment
      inv <<- NULL  # any previously cached inverse is now stale
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
# Return the inverse of a cache-matrix created by makeCacheMatrix().
# If the inverse was already computed (and the matrix unchanged since),
# the cached value is returned and the computation skipped; otherwise it
# is computed with solve(), stored in the cache, and returned.
#
# x   : a cache-matrix list as produced by makeCacheMatrix()
# ... : further arguments forwarded to solve()
#
# Assumes the stored matrix is square and invertible.
cacheSolve <- function(x, ...) {
    inv <- x$getinverse()
    if(!is.null(inv)) {
        message("getting cached data.")
        return(inv)
    }
    data <- x$get()
    inv <- solve(data, ...)  # fix: '...' was accepted by the signature but never forwarded
    x$setinverse(inv)
    inv
}
| /cachematrix.R | no_license | jasminkak/ProgrammingAssignment2 | R | false | false | 1,234 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Cache-aware matrix wrapper: returns a list of closures sharing an
# environment that holds the matrix `x` and its lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
# rebind matrix in the enclosing environment; invalidate the cache
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
# NOTE(review): '...' is accepted in the signature but never forwarded to
# solve() below -- confirm whether solve(data, ...) was intended.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
|
library("quantmod")
library("parallel")
library("rugarch")
library("datasets")
library("crayon")
library("forecast")
#Define a function to delete NAs.There will be NAs in the retreived data from data source.
# Drop data-frame rows containing more than `n` missing values
# (default n = 0: keep only rows with no NAs at all).
delete.na <- function(DF, n=0) {
  na.count.per.row <- rowSums(is.na(DF))
  DF[na.count.per.row <= n, ]
}
# Function that selects the optimum AR(p) and MA(q) parameters and the Integration (d) value.
# Grid-search ARIMA order selection: fits arima(p,d,q) by maximum likelihood
# for every p in 0..6, d in 1..2, q in 0..6 (98 candidate models) and
# returns c(p,d,q) of the model minimizing the supplied information
# criterion function `infoCrea` (e.g. AIC or BIC).
# NOTE(review): d = 0 is never tried -- confirm differencing is always wanted.
# NOTE(review): any non-converging arima() fit will stop the search (no tryCatch).
orderSelect <- function(df.train, infoCrea){
final.ic = Inf
final.order.ic = c(0,0,0)
for (p in 0:6) for (d in 1:2) for (q in 0:6) {
# very large maxit so the ML optimizer is effectively unconstrained
mod.ic = arima(df.train,order = c(p,d,q), method='ML',optim.control = list(maxit = 9999999), include.mean = TRUE)
current.ic = infoCrea(mod.ic)
if (current.ic < final.ic) {
final.ic = current.ic
final.order.ic = c(p,d,q)
# NOTE(review): fit.ic is assigned but never used or returned
fit.ic = mod.ic
}
}
return(final.order.ic)
}
# Function that calculates the out of sample recursive RMSE value.
# SYMBOL: First Component of the function is the symbol for the time series data
# SOURCE: Second Componenet of the function which is the data source (E.g. FRED, YAHOO)
# testRatio: Third Component of the function which the train set / test set ratio
# infoCrea: Information criteria that will be used to choose optimum lags for the ARIMA model
# (E.g. Akaike Information Criterion (AIC), Bayesian Information Criterion (BIC))
# One-step-ahead recursive out-of-sample forecast errors for an ARIMA model.
# SYMBOL    : data-source symbol for the series (e.g. "DEXUSEU")
# SOURCE    : quantmod data source (e.g. "FRED")
# testRatio : fraction of the sample at which the rolling forecasts begin
# infoCrea  : information-criterion function used by orderSelect (AIC/BIC)
# At each step i the order is re-selected and the model re-fit on
# observations 1..(i-1); the one-step forecast error (prediction minus
# actual) is appended.  Despite its name, `rmse` holds raw forecast
# errors here -- the actual RMSE is computed by the caller below.
recursive <- function(SYMBOL, SOURCE,testRatio, infoCrea) {
df = getSymbols(SYMBOL,src= SOURCE,auto.assign = getOption('loadSymbols.auto.assign',FALSE))
df = as.vector(delete.na(df))
# leading dummy 0 so c() can append; it is dropped on return
rmse = 0
for (i in round(length(df)*testRatio):length(df)){
df.train = df[0:(i-1)]
df.test = df[i]
pqorder = orderSelect(df.train, infoCrea)
p = pqorder[1]
d = pqorder[2]
q = pqorder[3]
mod.ic = arima(df.train,order = c(p,d,q), method='ML',optim.control = list(maxit = 9999999), include.mean = TRUE)
rmse = c(rmse, predict(mod.ic,1)$pred - df.test)
print(rmse)
}
# drop the leading dummy 0
return(rmse[2:length(rmse)])
}
# Script entry: recursive one-step forecast errors for USD/EUR (FRED:
# DEXUSEU), then collapse them into a single out-of-sample RMSE.
rmse = recursive("DEXUSEU", "FRED", 0.5, AIC)
rmse = sqrt ( mean( rmse ^ 2 ) )
print(rmse)
| /ARIMA(p,d,q).AIC.R | no_license | unalunsal/TimeSeriesAnalysis | R | false | false | 2,135 | r |
library("quantmod")
library("parallel")
library("rugarch")
library("datasets")
library("crayon")
library("forecast")
#Define a function to delete NAs.There will be NAs in the retreived data from data source.
# Drop data-frame rows containing more than `n` missing values (n = 0: keep
# only complete rows).
delete.na <- function(DF, n=0) {
DF[rowSums(is.na(DF)) <= n,]
}
# Function that selects the optimum AR(p) and MA(q) parameters and the Integration (d) value.
# Grid search over p in 0..6, d in 1..2, q in 0..6 by ML; returns c(p,d,q)
# minimizing the supplied information criterion.
# NOTE(review): d = 0 is never tried; fit.ic is assigned but never used.
orderSelect <- function(df.train, infoCrea){
final.ic = Inf
final.order.ic = c(0,0,0)
for (p in 0:6) for (d in 1:2) for (q in 0:6) {
mod.ic = arima(df.train,order = c(p,d,q), method='ML',optim.control = list(maxit = 9999999), include.mean = TRUE)
current.ic = infoCrea(mod.ic)
if (current.ic < final.ic) {
final.ic = current.ic
final.order.ic = c(p,d,q)
fit.ic = mod.ic
}
}
return(final.order.ic)
}
# Function that calculates the out of sample recursive RMSE value.
# SYMBOL: First Component of the function is the symbol for the time series data
# SOURCE: Second Componenet of the function which is the data source (E.g. FRED, YAHOO)
# testRatio: Third Component of the function which the train set / test set ratio
# infoCrea: Information criteria that will be used to choose optimum lags for the ARIMA model
# (E.g. Akaike Information Criterion (AIC), Bayesian Information Criterion (BIC))
# NOTE(review): despite the name, the vector returned holds raw one-step
# forecast errors; the RMSE itself is computed by the caller below.
recursive <- function(SYMBOL, SOURCE,testRatio, infoCrea) {
df = getSymbols(SYMBOL,src= SOURCE,auto.assign = getOption('loadSymbols.auto.assign',FALSE))
df = as.vector(delete.na(df))
# leading dummy 0 so c() can append; dropped on return
rmse = 0
for (i in round(length(df)*testRatio):length(df)){
df.train = df[0:(i-1)]
df.test = df[i]
pqorder = orderSelect(df.train, infoCrea)
p = pqorder[1]
d = pqorder[2]
q = pqorder[3]
mod.ic = arima(df.train,order = c(p,d,q), method='ML',optim.control = list(maxit = 9999999), include.mean = TRUE)
rmse = c(rmse, predict(mod.ic,1)$pred - df.test)
print(rmse)
}
return(rmse[2:length(rmse)])
}
# Script entry: recursive forecast errors for USD/EUR, collapsed to an RMSE.
rmse = recursive("DEXUSEU", "FRED", 0.5, AIC)
rmse = sqrt ( mean( rmse ^ 2 ) )
print(rmse)
|
library(RCyjs)
library(later)
#------------------------------------------------------------------------------------------------------------------------
# One-time setup, guarded so re-sourcing this file is cheap.
if(!exists("parseChromLocString"))
source("~/github/trena/R/utils.R")
if(!exists("tbl.geneInfo"))
tbl.geneInfo <- get(load((system.file(package="TrenaProject", "extdata", "geneInfoTable_hg38.RData"))))
# Load the RNA expression matrix and build two row-wise min/max-normalized
# versions: one on asinh-transformed values, one on raw values.
# NOTE(review): the guard tests `mtx`, but the body never assigns `mtx`
# itself (only mtx.raw / mtx.asinh.norm / mtx.raw.norm), so this block
# re-runs on every source(); showSample()'s colnames(mtx) below depends on
# `mtx` being defined elsewhere -- confirm.
if(!exists("mtx")){
mtx.raw <- get(load("~/github/TrenaProjectErythropoiesis/prep/import/rnaFromMarjorie/mtx-rna.RData"))
mtx.asinh <- asinh(mtx.raw)
# rescale each row (gene) to [0, 1]
mtx.asinh.norm <- t(apply(mtx.asinh, 1, function(row) {
row.min <- min(row)
row.max <- max(row)
new.values <- (row-row.min)/(row.max - row.min)
new.values}))
mtx.raw.norm <- t(apply(mtx.raw, 1, function(row) {
row.min <- min(row)
row.max <- max(row)
new.values <- (row-row.min)/(row.max - row.min)
new.values}))
}
#------------------------------------------------------------------------------------------------------------------------
# Column names a regulatory-regions data.frame must provide before it can be
# turned into graph nodes (validated with stopifnot in the graph builders).
required.regulatoryRegionsColumnNames <- c("motifName", "chrom", "motifStart", "motifEnd", "strand",
"motifScore", "motifRelativeScore", "match",
"distance.from.tss", "tf")
#------------------------------------------------------------------------------------------------------------------------
# Convert one gene-regulatory model into a directed graphNEL with edges
# TF -> regulatoryRegion -> target.gene.
#
# target.gene : gene symbol for the single target node
# tbl.gm      : gene-model data.frame (one row per TF; columns validated below)
# tbl.reg     : regulatory-regions data.frame (columns validated below)
#
# Returns the annotated graphNEL.
geneRegulatoryModelToGraph <- function (target.gene, tbl.gm, tbl.reg)
{
   required.geneModelColumnNames <- c("tf", "betaLasso", "lassoPValue", "pearsonCoeff", "rfScore", "betaRidge",
                                      "spearmanCoeff", "bindingSites", "sample")
   stopifnot(all(required.geneModelColumnNames %in% colnames(tbl.gm)))
   stopifnot(all(required.regulatoryRegionsColumnNames %in% colnames(tbl.reg)))
   printf("genes: %d, %d occurences of %d motifs", length(tbl.gm$tf), length(tbl.reg$motifName),
          length(unique(tbl.reg$motifName)))
   g <- graphNEL(edgemode = "directed")
   # declare every node attribute before any nodeData() assignment; the
   # graph package requires attributes to be defined via nodeDataDefaults
   nodeDataDefaults(g, attr = "type") <- "undefined"  # targetGene, tf, footprint
   nodeDataDefaults(g, attr = "label") <- "default node label"
   nodeDataDefaults(g, attr = "distance") <- 0
   nodeDataDefaults(g, attr = "pearson") <- 0
   nodeDataDefaults(g, attr = "rfScore") <- 0  # fix: assigned below but was never declared
   nodeDataDefaults(g, attr = "motif") <- ""
   nodeDataDefaults(g, attr = "xPos") <- 0
   nodeDataDefaults(g, attr = "yPos") <- 0
   nodeDataDefaults(g, attr = "expression") <- 0
   nodeDataDefaults(g, attr = "strength") <- 0
   edgeDataDefaults(g, attr = "edgeType") <- "undefined"
   tfs <- sort(unique(c(tbl.gm$tf, tbl.reg$tf)))
   # one synthetic node name per regulatory region, encoding direction from
   # the TSS, |distance| (zero-padded), match length, and motif name
   regRegions.names <- unlist(lapply(1:nrow(tbl.reg), function(i){
      distance.from.tss <- tbl.reg$distance.from.tss[i]
      region.size <- nchar(tbl.reg$match[i])
      motif.name <- tbl.reg$motifName[i]
      if(distance.from.tss < 0)
         sprintf("%s.fp.downstream.%05d.L%d.%s", target.gene, abs(distance.from.tss), region.size, motif.name)
      else
         sprintf("%s.fp.upstream.%05d.L%d.%s", target.gene, abs(distance.from.tss), region.size, motif.name)
      }))
   tbl.reg$regionName <- regRegions.names
   all.nodes <- unique(c(target.gene, tfs, regRegions.names))
   g <- addNode(all.nodes, g)
   # (fix: removed a stray browser() debug call that halted execution here)
   nodeData(g, target.gene, "type") <- "targetGene"
   nodeData(g, tfs, "type") <- "TF"
   nodeData(g, regRegions.names, "type") <- "regulatoryRegion"
   nodeData(g, all.nodes, "label") <- all.nodes
   nodeData(g, regRegions.names, "label") <- tbl.reg$motifName
   nodeData(g, regRegions.names, "distance") <- tbl.reg$distance.from.tss
   nodeData(g, regRegions.names, "motif") <- tbl.reg$motifName
   # NOTE(review): pairs tbl.gm rows with the sorted 'tfs' vector by
   # position; assumes tbl.gm$tf is ordered identically -- confirm upstream
   nodeData(g, tfs, "pearson") <- tbl.gm$pearsonCoeff
   nodeData(g, tfs, "rfScore") <- tbl.gm$rfScore
   g <- addEdge(tbl.reg$tf, tbl.reg$regionName, g)
   edgeData(g, tbl.reg$tf, tbl.reg$regionName, "edgeType") <- "bindsTo"
   g <- graph::addEdge(tbl.reg$regionName, target.gene, g)
   edgeData(g, tbl.reg$regionName, target.gene, "edgeType") <- "regulatorySiteFor"
   g

} # geneRegulatoryModelToGraph
#------------------------------------------------------------------------------------------------------------------------
# Build one combined directed graphNEL covering several per-sample gene
# models: nodes are the union of all TFs and all "motif:distance" regulatory
# regions across the models, with TF -> region -> targetGene edges
# contributed by each model.
# NOTE(review): only models[[1]]'s randomForest/pearson values end up on the
# TF nodes; the commented-out code below suggests per-model prefixed
# attributes (e.g. "wt.pearson") were planned but never finished.
buildMultiModelGraph <- function (models, targetGene)
{
g <- graphNEL(edgemode = "directed")
model.names <- names(models)
# default value for every node attribute the graph will carry
node.attribute.specs <- list(type="undefined",
label="default node label",
distance=0,
pearson=0,
randomForest=0,
pcaMax=0,
concordance=0,
betaLasso=0,
motif="",
xPos=0,
yPos=0)
edge.attribute.spec <- list(edgeType="undefined")   # NOTE(review): unused
attribute.classes <- c("", model.names) # "" (no prefix) is the currently displayed set of attibutes
# create current version of these attributes, and then
# per-model versions, which get mapped to current
# in response to user's interactive choice on the cyjs user interface
# the "current version" is, e.g., "distance".
# per-model ("wt" and "mut" versions) become "wt.distance" and "mut.distance"
# and are used by copying e.g. all wt.xxx attributes into the current (non-prefixed)
# attribute, upon which the cyjs style is defined
#for(class.name in attribute.classes){
#class.name.prefix <- class.name # with possible "." appended, permits standard and model-specific attributes
#if(nchar(class.name) > 0)
# class.name.prefix <- sprintf("%s.", class.name)
#noa.names.without.prefix <- names(node.attribute.specs)
noa.names <- names(node.attribute.specs)
#noa.names <- sprintf("%s%s", class.name.prefix, noa.names.without.prefix)
#noa.count <- length(node.attribute.specs)
# declare each node-attribute default before any nodeData() assignment
for(noa.name in noa.names){
printf("--- noa.name: %s", noa.name)
#browser()
nodeDataDefaults(g, attr=noa.name) <- node.attribute.specs[noa.name]
}
#for(i in 1:noa.count){
# nodeDataDefaults(g, attr=noa.names[i]) <- node.attribute.specs[[noa.names.without.prefix[i]]]
# }
#} # for class
edgeDataDefaults(g, attr = "edgeType") <- "undefined"
# first pass: collect the union of TF and region node names across models
all.tfs <- c()
all.regulatoryRegions <- c()
for(model in models){ # collect all the tf and regulatory region nodes
tbl.model <- model$model
all.tfs <- unique(c(all.tfs, tbl.model$tf))
tbl.reg <- model$reg
regRegions.with.distance.from.tss <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
all.regulatoryRegions <- unique(c(all.regulatoryRegions, regRegions.with.distance.from.tss))
} # for model
printf("total tfs: %d total regs: %d", length(all.tfs), length(all.regulatoryRegions))
all.nodes <- unique(c(targetGene, all.tfs, all.regulatoryRegions))
g <- addNode(all.nodes, g)
nodeData(g, targetGene, "type") <- "targetGene"
nodeData(g, all.tfs, "type") <- "TF"
nodeData(g, all.regulatoryRegions, "type") <- "regulatoryRegion"
nodeData(g, all.nodes, "label") <- all.nodes
# add edges, edge attribute, and the constant attributes for all of the regulatoryRegion nodes
xyz <- "buildMultiModelGraph, about to interate over models"  # unused debugging breadcrumb
for(model in models){
tbl.model <- model$model
sample <- tbl.model$sample[1]
tbl.reg <- model$reg
printf("==== %s: %d rows", unique(tbl.model$sample), nrow(tbl.model))
tfs <- tbl.reg$tf
regRegions <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
printf("--- working stage %s", sample)
#browser()
xyze <- "addEdge"  # unused debugging breadcrumb
# suppressWarnings: presumably silences duplicate-edge warnings when the
# same TF/region pair occurs in more than one model -- confirm
suppressWarnings(g <- addEdge(tfs, regRegions, g))
edgeData(g, tfs, regRegions, "edgeType") <- "bindsTo"
suppressWarnings(g <- addEdge(regRegions, targetGene, g))
edgeData(g, regRegions, targetGene, "edgeType") <- "regulatorySiteFor"
nodeData(g, regRegions, "label") <- regRegions
nodeData(g, regRegions, "distance") <- tbl.reg$distance.from.tss
nodeData(g, regRegions, "motif") <- tbl.reg$motifName
} # for model
# now copy in the first model's tf node data
tbl.model <- models[[1]]$model
nodeData(g, tbl.model$tf, attr="randomForest") <- tbl.model$rfScore
nodeData(g, tbl.model$tf, attr="pearson") <- tbl.model$pearsonCoeff
# now copy in each of the model's tf node data in turn
#model.names <- names(models)
#for(model.name in model.names){
# tbl.model <- models[[model.name]]$model
# noa.name <- sprintf("%s.%s", model.name, "randomForest")
# nodeData(g, tbl.model$tf, attr=noa.name) <- tbl.model$rfScore
# noa.name <- sprintf("%s.%s", model.name, "pearson")
# nodeData(g, tbl.model$tf, attr=noa.name) <- tbl.model$pearsonCoeff
# } # for model.name
g
} # buildMultiModelGraph
#------------------------------------------------------------------------------------------------------------------------
# Assign layout coordinates (node attributes xPos/yPos) for a gene-model
# graph: regulatory-region ("footprint") nodes are placed along a horizontal
# axis, their distance-to-TSS scaled so the whole span maps to xPos.span
# units; each TF is centered above the footprints it binds, at a random
# height; the target gene sits at (0, -200).
# NOTE(review): TF yPos uses sample(), so the layout is nondeterministic
# unless the caller fixes the RNG seed.
# NOTE(review): a zero span (all footprints at one distance) would make
# footprintLayoutFactor infinite -- confirm inputs exclude that case.
addGeneModelLayout <- function (g, xPos.span=1500)
{
printf("--- addGeneModelLayout")
all.distances <- sort(unique(unlist(nodeData(g, attr='distance'), use.names=FALSE)))
print(all.distances)
fp.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "regulatoryRegion")]
tf.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "TF")]  # NOTE(review): unused
targetGene.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "targetGene")]
# add in a zero in case all of the footprints are up or downstream of the 0 coordinate, the TSS
span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="distance"))))
span <- max(span.endpoints) - min(span.endpoints)
footprintLayoutFactor <- 1
printf("initial: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor)
# scale factor mapping genomic distance onto xPos.span layout units
footprintLayoutFactor <- xPos.span/span
#if(span < 600) #
# footprintLayoutFactor <- 600/span
#if(span > 1000)
# footprintLayoutFactor <- span/1000
printf("corrected: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor)
xPos <- as.numeric(nodeData(g, fp.nodes, attr="distance")) * footprintLayoutFactor
yPos <- 0
nodeData(g, fp.nodes, "xPos") <- xPos
nodeData(g, fp.nodes, "yPos") <- yPos
adjusted.span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="xPos"))))
printf("raw span of footprints: %d footprintLayoutFactor: %f new span: %8.0f",
span, footprintLayoutFactor, abs(max(adjusted.span.endpoints) - min(adjusted.span.endpoints)))
tfs <- names(which(nodeData(g, attr="type") == "TF"))
for(tf in tfs){
footprint.neighbors <- edges(g)[[tf]]
if(length(footprint.neighbors) > 0){
footprint.positions <- as.integer(nodeData(g, footprint.neighbors, attr="xPos"))
# center the TF over the footprints it binds
new.xPos <- mean(footprint.positions)
# debugging hooks left in place: drop into the browser on undefined means
if(is.na(new.xPos)) browser()
if(is.nan(new.xPos)) browser()
#printf("%8s: %5d", tf, new.xPos)
}
else{
new.xPos <- 0
}
nodeData(g, tf, "yPos") <- sample(300:1200, 1)
nodeData(g, tf, "xPos") <- new.xPos
} # for tf
nodeData(g, targetGene.nodes, "xPos") <- 0
nodeData(g, targetGene.nodes, "yPos") <- -200
g
} # addGeneModelLayout
#------------------------------------------------------------------------------------------------------------------------
# Ad-hoc interactive test: build and display the stage-1 GATA2 model.
# Relies on a global RCyjs connection `rcy` and a local RData file; ends in
# browser() for manual inspection -- this is not an automated test.
test_geneRegulatoryModelToGraph <- function()
{
printf("--- test_geneRegulatoryModelToGraph")
all.stages <- get(load("modelsAndRegionsAllSamples.RData"))
tbl.model <- all.stages[[1]]$model
tbl.reg <- all.stages[[1]]$regulatoryRegions
# keep only regions whose TF actually appears in the model
tbl.reg <- subset(tbl.reg, tf %in% tbl.model$tf)
graph <- geneRegulatoryModelToGraph("GATA2", tbl.model, tbl.reg)
graph.lo <- addGeneModelLayout(graph)
# rcy <- RCyjs()
deleteGraph(rcy)
addGraph(rcy, graph.lo)
loadStyleFile(rcy, "trenaStyle.js")
fit(rcy, 200)
browser()
xyz <- 99
} # test_geneRegulatoryModelToGraph
#------------------------------------------------------------------------------------------------------------------------
# Display only the nodes belonging to one differentiation stage: every TF
# and motif node contributed by any stage is hidden first, then the chosen
# stage's nodes (plus the target gene) are shown.  Uses the global RCyjs
# connection `rcy`.
showStage <- function(all.stages, stageNumber, targetGene)
{
   # union, in first-appearance order, of all nodes any stage could display
   per.stage.nodes <- lapply(all.stages,
                             function(stage) c(stage$model$gene, stage$regulatoryRegions$motif))
   every.stage.node <- unique(unlist(per.stage.nodes, use.names=FALSE))
   chosen <- all.stages[[stageNumber]]
   nodes.to.show <- c(chosen$model$gene, chosen$regulatoryRegions$motif, targetGene)
   hideNodes(rcy, setdiff(every.stage.node, nodes.to.show))
   showNodes(rcy, nodes.to.show)
} # showStage
#------------------------------------------------------------------------------------------------------------------------
# Build the multi-model graph for the chosen sample indices, lay it out and
# render it in the RCyjs browser window.  Creates the global `rcy`
# connection on first use (via <<-).  Returns, invisibly, the models list
# and the laid-out graph.
run.buildMultiModelGraph <- function(sampleNumbers)
{
# NOTE(review): banner copied from the test function above
printf("--- test_geneRegulatoryModelToGraph")
models <- get(load("modelsAndRegionsAllSamples.RData")) [sampleNumbers]
#browser()
g.big <- buildMultiModelGraph(models, "GATA2")
# lazily open the (global) cytoscape.js connection
if(!exists("rcy"))
rcy <<- RCyjs()
g.big.lo <- addGeneModelLayout(g.big)
# rcy <- RCyjs()
deleteGraph(rcy)
addGraph(rcy, g.big.lo)
loadStyleFile(rcy, "trenaStyle.js")
fit(rcy, 200)
invisible(list(model=models, graph=g.big.lo))
} # run_buildMultiModelGraph
#------------------------------------------------------------------------------------------------------------------------
# Show one sample's sub-model within the previously rendered multi-model
# graph: hide every other node, then push that sample's pearson scores and
# row-normalized expression values onto the visible nodes and redraw.
# rcy          : RCyjs connection
# g            : combined graph built by buildMultiModelGraph
# models       : list of per-sample model/regulatoryRegions pairs
# sampleNumber : index into `models` (and into mtx.coi below)
showSample <- function(rcy, g, models, sampleNumber)
{
# NOTE(review): sampleNames is never used
sampleNames <- c("d04_rep1", "d04_rep2", "d08_rep1", "d10_rep1", "d10_rep2", "d11_rep1",
"d11_rep2", "d12_rep1", "d12_rep2", "d16_rep1", "d16_rep2")
# NOTE(review): `mtx` is never assigned by the guarded setup at the top of
# this file (only mtx.raw / mtx.*.norm are) -- confirm where mtx comes from
colnames(mtx) <- c("day0.r1", "day0.r2", "day2.r1", "day2.r2",
"day4.r1", "day4.r2",
"day6.r1", "day6.r2", "day7.r1", "day7.r2",
"day8.r1",
"day8.r2", "day8.r1", "day8.r2",
"day10.r1", "day10.r2",
"day10.r1", "day10.r2",
"day11.r1", "day11.r2",
"day11.r1", "day11.r2",
"day12.r1", "day12.r2",
"day14.r1", "day14.r2",
"day16.r1", "day16.r2")
# columns of interest: maps sampleNumber onto an expression-matrix column
mtx.coi <- c(5, 6, 11, 15, 16, 19, 20, 23, 24, 27, 28)
#browser()
tbl.model <- models[[sampleNumber]]$model
tbl.reg <- models[[sampleNumber]]$regulatoryRegions
regs <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
tfs <- unique(tbl.reg$tf)
nodes.this.sample <- c("GATA2", regs, tfs)
nodes.to.hide <- setdiff(nodes(g), nodes.this.sample)
hideNodes(rcy, nodes.to.hide)
showNodes(rcy, nodes.this.sample)
#browser()
nodes <- c(tbl.model$tf)
values <- tbl.model$pearsonCoeff
setNodeAttributes(rcy, "pearson", nodes, values)
values <- tbl.model$rfScore
#setNodeAttributes(rcy, "rfScore", nodes, values)
nodes <- c(nodes, "GATA2")
values <- as.numeric(mtx.asinh.norm[nodes, mtx.coi[sampleNumber]])
# NOTE(review): values2 (raw-normalized expression) is computed but unused
values2 <- as.numeric(mtx.raw.norm[nodes, mtx.coi[sampleNumber]])
#browser()
setNodeAttributes(rcy, "expression", nodes, values)
# defer the redraw slightly so attribute updates land first
later(function() redraw(rcy), 0.25)
xyz <- 99
} # showSample
#------------------------------------------------------------------------------------------------------------------------
# Demo driver: build the combined graph for all eleven differentiation
# samples, apply the trena style, then step through each sample's
# sub-model display in order.
talk <- function()
{
   built <- run.buildMultiModelGraph(1:11)
   loadStyleFile(rcy, "trenaStyle.js")
   for(sample.number in seq_len(11))
      showSample(rcy, built$graph, built$model, sample.number)
} # talk
#------------------------------------------------------------------------------------------------------------------------
| /prep/gata-switch/geneRegulatoryModelToGraph.R | permissive | PriceLab/TrenaProjectErythropoiesis | R | false | false | 16,450 | r | library(RCyjs)
library(later)
#------------------------------------------------------------------------------------------------------------------------
# One-time setup, guarded so re-sourcing this file is cheap.
if(!exists("parseChromLocString"))
source("~/github/trena/R/utils.R")
if(!exists("tbl.geneInfo"))
tbl.geneInfo <- get(load((system.file(package="TrenaProject", "extdata", "geneInfoTable_hg38.RData"))))
# Row-wise min/max normalization of the expression matrix, on asinh and raw
# scales.  NOTE(review): the guard tests `mtx`, but the body never assigns
# `mtx` itself, so this block re-runs every time and `mtx` must come from
# elsewhere -- confirm.
if(!exists("mtx")){
mtx.raw <- get(load("~/github/TrenaProjectErythropoiesis/prep/import/rnaFromMarjorie/mtx-rna.RData"))
mtx.asinh <- asinh(mtx.raw)
mtx.asinh.norm <- t(apply(mtx.asinh, 1, function(row) {
row.min <- min(row)
row.max <- max(row)
new.values <- (row-row.min)/(row.max - row.min)
new.values}))
mtx.raw.norm <- t(apply(mtx.raw, 1, function(row) {
row.min <- min(row)
row.max <- max(row)
new.values <- (row-row.min)/(row.max - row.min)
new.values}))
}
#------------------------------------------------------------------------------------------------------------------------
# Column names a regulatory-regions data.frame must provide before it can be
# turned into graph nodes (validated with stopifnot in the graph builders).
required.regulatoryRegionsColumnNames <- c("motifName", "chrom", "motifStart", "motifEnd", "strand",
"motifScore", "motifRelativeScore", "match",
"distance.from.tss", "tf")
#------------------------------------------------------------------------------------------------------------------------
# Convert one gene-regulatory model into a directed graphNEL with edges
# TF -> regulatoryRegion -> target.gene.
#
# target.gene : gene symbol for the single target node
# tbl.gm      : gene-model data.frame (one row per TF; columns validated below)
# tbl.reg     : regulatory-regions data.frame (columns validated below)
#
# Returns the annotated graphNEL.
geneRegulatoryModelToGraph <- function (target.gene, tbl.gm, tbl.reg)
{
   required.geneModelColumnNames <- c("tf", "betaLasso", "lassoPValue", "pearsonCoeff", "rfScore", "betaRidge",
                                      "spearmanCoeff", "bindingSites", "sample")
   stopifnot(all(required.geneModelColumnNames %in% colnames(tbl.gm)))
   stopifnot(all(required.regulatoryRegionsColumnNames %in% colnames(tbl.reg)))
   printf("genes: %d, %d occurences of %d motifs", length(tbl.gm$tf), length(tbl.reg$motifName),
          length(unique(tbl.reg$motifName)))
   g <- graphNEL(edgemode = "directed")
   # declare every node attribute before any nodeData() assignment; the
   # graph package requires attributes to be defined via nodeDataDefaults
   nodeDataDefaults(g, attr = "type") <- "undefined"  # targetGene, tf, footprint
   nodeDataDefaults(g, attr = "label") <- "default node label"
   nodeDataDefaults(g, attr = "distance") <- 0
   nodeDataDefaults(g, attr = "pearson") <- 0
   nodeDataDefaults(g, attr = "rfScore") <- 0  # fix: assigned below but was never declared
   nodeDataDefaults(g, attr = "motif") <- ""
   nodeDataDefaults(g, attr = "xPos") <- 0
   nodeDataDefaults(g, attr = "yPos") <- 0
   nodeDataDefaults(g, attr = "expression") <- 0
   nodeDataDefaults(g, attr = "strength") <- 0
   edgeDataDefaults(g, attr = "edgeType") <- "undefined"
   tfs <- sort(unique(c(tbl.gm$tf, tbl.reg$tf)))
   # one synthetic node name per regulatory region, encoding direction from
   # the TSS, |distance| (zero-padded), match length, and motif name
   regRegions.names <- unlist(lapply(1:nrow(tbl.reg), function(i){
      distance.from.tss <- tbl.reg$distance.from.tss[i]
      region.size <- nchar(tbl.reg$match[i])
      motif.name <- tbl.reg$motifName[i]
      if(distance.from.tss < 0)
         sprintf("%s.fp.downstream.%05d.L%d.%s", target.gene, abs(distance.from.tss), region.size, motif.name)
      else
         sprintf("%s.fp.upstream.%05d.L%d.%s", target.gene, abs(distance.from.tss), region.size, motif.name)
      }))
   tbl.reg$regionName <- regRegions.names
   all.nodes <- unique(c(target.gene, tfs, regRegions.names))
   g <- addNode(all.nodes, g)
   # (fix: removed a stray browser() debug call that halted execution here)
   nodeData(g, target.gene, "type") <- "targetGene"
   nodeData(g, tfs, "type") <- "TF"
   nodeData(g, regRegions.names, "type") <- "regulatoryRegion"
   nodeData(g, all.nodes, "label") <- all.nodes
   nodeData(g, regRegions.names, "label") <- tbl.reg$motifName
   nodeData(g, regRegions.names, "distance") <- tbl.reg$distance.from.tss
   nodeData(g, regRegions.names, "motif") <- tbl.reg$motifName
   # NOTE(review): pairs tbl.gm rows with the sorted 'tfs' vector by
   # position; assumes tbl.gm$tf is ordered identically -- confirm upstream
   nodeData(g, tfs, "pearson") <- tbl.gm$pearsonCoeff
   nodeData(g, tfs, "rfScore") <- tbl.gm$rfScore
   g <- addEdge(tbl.reg$tf, tbl.reg$regionName, g)
   edgeData(g, tbl.reg$tf, tbl.reg$regionName, "edgeType") <- "bindsTo"
   g <- graph::addEdge(tbl.reg$regionName, target.gene, g)
   edgeData(g, tbl.reg$regionName, target.gene, "edgeType") <- "regulatorySiteFor"
   g

} # geneRegulatoryModelToGraph
#------------------------------------------------------------------------------------------------------------------------
# Build one combined directed graphNEL covering several per-sample gene
# models: nodes are the union of all TFs and all "motif:distance" regulatory
# regions across the models, with TF -> region -> targetGene edges
# contributed by each model.
# NOTE(review): only models[[1]]'s randomForest/pearson values end up on the
# TF nodes; the commented-out code below suggests per-model prefixed
# attributes (e.g. "wt.pearson") were planned but never finished.
buildMultiModelGraph <- function (models, targetGene)
{
g <- graphNEL(edgemode = "directed")
model.names <- names(models)
# default value for every node attribute the graph will carry
node.attribute.specs <- list(type="undefined",
label="default node label",
distance=0,
pearson=0,
randomForest=0,
pcaMax=0,
concordance=0,
betaLasso=0,
motif="",
xPos=0,
yPos=0)
edge.attribute.spec <- list(edgeType="undefined")   # NOTE(review): unused
attribute.classes <- c("", model.names) # "" (no prefix) is the currently displayed set of attibutes
# create current version of these attributes, and then
# per-model versions, which get mapped to current
# in response to user's interactive choice on the cyjs user interface
# the "current version" is, e.g., "distance".
# per-model ("wt" and "mut" versions) become "wt.distance" and "mut.distance"
# and are used by copying e.g. all wt.xxx attributes into the current (non-prefixed)
# attribute, upon which the cyjs style is defined
#for(class.name in attribute.classes){
#class.name.prefix <- class.name # with possible "." appended, permits standard and model-specific attributes
#if(nchar(class.name) > 0)
# class.name.prefix <- sprintf("%s.", class.name)
#noa.names.without.prefix <- names(node.attribute.specs)
noa.names <- names(node.attribute.specs)
#noa.names <- sprintf("%s%s", class.name.prefix, noa.names.without.prefix)
#noa.count <- length(node.attribute.specs)
# declare each node-attribute default before any nodeData() assignment
for(noa.name in noa.names){
printf("--- noa.name: %s", noa.name)
#browser()
nodeDataDefaults(g, attr=noa.name) <- node.attribute.specs[noa.name]
}
#for(i in 1:noa.count){
# nodeDataDefaults(g, attr=noa.names[i]) <- node.attribute.specs[[noa.names.without.prefix[i]]]
# }
#} # for class
edgeDataDefaults(g, attr = "edgeType") <- "undefined"
# first pass: collect the union of TF and region node names across models
all.tfs <- c()
all.regulatoryRegions <- c()
for(model in models){ # collect all the tf and regulatory region nodes
tbl.model <- model$model
all.tfs <- unique(c(all.tfs, tbl.model$tf))
tbl.reg <- model$reg
regRegions.with.distance.from.tss <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
all.regulatoryRegions <- unique(c(all.regulatoryRegions, regRegions.with.distance.from.tss))
} # for model
printf("total tfs: %d total regs: %d", length(all.tfs), length(all.regulatoryRegions))
all.nodes <- unique(c(targetGene, all.tfs, all.regulatoryRegions))
g <- addNode(all.nodes, g)
nodeData(g, targetGene, "type") <- "targetGene"
nodeData(g, all.tfs, "type") <- "TF"
nodeData(g, all.regulatoryRegions, "type") <- "regulatoryRegion"
nodeData(g, all.nodes, "label") <- all.nodes
# add edges, edge attribute, and the constant attributes for all of the regulatoryRegion nodes
xyz <- "buildMultiModelGraph, about to interate over models"  # unused debugging breadcrumb
for(model in models){
tbl.model <- model$model
sample <- tbl.model$sample[1]
tbl.reg <- model$reg
printf("==== %s: %d rows", unique(tbl.model$sample), nrow(tbl.model))
tfs <- tbl.reg$tf
regRegions <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
printf("--- working stage %s", sample)
#browser()
xyze <- "addEdge"  # unused debugging breadcrumb
# suppressWarnings: presumably silences duplicate-edge warnings when the
# same TF/region pair occurs in more than one model -- confirm
suppressWarnings(g <- addEdge(tfs, regRegions, g))
edgeData(g, tfs, regRegions, "edgeType") <- "bindsTo"
suppressWarnings(g <- addEdge(regRegions, targetGene, g))
edgeData(g, regRegions, targetGene, "edgeType") <- "regulatorySiteFor"
nodeData(g, regRegions, "label") <- regRegions
nodeData(g, regRegions, "distance") <- tbl.reg$distance.from.tss
nodeData(g, regRegions, "motif") <- tbl.reg$motifName
} # for model
# now copy in the first model's tf node data
tbl.model <- models[[1]]$model
nodeData(g, tbl.model$tf, attr="randomForest") <- tbl.model$rfScore
nodeData(g, tbl.model$tf, attr="pearson") <- tbl.model$pearsonCoeff
# now copy in each of the model's tf node data in turn
#model.names <- names(models)
#for(model.name in model.names){
# tbl.model <- models[[model.name]]$model
# noa.name <- sprintf("%s.%s", model.name, "randomForest")
# nodeData(g, tbl.model$tf, attr=noa.name) <- tbl.model$rfScore
# noa.name <- sprintf("%s.%s", model.name, "pearson")
# nodeData(g, tbl.model$tf, attr=noa.name) <- tbl.model$pearsonCoeff
# } # for model.name
g
} # buildMultiModelGraph
#------------------------------------------------------------------------------------------------------------------------
# Assign x/y layout coordinates (as node attributes) to a gene-model graph.
#
# Footprint ("regulatoryRegion") nodes are placed on the y=0 line, with x
# proportional to their genomic distance from the TSS, rescaled so the model
# spans roughly xPos.span screen units.  Each TF node is placed at the mean x
# of the footprints it binds, with a random y in 300:1200; targetGene nodes
# are pinned at (0, -200).  Returns the graph with xPos/yPos attributes set.
addGeneModelLayout <- function (g, xPos.span=1500)
{
   printf("--- addGeneModelLayout")
   all.distances <- sort(unique(unlist(nodeData(g, attr='distance'), use.names=FALSE)))
   print(all.distances)   # diagnostic: the distinct footprint distances
   fp.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "regulatoryRegion")]
   # NOTE(review): tf.nodes is computed but unused; the TF loop below
   # rederives the TF list via names(which(...)).
   tf.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "TF")]
   targetGene.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "targetGene")]
   # add in a zero in case all of the footprints are up or downstream of the 0 coordinate, the TSS
   span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="distance"))))
   span <- max(span.endpoints) - min(span.endpoints)
   footprintLayoutFactor <- 1
   printf("initial: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor)
   # Rescale genomic distance into screen coordinates.
   # NOTE(review): if every footprint distance is 0, span is 0 and this
   # division yields Inf -- confirm inputs always have a nonzero span.
   footprintLayoutFactor <- xPos.span/span
   #if(span < 600) #
   # footprintLayoutFactor <- 600/span
   #if(span > 1000)
   # footprintLayoutFactor <- span/1000
   printf("corrected: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor)
   xPos <- as.numeric(nodeData(g, fp.nodes, attr="distance")) * footprintLayoutFactor
   yPos <- 0   # all footprints sit on a single horizontal line
   nodeData(g, fp.nodes, "xPos") <- xPos
   nodeData(g, fp.nodes, "yPos") <- yPos
   adjusted.span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="xPos"))))
   printf("raw span of footprints: %d footprintLayoutFactor: %f new span: %8.0f",
          span, footprintLayoutFactor, abs(max(adjusted.span.endpoints) - min(adjusted.span.endpoints)))
   # Place each TF at the mean x position of its footprint neighbors.
   tfs <- names(which(nodeData(g, attr="type") == "TF"))
   for(tf in tfs){
      footprint.neighbors <- edges(g)[[tf]]
      if(length(footprint.neighbors) > 0){
         footprint.positions <- as.integer(nodeData(g, footprint.neighbors, attr="xPos"))
         new.xPos <- mean(footprint.positions)
         if(is.na(new.xPos)) browser()    # debug hook: bad position data
         if(is.nan(new.xPos)) browser()
         #printf("%8s: %5d", tf, new.xPos)
         }
      else{
         new.xPos <- 0   # TF with no footprint neighbors: park at the TSS
         }
      nodeData(g, tf, "yPos") <- sample(300:1200, 1)   # random vertical jitter
      nodeData(g, tf, "xPos") <- new.xPos
      } # for tf
   nodeData(g, targetGene.nodes, "xPos") <- 0
   nodeData(g, targetGene.nodes, "yPos") <- -200
   g
} # addGeneModelLayout
#------------------------------------------------------------------------------------------------------------------------
# Ad hoc (interactive) test: build a regulatory-model graph for GATA2 from
# the first sample in modelsAndRegionsAllSamples.RData, lay it out, and
# render it.
#
# NOTE(review): relies on an RCyjs session `rcy` already existing in the
# calling environment (its creation below is commented out), and ends in
# browser() for interactive inspection.
test_geneRegulatoryModelToGraph <- function()
{
   printf("--- test_geneRegulatoryModelToGraph")
   all.stages <- get(load("modelsAndRegionsAllSamples.RData"))
   tbl.model <- all.stages[[1]]$model
   tbl.reg <- all.stages[[1]]$regulatoryRegions
   # keep only regulatory regions whose TF appears in the model
   tbl.reg <- subset(tbl.reg, tf %in% tbl.model$tf)
   graph <- geneRegulatoryModelToGraph("GATA2", tbl.model, tbl.reg)
   graph.lo <- addGeneModelLayout(graph)
   # rcy <- RCyjs()
   deleteGraph(rcy)
   addGraph(rcy, graph.lo)
   loadStyleFile(rcy, "trenaStyle.js")
   fit(rcy, 200)
   browser()   # pause so the rendered graph can be inspected
   xyz <- 99   # no-op anchor line for the debugger
} # test_geneRegulatoryModelToGraph
#------------------------------------------------------------------------------------------------------------------------
# Reveal only the nodes belonging to one stage (plus the target gene),
# hiding every TF/motif node that appears in any other stage.
showStage <- function(all.stages, stageNumber, targetGene)
{
   # Gather the TF and motif node names contributed by every stage.
   per.stage.nodes <- lapply(all.stages, function(stage)
                             c(stage$model$gene, stage$regulatoryRegions$motif))
   nodes.to.hide <- unique(unlist(per.stage.nodes, use.names=FALSE))
   # Nodes belonging to the requested stage, plus the target gene itself.
   nodes.to.show <- c(all.stages[[stageNumber]]$model$gene,
                      all.stages[[stageNumber]]$regulatoryRegions$motif,
                      targetGene)
   hideNodes(rcy, setdiff(nodes.to.hide, nodes.to.show))
   showNodes(rcy, nodes.to.show)
} # showStage
#------------------------------------------------------------------------------------------------------------------------
# Build one combined graph from the models for the requested samples, lay it
# out, and render it in an RCyjs browser session (created on demand in the
# global environment).  Invisibly returns list(model=<models>, graph=<laid
# out graph>).
#
# sampleNumbers: indices into the list stored in modelsAndRegionsAllSamples.RData
run.buildMultiModelGraph <- function(sampleNumbers)
{
   # BUG FIX: this trace line previously reported the name of a different
   # function ("test_geneRegulatoryModelToGraph").
   printf("--- run.buildMultiModelGraph")
   models <- get(load("modelsAndRegionsAllSamples.RData")) [sampleNumbers]
   #browser()
   g.big <- buildMultiModelGraph(models, "GATA2")
   # Reuse an existing RCyjs session if one is already in the global env.
   if(!exists("rcy"))
      rcy <<- RCyjs()
   g.big.lo <- addGeneModelLayout(g.big)
   deleteGraph(rcy)
   addGraph(rcy, g.big.lo)
   loadStyleFile(rcy, "trenaStyle.js")
   fit(rcy, 200)
   invisible(list(model=models, graph=g.big.lo))
} # run.buildMultiModelGraph
#------------------------------------------------------------------------------------------------------------------------
# Show a single sample's gene model in the RCyjs browser: hide every node of
# the combined graph that does not belong to that sample, then push per-node
# attributes ("pearson", "expression") used by the style file.
#
# rcy          : an RCyjs session
# g            : the combined multi-model graph (nodes(g) decides what to hide)
# models       : list of per-sample model/regulatoryRegions pairs
# sampleNumber : index into `models` and into mtx.coi below
#
# NOTE(review): depends on matrices mtx.asinh.norm and mtx.raw.norm that are
# defined outside this function.
showSample <- function(rcy, g, models, sampleNumber)
{
   # NOTE(review): sampleNames is built but never used below.
   sampleNames <- c("d04_rep1", "d04_rep2", "d08_rep1", "d10_rep1", "d10_rep2", "d11_rep1",
                    "d11_rep2", "d12_rep1", "d12_rep2", "d16_rep1", "d16_rep2")
   # NOTE(review): mtx is not a parameter or local variable; this assignment
   # creates a function-local modified copy that is never used afterwards --
   # confirm whether it can be removed.
   colnames(mtx) <- c("day0.r1", "day0.r2", "day2.r1", "day2.r2",
                      "day4.r1", "day4.r2",
                      "day6.r1", "day6.r2", "day7.r1", "day7.r2",
                      "day8.r1",
                      "day8.r2", "day8.r1", "day8.r2",
                      "day10.r1", "day10.r2",
                      "day10.r1", "day10.r2",
                      "day11.r1", "day11.r2",
                      "day11.r1", "day11.r2",
                      "day12.r1", "day12.r2",
                      "day14.r1", "day14.r2",
                      "day16.r1", "day16.r2")
   # column-of-interest in the expression matrices, one entry per sample
   mtx.coi <- c(5, 6, 11, 15, 16, 19, 20, 23, 24, 27, 28)
   #browser()
   tbl.model <- models[[sampleNumber]]$model
   tbl.reg <- models[[sampleNumber]]$regulatoryRegions
   # regulatory-region node names follow the "motifName:distance" convention
   regs <- sprintf("%s:%d", tbl.reg$motifName, tbl.reg$distance.from.tss)
   tfs <- unique(tbl.reg$tf)
   nodes.this.sample <- c("GATA2", regs, tfs)
   nodes.to.hide <- setdiff(nodes(g), nodes.this.sample)
   hideNodes(rcy, nodes.to.hide)
   showNodes(rcy, nodes.this.sample)
   #browser()
   nodes <- c(tbl.model$tf)
   values <- tbl.model$pearsonCoeff
   setNodeAttributes(rcy, "pearson", nodes, values)
   # NOTE(review): this value is overwritten below without being used
   # (the rfScore push is commented out).
   values <- tbl.model$rfScore
   #setNodeAttributes(rcy, "rfScore", nodes, values)
   nodes <- c(nodes, "GATA2")
   values <- as.numeric(mtx.asinh.norm[nodes, mtx.coi[sampleNumber]])
   values2 <- as.numeric(mtx.raw.norm[nodes, mtx.coi[sampleNumber]])   # NOTE(review): values2 is never used
   #browser()
   setNodeAttributes(rcy, "expression", nodes, values)
   # schedule the redraw slightly later so the attribute updates land first
   later(function() redraw(rcy), 0.25)
   xyz <- 99   # no-op anchor line for debugging
} # showSample
#------------------------------------------------------------------------------------------------------------------------
# Driver used during a presentation: build the combined graph for all
# eleven samples, apply the style, then step through each sample view.
talk <- function()
{
   x <- run.buildMultiModelGraph(1:11)
   loadStyleFile(rcy, "trenaStyle.js")
   models <- x$model
   g <- x$graph
   for(sample.number in 1:11)
      showSample(rcy, g, models, sample.number)
} # talk
#------------------------------------------------------------------------------------------------------------------------
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a "cache matrix": a list of closures wrapping a matrix `x`
## together with a cached value of its inverse.
##
## Returned interface:
##   set(y)          replace the stored matrix and invalidate the cache
##   get()           return the stored matrix
##   setInverse(inv) store a computed inverse in the cache
##   getInverse()    return the cached inverse, or NULL if not yet computed
##
## (The previous `isInverted` closure was dead code: it always returned
## FALSE and was never included in the returned list, so it is removed.)
makeCacheMatrix <- function(x = matrix()) {
  xinv <- NULL                 # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    xinv <<- NULL              # a new matrix invalidates any cached inverse
    x <<- y
  }
  get <- function() x
  setInverse <- function(inver) xinv <<- inver
  getInverse <- function() xinv
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the matrix held in a cache-matrix object `x`
## (as produced by makeCacheMatrix).  The inverse is computed at most
## once; subsequent calls return the value cached inside `x` itself.
##
## x   : a cache-matrix object (list with get/getInverse/setInverse)
## ... : further arguments passed on to solve()
##
## Fixes over the previous version: the cached inverse fetched from
## x$getInverse() was never consulted; caching instead went through the
## global variables oldx/oldxinv (polluting the global environment and
## defeating the cache whenever two different cache-matrix objects
## alternated); and `...` was accepted but never forwarded to solve().
cacheSolve <- function(x, ...) {
  xinv <- x$getInverse()
  if (!is.null(xinv)) {
    message("getting cached data")
    return(xinv)
  }
  data <- x$get()
  xinv <- solve(data, ...)   # the actual matrix inversion
  x$setInverse(xinv)         # cache inside the object for next time
  xinv
}
| /cachematrix.R | no_license | mochimero/ProgrammingAssignment2 | R | false | false | 1,177 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Build a matrix container that can also cache its inverse.  All the
## closures below share this function's environment, so they all see the
## same `x` (the matrix) and `xinv` (the cached inverse).
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse; NULL until setInverse() stores a value.
  xinv <- NULL
  ## Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    xinv <<- NULL
    x <<- y
  }
  ## Accessors for the matrix and its cached inverse.
  get <- function() x
  setInverse <- function(inver) xinv <<- inver
  getInverse <- function() xinv
  ## Defined but intentionally not exported in the list below.
  isInverted <- function() FALSE
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the matrix wrapped by cache-matrix `x`.
##
## Caching strategy (as written): rather than consulting x$getInverse(),
## this keeps the last input matrix (`oldx`) and its inverse (`oldxinv`)
## as variables in the global environment and re-uses them when the
## current matrix is unchanged.
## NOTE(review): the xinv fetched on the first line is never consulted,
## `...` is accepted but not forwarded to solve(), and the globals are
## shared across *all* cache-matrix objects -- consider using the cache
## stored inside `x` instead.
cacheSolve <- function(x, ...) {
  xinv <- x$getInverse()
  data <- x$get()
  ## check whether oldx exists yet; it is created in the global environment
  ## the first time an inverse is computed (get0 searches enclosing
  ## environments starting from this function's local one)
  if(!is.null(r <- get0("oldx", envir = environment()))){
    ## checks if the input matrix has changed or not
    if( isTRUE(all.equal(oldx,data))) {
      print("getting cached data")
      ##return(oldxinv)
      return(oldxinv)
    }
  }
  ##the actual matrix inversion!!
  xinv <- solve(data)
  x$setInverse(xinv)
  ##keeps the old INPUT matrix for checking next time the function is called
  oldx<<-x$get()
  ##keeps the inverse matrix cached
  oldxinv<<-xinv
  xinv
}
|
#' Neuroconductor CI Template Paths
#'
#' @param ci Which continuous integration system
#' @param ants Does the package depend on ANTs or ANTsR
#' @param user GitHub username for repos
#' @param dev Development Site vs. not?
#' @param deployment indicator if this is a release, not standard running.
#' Just deployment.
#' @param ... not used
#'
#' @return File path of YAML file
#' @export
#'
#' @examples
#' neuroc_ci_template_path()
#' neuroc_ci_template_path(ci = "autoci")
#' neuroc_appveyor_template_path()
#' neuroc_ci_template_path(ants = TRUE)
#' neuroc_travis_template_path()
#' neuroc_travis_template_path(ants = TRUE)
#' neuroc_appveyor_template_path()
#' neuroc_appveyor_template_path(ants = TRUE)
# Resolve the installed-file path of the CI template matching the requested
# CI system / user site / ANTs requirement.  (roxygen docs above.)
#
# Fixes: the fall-through case previously ended in an assignment, so the
# path was returned invisibly (inconsistent with the early return()s); the
# `ci == "github"` branch never returned, so its result was silently
# overwritten; and a scalar `if` condition used vectorized `&`.
neuroc_ci_template_path = function(
  ci = c("travis", "appveyor", "travis_pkgdown", "autoci", "tic", "autoci_pkgdown"),
  ants = FALSE,
  dev = FALSE,
  user = NULL,
  deployment = FALSE,
  ...) {

  ci = match.arg(ci)

  # Templates with a fixed file name, independent of user/platform.
  fixed = c(autoci = "autoci.yml",
            autoci_pkgdown = "autoci_pkgdown.yml",
            tic = "tic.R")
  if (ci %in% names(fixed)) {
    return(system.file(fixed[[ci]], package = "neuroc.deps", mustWork = TRUE))
  }

  # Map the (possibly dev/release-specific) GitHub user onto the two
  # template families shipped with the package.
  user = neuroc_user(user = user, dev = dev, deployment = deployment)
  user = switch(user,
                "neuroconductor" = "neuroconductor",
                "neuroconductor-devel" = "neuroconductor",
                "neuroconductor-releases" = "neuroconductor",
                "neuroconductor-devel-releases" = "neuroconductor",
                "oslerinhealth" = "oslerinhealth",
                "oslerinhealth-releases" = "oslerinhealth"
  )

  # NOTE: "github" is not among the match.arg() choices above, so this
  # branch is currently unreachable; it now returns explicitly so the
  # result cannot be silently overwritten if the choice is ever added.
  if (ci == "github") {
    file = paste0(user, "_", "github.yml")
    return(system.file(file, package = "neuroc.deps", mustWork = TRUE))
  }

  # pkgdown builds on these sites always use the ANTs-enabled template.
  if ((user == "neuroconductor" || user == "oslerinhealth") && ci == "travis_pkgdown") {
    ants = TRUE
  }

  file = paste0(user, "_", ci, if (ants) "_ants" else "", ".yml")
  system.file(file, package = "neuroc.deps", mustWork = TRUE)
}
#' @rdname neuroc_ci_template_path
#' @export
neuroc_travis_template_path = function(...) {
  ## Shortcut for the Travis CI template; all other arguments are
  ## forwarded to neuroc_ci_template_path().
  neuroc_ci_template_path(ci = "travis", ...)
}
#' @rdname neuroc_ci_template_path
#' @export
neuroc_appveyor_template_path = function(...) {
  ## Shortcut for the Appveyor template; all other arguments are
  ## forwarded to neuroc_ci_template_path().
  neuroc_ci_template_path(ci = "appveyor", ...)
}
| /R/neuroc_ci_template_path.R | no_license | muschellij2/neuroc.deps | R | false | false | 2,481 | r | #' Neuroconductor CI Template Paths
#'
#' @param ci Which continuous integration system
#' @param ants Does the package depend on ANTs or ANTsR
#' @param user GitHub username for repos
#' @param dev Development Site vs. not?
#' @param deployment indicator if this is a release, not standard running.
#' Just deployment.
#' @param ... not used
#'
#' @return File path of YAML file
#' @export
#'
#' @examples
#' neuroc_ci_template_path()
#' neuroc_ci_template_path(ci = "autoci")
#' neuroc_appveyor_template_path()
#' neuroc_ci_template_path(ants = TRUE)
#' neuroc_travis_template_path()
#' neuroc_travis_template_path(ants = TRUE)
#' neuroc_appveyor_template_path()
#' neuroc_appveyor_template_path(ants = TRUE)
# Resolve the installed-file path of the CI template matching the requested
# CI system / user site / ANTs requirement.  (roxygen docs above.)
neuroc_ci_template_path = function(
  ci = c("travis", "appveyor", "travis_pkgdown", "autoci", "tic", "autoci_pkgdown"),
  ants = FALSE,
  dev = FALSE,
  user = NULL,
  deployment = FALSE,
  ...) {
  ci = match.arg(ci)
  # Fixed-name templates: returned directly, independent of the user site.
  if (ci == "autoci") {
    file = "autoci.yml"
    file = system.file(file, package = "neuroc.deps", mustWork = TRUE)
    return(file)
  }
  if (ci == "autoci_pkgdown") {
    file = "autoci_pkgdown.yml"
    file = system.file(file, package = "neuroc.deps", mustWork = TRUE)
    return(file)
  }
  if (ci == "tic") {
    file = "tic.R"
    file = system.file(file, package = "neuroc.deps", mustWork = TRUE)
    return(file)
  }
  # Map the (possibly dev/release-specific) GitHub user onto the two
  # template families shipped with the package.
  user = neuroc_user(user = user, dev = dev, deployment = deployment)
  user = switch(user,
                "neuroconductor" = "neuroconductor",
                "neuroconductor-devel" = "neuroconductor",
                "neuroconductor-releases" = "neuroconductor",
                "neuroconductor-devel-releases" = "neuroconductor",
                "oslerinhealth" = "oslerinhealth",
                "oslerinhealth-releases" = "oslerinhealth"
  )
  # NOTE(review): "github" is not among the match.arg() choices above, so
  # this branch is unreachable; even if reached, its result would be
  # overwritten by the assignment at the end of the function.
  if (ci == "github") {
    file = paste0(user, "_", "github.yml")
    file = system.file(file, package = "neuroc.deps", mustWork = TRUE)
  }
  # pkgdown builds on these sites always use the ANTs-enabled template.
  if ((user == "neuroconductor" || user == "oslerinhealth") & ci == "travis_pkgdown") {
    ants = TRUE
  }
  # NOTE(review): the function ends in an assignment, so this path is
  # returned *invisibly*, unlike the visible return()s above.
  file = paste0(user, "_", ci, ifelse(ants, "_ants", ""), ".yml")
  file = system.file(file, package = "neuroc.deps", mustWork = TRUE)
}
#' @rdname neuroc_ci_template_path
#' @export
neuroc_travis_template_path = function(...) {
  ## Travis CI convenience wrapper around neuroc_ci_template_path().
  neuroc_ci_template_path(ci = "travis", ...)
}
#' @rdname neuroc_ci_template_path
#' @export
neuroc_appveyor_template_path = function(...) {
  ## Appveyor convenience wrapper around neuroc_ci_template_path().
  neuroc_ci_template_path(ci = "appveyor", ...)
}
|
\name{adjust_matrix}
\alias{adjust_matrix}
\title{
Remove rows with low variance and impute missing data
}
\description{
Remove rows with low variance and impute missing data
}
\usage{
adjust_matrix(m, sd_quantile = 0.05, max_na = 0.25)
}
\arguments{
\item{m}{a numeric matrix}
\item{sd_quantile}{quantile cutoff on the standard deviation; rows whose standard deviation falls below this quantile are removed}
\item{max_na}{maximum NA rate for rows}
}
\details{
The function uses \code{\link[impute]{impute.knn}} to impute missing data, then
uses \code{\link{adjust_outlier}} to adjust outliers and
removes rows with low standard deviation.
}
\value{
A numeric matrix.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
m = matrix(rnorm(200), 10)
m[1, 1] = 1000
range(m)
m2 = adjust_matrix(m)
range(m2)
}
| /man/adjust_matrix.rd | no_license | NagaComBio/cola | R | false | false | 743 | rd | \name{adjust_matrix}
\alias{adjust_matrix}
\title{
Remove rows with low variance and impute missing data
}
\description{
Remove rows with low variance and impute missing data
}
\usage{
adjust_matrix(m, sd_quantile = 0.05, max_na = 0.25)
}
\arguments{
\item{m}{a numeric matrix}
\item{sd_quantile}{quantile cutoff on the standard deviation; rows whose standard deviation falls below this quantile are removed}
\item{max_na}{maximum NA rate for rows}
}
\details{
The function uses \code{\link[impute]{impute.knn}} to impute missing data, then
uses \code{\link{adjust_outlier}} to adjust outliers and
removes rows with low standard deviation.
}
\value{
A numeric matrix.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
m = matrix(rnorm(200), 10)
m[1, 1] = 1000
range(m)
m2 = adjust_matrix(m)
range(m2)
}
|
# Set up
# Configure Java for rJava/RWeka before loading the libraries below.
Sys.setenv(JAVA_HOME='/usr/bin/java')
# Might need to run this in the terminal:
# sudo R CMD javareconf
library(rJava)
library(RWeka)     # NGramTokenizer / Weka_control used below
library(openNLP)
library(ggplot2)
library(dplyr)
library(magrittr)
#install.packages('slam')
library(slam)
library(tm)        # VCorpus / tm_map / TermDocumentMatrix used below
# Read it in
# NOTE(review): hard-coded absolute path -- only valid on the author's machine.
path <- '~/Desktop/OneDrive - Johns Hopkins University/programming/coursera_data_science_courses/10_capstone/'
setwd(path)
# Relative paths (under `path`) to the three English corpora.
twit <- 'final/en_US/en_US.twitter.txt'
news <- 'final/en_US/en_US.news.txt'
blog <- 'final/en_US/en_US.blogs.txt'
test <- 'test.txt'
sources <- c(twit, news, blog)
source_names <- c('twit', 'news', 'blog')
# Build a sample set
# Fixed seed so the 0.5% line sample below is reproducible.
set.seed(8234)
sampling_ratio <- 0.005
## Sample a `ratio` fraction of the line indices of `file`.
## (Reads the whole file once, just to obtain the line count.)
return_subset_indices <- function(file, ratio){
  n.lines <- length(readLines(file))
  sample(seq_len(n.lines), n.lines * ratio)
}
# Draw the sampled line indices, then pull those lines from each corpus.
# NOTE(review): each file is read twice -- once inside
# return_subset_indices() to count lines, and once here to extract them.
twit_indices <- (return_subset_indices(twit, sampling_ratio))
news_indices <- (return_subset_indices(news, sampling_ratio))
blog_indices <- (return_subset_indices(blog, sampling_ratio))
train <- sample(c(readLines(twit)[twit_indices], readLines(news)[news_indices], readLines(blog)[blog_indices])) # sample mixes the order up
# Tokenize the training set
#dataS <- c(blogs_sub, news_sub, twitter_sub)
# NOTE(review): only the first 20 sampled lines are kept for the corpus --
# confirm this is intentional (e.g. a quick smoke test).
corpus <- train[1:20]
corpus <- VCorpus(VectorSource(corpus))
# Strip non-ASCII characters before cleaning.
corpus <- VCorpus(VectorSource(sapply(corpus, function(row) iconv(row, "latin1", "ASCII", sub=""))))
# Clean and tokenize
corpus1 <- tm_map(corpus, removeNumbers)
corpus1 <- tm_map(corpus1, stripWhitespace)
corpus1 <- tm_map(corpus1, removePunctuation, preserve_intra_word_dashes = TRUE)
corpus1 <- tm_map(corpus1, content_transformer(tolower))
#corpus1 <- tm_map(corpus1, stemDocument)
corpus1 <- tm_map(corpus1, removeWords, stopwords("en"))
#corpus2 <- tm_map(corpus1, PlainTextDocument)
#corpus1 <- NGramTokenizer(corpus1, Weka_control(min = 2, max = 2))
# Tokenize and make N-grams
# One RWeka tokenizer per n-gram order.
unigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
bigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
trigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
unigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = unigrammer))
bigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = bigrammer))
trigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = trigrammer))
#tokens <- AlphabeticTokenizer(x, control = NULL)
#n_grams <- NGramTokenizer(x)
#word_tokens <- WordTokenizer(x, control = NULL)
#save(x=n_grams, file='corpora.Rda')
# Frequency table per n-gram order, sorted most-frequent first; the
# trailing `[1:10,]` lines just print the top ten when run interactively.
uni_corpus <- findFreqTerms(unigrams,lowfreq = 0)
uni_corpusF <- rowSums(as.matrix(unigrams[uni_corpus,]))
uni_corpusF <- data.frame(word=names(uni_corpusF), frequency=uni_corpusF)
uni_corpusF <- uni_corpusF[order(-uni_corpusF$frequency),]
uni_corpusF[1:10,]
bi_corpus <- findFreqTerms(bigrams,lowfreq = 0)
bi_corpusF <- rowSums(as.matrix(bigrams[bi_corpus,]))
bi_corpusF <- data.frame(word=names(bi_corpusF), frequency=bi_corpusF)
bi_corpusF <- bi_corpusF[order(-bi_corpusF$frequency),]
bi_corpusF[1:10,]
tri_corpus <- findFreqTerms(trigrams,lowfreq = 0)
tri_corpusF <- rowSums(as.matrix(trigrams[tri_corpus,]))
tri_corpusF <- data.frame(word=names(tri_corpusF), frequency=tri_corpusF)
tri_corpusF <- tri_corpusF[order(-tri_corpusF$frequency),]
tri_corpusF[1:10,]
# Strategy- could be better to grep serially the words and shorten -1 until you get a match, then sort by mode
| /10_capstone/build_corpora.R | no_license | J-McNamara/mcnamara_coursera_data_science_code_portfolio | R | false | false | 3,404 | r | # Set up
# Configure Java before loading rJava/RWeka.
Sys.setenv(JAVA_HOME='/usr/bin/java')
# Might need to run this in the terminal:
# sudo R CMD javareconf
library(rJava)
library(RWeka)
library(openNLP)
library(ggplot2)
library(dplyr)
library(magrittr)
#install.packages('slam')
library(slam)
library(tm)
# Read it in
# NOTE(review): absolute, machine-specific working directory.
path <- '~/Desktop/OneDrive - Johns Hopkins University/programming/coursera_data_science_courses/10_capstone/'
setwd(path)
# Relative paths to the three English corpora.
twit <- 'final/en_US/en_US.twitter.txt'
news <- 'final/en_US/en_US.news.txt'
blog <- 'final/en_US/en_US.blogs.txt'
test <- 'test.txt'
sources <- c(twit, news, blog)
source_names <- c('twit', 'news', 'blog')
# Build a sample set
# Seed fixed for reproducible sampling.
set.seed(8234)
sampling_ratio <- 0.005
# Return a random sample of line indices covering `ratio` of the lines in
# `file`.  Reads the entire file just to obtain the line count.
return_subset_indices <- function(file, ratio){
  file_length <- length(readLines(file))
  return(sample(1:file_length, file_length*ratio))
}
# Sample line indices from each corpus, then extract and shuffle the lines.
# NOTE(review): every file is read twice (line count + extraction).
twit_indices <- (return_subset_indices(twit, sampling_ratio))
news_indices <- (return_subset_indices(news, sampling_ratio))
blog_indices <- (return_subset_indices(blog, sampling_ratio))
train <- sample(c(readLines(twit)[twit_indices], readLines(news)[news_indices], readLines(blog)[blog_indices])) # sample mixes the order up
# Tokenize the training set
#dataS <- c(blogs_sub, news_sub, twitter_sub)
# NOTE(review): only the first 20 sampled lines are used -- confirm intent.
corpus <- train[1:20]
corpus <- VCorpus(VectorSource(corpus))
# Drop non-ASCII characters before the cleaning passes.
corpus <- VCorpus(VectorSource(sapply(corpus, function(row) iconv(row, "latin1", "ASCII", sub=""))))
# Clean and tokenize
corpus1 <- tm_map(corpus, removeNumbers)
corpus1 <- tm_map(corpus1, stripWhitespace)
corpus1 <- tm_map(corpus1, removePunctuation, preserve_intra_word_dashes = TRUE)
corpus1 <- tm_map(corpus1, content_transformer(tolower))
#corpus1 <- tm_map(corpus1, stemDocument)
corpus1 <- tm_map(corpus1, removeWords, stopwords("en"))
#corpus2 <- tm_map(corpus1, PlainTextDocument)
#corpus1 <- NGramTokenizer(corpus1, Weka_control(min = 2, max = 2))
# Tokenize and make N-grams
# One RWeka tokenizer per n-gram order, fed to TermDocumentMatrix.
unigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
bigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
trigrammer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
unigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = unigrammer))
bigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = bigrammer))
trigrams <- TermDocumentMatrix(corpus1, control = list(tokenize = trigrammer))
#tokens <- AlphabeticTokenizer(x, control = NULL)
#n_grams <- NGramTokenizer(x)
#word_tokens <- WordTokenizer(x, control = NULL)
#save(x=n_grams, file='corpora.Rda')
# Sorted frequency tables per n-gram order; the bare `[1:10,]` lines
# print the ten most frequent terms when run interactively.
uni_corpus <- findFreqTerms(unigrams,lowfreq = 0)
uni_corpusF <- rowSums(as.matrix(unigrams[uni_corpus,]))
uni_corpusF <- data.frame(word=names(uni_corpusF), frequency=uni_corpusF)
uni_corpusF <- uni_corpusF[order(-uni_corpusF$frequency),]
uni_corpusF[1:10,]
bi_corpus <- findFreqTerms(bigrams,lowfreq = 0)
bi_corpusF <- rowSums(as.matrix(bigrams[bi_corpus,]))
bi_corpusF <- data.frame(word=names(bi_corpusF), frequency=bi_corpusF)
bi_corpusF <- bi_corpusF[order(-bi_corpusF$frequency),]
bi_corpusF[1:10,]
tri_corpus <- findFreqTerms(trigrams,lowfreq = 0)
tri_corpusF <- rowSums(as.matrix(trigrams[tri_corpus,]))
tri_corpusF <- data.frame(word=names(tri_corpusF), frequency=tri_corpusF)
tri_corpusF <- tri_corpusF[order(-tri_corpusF$frequency),]
tri_corpusF[1:10,]
# Strategy- could be better to grep serially the words and shorten -1 until you get a match, then sort by mode
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapdeck_dependencies.R
\name{mapdeck_dependencies}
\alias{mapdeck_dependencies}
\title{Mapdeck Dependencies}
\usage{
mapdeck_dependencies()
}
\description{
Adds the required mapdeck javascript dependencies to a Shiny UI when you want to use
mapdeck layers, but not with a mapdeck map.
}
| /man/mapdeck_dependencies.Rd | no_license | SymbolixAU/mapdeck | R | false | true | 365 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapdeck_dependencies.R
\name{mapdeck_dependencies}
\alias{mapdeck_dependencies}
\title{Mapdeck Dependencies}
\usage{
mapdeck_dependencies()
}
\description{
Adds the required mapdeck javascript dependencies to a Shiny UI when you want to use
mapdeck layers, but not with a mapdeck map.
}
|
#!/usr/bin/Rscript
### This R script uses DiMO (Discriminative Motif Optimizer) from "R.Y. Patel, G.D. Stormo, Discriminative motif optimization based on perceptron training, Bioinformatics, 30 (2014), pp. 941-948"
### If methylation is considered, we use a modified version of DiMO where the tool has been adapted to methylation by adding the "m" and "1" coding characters (which represent methylation on a cytosine and on the paired cytosine of a guanine respectively) to the non ambiguous DNA alphabet "ACGT".
### As input, it takes 5 arguments: a boolean indicating whether methylation is considered, the positive fasta sequences, the negative fasta sequences, the motif to optimize and the name of the output files
### The output will then contain the optimized motif matrix
working_dir <- ".."
setwd(working_dir)
args <- commandArgs(trailingOnly=TRUE)
# commandArgs() yields character strings; convert the flag explicitly.
# (The previous `eval(methylation)` was a no-op on a string and only
# worked through if()'s implicit character-to-logical coercion.)
methylation <- as.logical(args[1])
foreground_seq <- args[2]
background_seq <- args[3]
matrix_in <- args[4]
output_name <- args[5]
# Load the methylation-aware fork of DiMO when requested.
if (methylation) {
  library(DiMO.methyl, lib.loc = "~/.R/")
} else {
  library(DiMO, lib.loc = "~/.R/")
}
# Run the optimizer; writes the optimized motif matrix under output_name.
DiMO(
  positive.file=foreground_seq,
  negative.file=background_seq,
  pfm.file.name=matrix_in,
  output.flag=output_name,
  epochs=150,
  add.at.five=0,
  add.at.three=0,
  learning.rates=seq(1,0.1,length.out=3)
)
| /launch_DiMO_AUROC.R | no_license | ArnaudStigliani/ampDAP_bg_DiMo | R | false | false | 1,313 | r | #!/usr/bin/Rscript
### This R script uses DiMO (Discriminative Motif Optimizer) from "R.Y. Patel, G.D. Stormo, Discriminative motif optimization based on perceptron training, Bioinformatics, 30 (2014), pp. 941-948"
### If methylation is considered, we use a modified version of DiMO where the tool has been adapted to methylation by adding the "m" and "1" coding characters (which represent methylation on a cytosine and on the paired cytosine of a guanine respectively) to the non ambiguous DNA alphabet "ACGT".
### As input, it takes 5 arguments: a boolean indicating whether methylation is considered, the positive fasta sequences, the negative fasta sequences, the motif to optimize and the name of the output files
### The output will then contain the optimized motif matrix
working_dir <- ".."
setwd(working_dir)
# Positional command-line arguments; all arrive as character strings.
args <- commandArgs(trailingOnly=TRUE)
methylation <- args[1]
foreground_seq <- args[2]
background_seq <- args[3]
matrix_in <- args[4]
output_name <- args[5]
# NOTE(review): eval() on a character string is a no-op; this relies on
# if() implicitly coercing "TRUE"/"FALSE" to logical.  as.logical(methylation)
# would make the conversion explicit -- confirm the argument spelling used.
if (eval(methylation)){
  library(DiMO.methyl, lib.loc = "~/.R/")
} else {
  library(DiMO, lib.loc = "~/.R/")
}
# Run the optimizer; writes the optimized motif matrix under output_name.
DiMO(
  positive.file=foreground_seq,
  negative.file=background_seq,
  pfm.file.name=matrix_in,
  output.flag=output_name,
  epochs=150,
  add.at.five=0,
  add.at.three=0,
  learning.rates=seq(1,0.1,length.out=3)
)
|
AutoModel <- function(DF_INPUT, SEED=1)
{
###############################################################################################
## PURPOSE: TEMPLATE R-SCRIPT TO AUTOMATE MODEL BUILDING & ACCURACY CHECK
## USAGE: CUSTOMIZE EACH SECTION FOR SPECIFIC DATA ANALYSIS NEED
## TECHNIQUES: GLM, CART & RANDOM FORREST
###############################################################################################
## ############################################################################################
## MODEL LOGIC DESCRIPTION
## DATA sET: FULLY IMPUTED - MICE PMM METHOD
## VAR. SET: FEATURES SELECTED USING - RANDOM FOREST 5000 TRESS !
## ############################################################################################
## LOAD REQUIRED LIBRARIES
library(caTools)
library(rpart)
library(randomForest)
library(ROCR)
## INITIALIZE VARIABLES
BaselineModel = "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163"
NextVariable = c("Gender", "Q110740", "Q103293", "Q104996", "Q114748", "Q115777", "Q120194", "Q115899", "Q102687", "Q116953", "Q105655", "Q112270", "Q118232", "Q122770", "Q120379", "Q119334", "Q120978", "Q115390", "Q114961", "Q115195", "Q120012", "Q98078", "Q119851", "Q112478", "Q113584", "Q99982", "Q116448", "Q105840", "Q117193", "Q120472", "Q121011", "Q124122", "Q106042", "Q118237", "Q96024", "Q108855", "Q111848", "Q114386", "Q122769", "Q106997", "Q118892", "Q98578", "Q116797", "Q107869", "Q120014", "Q102906", "Q117186", "Q118117", "Q111580", "Q100680", "Q100689", "Q106389", "Q116197", "Q124742", "Q116881", "Q108950", "Q118233", "Q101162", "Q109367", "Q114517", "Q108342", "Q98869", "Q108856", "Q101596", "Q99480", "Q116441", "Q102289", "Q111220", "Q108754", "Q108343", "Q113992", "Q121699", "Q122120", "Q113583", "Q106272", "Q102089", "Q123621", "Q114152", "Q102674", "Q119650", "Q106388", "Q100010", "Q115602", "Q122771", "Q106993", "Q100562", "Q115610", "Q112512", "Q116601", "Q107491", "Q108617", "Q121700", "Q99581", "Q99716", "Q98059", "Q120650", "Q123464")
FormulaString = ""
##PredictionFormula = c("Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Gender", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q110740", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q103293", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q104996", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114748", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115777", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120194", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115899", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102687", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116953", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q105655", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112270", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118232", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122770", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120379", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + 
Q101163 + Q119334", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120978", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115390", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114961", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115195", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120012", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98078", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q119851", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112478", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113584", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99982", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116448", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q105840", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q117193", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120472", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121011", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q124122", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + 
Q98197 + Q101163 + Q106042", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118237", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q96024", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108855", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111848", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114386", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122769", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106997", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118892", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98578", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116797", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q107869", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120014", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102906", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q117186", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118117", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111580", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + 
Q113181 + Q98197 + Q101163 + Q100680", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100689", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106389", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116197", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q124742", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116881", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108950", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118233", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q101162", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q109367", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114517", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108342", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98869", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108856", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q101596", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99480", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116441", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + 
Q115611 + Q113181 + Q98197 + Q101163 + Q102289", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111220", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108754", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108343", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113992", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121699", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122120", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113583", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106272", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102089", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q123621", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114152", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102674", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q119650", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106388", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100010", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115602", "Party ~ YOB + Income + EducationLevel + Q109244 + 
HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122771", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106993", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100562", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115610", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112512", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116601", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q107491", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108617", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121700", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99581", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99716", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98059", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120650", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q123464")
## MODEL CONFIGURATION --------------------------------------------------------
## Candidate techniques.  Only "glm" is active: the rpart/randomForest fits
## below are commented out, so re-enable them before adding "CART" or
## "RandomForest" back to ModelTypes.
##ModelTypes = c("glm", "CART", "RandomForest")
ModelTypes = c("glm")
SplRatio <- c(0.999)                  # train/test split ratio(s)
## Use the caller-supplied seed unless it is the default sentinel value 1.
RandSeed <- if (SEED != 1) SEED else 13
Threshold <- c(0.50)                  # glm classification cutoff(s)
## One output row per (glm x split x threshold) combination plus one row per
## non-glm technique per split.  The main loop runs i3 over
## 0:length(NextVariable) -- length(NextVariable)+1 iterations, where
## iteration 0 is the baseline model -- so the row count must include that
## extra baseline iteration (previously off by one, relying on silent
## data.frame auto-extension for the last row).
TotalRows <- (((length(ModelTypes)-1) * length(SplRatio)) + (length(SplRatio) * length(Threshold))) * (length(NextVariable) + 1)
dfInput = DF_INPUT
rIndex = 1                            # next row of dfOutput to fill
## INITIALIZE OUTPUT DATA FRAME
dfOutput = data.frame(matrix(NA, nrow=TotalRows, ncol=9))
colnames(dfOutput) = c("Model.Technique", "SplitRatio", "Threshold", "Training.AUC", "Test.AUC", "TrainingSet.Accuracy", "TestSet.Accuracy", "Next.Variable", "Prediction.Formula")
## BUILD & EVALUATE MODELS ----------------------------------------------------
## i3 == 0 evaluates the baseline model alone; i3 >= 1 evaluates the baseline
## plus the i3-th candidate variable (forward-selection style screen).
for (i3 in 0:length(NextVariable))
{
  if (i3 == 0)
  {
    ## Record the baseline formula so the baseline output row is not blank.
    FormulaString = BaselineModel
    predFormula = as.formula(BaselineModel)
  }
  else
  {
    FormulaString = paste(BaselineModel, " + ", NextVariable[i3], sep = "")
    predFormula = as.formula(FormulaString)
  }
  for (i in seq_along(SplRatio))
  {
    ## Re-seed before every split so each candidate formula is evaluated on
    ## the identical train/test partition.
    set.seed(RandSeed)
    ####: Needs to be customized
    splt = sample.split(dfInput$Party, SplitRatio = SplRatio[i])
    dfTrain = subset(dfInput, splt == TRUE)
    dfTest = subset(dfInput, splt == FALSE)
    ####: Check for required customization before each run
    glmModel = glm(predFormula, data = dfTrain, family = "binomial")
    #cartModel = rpart(predFormula, data = dfTrain, method = "class")
    #rfModel = randomForest(predFormula, data = dfTrain, ntree = 1500)
    for (i1 in seq_along(ModelTypes))
    {
      if (ModelTypes[i1] == "glm")
      {
        predTrain = predict(glmModel, type = "response")
        predTest = predict(glmModel, newdata = dfTest, type = "response")
        ####: Check for required customization before each run
        for (i2 in seq_along(Threshold))
        {
          dfOutput[rIndex,1] = ModelTypes[i1]
          dfOutput[rIndex,2] = SplRatio[i]
          dfOutput[rIndex,3] = Threshold[i2]
          ## Confusion matrix: rows = actual Party, cols = predicted class.
          ## NOTE: assumes both predicted classes occur so m is 2x2 -- if all
          ## predictions fall on one side of the cutoff, m[2,2] would error.
          m = as.matrix(table(dfTrain$Party, predTrain > Threshold[i2]))
          dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrain, dfTrain$Party), "auc")@y.values)
          dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
          if (i3 > 0)
            dfOutput[rIndex,8] = NextVariable[i3]
          dfOutput[rIndex,9] = FormulaString
          ## Test-set metrics only exist when some rows were held out.
          if (SplRatio[i] < 1)
          {
            dfOutput[rIndex,5] = as.numeric(performance(prediction(predTest, dfTest$Party), "auc")@y.values)
            m = as.matrix(table(dfTest$Party, predTest > Threshold[i2]))
            dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
          }
          rIndex = rIndex + 1
        }
      }
      if (ModelTypes[i1] == "CART")
      {
        ## NOTE: requires the cartModel fit above to be uncommented.
        predTrain = predict(cartModel, type = "class")
        predTest = predict(cartModel, newdata = dfTest, type = "class")
        predTrainProb = predict(cartModel, type = "prob")
        predTestProb = predict(cartModel, newdata = dfTest, type = "prob")
        ####: Check for required customization before each run
        dfOutput[rIndex,1] = ModelTypes[i1]
        dfOutput[rIndex,2] = SplRatio[i]
        dfOutput[rIndex,3] = ""
        dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrainProb[ ,2], dfTrain$Party), "auc")@y.values)
        m = as.matrix(table(dfTrain$Party, predTrain))
        dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
        ## Guard the baseline iteration (NextVariable[0] has length zero and
        ## the assignment would error), matching the glm branch above.
        if (i3 > 0)
          dfOutput[rIndex,8] = NextVariable[i3]
        dfOutput[rIndex,9] = FormulaString
        if (SplRatio[i] < 1)
        {
          dfOutput[rIndex,5] = as.numeric(performance(prediction(predTestProb[ ,2], dfTest$Party), "auc")@y.values)
          m = as.matrix(table(dfTest$Party, predTest))
          dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
        }
        rIndex = rIndex + 1
      }
      if (ModelTypes[i1] == "RandomForest")
      {
        ## NOTE: requires the rfModel fit above to be uncommented.
        predTrain = predict(rfModel, type = "class")
        predTest = predict(rfModel, newdata = dfTest, type = "class")
        predTrainProb = predict(rfModel, type = "prob")
        predTestProb = predict(rfModel, newdata = dfTest, type = "prob")
        ####: Check for required customization before each run
        dfOutput[rIndex,1] = ModelTypes[i1]
        dfOutput[rIndex,2] = SplRatio[i]
        dfOutput[rIndex,3] = ""
        dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrainProb[ ,2], dfTrain$Party), "auc")@y.values)
        m = as.matrix(table(dfTrain$Party, predTrain))
        dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
        ## Same baseline guard as the other branches.
        if (i3 > 0)
          dfOutput[rIndex,8] = NextVariable[i3]
        dfOutput[rIndex,9] = FormulaString
        if (SplRatio[i] < 1)
        {
          dfOutput[rIndex,5] = as.numeric(performance(prediction(predTestProb[ ,2], dfTest$Party), "auc")@y.values)
          m = as.matrix(table(dfTest$Party, predTest))
          dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
        }
        rIndex = rIndex + 1
      }
    }
  }
}
## WRITE THE OUTPUT TO CSV FILE
write.csv(dfOutput, "Auto_Model_Output.csv", row.names = FALSE)
} | /31-Models/08_RF_5000_Features_Model/Archived/AutoModel_V1.0.R | no_license | mirajvashi/2016_MIT_TheAnalyticsEdge_Kaggle_Competition | R | false | false | 19,372 | r | AutoModel <- function(DF_INPUT, SEED=1)
{
###############################################################################################
## PURPOSE: TEMPLATE R-SCRIPT TO AUTOMATE MODEL BUILDING & ACCURACY CHECK
## USAGE: CUSTOMIZE EACH SECTION FOR SPECIFIC DATA ANALYSIS NEED
## TECHNIQUES: GLM, CART & RANDOM FORREST
###############################################################################################
## ############################################################################################
## MODEL LOGIC DESCRIPTION
## DATA sET: FULLY IMPUTED - MICE PMM METHOD
## VAR. SET: FEATURES SELECTED USING - RANDOM FOREST 5000 TRESS !
## ############################################################################################
## LOAD REQUIRED LIBRARIES
library(caTools)
library(rpart)
library(randomForest)
library(ROCR)
## INITIALIZE VARIABLES
BaselineModel = "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163"
NextVariable = c("Gender", "Q110740", "Q103293", "Q104996", "Q114748", "Q115777", "Q120194", "Q115899", "Q102687", "Q116953", "Q105655", "Q112270", "Q118232", "Q122770", "Q120379", "Q119334", "Q120978", "Q115390", "Q114961", "Q115195", "Q120012", "Q98078", "Q119851", "Q112478", "Q113584", "Q99982", "Q116448", "Q105840", "Q117193", "Q120472", "Q121011", "Q124122", "Q106042", "Q118237", "Q96024", "Q108855", "Q111848", "Q114386", "Q122769", "Q106997", "Q118892", "Q98578", "Q116797", "Q107869", "Q120014", "Q102906", "Q117186", "Q118117", "Q111580", "Q100680", "Q100689", "Q106389", "Q116197", "Q124742", "Q116881", "Q108950", "Q118233", "Q101162", "Q109367", "Q114517", "Q108342", "Q98869", "Q108856", "Q101596", "Q99480", "Q116441", "Q102289", "Q111220", "Q108754", "Q108343", "Q113992", "Q121699", "Q122120", "Q113583", "Q106272", "Q102089", "Q123621", "Q114152", "Q102674", "Q119650", "Q106388", "Q100010", "Q115602", "Q122771", "Q106993", "Q100562", "Q115610", "Q112512", "Q116601", "Q107491", "Q108617", "Q121700", "Q99581", "Q99716", "Q98059", "Q120650", "Q123464")
FormulaString = ""
##PredictionFormula = c("Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Gender", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q110740", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q103293", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q104996", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114748", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115777", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120194", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115899", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102687", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116953", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q105655", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112270", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118232", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122770", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120379", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + 
Q101163 + Q119334", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120978", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115390", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114961", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115195", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120012", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98078", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q119851", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112478", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113584", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99982", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116448", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q105840", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q117193", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120472", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121011", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q124122", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + 
Q98197 + Q101163 + Q106042", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118237", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q96024", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108855", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111848", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114386", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122769", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106997", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118892", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98578", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116797", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q107869", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120014", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102906", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q117186", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118117", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111580", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + 
Q113181 + Q98197 + Q101163 + Q100680", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100689", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106389", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116197", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q124742", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116881", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108950", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q118233", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q101162", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q109367", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114517", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108342", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98869", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108856", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q101596", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99480", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116441", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + 
Q115611 + Q113181 + Q98197 + Q101163 + Q102289", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q111220", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108754", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108343", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113992", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121699", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122120", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q113583", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106272", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102089", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q123621", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q114152", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q102674", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q119650", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106388", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100010", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115602", "Party ~ YOB + Income + EducationLevel + Q109244 + 
HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q122771", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q106993", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q100562", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q115610", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q112512", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q116601", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q107491", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q108617", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q121700", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99581", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q99716", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q98059", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q120650", "Party ~ YOB + Income + EducationLevel + Q109244 + HouseholdStatus + Q115611 + Q113181 + Q98197 + Q101163 + Q123464")
## MODEL CONFIGURATION --------------------------------------------------------
## Candidate techniques.  Only "glm" is active: the rpart/randomForest fits
## below are commented out, so re-enable them before adding "CART" or
## "RandomForest" back to ModelTypes.
##ModelTypes = c("glm", "CART", "RandomForest")
ModelTypes = c("glm")
SplRatio <- c(0.999)                  # train/test split ratio(s)
## Use the caller-supplied seed unless it is the default sentinel value 1.
RandSeed <- if (SEED != 1) SEED else 13
Threshold <- c(0.50)                  # glm classification cutoff(s)
## One output row per (glm x split x threshold) combination plus one row per
## non-glm technique per split.  The main loop runs i3 over
## 0:length(NextVariable) -- length(NextVariable)+1 iterations, where
## iteration 0 is the baseline model -- so the row count must include that
## extra baseline iteration (previously off by one, relying on silent
## data.frame auto-extension for the last row).
TotalRows <- (((length(ModelTypes)-1) * length(SplRatio)) + (length(SplRatio) * length(Threshold))) * (length(NextVariable) + 1)
dfInput = DF_INPUT
rIndex = 1                            # next row of dfOutput to fill
## INITIALIZE OUTPUT DATA FRAME
dfOutput = data.frame(matrix(NA, nrow=TotalRows, ncol=9))
colnames(dfOutput) = c("Model.Technique", "SplitRatio", "Threshold", "Training.AUC", "Test.AUC", "TrainingSet.Accuracy", "TestSet.Accuracy", "Next.Variable", "Prediction.Formula")
## BUILD & EVALUATE MODELS ----------------------------------------------------
## i3 == 0 evaluates the baseline model alone; i3 >= 1 evaluates the baseline
## plus the i3-th candidate variable (forward-selection style screen).
for (i3 in 0:length(NextVariable))
{
  if (i3 == 0)
  {
    ## Record the baseline formula so the baseline output row is not blank.
    FormulaString = BaselineModel
    predFormula = as.formula(BaselineModel)
  }
  else
  {
    FormulaString = paste(BaselineModel, " + ", NextVariable[i3], sep = "")
    predFormula = as.formula(FormulaString)
  }
  for (i in seq_along(SplRatio))
  {
    ## Re-seed before every split so each candidate formula is evaluated on
    ## the identical train/test partition.
    set.seed(RandSeed)
    ####: Needs to be customized
    splt = sample.split(dfInput$Party, SplitRatio = SplRatio[i])
    dfTrain = subset(dfInput, splt == TRUE)
    dfTest = subset(dfInput, splt == FALSE)
    ####: Check for required customization before each run
    glmModel = glm(predFormula, data = dfTrain, family = "binomial")
    #cartModel = rpart(predFormula, data = dfTrain, method = "class")
    #rfModel = randomForest(predFormula, data = dfTrain, ntree = 1500)
    for (i1 in seq_along(ModelTypes))
    {
      if (ModelTypes[i1] == "glm")
      {
        predTrain = predict(glmModel, type = "response")
        predTest = predict(glmModel, newdata = dfTest, type = "response")
        ####: Check for required customization before each run
        for (i2 in seq_along(Threshold))
        {
          dfOutput[rIndex,1] = ModelTypes[i1]
          dfOutput[rIndex,2] = SplRatio[i]
          dfOutput[rIndex,3] = Threshold[i2]
          ## Confusion matrix: rows = actual Party, cols = predicted class.
          ## NOTE: assumes both predicted classes occur so m is 2x2 -- if all
          ## predictions fall on one side of the cutoff, m[2,2] would error.
          m = as.matrix(table(dfTrain$Party, predTrain > Threshold[i2]))
          dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrain, dfTrain$Party), "auc")@y.values)
          dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
          if (i3 > 0)
            dfOutput[rIndex,8] = NextVariable[i3]
          dfOutput[rIndex,9] = FormulaString
          ## Test-set metrics only exist when some rows were held out.
          if (SplRatio[i] < 1)
          {
            dfOutput[rIndex,5] = as.numeric(performance(prediction(predTest, dfTest$Party), "auc")@y.values)
            m = as.matrix(table(dfTest$Party, predTest > Threshold[i2]))
            dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
          }
          rIndex = rIndex + 1
        }
      }
      if (ModelTypes[i1] == "CART")
      {
        ## NOTE: requires the cartModel fit above to be uncommented.
        predTrain = predict(cartModel, type = "class")
        predTest = predict(cartModel, newdata = dfTest, type = "class")
        predTrainProb = predict(cartModel, type = "prob")
        predTestProb = predict(cartModel, newdata = dfTest, type = "prob")
        ####: Check for required customization before each run
        dfOutput[rIndex,1] = ModelTypes[i1]
        dfOutput[rIndex,2] = SplRatio[i]
        dfOutput[rIndex,3] = ""
        dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrainProb[ ,2], dfTrain$Party), "auc")@y.values)
        m = as.matrix(table(dfTrain$Party, predTrain))
        dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
        ## Guard the baseline iteration (NextVariable[0] has length zero and
        ## the assignment would error), matching the glm branch above.
        if (i3 > 0)
          dfOutput[rIndex,8] = NextVariable[i3]
        dfOutput[rIndex,9] = FormulaString
        if (SplRatio[i] < 1)
        {
          dfOutput[rIndex,5] = as.numeric(performance(prediction(predTestProb[ ,2], dfTest$Party), "auc")@y.values)
          m = as.matrix(table(dfTest$Party, predTest))
          dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
        }
        rIndex = rIndex + 1
      }
      if (ModelTypes[i1] == "RandomForest")
      {
        ## NOTE: requires the rfModel fit above to be uncommented.
        predTrain = predict(rfModel, type = "class")
        predTest = predict(rfModel, newdata = dfTest, type = "class")
        predTrainProb = predict(rfModel, type = "prob")
        predTestProb = predict(rfModel, newdata = dfTest, type = "prob")
        ####: Check for required customization before each run
        dfOutput[rIndex,1] = ModelTypes[i1]
        dfOutput[rIndex,2] = SplRatio[i]
        dfOutput[rIndex,3] = ""
        dfOutput[rIndex,4] = as.numeric(performance(prediction(predTrainProb[ ,2], dfTrain$Party), "auc")@y.values)
        m = as.matrix(table(dfTrain$Party, predTrain))
        dfOutput[rIndex,6] = (m[1,1] + m[2,2]) / nrow(dfTrain)
        ## Same baseline guard as the other branches.
        if (i3 > 0)
          dfOutput[rIndex,8] = NextVariable[i3]
        dfOutput[rIndex,9] = FormulaString
        if (SplRatio[i] < 1)
        {
          dfOutput[rIndex,5] = as.numeric(performance(prediction(predTestProb[ ,2], dfTest$Party), "auc")@y.values)
          m = as.matrix(table(dfTest$Party, predTest))
          dfOutput[rIndex,7] = (m[1,1] + m[2,2]) / nrow(dfTest)
        }
        rIndex = rIndex + 1
      }
    }
  }
}
## WRITE THE OUTPUT TO CSV FILE
write.csv(dfOutput, "Auto_Model_Output.csv", row.names = FALSE)
} |
### Name: fullaxis
### Title: Add an axis with a line to the edge of the plot
### Aliases: fullaxis
### Keywords: misc
### ** Examples
# Scatter plot of random points with axes suppressed (axes=FALSE) so that
# fullaxis() can draw each axis explicitly below.
plot(runif(20,-1,1),runif(20,-1,1),xlim=c(-1,1.5),main="Demo of fullaxis",
xlab="X",ylab="Y",axes=FALSE)
# Axis sides follow the base R convention: 1 = bottom, 2 = left, 4 = right.
fullaxis(1,col="red")
fullaxis(2,col="blue",col.axis="blue")
# Right-side axis with custom tick positions and text labels, positioned at
# x = 1.2; las=1 draws the labels horizontally.
fullaxis(4,at=c(-0.5,0,0.5),labels=c("Negative","Zero","Positive"),pos=1.2,
col="green",las=1)
# add a top line to complete the "box"
xylim<-par("usr")   # plot-region user coordinates: c(x1, x2, y1, y2)
segments(xylim[1],xylim[4],xylim[2],xylim[4])
| /icfp09/lib/plotrix/R-ex/fullaxis.R | no_license | Jacob33123/narorumo | R | false | false | 515 | r | ### Name: fullaxis
### Title: Add an axis with a line to the edge of the plot
### Aliases: fullaxis
### Keywords: misc
### ** Examples
# Scatter plot of random points with axes suppressed (axes=FALSE) so that
# fullaxis() can draw each axis explicitly below.
plot(runif(20,-1,1),runif(20,-1,1),xlim=c(-1,1.5),main="Demo of fullaxis",
xlab="X",ylab="Y",axes=FALSE)
# Axis sides follow the base R convention: 1 = bottom, 2 = left, 4 = right.
fullaxis(1,col="red")
fullaxis(2,col="blue",col.axis="blue")
# Right-side axis with custom tick positions and text labels, positioned at
# x = 1.2; las=1 draws the labels horizontally.
fullaxis(4,at=c(-0.5,0,0.5),labels=c("Negative","Zero","Positive"),pos=1.2,
col="green",las=1)
# add a top line to complete the "box"
xylim<-par("usr")   # plot-region user coordinates: c(x1, x2, y1, y2)
segments(xylim[1],xylim[4],xylim[2],xylim[4])
|
source("analysis/data_utils_2.R")
source("analysis/celline_2_tcga_pipeline.R")
library(synapseClient)
synapseLogin("justin.guinney@sagebase.org",'marley')
sanger <- getSanger_MetaGenomics()
ccle <- getCCLE_MetaGenomics()
###############
## build data sets
brca <- build.tcga.ds(geneExprId="syn1446183",rppaId="syn1571265",gisticId="syn1687590",cbioPrefix="brca",isRNASeq=TRUE)
luad <- build.tcga.ds(geneExprId="syn418003",rppaId="syn464306",gisticId="syn1687610",cbioPrefix="luad",isRNASeq=TRUE)
coad <- build.tcga.ds(geneExprId="syn1446195",rppaId="syn1446043",gisticId="syn1687596",cbioPrefix="coadread",isRNASeq=TRUE)
read <- build.tcga.ds(geneExprId="syn1446274",rppaId="syn1446053",gisticId="syn1687628",cbioPrefix="coadread",isRNASeq=TRUE)
skcm <- build.tcga.ds(geneExprId="~/projects/h3/data~/firehose/gdac.broadinstitute.org_SKCM.Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3.2013032600.0.0/SKCM.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt",
gisticId="syn1687618",
rppaId="~/projects/h3/data~/firehose/gdac.broadinstitute.org_SKCM.RPPA_AnnotateWithGene.Level_3.2013032600.0.0/SKCM.rppa.txt",
cbioPrefix="skcm",isRNASeq=TRUE)
blca <- build.tcga.ds(geneExprId="syn417761",rppaId="syn1681031",gisticId="syn1687592",cbioPrefix="syn1571577",isRNASeq=TRUE)
lusc <- build.tcga.ds(geneExprId="syn1446244",rppaId="syn1446049",gisticId="syn1687612",cbioPrefix="lusc",isRNASeq=TRUE)
# merge rectal and colon
idxs1 <- groupMatch(rownames(coad$geneExpr), rownames(read$geneExpr))
idxs2 <- groupMatch(rownames(coad$rppa), rownames(read$rppa))
idxs3 <- groupMatch(rownames(coad$gistic), rownames(read$gistic))
crc <- list(geneExpr=cbind(coad$geneExpr[idxs1[[1]],], read$geneExpr[idxs1[[2]],]),
rppa=cbind(coad$rppa[idxs2[[1]],], read$rppa[idxs2[[2]],]),
gistic=cbind(coad$gistic[idxs3[[1]],], read$gistic[idxs3[[2]],]),
mut=coad$mut)
# save datasets for fast retreival
save(brca, crc, blca, luad, skcm, lusc, file="~/data/TCGA_ds.rda")
# triple neg brca
brca.3neg <- brca
brca.3neg$geneExpr <- brca.3neg$geneExpr[,as.matrix(brca$geneExpr)["ESR1",] < 8 & as.matrix(brca$geneExpr)["PGR",] < 8]
brca.3neg$gistic <- brca.3neg$gistic[, as.matrix(brca$gistic)["ERBB2",] != 2]
###############
## fgfr in bladder
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
y_hat_fgfr <- virtual_ic50(sanger[,carcinoma_mask],"PD-173074",blca,reverseDrug=TRUE)
blca_fgfr_vogel <- find_drug_features (y_hat_fgfr,blca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
blca_fgfr_cosmic <- find_drug_features (y_hat_fgfr,blca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
blca_fgfr_cbio <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cbio")
pdf("plots/brca_fgfr_vogel.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_vogel, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cosmic.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cosmic, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cbio.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cbio, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
##########################
# BRAF inhib in melanoma
#melanoma_mask <- getTissueType(sampleNames(ccle)) %in% c("SKIN")
# Virtual IC50 for the BRAF inhibitor PLX4720 projected onto the SKCM cohort.
skcm_braf <- virtual_ic50(ccle, "PLX4720",skcm, seed=2013)
skcm_braf_vogel <- find_drug_features (skcm_braf,skcm, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
skcm_braf_cosmic <- find_drug_features (skcm_braf,skcm, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
# interactive peek at the top-ranked features
skcm_braf_vogel$df[1:10,]
# NOTE(review): skcm_braf_DF and skcm_braf_rppa_DF are never defined in this
# section (only *_vogel and *_cosmic above) — these two plot calls will fail
# unless those objects survive from an earlier session. Confirm which
# objects were meant (also: with.rppa=FALSE everywhere, yet the second PDF is
# named "rppa").
pdf("plots/tcga/skcm_plx4720.pdf",width=10,height=6,useDingbats=F)
plot_features(skcm_braf_DF, "PLX4720 (braf inib)","melanoma",text.cex=.9)
dev.off()
pdf("plots/tcga/skcm_rppa_plx4720.pdf",width=10,height=6,useDingbats=F)
plot_features(skcm_braf_rppa_DF, "PLX4720 (braf inib)","melanoma",text.cex=.9)
dev.off()
# second BRAF-pathway drug (sorafenib); result not referenced again in this chunk
skcm_braf_2 <- virtual_ic50(ccle, "SORAFENIB",skcm, seed=2013)
##########################
## MEK in crc
# Exclude tissue types that behave very differently from carcinomas.
carcinoma_mask <- !(getTissueType(sampleNames(ccle)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
# Virtual IC50 for the MEK inhibitor PD0325901 projected onto the merged CRC set.
coad_mek <- virtual_ic50(ccle[,carcinoma_mask], "PD0325901",crc, seed=2013)
# Feature associations: driver genes only, all cancer genes, and cancer genes + RPPA.
coad_mek_driver <- find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
coad_mek_cgenes <- find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
coad_mek_cgenes_rppa <- find_drug_features (coad_mek,crc, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
# Null distributions of the summary metric from 100 randomized runs
# (5 bootstraps each), with and without RPPA.
coad_mek_null <- t(replicate(100, find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,num.bootstraps=5,randomize=TRUE)$metric))
coad_mek_rppa_null <- t(replicate(100, find_drug_features (coad_mek,crc, with.rppa=TRUE,beta_threshold=10^-3,num.bootstraps=5,randomize=TRUE)$metric))
pdf("plots/tcga/crc_mek_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_driver, "MEK","CRC",text.cex=.9)
dev.off()
pdf("plots/tcga/crc_mek_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_cgenes, "MEK","CRC",text.cex=.9)
dev.off()
pdf("plots/tcga/crc_mek_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_cgenes_rppa, "MEK","CRC",text.cex=.9)
dev.off()
#########################
# erlotinib in LUNG
lung_mask <- getTissueType(sampleNames(ccle)) %in% c("LUNG")
# NOTE(review): drug name is spelled "ERLOTONIB"; if the CCLE drug table uses
# "ERLOTINIB" this lookup will not match — confirm against the data source.
luad_erl <- virtual_ic50(ccle[,lung_mask], "ERLOTONIB",luad, seed=2013)
luad_erl_DF <- find_drug_features (luad_erl,luad, with.rppa=FALSE,beta_threshold=10^-3)
luad_erl_rppa_DF <- find_drug_features (luad_erl,luad, with.rppa=TRUE,beta_threshold=10^-3)
pdf("plots/tcga/luad_erl.pdf",width=10,height=6,useDingbats=F)
plot_features(luad_erl_DF, "Erlotinib","LUAD",text.cex=.9)
dev.off()
pdf("plots/tcga/luad_rppa_erl.pdf",width=10,height=6,useDingbats=F)
plot_features(luad_erl_rppa_DF, "Erlotinib","LUAD",text.cex=.9)
dev.off()
###################
# breast, pik3 pathway
breast_mask <- grepl("BREAST",getTissueType(sampleNames(sanger)))
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
# 2x3 plotting grid for the five PI3K-pathway virtual IC50s below.
par(mfrow=c(2,3))
# Virtual IC50s on the BRCA cohort for two AKT inhibitors, a PI3K-beta
# inhibitor, and two pan-PI3K(/mTOR) inhibitors, trained on breast lines only.
y_hat_akt_1 <- virtual_ic50(sanger[,breast_mask],"A-443654",brca,reverseDrug=TRUE)
y_hat_akt_2 <- virtual_ic50(sanger[,breast_mask],"AKT-inhibitor-VIII",brca,reverseDrug=TRUE)
y_hat_pik3b <- virtual_ic50(sanger[,breast_mask],"AZD6482",brca,reverseDrug=TRUE)
y_hat_pi3k_1 <- virtual_ic50(sanger[,breast_mask],"GDC0941",brca,reverseDrug=TRUE)
y_hat_pi3k_2 <- virtual_ic50(sanger[,breast_mask],"NVP-BEZ235",brca,reverseDrug=TRUE)
###############
## fgfr in breast
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
# NOTE(review): this overwrites the y_hat_fgfr computed for bladder above.
y_hat_fgfr <- virtual_ic50(sanger[,carcinoma_mask],"PD-173074",brca,reverseDrug=TRUE)
# Feature associations under three gene dictionaries, then one PDF per dictionary.
brca_fgfr_vogel <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
brca_fgfr_cosmic <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
brca_fgfr_cbio <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cbio")
pdf("plots/brca_fgfr_vogel.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_vogel, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cosmic.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cosmic, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cbio.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cbio, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
####################
## MTOR in breast
# Virtual IC50 for the mTOR inhibitor temsirolimus projected onto BRCA.
y_hat_mtor <- virtual_ic50(sanger[,carcinoma_mask],"Temsirolimus",brca,reverseDrug=TRUE)
brca_mtor_dgenes <- find_drug_features (y_hat_mtor,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
brca_mtor_cgenes <- find_drug_features (y_hat_mtor,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
brca_mtor_cgenes_rppa <- find_drug_features (y_hat_mtor,brca, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
pdf("plots/tcga/brca_mtor_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_dgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_mtor_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_mtor_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes_rppa, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
################
## MTOR 3neg bc
# Same drug projected onto the triple-negative BRCA subset.
# NOTE(review): this section reuses/overwrites y_hat_mtor, brca_mtor_cgenes
# and brca_mtor_cgenes_rppa from the full-BRCA section above — run order matters.
y_hat_mtor <- virtual_ic50(sanger[,carcinoma_mask],"Temsirolimus",brca.3neg,reverseDrug=TRUE)
brca_mtor_cgenes <- find_drug_features (y_hat_mtor,brca.3neg, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE,min.count=1)
brca_mtor_cgenes_rppa <- find_drug_features (y_hat_mtor,brca.3neg, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE,min.count=1)
pdf("plots/tcga/brca3neg_mtor_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca3neg_mtor_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes_rppa, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
##################
## her2/egfr in breast
carcinoma_mask <- !(getTissueType(sampleNames(ccle)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
# Virtual IC50 for the dual HER2/EGFR inhibitor lapatinib projected onto BRCA.
y_hat_her2 <- virtual_ic50(ccle[,carcinoma_mask], "LAPATINIB",brca, seed=2013)
brca_her2_dgenes <- find_drug_features (y_hat_her2,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
brca_her2_cgenes <- find_drug_features (y_hat_her2,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
brca_her2_cgenes_rppa <- find_drug_features (y_hat_her2,brca, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
# NOTE(review): plot titles spell the drug "Lapatanib" ("Lapatinib" is the
# usual spelling) — cosmetic only, but worth confirming before publication.
pdf("plots/tcga/brca_her2_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_dgenes, "Lapatanib","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_her2_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_cgenes, "Lapatanib","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_her2_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_cgenes_rppa, "Lapatanib","BRCA",text.cex=.9)
dev.off()
###################
# PIK3B in breast
# Uses y_hat_pik3b computed in the PI3K-pathway section above.
brca_pik3b_DF <- find_drug_features (y_hat_pik3b,brca, with.rppa=FALSE,beta_threshold=10^-3)
brca_pik3b_rppa_DF <- find_drug_features (y_hat_pik3b,brca, with.rppa=TRUE,beta_threshold=10^-3)
# NOTE(review): unlike other sections, these calls (a) row-subset the result
# directly with [1:25,] rather than via a $df component, and (b) pass an
# extra positional argument length(y_hat_mtor) to plot_features — confirm
# both against plot_features' signature.
pdf("plots/tcga/brca_pik3b.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_pik3b_DF[1:25,], "AZD6482 (pik3b)","BRCA",length(y_hat_mtor),text.cex=.9)
dev.off()
pdf("plots/tcga/brca_rppa_pik3b.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_pik3b_rppa_DF[1:25,], "AZD6482 (pik3b)","BRCA",length(y_hat_mtor),text.cex=.9)
dev.off()
| /analysis/celline_2_tcga_Analysis.R | no_license | chferte/virtualIC50 | R | false | false | 11,176 | r | source("analysis/data_utils_2.R")
source("analysis/celline_2_tcga_pipeline.R")
library(synapseClient)
synapseLogin("justin.guinney@sagebase.org",'marley')
sanger <- getSanger_MetaGenomics()
ccle <- getCCLE_MetaGenomics()
###############
## build data sets
brca <- build.tcga.ds(geneExprId="syn1446183",rppaId="syn1571265",gisticId="syn1687590",cbioPrefix="brca",isRNASeq=TRUE)
luad <- build.tcga.ds(geneExprId="syn418003",rppaId="syn464306",gisticId="syn1687610",cbioPrefix="luad",isRNASeq=TRUE)
coad <- build.tcga.ds(geneExprId="syn1446195",rppaId="syn1446043",gisticId="syn1687596",cbioPrefix="coadread",isRNASeq=TRUE)
read <- build.tcga.ds(geneExprId="syn1446274",rppaId="syn1446053",gisticId="syn1687628",cbioPrefix="coadread",isRNASeq=TRUE)
skcm <- build.tcga.ds(geneExprId="~/projects/h3/data~/firehose/gdac.broadinstitute.org_SKCM.Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3.2013032600.0.0/SKCM.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt",
gisticId="syn1687618",
rppaId="~/projects/h3/data~/firehose/gdac.broadinstitute.org_SKCM.RPPA_AnnotateWithGene.Level_3.2013032600.0.0/SKCM.rppa.txt",
cbioPrefix="skcm",isRNASeq=TRUE)
blca <- build.tcga.ds(geneExprId="syn417761",rppaId="syn1681031",gisticId="syn1687592",cbioPrefix="syn1571577",isRNASeq=TRUE)
lusc <- build.tcga.ds(geneExprId="syn1446244",rppaId="syn1446049",gisticId="syn1687612",cbioPrefix="lusc",isRNASeq=TRUE)
# merge rectal and colon
idxs1 <- groupMatch(rownames(coad$geneExpr), rownames(read$geneExpr))
idxs2 <- groupMatch(rownames(coad$rppa), rownames(read$rppa))
idxs3 <- groupMatch(rownames(coad$gistic), rownames(read$gistic))
crc <- list(geneExpr=cbind(coad$geneExpr[idxs1[[1]],], read$geneExpr[idxs1[[2]],]),
rppa=cbind(coad$rppa[idxs2[[1]],], read$rppa[idxs2[[2]],]),
gistic=cbind(coad$gistic[idxs3[[1]],], read$gistic[idxs3[[2]],]),
mut=coad$mut)
# save datasets for fast retreival
save(brca, crc, blca, luad, skcm, lusc, file="~/data/TCGA_ds.rda")
# triple neg brca
brca.3neg <- brca
brca.3neg$geneExpr <- brca.3neg$geneExpr[,as.matrix(brca$geneExpr)["ESR1",] < 8 & as.matrix(brca$geneExpr)["PGR",] < 8]
brca.3neg$gistic <- brca.3neg$gistic[, as.matrix(brca$gistic)["ERBB2",] != 2]
###############
## fgfr in bladder
# FIX(review): this section computed blca_fgfr_* objects but then passed
# `brca` to the cbio call and plotted the breast-section brca_fgfr_* objects
# into brca-named PDFs — an apparent copy/paste slip from the breast FGFR
# section. Corrected to use the bladder (blca) data and objects throughout.
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
# virtual IC50 for the FGFR inhibitor PD-173074 projected onto the BLCA cohort
y_hat_fgfr <- virtual_ic50(sanger[,carcinoma_mask],"PD-173074",blca,reverseDrug=TRUE)
# feature associations under three gene dictionaries
blca_fgfr_vogel <- find_drug_features (y_hat_fgfr,blca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
blca_fgfr_cosmic <- find_drug_features (y_hat_fgfr,blca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
blca_fgfr_cbio <- find_drug_features (y_hat_fgfr,blca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cbio")
pdf("plots/blca_fgfr_vogel.pdf",width=10,height=6,useDingbats=FALSE)
plot_features(blca_fgfr_vogel, "PD-173074 (FGFR)","BLCA",text.cex=.9)
dev.off()
pdf("plots/blca_fgfr_cosmic.pdf",width=10,height=6,useDingbats=FALSE)
plot_features(blca_fgfr_cosmic, "PD-173074 (FGFR)","BLCA",text.cex=.9)
dev.off()
pdf("plots/blca_fgfr_cbio.pdf",width=10,height=6,useDingbats=FALSE)
plot_features(blca_fgfr_cbio, "PD-173074 (FGFR)","BLCA",text.cex=.9)
dev.off()
##########################
# BRAF inhib in melanoma
#melanoma_mask <- getTissueType(sampleNames(ccle)) %in% c("SKIN")
skcm_braf <- virtual_ic50(ccle, "PLX4720",skcm, seed=2013)
skcm_braf_vogel <- find_drug_features (skcm_braf,skcm, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
skcm_braf_cosmic <- find_drug_features (skcm_braf,skcm, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
skcm_braf_vogel$df[1:10,]
pdf("plots/tcga/skcm_plx4720.pdf",width=10,height=6,useDingbats=F)
plot_features(skcm_braf_DF, "PLX4720 (braf inib)","melanoma",text.cex=.9)
dev.off()
pdf("plots/tcga/skcm_rppa_plx4720.pdf",width=10,height=6,useDingbats=F)
plot_features(skcm_braf_rppa_DF, "PLX4720 (braf inib)","melanoma",text.cex=.9)
dev.off()
skcm_braf_2 <- virtual_ic50(ccle, "SORAFENIB",skcm, seed=2013)
##########################
## MEK in crc
carcinoma_mask <- !(getTissueType(sampleNames(ccle)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
coad_mek <- virtual_ic50(ccle[,carcinoma_mask], "PD0325901",crc, seed=2013)
coad_mek_driver <- find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
coad_mek_cgenes <- find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
coad_mek_cgenes_rppa <- find_drug_features (coad_mek,crc, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
coad_mek_null <- t(replicate(100, find_drug_features (coad_mek,crc, with.rppa=FALSE,beta_threshold=10^-3,num.bootstraps=5,randomize=TRUE)$metric))
coad_mek_rppa_null <- t(replicate(100, find_drug_features (coad_mek,crc, with.rppa=TRUE,beta_threshold=10^-3,num.bootstraps=5,randomize=TRUE)$metric))
pdf("plots/tcga/crc_mek_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_driver, "MEK","CRC",text.cex=.9)
dev.off()
pdf("plots/tcga/crc_mek_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_cgenes, "MEK","CRC",text.cex=.9)
dev.off()
pdf("plots/tcga/crc_mek_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(coad_mek_cgenes_rppa, "MEK","CRC",text.cex=.9)
dev.off()
#########################
# erlotinib in LUNG
lung_mask <- getTissueType(sampleNames(ccle)) %in% c("LUNG")
luad_erl <- virtual_ic50(ccle[,lung_mask], "ERLOTONIB",luad, seed=2013)
luad_erl_DF <- find_drug_features (luad_erl,luad, with.rppa=FALSE,beta_threshold=10^-3)
luad_erl_rppa_DF <- find_drug_features (luad_erl,luad, with.rppa=TRUE,beta_threshold=10^-3)
pdf("plots/tcga/luad_erl.pdf",width=10,height=6,useDingbats=F)
plot_features(luad_erl_DF, "Erlotinib","LUAD",text.cex=.9)
dev.off()
pdf("plots/tcga/luad_rppa_erl.pdf",width=10,height=6,useDingbats=F)
plot_features(luad_erl_rppa_DF, "Erlotinib","LUAD",text.cex=.9)
dev.off()
###################
# breast, pik3 pathway
breast_mask <- grepl("BREAST",getTissueType(sampleNames(sanger)))
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
par(mfrow=c(2,3))
y_hat_akt_1 <- virtual_ic50(sanger[,breast_mask],"A-443654",brca,reverseDrug=TRUE)
y_hat_akt_2 <- virtual_ic50(sanger[,breast_mask],"AKT-inhibitor-VIII",brca,reverseDrug=TRUE)
y_hat_pik3b <- virtual_ic50(sanger[,breast_mask],"AZD6482",brca,reverseDrug=TRUE)
y_hat_pi3k_1 <- virtual_ic50(sanger[,breast_mask],"GDC0941",brca,reverseDrug=TRUE)
y_hat_pi3k_2 <- virtual_ic50(sanger[,breast_mask],"NVP-BEZ235",brca,reverseDrug=TRUE)
###############
## fgfr in breast
carcinoma_mask <- !(getTissueType(sampleNames(sanger)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
y_hat_fgfr <- virtual_ic50(sanger[,carcinoma_mask],"PD-173074",brca,reverseDrug=TRUE)
brca_fgfr_vogel <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="vogelstein")
brca_fgfr_cosmic <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cosmic")
brca_fgfr_cbio <- find_drug_features (y_hat_fgfr,brca, with.rppa=FALSE,beta_threshold=10^-3,gene.dict="cbio")
pdf("plots/brca_fgfr_vogel.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_vogel, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cosmic.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cosmic, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
pdf("plots/brca_fgfr_cbio.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_fgfr_cbio, "PD-173074 (FGFR)","BRCA",text.cex=.9)
dev.off()
####################
## MTOR in breast
y_hat_mtor <- virtual_ic50(sanger[,carcinoma_mask],"Temsirolimus",brca,reverseDrug=TRUE)
brca_mtor_dgenes <- find_drug_features (y_hat_mtor,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
brca_mtor_cgenes <- find_drug_features (y_hat_mtor,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
brca_mtor_cgenes_rppa <- find_drug_features (y_hat_mtor,brca, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
pdf("plots/tcga/brca_mtor_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_dgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_mtor_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_mtor_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes_rppa, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
################
## MTOR 3neg bc
y_hat_mtor <- virtual_ic50(sanger[,carcinoma_mask],"Temsirolimus",brca.3neg,reverseDrug=TRUE)
brca_mtor_cgenes <- find_drug_features (y_hat_mtor,brca.3neg, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE,min.count=1)
brca_mtor_cgenes_rppa <- find_drug_features (y_hat_mtor,brca.3neg, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE,min.count=1)
pdf("plots/tcga/brca3neg_mtor_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca3neg_mtor_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_mtor_cgenes_rppa, "Temsirolimus","BRCA",text.cex=.9)
dev.off()
##################
## her2/egfr in breast
carcinoma_mask <- !(getTissueType(sampleNames(ccle)) %in% c("CENTRAL_NERVOUS_SYSTEM","SKIN","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE"))
y_hat_her2 <- virtual_ic50(ccle[,carcinoma_mask], "LAPATINIB",brca, seed=2013)
brca_her2_dgenes <- find_drug_features (y_hat_her2,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=TRUE)
brca_her2_cgenes <- find_drug_features (y_hat_her2,brca, with.rppa=FALSE,beta_threshold=10^-3,driver.genes.only=FALSE)
brca_her2_cgenes_rppa <- find_drug_features (y_hat_her2,brca, with.rppa=TRUE,beta_threshold=10^-3,driver.genes.only=FALSE)
pdf("plots/tcga/brca_her2_dgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_dgenes, "Lapatanib","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_her2_cgenes.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_cgenes, "Lapatanib","BRCA",text.cex=.9)
dev.off()
pdf("plots/tcga/brca_her2_cgenes_rppa.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_her2_cgenes_rppa, "Lapatanib","BRCA",text.cex=.9)
dev.off()
###################
# PIK3B in breast
brca_pik3b_DF <- find_drug_features (y_hat_pik3b,brca, with.rppa=FALSE,beta_threshold=10^-3)
brca_pik3b_rppa_DF <- find_drug_features (y_hat_pik3b,brca, with.rppa=TRUE,beta_threshold=10^-3)
pdf("plots/tcga/brca_pik3b.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_pik3b_DF[1:25,], "AZD6482 (pik3b)","BRCA",length(y_hat_mtor),text.cex=.9)
dev.off()
pdf("plots/tcga/brca_rppa_pik3b.pdf",width=10,height=6,useDingbats=F)
plot_features(brca_pik3b_rppa_DF[1:25,], "AZD6482 (pik3b)","BRCA",length(y_hat_mtor),text.cex=.9)
dev.off()
|
#' @title aesColor
#' @description ColorInput UI production for discrete variables.
#' @param type character of label and inputId of element
#' @return UI object
#' @keywords internal
aesColour=function(type) {
list(type=colourpicker::colourInput,
args=list(inputId = paste0('pop',toupper(type)),
label = type,
value = NA,
allowTransparent = T)
)
} | /R/aesColour.R | no_license | serenity-r/ggedit | R | false | false | 410 | r | #' @title aesColor
#' @description ColourInput UI production for discrete variables.
#'
#' Builds a specification list (constructor function plus its arguments)
#' for a colourpicker::colourInput widget; the input id is the "pop" prefix
#' followed by the upper-cased aesthetic name (e.g. "popCOLOUR").
#' @param type character of label and inputId of element
#' @return UI object (a list with `type` and `args` consumed by the UI builder)
#' @keywords internal
aesColour=function(type) {
list(type=colourpicker::colourInput,
args=list(inputId = paste0('pop',toupper(type)),
label = type,
value = NA,
# FIX: use TRUE rather than T (T is an ordinary variable and can be reassigned)
allowTransparent = TRUE)
)
}
# Exploration notes on base R structures: data.frame, names, lists, matrices,
# and date/time classes.  Comments added for review; code unchanged.
# (style note: TRUE/FALSE are safer than T/F, which are reassignable)
x <- data.frame(foo = 1:4, bar = c(T,T,F,F))
x
# a plain atomic vector and its (initially NULL) names attribute
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
# list elements may carry non-syntactic names (here, one with a space/accent)
x <- list(a = 1, "com espaço" = 2, c = 3)
x$"com espaço"
# matrices take row/column names via dimnames
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m
# Date is stored as days since 1970-01-01, so this unclasses to 0
x <- as.Date("1970-01-01")
unclass(x)
unclass(as.Date("2017-03-18"))
unclass(as.Date("1900-03-18"))
# POSIXlt exposes broken-down time components as a named list
x <- Sys.time()
x
p <- as.POSIXlt(x)
p
names(unclass(p))
p$sec
p$hour
p$gmtoff
# NOTE(review): `x < Sys.time()` is a comparison, not an assignment — if
# re-capturing the time was intended, `x <- Sys.time()` was probably meant.
x < Sys.time()
unclass(x)
p <- as.POSIXlt(x)
p$sec
# strptime parses character timestamps into POSIXlt via format codes
datestring <- c("January 10, 2012 10:40", "December 09, 2011 09:10")
x <- strptime(datestring, "%B %d, %Y %H:%M")
x
class(x)
x <- as.Date("2012-01-01")
y <- strptime("9 Jan 2011 11:34:21", "%d %b %Y %H:%M:%S")
| /JotJunior/aula2-dataframes.R | permissive | carlosmachel/ApoemaTraining | R | false | false | 707 | r | x <- data.frame(foo = 1:4, bar = c(T,T,F,F))
x
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
x <- list(a = 1, "com espaço" = 2, c = 3)
x$"com espaço"
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m
x <- as.Date("1970-01-01")
unclass(x)
unclass(as.Date("2017-03-18"))
unclass(as.Date("1900-03-18"))
x <- Sys.time()
x
p <- as.POSIXlt(x)
p
names(unclass(p))
p$sec
p$hour
p$gmtoff
x < Sys.time()
unclass(x)
p <- as.POSIXlt(x)
p$sec
datestring <- c("January 10, 2012 10:40", "December 09, 2011 09:10")
x <- strptime(datestring, "%B %d, %Y %H:%M")
x
class(x)
x <- as.Date("2012-01-01")
y <- strptime("9 Jan 2011 11:34:21", "%d %b %Y %H:%M:%S")
|
library(iglu)

# Shared fixture: ten glucose readings used by the LBGI check below.
data_test <- c(101, 121, 141, 151, 161, 171, 191, 201, 231, 251)

test_that("lbgi returns true value for vector", {
  # Reference value computed once from this fixture; allow small FP slack.
  expect_equal(lbgi(data_test), 0.04042143, tolerance = 1e-04)
})
| /tests/testthat/test-lbgi.R | no_license | trippsapientae/iglu | R | false | false | 187 | r | library(iglu)
data_test = c(101,121,141,151,161,171,191,201,231,251)
test_that("lbgi returns true value for vector", {
expect_equal((lbgi(data_test)),0.04042143, tolerance = 1e-04)
})
|
####work with data on google drive
#FBolduc, 8-11-2016
#https://www.r-bloggers.com/download-all-documents-from-google-drive-with-r/
#see also http://blog.revolutionanalytics.com/2015/09/using-the-googlesheets-package-to-work-with-google-sheets.html
# https://github.com/jennybc/googlesheets/blob/master/vignettes/basic-usage.R
###with RGoogleDocs
#see http://gastonsanchez.com/how-to/2015/04/06/RGoogleDocs-access/
# Abandoned RGoogleDocs attempt kept for reference (commented out below).
# NOTE(review): never keep real account credentials in source, even commented
# out — rotate the password and use an env var / token file instead.
# library(devtools)
# install_github("RGoogleDocs", "duncantl")
# library(RGoogleDocs)
# #unresolvable "Error: Not Found" ? see: https://github.com/duncantl/RGoogleDocs/issues/6
# #library(RCurl)
# email <- "drpacoqc@gmail.com"
# psw <- "IDON'tThinkSo"
# auth = getGoogleAuth(email, psw,service="wise") error happens here
#with googlesheets
# NOTE(review): the googlesheets package has since been superseded by
# googlesheets4 — confirm before reusing this script.
library(googlesheets)
library(dplyr)
# gs_ls() lists the account's sheets; wrapping in () also prints the result.
(my_sheets <- gs_ls())# (expect a prompt to authenticate with Google interactively HERE)
my_sheets %>% glimpse()
#specify folder?:
| /GoogleDriveData.R | no_license | Drpaco/varia | R | false | false | 952 | r | ####work with data on google drive
#FBolduc, 8-11-2016
#https://www.r-bloggers.com/download-all-documents-from-google-drive-with-r/
#see also http://blog.revolutionanalytics.com/2015/09/using-the-googlesheets-package-to-work-with-google-sheets.html
# https://github.com/jennybc/googlesheets/blob/master/vignettes/basic-usage.R
###with RGoogleDocs
#see http://gastonsanchez.com/how-to/2015/04/06/RGoogleDocs-access/
# library(devtools)
# install_github("RGoogleDocs", "duncantl")
# library(RGoogleDocs)
# #unresolvable "Error: Not Found" ? see: https://github.com/duncantl/RGoogleDocs/issues/6
# #library(RCurl)
# email <- "drpacoqc@gmail.com"
# psw <- "IDON'tThinkSo"
# auth = getGoogleAuth(email, psw,service="wise") error happens here
#with googlesheets
library(googlesheets)
library(dplyr)
(my_sheets <- gs_ls())# (expect a prompt to authenticate with Google interactively HERE)
my_sheets %>% glimpse()
#specify folder?:
|
#' Apply tidyverse_style, but use equal sign = for assignment
#'
#' Builds a styler transformer set identical to [styler::tidyverse_style()],
#' except that every `<-` assignment token is rewritten to `=`.
#' @rdname style
#' @export
eq_assign_style = function(...) {
  style = styler::tidyverse_style(...)
  # Override the assignment-normalization rule: instead of forcing `<-`,
  # retag each left-arrow assignment token as an equals assignment.
  style$token$force_assignment_op = function(pd) {
    arrows = pd$token == "LEFT_ASSIGN" & pd$text == "<-"
    pd$token[arrows] = "EQ_ASSIGN"
    pd$text[arrows] = "="
    pd
  }
  style
}
#' @inheritParams styler::style_pkg
#' @inheritParams styler::style_dir
#' @rdname style
#' @export
styler_style = function(path = ".", ..., style = eq_assign_style, filetype = "R",
                        recursive = TRUE, exclude_files = NULL) {
  # A plain file goes straight to style_file.
  if (!fs::is_dir(path)) {
    return(styler::style_file(path, ..., style = style))
  }
  # A directory is treated as a package when the three canonical package
  # entries (DESCRIPTION, NAMESPACE, R/) all exist at its top level.
  markers = fs::path(path, c("DESCRIPTION", "NAMESPACE", "R"))
  if (all(fs::file_exists(markers))) {
    styler::style_pkg(
      path, ...,
      style = style, filetype = filetype,
      exclude_files = c(exclude_files, "R/RcppExports.R")  # generated file
    )
  } else {
    styler::style_dir(
      path, ...,
      style = style, filetype = filetype, recursive = recursive,
      exclude_files = exclude_files
    )
  }
}
| /R/style.R | permissive | heavywatal/rwtl | R | false | false | 1,140 | r | #' Apply tidyverse_style, but use equal sign = for assignment
#' Apply tidyverse_style, but use the equals sign for assignment.
#'
#' Wraps styler::tidyverse_style() and swaps its assignment rule so that
#' `<-` tokens are rewritten to `=` instead of the reverse.
#' @rdname style
#' @export
eq_assign_style = function(...) {
# start from the stock tidyverse transformer set
x = styler::tidyverse_style(...)
# replace the rule that normally forces `<-`: retag each left-arrow
# assignment token as an equals assignment
x$token$force_assignment_op = function(pd) {
to_replace = (pd$token == "LEFT_ASSIGN" & pd$text == "<-")
pd$token[to_replace] = "EQ_ASSIGN"
pd$text[to_replace] = "="
pd
}
# return the modified transformer set
x
}
#' @inheritParams styler::style_pkg
#' @inheritParams styler::style_dir
#' @rdname style
#' @export
# Dispatches to style_file / style_pkg / style_dir depending on whether
# `path` is a file, a package root, or a plain directory.
styler_style = function(path = ".", ..., style = eq_assign_style, filetype = "R",
recursive = TRUE, exclude_files = NULL) {
if (fs::is_dir(path)) {
# a directory is treated as a package when DESCRIPTION, NAMESPACE and R/
# all exist at its top level
.pkg_files = c("DESCRIPTION", "NAMESPACE", "R")
if (all(fs::file_exists(fs::path(path, .pkg_files)))) {
styler::style_pkg(
path, ...,
# RcppExports.R is generated code and must not be restyled
style = style, filetype = filetype,
exclude_files = c(exclude_files, "R/RcppExports.R")
)
} else {
styler::style_dir(
path, ...,
style = style, filetype = filetype, recursive = recursive,
exclude_files = exclude_files
)
}
} else {
# single file
styler::style_file(path, ..., style = style)
}
}
|
# Package setup for the CAT4 export cleanup below.
# NOTE(review): calling install.packages() on every run is unnecessary and
# slow — install once interactively and keep only the library() calls here.
#install.packages("dplyr") # Install dplyr package
install.packages("plyr") # Install plyr package
install.packages("readr") # Install readr package
#library("dplyr") # Load dplyr package
library("plyr") # Load plyr package
library("readr")
install.packages('tidyr')
install.packages('dplyr')
#install.packages('janitor')
# dplyr is loaded after plyr on purpose so dplyr's verbs mask plyr's.
library(dplyr)
library(tidyr)
install.packages("writexl")
library(writexl)
install.packages("openxlsx")
library(openxlsx)
# NOTE(review): whitespace trimming is done with base trimws() below; there
# is no CRAN package named "trim" for this — this install likely fails and
# can be removed.
install.packages("trim")
### Don't forget to save each csv file first
###################################################################################
# Sections 1-4 below were four near-verbatim copies of the same wrangling
# pipeline; only the input file, the kept columns, an optional rename map
# and the output file differed.  They are factored into one documented
# helper plus four calls.  Behaviour (the four CSVs written) is unchanged.

# Row positions dropped after sorting by Measure (header/subtotal rows), and
# the hard-coded group code for each of the 19 measures that remain.
# NOTE(review): both vectors assume every export yields the same set of
# sorted measure rows — confirm against a fresh export before reuse.
drop_rows <- c(1:7, 13:14, 20, 25, 31:43)
group_codes <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)

# Clean one raw CAT4 export and write the tidied comparison CSV.
#
# in_path     path of the raw export (its first data row holds the real header)
# out_path    path of the CSV to write
# keep_cols   column positions to keep after the metadata columns are added
#             (1 = Measure, 2 = Audit Month, 3 = Group, 4+ = data columns)
# renames     optional named character vector: names are the new column
#             names, values the existing ones (applied after keep_cols)
# audit_month label written into the 'Audit Month' column
#
# Returns the tidied data frame invisibly (writing the CSV is the main effect).
clean_cat4_export <- function(in_path, out_path, keep_cols,
                              renames = NULL, audit_month = "Apr 2019") {
  df <- read.csv(in_path, sep = ",", header = TRUE)

  # Promote the first data row to the header, then drop duplicated columns
  # and every percentage column (only the raw counts are kept).
  names(df) <- df[1, ]
  df <- df[-1, ]
  df <- df[, !duplicated(colnames(df))]
  df_sub <- select(df, -contains("%"))

  # Transpose so each measure becomes a row; the first transposed row
  # carries the column names.
  df_t <- as.data.frame(t(as.matrix(df_sub)))
  names(df_t) <- df_t[1, ]
  df_t <- df_t[-1, ]

  out <- tibble::rownames_to_column(df_t, "Measure")
  out$Measure <- trimws(out$Measure, which = c("both"))
  out <- out[order(out$Measure), ]

  # Insert the audit-month and group metadata columns directly after Measure.
  out <- cbind(`Audit Month` = audit_month, out)
  out <- cbind(Group = "", out)
  out <- relocate(out, `Audit Month`, .after = Measure)
  out <- relocate(out, Group, .after = `Audit Month`)

  # Keep the 19 measures of interest and attach their group codes.
  out <- out[-drop_rows, ]
  out$Group <- group_codes

  out <- select(out, all_of(keep_cols))
  if (!is.null(renames)) {
    names(out)[match(renames, names(out))] <- names(renames)
  }

  write.csv(out, out_path, row.names = FALSE)
  invisible(out)
}

in_dir <- "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export"
out_dir <- file.path(in_dir, "HCH_Compare")

#1 No filter
clean_cat4_export(
  file.path(in_dir, "1_DIAB_ALL_HCH_04_19.csv"),
  file.path(out_dir, "DIAB_0419.csv"),
  keep_cols = c(1, 2, 3, 159, 180),
  renames = c(
    "Total CD" = "Total patients with one or more chronic diseases",
    "Indigenous" = "Total Indigenous patients"
  )
)

#2 Active patients
clean_cat4_export(
  file.path(in_dir, "2_MBS_ALL_HCH_Less12mths_04_19.csv"),
  file.path(out_dir, "MBS_0419.csv"),
  keep_cols = c(1, 2, 3, 5, 6, 9:14)
)

#3 Active Patients, Conditions: (Diabetes - YES), Last Results: <= 12 mths
clean_cat4_export(
  file.path(in_dir, "3_MBS_ALL_HCH_MH_Less12mths_04_19.csv"),
  file.path(out_dir, "MBS_MH_0419.csv"),
  keep_cols = c(1, 2, 3, 4, 19, 20)
)

#4 Active Patients, Indigenous
clean_cat4_export(
  file.path(in_dir, "4_PDQ_ALL_HCH_04_19.csv"),
  file.path(out_dir, "PDQ_0419.csv"),
  keep_cols = c(1, 2, 3, 4, 46, 47)
)
###########################################################################################
#5 DIAB extract, audit month Apr 2020
#  (5_DIAB_ALL_HCH_04_20.csv -> DIAB_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/5_DIAB_ALL_HCH_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Chronic-disease and Indigenous totals (positions fixed by the layout),
# shortened to the headings the later merge step expects.
df4 <- df3 %>%
  select(1, 2, 3, 159, 180) %>%
  rename(
    `Total CD` = `Total patients with one or more chronic diseases`,
    `Indigenous` = `Total Indigenous patients`
  )
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/DIAB_0420.csv", row.names = FALSE)
##########################################################################################
#6 MBS extract, audit month Apr 2020
#  (6_MBS_ALL_HCH_Less12mths_04_20.csv -> MBS_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/6_MBS_ALL_HCH_Less12mths_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# MBS columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 5, 6, 9:14)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_0420.csv", row.names = FALSE)
########################################################################################
#7 MBS mental-health extract, audit month Apr 2020
#  (7_MBS_ALL_HCH_MH_Less12mths_04_20.csv -> MBS_MH_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/7_MBS_ALL_HCH_MH_Less12mths_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Mental-health columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 19, 20)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_MH_0420.csv", row.names = FALSE)
#################################################################################
#8 PDQ extract, audit month Apr 2020
#  (8_PDQ_ALL_HCH_04_20.csv -> PDQ_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/8_PDQ_ALL_HCH_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# PDQ columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 46, 47)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/PDQ_0420.csv", row.names = FALSE)
################################################################################
#9 DIAB extract, audit month Apr 2021
#  (9_DIAB_ALL_HCH_04_21.csv -> DIAB_0421.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/9_DIAB_ALL_HCH_04_21.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2021", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Chronic-disease and Indigenous totals (positions fixed by the layout).
# NOTE(review): unlike the other DIAB sections, the long column headings are
# not renamed to "Total CD"/"Indigenous" here -- confirm this is intentional.
df4 <- df3 %>% select(1, 2, 3, 159, 180)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/DIAB_0421.csv", row.names = FALSE)
#################################################################################
#10 MBS extract, audit month Apr 2021
#   (10_MBS_ALL_HCH_Less12mths_04_21.csv -> MBS_0421.csv)
# Import the data (first data row carries the real column headings).
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/10_MBS_ALL_HCH_Less12mths_04_21.csv', sep= ",", header = TRUE)
# Promote the embedded heading row, drop it, then remove duplicated columns
# and every percentage column.
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so the original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names into a trimmed "Measure" column and sort on it.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
# BUG FIX: was `df3 <- df3 <- cbind(...)` -- an accidental double assignment.
df3 <- cbind('Audit Month' = "Apr 2021", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# MBS columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 5, 6, 9:14)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_0421.csv", row.names = FALSE)
#################################################################################
#11 MBS mental-health extract, audit month Apr 2021
#   (11_MBS_ALL_HCH_MH_Less12mths_04_21.csv -> MBS_MH_0421.csv)
# Import the data (first data row carries the real column headings).
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/11_MBS_ALL_HCH_MH_Less12mths_04_21.csv', sep= ",", header=TRUE)
# Promote the embedded heading row, drop it, then remove duplicated columns
# and every percentage column.
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so the original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names into a trimmed "Measure" column and sort on it.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
# BUG FIX: was `df3 <- df3 <- cbind(...)` -- an accidental double assignment.
# Also removed a stale, commented-out duplicate of the row-filter/group-code
# steps that used to sit between these statements.
df3 <- cbind('Audit Month' = "Apr 2021", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# Mental-health columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 19, 20)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_MH_0421.csv", row.names = FALSE)
###################################################################################
#12 PDQ extract, audit month Apr 2021
#   (12_PDQ_ALL_HCH_04_21.csv -> PDQ_0421.csv)
# Import the data (first data row carries the real column headings).
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/12_PDQ_ALL_HCH_04_21.csv', sep= ",", header=TRUE)
# Promote the embedded heading row, drop it, then remove duplicated columns
# and every percentage column.
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so the original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names into a trimmed "Measure" column and sort on it.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
# BUG FIX: was `df3 <- df3 <- cbind(...)` -- an accidental double assignment.
df3 <- cbind('Audit Month' = "Apr 2021", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# PDQ columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 46, 47)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/PDQ_0421.csv", row.names = FALSE)
##############################################################################
# Combine the twelve per-audit extracts (DIAB / MBS / MBS_MH / PDQ for the
# Apr 2019, Apr 2020 and Apr 2021 audit months) into one wide table, derive
# the percentage indicators, and write hchdata_all4.csv.
# NOTE(review): the files read here live under HCH_Compare/HCH_DATA and use
# lower-case headings (measure, auditmonth, hchgroup, ...), which differ from
# the headings written by the export steps above -- presumably the exports
# were re-headed by hand before landing in HCH_DATA; confirm before rerunning
# this script end to end.
hch_dir <- "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/"
df1  <- read.csv(paste0(hch_dir, "DIAB_0419.csv"),   sep = ",", header = TRUE)
df2  <- read.csv(paste0(hch_dir, "DIAB_0420.csv"),   sep = ",", header = TRUE)
df3  <- read.csv(paste0(hch_dir, "DIAB_0421.csv"),   sep = ",", header = TRUE)
df4  <- read.csv(paste0(hch_dir, "MBS_0419.csv"),    sep = ",", header = TRUE)
df5  <- read.csv(paste0(hch_dir, "MBS_0420.csv"),    sep = ",", header = TRUE)
df6  <- read.csv(paste0(hch_dir, "MBS_0421.csv"),    sep = ",", header = TRUE)
df7  <- read.csv(paste0(hch_dir, "MBS_MH_0419.csv"), sep = ",", header = TRUE)
df8  <- read.csv(paste0(hch_dir, "MBS_MH_0420.csv"), sep = ",", header = TRUE)
df9  <- read.csv(paste0(hch_dir, "MBS_MH_0421.csv"), sep = ",", header = TRUE)
df10 <- read.csv(paste0(hch_dir, "PDQ_0419.csv"),    sep = ",", header = TRUE)
df11 <- read.csv(paste0(hch_dir, "PDQ_0420.csv"),    sep = ",", header = TRUE)
df12 <- read.csv(paste0(hch_dir, "PDQ_0421.csv"),    sep = ",", header = TRUE)
# Merge each extraction separately into the 4 data categories (full outer
# joins so no audit month is dropped).
diab_keys <- c("measure", "auditmonth", "hchgroup", "totalcd", "indigenous")
df_1 <- merge(df1, df2, by = diab_keys, all = TRUE)
df_2 <- merge(df_1, df3, by = diab_keys, all = TRUE)
mbs_keys <- c("measure", "auditmonth", "hchgroup", "ha75.", "ha45.49",
              "indigenous.ha.55.", "indigenous.ha.15.54", "indigenous.ha..15",
              "cdm.gpmp", "cdm.tca", "cdm.review")
df_3 <- merge(df4, df5, by = mbs_keys, all = TRUE)
df_4 <- merge(df_3, df6, by = mbs_keys, all = TRUE)
mh_keys <- c("measure", "auditmonth", "hchgroup", "mhnumber", "gp_mhtp", "gp_mhtp.review")
df_5 <- merge(df7, df8, by = mh_keys, all = TRUE)
df_6 <- merge(df_5, df9, by = mh_keys, all = TRUE)
pdq_keys <- c("measure", "auditmonth", "hchgroup", "total_population", "X75.", "X45_49")
df_7 <- merge(df10, df11, by = pdq_keys, all = TRUE)
df_8 <- merge(df_7, df12, by = pdq_keys, all = TRUE)
# Merge the four category tables on practice measure / audit month / group.
df_9  <- merge(df_2, df_4, by = c("measure", "auditmonth", "hchgroup"), all = TRUE)
df_10 <- merge(df_9, df_6, by = c("measure", "auditmonth", "hchgroup"), all = TRUE)
df_11 <- merge(df_10, df_8, by = c("measure", "auditmonth", "hchgroup"), all = TRUE)
library(dplyr)
# Fixed column order for the combined table (positions set by the merges above).
df_12 <- df_11[, c(1, 2, 3, 17, 4, 11, 12, 13, 14, 15, 16, 5, 8, 9, 10, 19, 7, 18, 6)]
# Sum of the three indigenous.ha.* age-band columns (13:15 after the reorder).
df_12$ha715_total <- rowSums(df_12[, c(13, 14, 15)], na.rm = TRUE)
# Coerce the measure columns to numeric and zero-fill missing values.
# NOTE(review): zero-filled denominators make the divisions below produce
# Inf/NaN -- confirm downstream use tolerates this.
df_12[4:19] <- lapply(df_12[4:19], as.numeric)
df_12[is.na(df_12)] <- 0
library(scales)  # attached here but not used in this section; kept as-is
# Derived percentage indicators.
df_12$perc_cd <- (df_12$totalcd / df_12$total_population) * 100
df_12$perc_gpmp <- (df_12$cdm.gpmp / df_12$totalcd) * 100
df_12$perc_tca <- (df_12$cdm.tca / df_12$totalcd) * 100
df_12$perc_review <- (df_12$cdm.review / df_12$totalcd) * 100
df_12$perc_mh <- (df_12$mhnumber / df_12$total_population) * 100
df_12$perc_mhtp <- (df_12$gp_mhtp / df_12$mhnumber) * 100
# BUG FIX: this line previously reassigned perc_review, silently overwriting
# the CDM review percentage computed above; the mental-health treatment-plan
# review rate now gets its own column.
df_12$perc_mhtp_review <- (df_12$gp_mhtp.review / df_12$mhnumber) * 100
df_12$perc_indig <- (df_12$indigenous / df_12$total_population) * 100
df_12$perc_715 <- (df_12$ha715_total / df_12$indigenous) * 100
df_12$perc_45_49 <- (df_12$X45_49 / df_12$total_population) * 100
df_12$perc_ha45_49 <- (df_12$ha45.49 / df_12$X45_49) * 100
df_12$perc_75 <- (df_12$X75. / df_12$total_population) * 100
df_12$perc_ha75 <- (df_12$ha75. / df_12$X75.) * 100
write.csv(df_12, paste0(hch_dir, "hchdata_all4.csv"), row.names = FALSE)
str(df_12)
| /HCH_Analysis_JOIN ALL.R | no_license | Daskindata/Data-Wrangling | R | false | false | 28,232 | r | #install.packages("dplyr") # Install dplyr package
# --- One-off environment setup ------------------------------------------------
# NOTE(review): install.packages() runs on every execution of this script;
# consider installing these packages once, outside the script.
install.packages("plyr") # Install plyr package
install.packages("readr") # Install readr package
#library("dplyr") # Load dplyr package
library("plyr") # Load plyr package
library("readr")
install.packages('tidyr')
install.packages('dplyr')
#install.packages('janitor')
# Attach the packages the wrangling below actually uses (dplyr / tidyr).
library(dplyr)
library(tidyr)
install.packages("writexl")
library(writexl)
install.packages("openxlsx")
library(openxlsx)
# NOTE(review): confirm the intended package here -- string trimming below
# uses base trimws(), so the "trim" install may be unnecessary.
install.packages("trim")
### Don't forget to save each csv file first
###################################################################################
#1 DIAB extract (no audit filter), audit month Apr 2019
#  (1_DIAB_ALL_HCH_04_19.csv -> DIAB_0419.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/1_DIAB_ALL_HCH_04_19.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2019", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Chronic-disease and Indigenous totals (positions fixed by the layout),
# shortened to the headings the later merge step expects.
df4 <- df3 %>%
  select(1, 2, 3, 159, 180) %>%
  rename(
    `Total CD` = `Total patients with one or more chronic diseases`,
    `Indigenous` = `Total Indigenous patients`
  )
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/DIAB_0419.csv", row.names = FALSE)
##############################################################################################
#2 MBS extract, audit month Apr 2019
#  (2_MBS_ALL_HCH_Less12mths_04_19.csv -> MBS_0419.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/2_MBS_ALL_HCH_Less12mths_04_19.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2019", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# MBS columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 5, 6, 9:14)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_0419.csv", row.names = FALSE)
###############################################################################################
#3 MBS mental-health extract, audit month Apr 2019
#  (3_MBS_ALL_HCH_MH_Less12mths_04_19.csv -> MBS_MH_0419.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/3_MBS_ALL_HCH_MH_Less12mths_04_19.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2019", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Mental-health columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 19, 20)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_MH_0419.csv", row.names = FALSE)
##############################################################################################
#4 PDQ extract, audit month Apr 2019
#  (4_PDQ_ALL_HCH_04_19.csv -> PDQ_0419.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/4_PDQ_ALL_HCH_04_19.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2019", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# PDQ columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 4, 46, 47)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/PDQ_0419.csv", row.names = FALSE)
###########################################################################################
#5 DIAB extract, audit month Apr 2020
#  (5_DIAB_ALL_HCH_04_20.csv -> DIAB_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/5_DIAB_ALL_HCH_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Chronic-disease and Indigenous totals (positions fixed by the layout),
# shortened to the headings the later merge step expects.
df4 <- df3 %>%
  select(1, 2, 3, 159, 180) %>%
  rename(
    `Total CD` = `Total patients with one or more chronic diseases`,
    `Indigenous` = `Total Indigenous patients`
  )
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/DIAB_0420.csv", row.names = FALSE)
##########################################################################################
#6 MBS extract, audit month Apr 2020
#  (6_MBS_ALL_HCH_Less12mths_04_20.csv -> MBS_0420.csv)
library(tidyr)
library(dplyr)
# Read the raw export; the first data row carries the real column headings.
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/6_MBS_ALL_HCH_Less12mths_04_20.csv", sep = ",", header = TRUE)
names(df) <- df[1, ]
df <- df[-1, ]
# Drop duplicated columns and every percentage column.
df <- df[, !duplicated(colnames(df))]
df_sub <- df %>% select(-contains("%"))
# Transpose: original column headings become row names.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1, ]
df_transpose <- df_transpose[-1, ]
# Move the row names into a trimmed "Measure" column and sort on it.
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
df2$Measure <- trimws(df2$Measure, which = "both")
df3 <- df2[order(df2$Measure), ]
# Prepend audit-month and placeholder group columns after Measure.
df3 <- cbind(Group = "", `Audit Month` = "Apr 2020", df3) %>%
  relocate(`Audit Month`, .after = Measure) %>%
  relocate(Group, .after = `Audit Month`)
# Keep only the rows of interest (indices fixed by the export layout).
df3 <- df3[-c(1:7, 13:14, 20, 25, 31:43), ]
# HCH group code for each remaining row, in sorted-Measure order.
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# MBS columns of interest (positions fixed by the layout).
df4 <- df3 %>% select(1, 2, 3, 5, 6, 9:14)
write.csv(df4, "C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_0420.csv", row.names = FALSE)
########################################################################################
#7 Active Patients, Non-Indigenous
# NOTE(review): the header above does not match the file name
# (7_MBS_ALL_HCH_MH_... suggests a mental-health MBS extract) -- confirm the
# filter actually used when the extract was generated.
# Import the data
df <- read.csv("C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/7_MBS_ALL_HCH_MH_Less12mths_04_20.csv", sep = ",", header=TRUE)
#trimws(df)
# remove index header row, duplications and columns with %
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
#transpose data (one row per measure, one column per practice)
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
#df_transpose
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
#Change row name to first column
library(tidyr)
df2<-tibble::rownames_to_column(df_transpose, "Measure")
#Add additional columns
#df3 <- cbind('Filtered By' = "Active Patients Non-Indigenous", df2)
#df3 <- cbind('Audit Month' = seq(from = as.Date("2021-04-01"), to = as.Date("2021-04-01"),by = 'day'), df3)
#df3 <- cbind('Generated On' = seq(from = as.Date("2020-10-27"), to = as.Date("2020-10-27"),by = 'day'), df3)
#df3 <- cbind('Generated On' = Sys.Date(), df3)
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
df3 <- cbind('Audit Month' = "Apr 2020", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
#df3 <- cbind('Audit Period' = "Apr-2020", df2)
#df3 <- cbind('Group' = "", df2)
#df3 <- df3 %>% relocate(Group, .after = Measure)
# Drop unneeded measures and assign HCH group IDs positionally.
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# Keep the identifier columns plus two count columns (by position).
df4 <- df3 %>% select(1, 2, 3, 4, 19, 20)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_MH_0420.csv", row.names = FALSE)
#################################################################################
#8 Active Patients, Conditions: (Diabetes - YES)
# NOTE(review): the header above does not match the file name
# (8_PDQ_ALL_HCH_... suggests a PDQ extract) -- confirm the filter used.
# Import the data
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/8_PDQ_ALL_HCH_04_20.csv', sep= ",", header=TRUE)
#trimws(df)
# remove index header row, duplications and columns with %
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
#transpose data (one row per measure, one column per practice)
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
#df_transpose
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
#Change row name to first column
library(tidyr)
df2<-tibble::rownames_to_column(df_transpose, "Measure")
#Add additional columns
#df3 <- cbind('Filtered By' = "Active Patients Conditions: (Diabetes - YES)", df2)
#df3 <- cbind('Audit Month' = seq(from = as.Date("2020-04-01"), to = as.Date("2020-04-01"),by = 'day'), df2)
#df3 <- cbind('Generated On' = seq(from = as.Date("2020-10-27"), to = as.Date("2020-10-27"),by = 'day'), df3)
#df3 <- cbind('Generated On' = Sys.Date(), df3)
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
#df3 <- cbind('Audit Month' = seq(from = as.Date("2020-04-01"), to = as.Date("2020-04-01"),by = 'day'), df2)
#df3 <- cbind('Audit Month' = "Apr-2020", df2)
df3 <- cbind('Audit Month' = "Apr 2020", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Drop unneeded measures and assign HCH group IDs positionally.
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# Keep the identifier columns plus two count columns (by position).
df4 <- df3 %>% select(1, 2, 3, 4, 46, 47)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/PDQ_0420.csv", row.names = FALSE)
################################################################################
#9 Active Patients, Conditions: (Diabetes - NO), Medications: (Antidiabetics - YES), Last Results: <= 12 mths
# April 2021 diabetes (DIAB) extract: reshape, tag and export DIAB_0421.csv.
# Import the data
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/9_DIAB_ALL_HCH_04_21.csv', sep= ",", header=TRUE)
#trimws(df)
# remove index header row, duplications and columns with %
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
#transpose data (one row per measure, one column per practice)
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
#df_transpose
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
#Change row name to first column
library(tidyr)
df2<-tibble::rownames_to_column(df_transpose, "Measure")
#Add additional columns
#df3 <- cbind('Filtered By' = "Active Patient Conditions: (Diabetes - NO) Medications: (Antidiabetics - YES) Last Results: <= 12 mths", df2)
#df3 <- cbind('Audit Month' = seq(from = as.Date("2021-04-01"), to = as.Date("2021-04-01"),by = 'day'), df3)
#df3 <- cbind('Generated On' = seq(from = as.Date("2020-10-27"), to = as.Date("2020-10-27"),by = 'day'), df3)
#df3 <- cbind('Generated On' = Sys.Date(), df3)
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
df3 <- cbind('Audit Month' = "Apr 2021", df3)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
#df3 <- cbind('Audit Period' = "Apr-2021", df2)
#df3 <- cbind('Group' = "", df2)
#df3 <- df3 %>% relocate(Group, .after = Measure)
# Drop unneeded measures and assign HCH group IDs positionally.
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
library(dplyr)
# Keep the identifier columns plus two count columns (by position).
df4 <- df3 %>% select(1, 2, 3, 159, 180)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/DIAB_0421.csv", row.names = FALSE)
#################################################################################
#10 MBS audit extract, April 2021 (reads 10_MBS_ALL_HCH_Less12mths_04_21.csv).
# NOTE(review): the original header said "Conditions: (Diabetes - YES),
# Last Results: <= 6 mths", which does not match the 12-month MBS file name --
# confirm which filter was actually applied when the extract was generated.
# Import the raw extract.
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/10_MBS_ALL_HCH_Less12mths_04_21.csv', sep= ",", header = TRUE)
# Promote the first data row to column names, then drop it, de-duplicate
# columns and discard the percentage columns (counts only are kept).
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so each measure becomes a row and each practice a column.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names (measure labels) into a proper "Measure" column.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
# Tidy the measure labels, sort, and tag audit month / HCH group.
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
df3 <- cbind('Audit Month' = "Apr 2021", df3)   # was: df3 <- df3 <- cbind(...) (redundant double assignment)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Drop measures that are not reported, then assign the HCH group IDs
# positionally (one per remaining row, in sorted-measure order).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Keep the identifier columns plus the MBS count columns and export.
library(dplyr)
df4 <- df3 %>% select(1, 2, 3, 5, 6, 9:14)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_0421.csv", row.names = FALSE)
#################################################################################
#11 MBS mental-health extract, April 2021 (reads 11_MBS_ALL_HCH_MH_Less12mths_04_21.csv).
# NOTE(review): the original header said "Active Patients Conditions: (COPD -
# YES)", which does not match the MBS_MH file name -- confirm the filter that
# was applied when the extract was generated.
# Import the raw extract.
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/11_MBS_ALL_HCH_MH_Less12mths_04_21.csv', sep= ",", header=TRUE)
# Promote the first data row to column names, then drop it, de-duplicate
# columns and discard the percentage columns (counts only are kept).
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so each measure becomes a row and each practice a column.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names (measure labels) into a proper "Measure" column.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
# Tidy the measure labels, sort, and tag audit month / HCH group.
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
df3 <- cbind('Audit Month' = "Apr 2021", df3)   # was: df3 <- df3 <- cbind(...) (redundant double assignment)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Drop measures that are not reported, then assign the HCH group IDs
# positionally (one per remaining row, in sorted-measure order).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Keep the identifier columns plus the mental-health count columns and export.
library(dplyr)
df4 <- df3 %>% select(1, 2, 3, 4, 19, 20)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/MBS_MH_0421.csv", row.names = FALSE)
###################################################################################
#12 PDQ extract, April 2021 (reads 12_PDQ_ALL_HCH_04_21.csv).
# NOTE(review): the original header said "Active Patients Conditions: (COPD -
# YES) Last Results: <= 12 mths", which does not match the PDQ file name --
# confirm the filter used when the extract was generated.
# Import the raw extract.
df <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/12_PDQ_ALL_HCH_04_21.csv', sep= ",", header=TRUE)
# Promote the first data row to column names, then drop it, de-duplicate
# columns and discard the percentage columns (counts only are kept).
names(df) <- df[1,]
df <- df[-1,]
df <- df[, !duplicated(colnames(df))]
df_sub <- select(df, -contains("%"))
# Transpose so each measure becomes a row and each practice a column.
df_transpose <- as.data.frame(t(as.matrix(df_sub)), header = TRUE)
names(df_transpose) <- df_transpose[1,]
df_transpose <- df_transpose[-1,]
# Move the row names (measure labels) into a proper "Measure" column.
library(tidyr)
df2 <- tibble::rownames_to_column(df_transpose, "Measure")
# Tidy the measure labels, sort, and tag audit month / HCH group.
df2$Measure <- trimws(df2$Measure, which = c("both"))
df3 <- df2[order(df2$Measure),]
df3 <- cbind('Audit Month' = "Apr 2021", df3)   # was: df3 <- df3 <- cbind(...) (redundant double assignment)
df3 <- cbind('Group' = "", df3)
df3 <- df3 %>% relocate('Audit Month', .after = Measure)
df3 <- df3 %>% relocate(Group, .after = 'Audit Month')
# Drop measures that are not reported, then assign the HCH group IDs
# positionally (one per remaining row, in sorted-measure order).
df3 <- df3[-c(1:7, 13:14,20,25, 31:43), ]
vec <- c(3.1, 2, 1, 1, 3, 3.1, 3, 2, 2, 2, 3, 1, 1, 3.1, 3.1, 1, 1, 1, 3.1)
df3$Group <- vec
# Keep the identifier columns plus two count columns and export.
library(dplyr)
df4 <- df3 %>% select(1, 2, 3, 4, 46, 47)
write.csv(df4,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/PDQ_0421.csv", row.names = FALSE)
##############################################################################
# Combine the April 2019/2020/2021 audit extracts for the four categories
# (DIAB, MBS, MBS_MH, PDQ) into one table and derive percentage indicators.
# NOTE(review): the merge keys below are lower-case (e.g. "auditmonth",
# "hchgroup") -- they assume the HCH_DATA copies of the extract files carry
# these column names; confirm they match the files on disk.
df1 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/DIAB_0419.csv', sep= ",", header=TRUE)
df2 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/DIAB_0420.csv', sep= ",", header=TRUE)
df3 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/DIAB_0421.csv', sep= ",", header=TRUE)
df4 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_0419.csv', sep= ",", header=TRUE)
df5 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_0420.csv', sep= ",", header=TRUE)
df6 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_0421.csv', sep= ",", header=TRUE)
df7 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_MH_0419.csv', sep= ",", header=TRUE)
df8 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_MH_0420.csv', sep= ",", header=TRUE)
df9 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/MBS_MH_0421.csv', sep= ",", header=TRUE)
df10 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/PDQ_0419.csv', sep= ",", header=TRUE)
df11 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/PDQ_0420.csv', sep= ",", header=TRUE)
df12 <- read.csv('C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/PDQ_0421.csv', sep= ",", header=TRUE)
# Merge each data category separately across the three audit years.
df_1 <- merge(df1, df2, by = c("measure","auditmonth", "hchgroup", "totalcd", "indigenous"), all = TRUE)
df_2 <- merge(df_1, df3, by = c("measure","auditmonth", "hchgroup", "totalcd", "indigenous"), all = TRUE)
df_3 <- merge(df4, df5, by = c("measure","auditmonth", "hchgroup", "ha75.", "ha45.49", "indigenous.ha.55.", "indigenous.ha.15.54",
                               "indigenous.ha..15", "cdm.gpmp", "cdm.tca", "cdm.review"), all = TRUE)
df_4 <- merge(df_3, df6, by = c("measure","auditmonth", "hchgroup", "ha75.", "ha45.49", "indigenous.ha.55.", "indigenous.ha.15.54",
                                "indigenous.ha..15", "cdm.gpmp", "cdm.tca", "cdm.review"), all = TRUE)
df_5 <- merge(df7, df8, by = c("measure","auditmonth", "hchgroup","mhnumber","gp_mhtp", "gp_mhtp.review"), all = TRUE)
df_6 <- merge(df_5, df9, by = c("measure","auditmonth", "hchgroup","mhnumber","gp_mhtp", "gp_mhtp.review"), all = TRUE)
df_7 <- merge(df10, df11, by = c("measure","auditmonth", "hchgroup", "total_population", "X75.", "X45_49"), all = TRUE)
df_8 <- merge(df_7, df12, by = c("measure","auditmonth", "hchgroup", "total_population", "X75.", "X45_49"), all = TRUE)
# Merge the four category tables on the shared identifier columns.
df_9 <- merge(df_2, df_4, by = c("measure","auditmonth", "hchgroup"), all = TRUE)
df_10 <- merge(df_9, df_6, by = c("measure","auditmonth", "hchgroup"), all = TRUE)
df_11 <- merge(df_10, df_8, by = c("measure","auditmonth", "hchgroup"), all = TRUE)
library(dplyr)
# Reorder the columns into reporting order.
df_12 <- df_11[, c(1,2,3,17,4,11,12,13,14,15,16,5,8,9,10,19,7,18,6)]
# Coerce the count columns to numeric BEFORE summing: rowSums() stops with an
# error on non-numeric input.  (The original summed first and coerced after,
# which only worked when read.csv had already parsed every column as numeric.)
df_12[4:19] <- lapply(df_12[4:19], as.numeric)
# Total 715 health assessments across the three Indigenous age bands
# (columns 13:15 after the reorder above).
df_12$ha715_total <- rowSums(df_12[,c(13,14,15)], na.rm = TRUE)
df_12[is.na(df_12)] <- 0
library(scales)  # NOTE(review): loaded but not used in this section -- confirm
# Derived percentage indicators.
df_12$perc_cd <- (df_12$totalcd/df_12$total_population)*100
df_12$perc_gpmp <- (df_12$cdm.gpmp/df_12$totalcd)*100
df_12$perc_tca <- (df_12$cdm.tca/df_12$totalcd)*100
df_12$perc_review <- (df_12$cdm.review/df_12$totalcd)*100
df_12$perc_mh <- (df_12$mhnumber/df_12$total_population)*100
df_12$perc_mhtp <- (df_12$gp_mhtp/df_12$mhnumber)*100
# BUG FIX: this line previously reassigned perc_review, silently overwriting
# the CDM review percentage computed above; the MHTP review rate now gets its
# own column.
df_12$perc_mhtp_review <- (df_12$gp_mhtp.review/df_12$mhnumber)*100
df_12$perc_indig <- (df_12$indigenous/df_12$total_population)*100
df_12$perc_715 <- (df_12$ha715_total/df_12$indigenous)*100
df_12$perc_45_49 <- (df_12$X45_49/df_12$total_population)*100
df_12$perc_ha45_49 <- (df_12$ha45.49/df_12$X45_49)*100
df_12$perc_75 <- (df_12$X75./df_12$total_population)*100
df_12$perc_ha75 <- (df_12$ha75./df_12$X75.)*100
write.csv(df_12,"C:/Users/Kathleen/Dropbox (WQPHN)/David/Data Export/HCH_Compare/HCH_DATA/hchdata_all4.csv", row.names = FALSE)
str(df_12)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tr2g.R
\name{tr2g_TxDb}
\alias{tr2g_TxDb}
\title{Get transcript and gene info from TxDb objects}
\usage{
tr2g_TxDb(txdb)
}
\arguments{
\item{txdb}{A \code{\link{TxDb}} object with gene annotation.}
}
\value{
A data frame with 3 columns: \code{gene} for gene ID, \code{transcript}
for transcript ID, and \code{tx_id} for internal transcript IDs used to avoid
duplicate transcript names. For TxDb packages from Bioconductor, gene ID is
Entrez ID, while transcript IDs are Ensembl IDs with version numbers for
\code{TxDb.Hsapiens.UCSC.hg38.knownGene}. In some cases, the transcript IDs
have duplicates, and this is resolved by adding numbers to make the IDs
unique.
}
\description{
The genome and gene annotations of some species can be conveniently obtained
from Bioconductor packages. This is more convenient than downloading GTF
files from Ensembl and reading it into R. In these packages, the gene
annotation is stored in a \code{\link{TxDb}} object, which has standardized
names for gene IDs, transcript IDs, exon IDs, and so on, which are stored in
the metadata fields in GTF and GFF3 files, which are not standardized.
This function extracts transcript and corresponding gene information from
gene annotation stored in a \code{\link{TxDb}} object.
}
\examples{
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
tr2g_TxDb(TxDb.Hsapiens.UCSC.hg38.knownGene)
}
\seealso{
Other functions to retrieve transcript and gene info: \code{\link{sort_tr2g}},
\code{\link{tr2g_EnsDb}}, \code{\link{tr2g_ensembl}},
\code{\link{tr2g_fasta}}, \code{\link{tr2g_gff3}},
\code{\link{tr2g_gtf}}, \code{\link{transcript2gene}}
}
\concept{functions to retrieve transcript and gene info}
| /man/tr2g_TxDb.Rd | permissive | EugOT/BUSpaRse | R | false | true | 2,221 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tr2g.R
\name{tr2g_TxDb}
\alias{tr2g_TxDb}
\title{Get transcript and gene info from TxDb objects}
\usage{
tr2g_TxDb(txdb)
}
\arguments{
\item{txdb}{A \code{\link{TxDb}} object with gene annotation.}
}
\value{
A data frame with 3 columns: \code{gene} for gene ID, \code{transcript}
for transcript ID, and \code{tx_id} for internal transcript IDs used to avoid
duplicate transcript names. For TxDb packages from Bioconductor, gene ID is
Entrez ID, while transcript IDs are Ensembl IDs with version numbers for
\code{TxDb.Hsapiens.UCSC.hg38.knownGene}. In some cases, the transcript ID
have duplicates, and this is resolved by adding numbers to make the IDs
unique.
A data frame with 3 columns: \code{gene} for gene ID, \code{transcript}
for transcript ID, and \code{gene_name} for gene names. If \code{other_attrs}
has been specified, then those will also be columns in the data frame returned.
}
\description{
The genome and gene annotations of some species can be conveniently obtained
from Bioconductor packages. This is more convenient than downloading GTF
files from Ensembl and reading it into R. In these packages, the gene
annotation is stored in a \code{\link{TxDb}} object, which has standardized
names for gene IDs, transcript IDs, exon IDs, and so on, which are stored in
the metadata fields in GTF and GFF3 files, which are not standardized.
This function extracts transcript and corresponding gene information from
gene annotation stored in a \code{\link{TxDb}} object.
}
\examples{
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
tr2g_TxDb(TxDb.Hsapiens.UCSC.hg38.knownGene)
}
\seealso{
Other functions to retrieve transcript and gene info: \code{\link{sort_tr2g}},
\code{\link{tr2g_EnsDb}}, \code{\link{tr2g_ensembl}},
\code{\link{tr2g_fasta}}, \code{\link{tr2g_gff3}},
\code{\link{tr2g_gtf}}, \code{\link{transcript2gene}}
Other functions to retrieve transcript and gene info: \code{\link{sort_tr2g}},
\code{\link{tr2g_EnsDb}}, \code{\link{tr2g_ensembl}},
\code{\link{tr2g_fasta}}, \code{\link{tr2g_gff3}},
\code{\link{tr2g_gtf}}, \code{\link{transcript2gene}}
}
\concept{functions to retrieve transcript and gene info}
|
# plot1.R -- Exploratory Data Analysis course project 1, plot 1.
# Downloads the UCI household power consumption data (if needed), subsets the
# two days 2007-02-01 and 2007-02-02, and draws a histogram of Global Active
# Power to plot1.png.
setwd("~/coursera/exploratory/ExData_Plotting1")
if (!file.exists("data")){
  dir.create("data")
}
if ( !file.exists("./data/household_power_consumption.txt")){
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile="./data/powerConsumption.zip", method="curl")
  unzip( zipfile="./data/powerConsumption.zip", exdir="./data")
}
# "?" marks missing values in the source file; with na.strings set, read.csv
# parses the measurement columns as numeric.  (Also removed a stray trailing
# comma from the original call.)
data <- read.csv(file="./data/household_power_consumption.txt", sep = ";", header=TRUE, na.strings="?")
# Use POSIXct rather than POSIXlt: a single atomic column that behaves well
# inside a data frame and still supports the range comparison below.
data <- within(data, Datetime <- as.POSIXct(paste(Date, Time),
                                            format = "%d/%m/%Y %H:%M:%S"))
plotdata <- data[data$Datetime>="2007-02-01 00:00:00" & data$Datetime<="2007-02-02 23:59:59",]
# BUG FIX: the original called data.table::set() without loading data.table
# (the script stopped with 'could not find function "set"'), and it converted
# `data` only after `plotdata` had been extracted, so it could not affect the
# plot.  Coerce the measurement columns of the plotted subset with base R.
plotdata[3:8] <- lapply(plotdata[3:8], function(x) as.numeric(as.character(x)))
png("plot1.png", width = 480, height=480)
hist(plotdata$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | courseraJC/ExData_Plotting1 | R | false | false | 981 | r | setwd("~/coursera/exploratory/ExData_Plotting1")
# (Duplicate copy of plot1.R, minus the leading setwd() line.)  Downloads the
# UCI household power data if missing, subsets 2007-02-01..2007-02-02, and
# draws the Global Active Power histogram to plot1.png.
if (!file.exists("data")){
  dir.create("data")
}
if ( !file.exists("./data/household_power_consumption.txt")){
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile="./data/powerConsumption.zip", method="curl")
  unzip( zipfile="./data/powerConsumption.zip", exdir="./data")
}
# "?" marks missing values in the source file.
data <- read.csv(file="./data/household_power_consumption.txt",sep = ";",header=TRUE,na.strings="?",)
data <- within(data, Datetime <- as.POSIXlt(paste(Date, Time),
                                            format = "%d/%m/%Y %H:%M:%S"))
plotdata <- data[data$Datetime>="2007-02-01 00:00:00" & data$Datetime<="2007-02-02 23:59:59",]
# NOTE(review): set() is data.table's function and data.table is never loaded
# here, so this line errors; it also converts `data` only after `plotdata` was
# extracted, so it cannot affect the plot.  Candidate for removal or a base-R
# replacement applied to plotdata.
for (col in 3:8) set(data, j=col, value=as.numeric(data[[col]]))
png("plot1.png", width = 480, height=480)
hist(plotdata$Global_active_power,col="red",main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{aesStyle}
\alias{aesStyle}
\alias{areaStyle}
\alias{chordStyle}
\alias{itemStyle}
\alias{labelLineStyle}
\alias{labelStyle}
\alias{lineStyle}
\alias{linkStyle}
\alias{nodeStyle}
\alias{textStyle}
\title{Define Aesthetic Elements of Echarts Object}
\usage{
aesStyle(element = c("text", "line", "area", "chord", "node", "link"), ...)
lineStyle(...)
textStyle(...)
areaStyle(...)
chordStyle(...)
nodeStyle(...)
linkStyle(...)
labelStyle(...)
labelLineStyle(...)
itemStyle(...)
}
\arguments{
\item{element}{String, could be 'text', 'line', 'area', 'chord', 'node', or 'link',
corresponding to \code{textStyle, lineStyle, areaStyle, chordStyle, nodeStyle,
linkStyle}}
\item{...}{The params to form an aesthetic element \cr
The element-specific params list:
\describe{
\item{\strong{itemStyle} (normal|emphasis)}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{lineStyle} \tab for line, k charts and markLine, \code{\link{lineStyle}} \cr
\code{textStyle} \tab \code{\link{textStyle}} \cr
\code{areaStyle} \tab for stacked line chart and map, \code{\link{areaStyle}} \cr
\code{chordStyle} \tab for chord chart, \code{\link{chordStyle}} \cr
\code{nodeStyle} \tab for force chart, \code{\link{nodeStyle}} \cr
\code{linkStyle} \tab for force chart, \code{\link{linkStyle}} \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab for symbol, symbole, pie chart, map and markPoint, numeric \cr
\code{barBorderColor} \tab for symbol, symbole, pie chart, map and markPoint, numeric \cr
\code{barBorderRadius} \tab numeric vector length 1 or 4 (right-bottom-left-top), default 0 \cr
 \code{barBorderWidth} \tab numeric vector length 1 or 4 (right-bottom-left-top), default 0 \cr
\code{label} \tab for line, bar, k, scatter, pie, map, force, funnel charts
and markPoint, markLine, \code{\link{labelStyle}} \cr
\code{labelLine} \tab for pie and funnel chart, \code{\link{labelLineStyle}}
}}
\item{\emph{label}}{
\tabular{ll}{
\code{show} \tab TRUE|FALSE, default TRUE \cr
\code{position} \tab \itemize{
\item for pie, 'outer'|'inner'; \cr
\item for funnel, 'inner'|'left'|'right'; \cr
\item for line, bar, k, scatter, 'top'|'right'|'inside'|'left'|'bottom'; \cr
\item for bar, additionally 'insideLeft' | 'insideRight' | 'insideTop' | 'insideBottom'} \cr
\code{rotate} \tab chord chart only. TRUE|FALSE, default FALSE \cr
\code{distance} \tab chord and pie chart only. numeric, default 10 \cr
\code{formatter} \tab \code{\link{setTooltip}} \cr
\code{textStyle} \tab \code{\link{textStyle}} \cr
\code{x} \tab treemap only, numeric \cr
\code{y} \tab treemap only, numeric
}}
\item{\emph{labelLine}}{
\tabular{ll}{
\code{show} \tab TRUE|FALSE, default TRUE \cr
\code{length} \tab numeric or 'auto', default 40 \cr
\code{lineStyle} \tab \code{\link{lineStyle}}
}}
\item{textStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{decoration} \tab only for tooltip. string, default 'none' \cr
\code{align} \tab 'left' | 'right' | 'center' \cr
\code{baseline} \tab 'top' | 'bottom' | 'middle' \cr
\code{fontFamily} \tab valid font family name \cr
\code{fontSize} \tab numeric, default 12 \cr
\code{fontStyle} \tab 'normal' | 'italic' | 'oblique', default 'normal' \cr
\code{fontWeight} \tab 'normal' | 'bold' | 'bolder' | 'lighter' or numeric,
default 'normal'
}}
\item{lineStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{type} \tab 'solid' | 'dotted' | 'dashed', for tree, additionally
'curve' | 'broken'. Default 'solid' \cr
\code{width} \tab numeric \cr
\code{shadowColor} \tab color vector, 'rgba', hex color, or color names. \cr
\code{shadowBlur} \tab numeric, default 5 \cr
\code{shadowOffsetX} \tab numeric, default 3 \cr
\code{shadowOffsetY} \tab numeric, default 3
}}
\item{areaStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{type} \tab only 'default'
}}
\item{chordStyle}{
\tabular{ll}{
\code{width} \tab numeric, default 1 \cr
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab numeric, default 1 \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names.
}}
\item{nodeStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab numeric, default 1 \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names.
}}
\item{linkStyle}{
\tabular{ll}{
\code{type} \tab 'curve'|'line' \cr
\code{color} \tab color vector, 'rgba', hex color, or color names. default '#5182ab' \cr
\code{width} \tab numeric, default 1
}}
}}
}
\value{
A list
}
\description{
An Echarts object uses \code{itemStyle} heavily. You can use \code{itemStyle}
to compose an itemStyle list. \cr \cr
Contained in an itemStyle object are \cr
\describe{
\item{atomic features}{'color', 'borderColor', 'borderWidth', 'barBorderColor',
'barBorderRadius', 'barBorderWidth', which you can directly assign values}
\item{object features}{'lineStyle', 'textStyle','areaStyle', 'chordStyle',
'nodeStyle', 'linkStyle', which you can yield by \code{aesStyle} function family}
\item{mixed object features}{'label' and 'labelLine', which contains other
object features, such as 'lineStyle', 'textStyle'}
} \cr
You can use \code{aesStyle} function family (\code{
lineStyle, textStyle, areaStyle, chordStyle, nodeStyle, linkStyle, labelStyle, labelLineStyle})
to compose basic feature objects, and then group them into label or labelLine
using \code{labelStyle / labelLineStyle}, and finally pack them into an itemStyle
object using \code{itemStyle}.
}
\examples{
\dontrun{
lab <- labelStyle(show=TRUE, position='inside',
textStyle=textStyle(color='red'))
styLine <- lineStyle(color='#fff', width=4, shadowBlur=5)
itemStyle <- list(normal=itemStyle(lineStyle=styLine, label=lab),
emphasis=itemStyle(lineStyle=styLine, label=lab)
)
}
}
| /man/aesStyle.Rd | permissive | takewiki/recharts2 | R | false | true | 6,641 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{aesStyle}
\alias{aesStyle}
\alias{areaStyle}
\alias{chordStyle}
\alias{itemStyle}
\alias{labelLineStyle}
\alias{labelStyle}
\alias{lineStyle}
\alias{linkStyle}
\alias{nodeStyle}
\alias{textStyle}
\title{Define Aesthetic Elements of Echarts Object}
\usage{
aesStyle(element = c("text", "line", "area", "chord", "node", "link"), ...)
lineStyle(...)
textStyle(...)
areaStyle(...)
chordStyle(...)
nodeStyle(...)
linkStyle(...)
labelStyle(...)
labelLineStyle(...)
itemStyle(...)
}
\arguments{
\item{element}{String, could be 'text', 'line', 'area', 'chord', 'node', or 'link',
corresponding to \code{textStyle, lineStyle, areaStyle, chordStyle, nodeStyle,
linkStyle}}
\item{...}{The params to form an aesthetic element \cr
The element-specific params list:
\describe{
\item{\strong{itemStyle} (normal|emphasis)}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{lineStyle} \tab for line, k charts and markLine, \code{\link{lineStyle}} \cr
\code{textStyle} \tab \code{\link{textStyle}} \cr
\code{areaStyle} \tab for stacked line chart and map, \code{\link{areaStyle}} \cr
\code{chordStyle} \tab for chord chart, \code{\link{chordStyle}} \cr
\code{nodeStyle} \tab for force chart, \code{\link{nodeStyle}} \cr
\code{linkStyle} \tab for force chart, \code{\link{linkStyle}} \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab for symbol, symbole, pie chart, map and markPoint, numeric \cr
\code{barBorderColor} \tab for symbol, symbole, pie chart, map and markPoint, numeric \cr
\code{barBorderRadius} \tab numeric vector length 1 or 4 (right-bottom-left-top), default 0 \cr
\code{barBorderWidth} \tab numeric vector length 1 or 4 (right-bottom-left-top), default 00 \cr
\code{label} \tab for line, bar, k, scatter, pie, map, force, funnel charts
and markPoint, markLine, \code{\link{labelStyle}} \cr
\code{labelLine} \tab for pie and funnel chart, \code{\link{labelLineStyle}}
}}
\item{\emph{label}}{
\tabular{ll}{
\code{show} \tab TRUE|FALSE, default TRUE \cr
\code{position} \tab \itemize{
\item for pie, 'outer'|'inner'; \cr
\item for funnel, 'inner'|'left'|'right'; \cr
\item for line, bar, k, scatter, 'top'|'right'|'inside'|'left'|'bottom'; \cr
\item for bar, additionally 'insideLeft' | 'insideRight' | 'insideTop' | 'insideBottom'} \cr
\code{rotate} \tab chord chart only. TRUE|FALSE, default FALSE \cr
\code{distance} \tab chord and pie chart only. numeric, default 10 \cr
\code{formatter} \tab \code{\link{setTooltip}} \cr
\code{textStyle} \tab \code{\link{textStyle}} \cr
\code{x} \tab treemap only, numeric \cr
\code{y} \tab treemap only, numeric
}}
\item{\emph{labelLine}}{
\tabular{ll}{
\code{show} \tab TRUE|FALSE, default TRUE \cr
\code{length} \tab numeric or 'auto', default 40 \cr
\code{lineStyle} \tab \code{\link{lineStyle}}
}}
\item{textStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{decoration} \tab only for tooltip. string, default 'none' \cr
\code{align} \tab 'left' | 'right' | 'center' \cr
\code{baseline} \tab 'top' | 'bottom' | 'middle' \cr
\code{fontFamily} \tab valid font family name \cr
\code{fontSize} \tab numeric, default 12 \cr
\code{fontStyle} \tab 'normal' | 'italic' | 'oblique', default 'normal' \cr
\code{fontWeight} \tab 'normal' | 'bold' | 'bolder' | 'lighter' or numeric,
default 'normal'
}}
\item{lineStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{type} \tab 'solid' | 'dotted' | 'dashed', for tree, additionally
'curve' | 'broken'. Default 'solid' \cr
\code{width} \tab numeric \cr
\code{shadowColor} \tab color vector, 'rgba', hex color, or color names. \cr
\code{shadowBlur} \tab numeric, default 5 \cr
\code{shadowOffsetX} \tab numeric, default 3 \cr
\code{shadowOffsetY} \tab numeric, default 3
}}
\item{areaStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{type} \tab only 'default'
}}
\item{chordStyle}{
\tabular{ll}{
\code{width} \tab numeric, default 1 \cr
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab numeric, default 1 \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names.
}}
\item{nodeStyle}{
\tabular{ll}{
\code{color} \tab color vector, 'rgba', hex color, or color names. \cr
\code{borderWidth} \tab numeric, default 1 \cr
\code{borderColor} \tab color vector, 'rgba', hex color, or color names.
}}
\item{linkStyle}{
\tabular{ll}{
\code{type} \tab 'curve'|'line' \cr
\code{color} \tab color vector, 'rgba', hex color, or color names. default '#5182ab' \cr
\code{width} \tab numeric, default 1
}}
}}
}
\value{
A list
}
\description{
An Echarts object uses \code{itemStyle} heavily. You can use \code{itemStyle}
to compose an itemStyle list. \cr \cr
Contained in an itemStyle object are \cr
\describe{
\item{atomic features}{'color', 'borderColor', 'borderWidth', 'barBorderColor',
'barBorderRadius', 'barBorderWidth', which you can directly assign values}
\item{object features}{'lineStyle', 'textStyle','areaStyle', 'chordStyle',
'nodeStyle', 'linkStyle', which you can yield by \code{aesStyle} function family}
\item{mixed object features}{'label' and 'labelLine', which contains other
object features, such as 'lineStyle', 'textStyle'}
} \cr
You can use \code{aesStyle} function family (\code{
lineStyle, textStyle, areaStyle, aesChordSytle, labelStyle, labelLineStyle})
to compose basic feature objects, and then group them into label or labelLine
using \code{labelStyle / labelLineStyle}, and finally pack them into an itemStyle
object using \code{itemStyle}.
}
\examples{
\dontrun{
lab <- labelStyle(show=TRUE, position='inside',
textStyle=textStyle(color='red'))
styLine <- lineStyle(color='#fff', width=4, shadowBlur=5)
itemStyle <- list(normal=itemStyle(lineStyle=styLine, label=lab),
emphasis=itemStyle(lineStyle=styLine, label=lab)
)
}
}
|
library(stepp)
### Name: bigKM
### Title: The BIG 1-98 trial dataset for Kaplan-Meier STEPP.
### Aliases: bigKM
### Keywords: datasets
### ** Examples
data(bigKM)
# Extract the analysis variables from the BIG 1-98 trial dataset.
rxgroup <- bigKM$trt    # treatment group indicator (levels 1 and 2, see trts= below)
time <- bigKM$time      # follow-up / survival time
evt <- bigKM$event      # event indicator used as the censoring variable
cov <- bigKM$ki67       # subpopulation covariate: Ki-67 labeling index
# analyze using the Kaplan-Meier method with a
# sliding window size of 150 patients and a maximum of 50 patients in common
#
swin <- new("stwin", type="sliding", r1=50, r2=150) # create a sliding window
subp <- new("stsubpop") # create subpopulation object
subp <- generate(subp, win=swin, covariate=cov) # generate the subpopulations
summary(subp) # summary of the subpopulations
# create a stepp model using Kaplan Meier Method to analyze the data
#
smodel <- new("stmodelKM", coltrt=rxgroup, trts=c(1,2), survTime=time, censor=evt, timePoint=4)
statKM <- new("steppes") # create a test object based on subpopulation and window
statKM <- estimate(statKM, subp, smodel) # estimate the subpopulation results
# Warning: In this example, the permutations have been set to 0 to allow the function
# to finish in a short amount of time. IT IS RECOMMENDED TO USE AT LEAST 2500 PERMUTATIONS TO
# PROVIDE STABLE RESULTS.
statKM <- test(statKM, nperm=0) # permutation test with 0 iterations
print(statKM) # print the estimates and test statistics
# NOTE(review): "Taxmoxifen" below is a misspelling of "Tamoxifen" in the
# plot legend; left unchanged here because it is a runtime string.
plot(statKM, ncex=0.65, legendy=30, pline=-15.5, color=c("blue","gold"),
     pointwise=FALSE,
     xlabel="Median Ki-67 LI in Subpopulation (% immunoreactivity)",
     ylabel="4-year Disease Free Survival",
     tlegend=c("Taxmoxifen", "Letrozole"), nlas=3)
| /data/genthat_extracted_code/stepp/examples/bigKM.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,646 | r | library(stepp)
### Name: bigKM
### Title: The BIG 1-98 trial dataset for Kaplan-Meier STEPP.
### Aliases: bigKM
### Keywords: datasets
### ** Examples
data(bigKM)
rxgroup <- bigKM$trt
time <- bigKM$time
evt <- bigKM$event
cov <- bigKM$ki67
# analyze using Cumulative Incidence method with
# sliding window size of 150 patients and a maximum of 50 patients in common
#
swin <- new("stwin", type="sliding", r1=50, r2=150) # create a sliding window
subp <- new("stsubpop") # create subpopulation object
subp <- generate(subp, win=swin, covariate=cov) # generate the subpopulations
summary(subp) # summary of the subpopulations
# create a stepp model using Kaplan Meier Method to analyze the data
#
smodel <- new("stmodelKM", coltrt=rxgroup, trts=c(1,2), survTime=time, censor=evt, timePoint=4)
statKM <- new("steppes") # create a test object based on subpopulation and window
statKM <- estimate(statKM, subp, smodel) # estimate the subpo10ulation results
# Warning: In this example, the permutations have been set to 0 to allow the function
# to finish in a short amount of time. IT IS RECOMMEND TO USE AT LEAST 2500 PERMUTATIONS TO
# PROVIDE STABLE RESULTS.
statKM <- test(statKM, nperm=0) # permutation test with 0 iterations
print(statKM) # print the estimates and test statistics
plot(statKM, ncex=0.65, legendy=30, pline=-15.5, color=c("blue","gold"),
pointwise=FALSE,
xlabel="Median Ki-67 LI in Subpopulation (% immunoreactivity)",
ylabel="4-year Disease Free Survival",
tlegend=c("Taxmoxifen", "Letrozole"), nlas=3)
|
# Saved simulation parameter lists and summary outputs for one model run.
# NOTE(review): parameter semantics below are inferred from the names only —
# confirm against the simulation code that consumes these objects.
island.pars <- list(isl.r=5, isl.c=5, E.mean=20, E.sd=0, prod.mean=20, prod.sd=0, incComp=NA, incComp.time=NA)  # island grid (rows x cols) and environment/productivity settings
pop.pars <- list(N.init=40, w.mean=1, w.CV=0.04)  # initial abundance and body-weight distribution
move.pars <- list(m=0.5, move.fn='random')  # movement rate and movement rule
pred.pars <- list(p=0.5, prP=0.2, predOpt=10, predEnd=300, pred.fn='none')  # predation settings (rule disabled: 'none')
feed.pars <- list(st=0.1785714, feed.fn='outcompete')  # feeding rule
repro.pars <- list(f=1, babybump=1, repro.fn='log')  # reproduction rule
nsims <- 50  # number of simulation replicates
# Summary statistics: mean/variance/sum of weight (W) and abundance (N).
# Suffixes .init/.pF/.pR presumably denote initialization, post-feeding, and
# post-reproduction — TODO confirm against the simulation output code.
MeanW.init <- 1.003571
MeanW.pF <- 8.411474
VarW.pF <- NA
MeanW.pR <- 8.509541
SumW.init <- 40.14284
SumW.pF <- 503.7047
SumW.pR <- 1633.83
N.init <- 40
N.pF <- 61.04
N.pR <- 194.78
| /SimOutput/outcompete/st_ParSet_3/parameters.R | no_license | Sz-Tim/IslandRule | R | false | false | 578 | r | island.pars <- list(isl.r=5, isl.c=5, E.mean=20, E.sd=0, prod.mean=20, prod.sd=0, incComp=NA, incComp.time=NA)
pop.pars <- list(N.init=40, w.mean=1, w.CV=0.04)
move.pars <- list(m=0.5, move.fn='random')
pred.pars <- list(p=0.5, prP=0.2, predOpt=10, predEnd=300, pred.fn='none')
feed.pars <- list(st=0.1785714, feed.fn='outcompete')
repro.pars <- list(f=1, babybump=1, repro.fn='log')
nsims <- 50
MeanW.init <- 1.003571
MeanW.pF <- 8.411474
VarW.pF <- NA
MeanW.pR <- 8.509541
SumW.init <- 40.14284
SumW.pF <- 503.7047
SumW.pR <- 1633.83
N.init <- 40
N.pF <- 61.04
N.pR <- 194.78
|
\name{spacodi.treeplot}
\alias{spacodi.treeplot}
\title{EXPERIMENTAL: plotting diversity turnover on trees}
\description{\code{spacodi.treeplot} is used to plot departures from expectations for diversity turnover on a phylogeny}
\usage{spacodi.treeplot(spacodi.permutations, phy, cex=list(pch = 1.5, tip = 0.5, legend = 0.75), transp=0.8, sig.plot = TRUE, cut.off = 0.05, cols = list("white", "gray", "black"), main = TRUE, outfile = NULL, add.id = FALSE, ...)}
\arguments{
\item{spacodi.permutations}{a list-object generated by \code{\link{spacodi.by.nodes}}}
\item{phy}{a phylogenetic tree of class \code{phylo}; see \code{\link[ape]{read.tree}}}
\item{cex}{a named list of character scalings for node-associated symbols (\code{pch}), tip labels (\code{tip}), and the legend (\code{legend})}
\item{transp}{degree of color transparency: \code{transp=1} is opaque}
\item{sig.plot}{Boolean; whether nodes are colored by significance of observed and expected estimates of structure}
\item{cut.off}{a value, if \code{sig.plot=TRUE}, distinguishing observed from expected estimates of structure}
\item{cols}{a list of three elements if using \code{sig.plot}: the first color is for values not different than expected; the second and third for values greater and less than expected}
\item{main}{Boolean; whether a title is plotted}
\item{outfile}{an optional .pdf file to which to write output}
\item{add.id}{Boolean; whether \code{node.labels} are placed near nodes; see \code{\link[ape]{nodelabels}}}
\item{\dots}{additional plotting parameters to be optionally supplied}
}
\details{
This function will compute and plot estimates of diversity structure on the tree, with color-coded values.
Note: this function requires a \code{spacodi.permutations} with data for all nodes. If using, for instance, \code{spacodi.by.nodes} to
generate these data, the option \code{obs.only=TRUE} will ensure that data are returned for all nodes (see \strong{Examples} below).
}
\value{a plot of diversity structure estimates for a phylogeny, with the option to direct output to a .pdf file}
\references{
HARDY OJ and B SENTERRE. 2007. Characterizing the
phylogenetic structure of communities by an additive partitioning of
phylogenetic diversity. Journal of Ecology 95:493-506.
HARDY OJ. 2008. Testing the spatial phylogenetic
structure of local communities: statistical performances of
different null models and test statistics on a locally neutral
community. Journal of Ecology 96:914-926.
}
\author{Timothy Paine and Jonathan Eastman}
\seealso{
see \code{\link{spacodi.by.nodes}} for estimating phylogenetic turnover on trees with community-level sampling;
\code{\link{spacodi.permutplot}}; \code{\link{phy.dotplot}} }
\examples{
data(sp.example)
attach(sp.example)
# plot PIst
PI=spacodi.by.nodes(sp.plot=spl, sp.parm="PIst", phy=phy, return.all=TRUE, method="1s")
spacodi.treeplot(PI, phy, sig.plot=TRUE, add.id=FALSE)
spacodi.treeplot(PI, phy, sig.plot=FALSE, add.id=FALSE)
}
| /spacodiR/man/spacodi.treeplot.Rd | no_license | eastman/spacodiR | R | false | false | 3,022 | rd | \name{spacodi.treeplot}
\alias{spacodi.treeplot}
\title{EXPERIMENTAL: plotting diversity turnover on trees}
\description{\code{spacodi.treeplot} is used to plot departures from expectations for diversity turnover on a phylogeny}
\usage{spacodi.treeplot(spacodi.permutations, phy, cex=list(pch = 1.5, tip = 0.5, legend = 0.75), transp=0.8, sig.plot = TRUE, cut.off = 0.05, cols = list("white", "gray", "black"), main = TRUE, outfile = NULL, add.id = FALSE, ...)}
\arguments{
\item{spacodi.permutations}{a list-object generated by \code{\link{spacodi.by.nodes}}}
\item{phy}{a phylogenetic tree of class \code{phylo}; see \code{\link[ape]{read.tree}}}
\item{cex}{a named list of character scalings for node-associated symbols (\code{pch}), tip labels (\code{tip}), and the legend (\code{legend})}
\item{transp}{degree of color transparency: \code{transp=1} is opaque}
\item{sig.plot}{Boolean; whether nodes are colored by significance of observed and expected estimates of structure}
\item{cut.off}{a value, if \code{sig.plot=TRUE}, distinguishing observed from expected estimates of structure}
\item{cols}{a list of three elements if using \code{sig.plot}: the first color is for values not different than expected; the second and third for values greater and less than expected}
\item{main}{Boolean; whether a title is plotted}
\item{outfile}{an optional .pdf file to which to write output}
\item{add.id}{Boolean; whether \code{node.labels} are placed near nodes; see \code{\link[ape]{nodelabels}}}
\item{\dots}{additional plotting parameters to be optionally supplied}
}
\details{
This function will compute and plot estimates of diversity structure on the tree, with color-coded values.
Note: this function requires a \code{spacodi.permutations} with data for all nodes. If using, for instance, \code{spacodi.by.nodes} to
generate these data, the option \code{obs.only=TRUE} will ensure that data are returned for all nodes (see \strong{Examples} below).
}
\value{a plot of diversity structure estimates for a phylogeny, with the option to direct output to a .pdf file}
\references{
HARDY OJ and B SENTERRE. 2007. Characterizing the
phylogenetic structure of communities by an additive partitioning of
phylogenetic diversity. Journal of Ecology 95:493-506.
HARDY OJ. 2008. Testing the spatial phylogenetic
structure of local communities: statistical performances of
different null models and test statistics on a locally neutral
community. Journal of Ecology 96:914-926.
}
\author{Timothy Paine and Jonathan Eastman}
\seealso{
see \code{\link{spacodi.by.nodes}} for estimating phylogenetic turnover on trees with community-level sampling;
\code{\link{spacodi.permutplot}}; \code{\link{phy.dotplot}} }
\examples{
data(sp.example)
attach(sp.example)
# plot PIst
PI=spacodi.by.nodes(sp.plot=spl, sp.parm="PIst", phy=phy, return.all=TRUE, method="1s")
spacodi.treeplot(PI, phy, sig.plot=TRUE, add.id=FALSE)
spacodi.treeplot(PI, phy, sig.plot=FALSE, add.id=FALSE)
}
|
library(nleqslv)
f <- function(x) {
y <-numeric(length(x))
y[1] <- x[1]^2 + x[2]^3
y[2] <- x[1] + 2*x[2] + 3
y
}
# test named x-values
xstart <- c(a=1.0, b=0.5)
xstart
z <- nleqslv(xstart,f, control=list(trace=0))
all(names(z$x) == names(xstart))
# test named x-values
xstart <- c(u=1.0, 0.5)
xstart
z <- nleqslv(xstart,f, control=list(trace=0))
all(names(z$x) == names(xstart))
| /tests/xnames.R | no_license | cran/nleqslv | R | false | false | 401 | r |
library(nleqslv)
f <- function(x) {
y <-numeric(length(x))
y[1] <- x[1]^2 + x[2]^3
y[2] <- x[1] + 2*x[2] + 3
y
}
# test named x-values
xstart <- c(a=1.0, b=0.5)
xstart
z <- nleqslv(xstart,f, control=list(trace=0))
all(names(z$x) == names(xstart))
# test named x-values
xstart <- c(u=1.0, 0.5)
xstart
z <- nleqslv(xstart,f, control=list(trace=0))
all(names(z$x) == names(xstart))
|
\name{add.phenos}
\alias{add.phenos}
\title{Create genetic and physical list of maps object.}
\description{
Add phenotypes to cross object.
}
\usage{
add.phenos(cross, newdata = NULL, index = NULL)
}
\arguments{
\item{cross}{Objects of class \code{cross}. See
\code{\link[qtl]{read.cross}}.}
\item{newdata}{Data frame with one phenotype per column. Row names should correspond to \code{index} in \code{cross} object.}
\item{index}{Numerical index or character name of phenotype in \code{cross} that corresponds to row names of \code{newdata}.}
}
\details{
Returns the \code{cross} object with added phenotypes.
No change if \code{newdata} is \code{NULL}.
Assumes \code{newdata} are in same individual order as \code{cross} if \code{index} is \code{NULL}; otherwise, matches row names of \code{newdata} to the phenotype identified by \code{index}.
}
\seealso{
\code{\link{snp.record}}
}
\examples{
data(hyper)
newdata <- data.frame(x = rnorm(nind(hyper)))
cross <- add.phenos(hyper, newdata)
summary(cross)
## Use of index. First need to create one.
newdata <- data.frame(myindex = seq(nind(cross)))
cross <- add.phenos(cross, newdata)
newdata <- data.frame(y = rnorm(100))
row.names(newdata) <- sample(seq(nind(cross)), 100)
cross <- add.phenos(cross, newdata)
}
\keyword{utilities}
| /man/add.phenos.Rd | no_license | atbroman/qtlview | R | false | false | 1,293 | rd | \name{add.phenos}
\alias{add.phenos}
\title{Create genetic and physical list of maps object.}
\description{
Add phenotypes to cross object.
}
\usage{
add.phenos(cross, newdata = NULL, index = NULL)
}
\arguments{
\item{cross}{Objects of class \code{cross}. See
\code{\link[qlt]{read.cross}}.}
\item{newdata}{Data frame with one phenotype per column. Row names should correspond to \code{index} in \code{cross} object.}
\item{index}{Numerical index or character name of phenotype in \code{cross} that corresponds to row names of \code{newdata}.}
}
\details{
Returns the \code{cross} object with added phenotypes.
No change if \code{newdata} is \code{NULL}.
Assumes \code{newdata} are in same individual order as \code{cross} if \code{index} is \code{NULL}; otherwise, matches row names of \code{newdata} to the phenotype identified by \code{index}.
}
\seealso{
\code{\link{snp.record}}
}
\examples{
data(hyper)
newdata <- data.frame(x = rnorm(nind(hyper))
cross <- add.phenos(hyper, newdata)
summary(cross)
## Use of index. First need to create one.
newdata <- data.frame(myindex = seq(nind(cross)))
cross <- add.phenos(cross, newdata)
newdata <- data.frame(y = rnorm(100))
row.names(newdata) <- sample(seq(nind(cross)), 100)
cross <- add.phenos(cross, newdata)
}
\keyword{utilities}
|
# k-means and hierarchical clustering demo on the USArrests dataset
# (comments translated to English; code unchanged).
# libraries
library(factoextra)
data("USArrests")
df <- scale(USArrests)
# Choose the number of clusters via the elbow / within-sum-of-squares plot
jpeg('optimal_cluster.jpg')
fviz_nbclust(df, kmeans, method= "wss") + geom_vline(xintercept = 4, linetype=2)
dev.off()
km_res <- kmeans(df, 4, nstart = 25)
km_res
# Compute the mean of each variable per cluster
aggregate(USArrests, by = list(cluster=km_res$cluster), mean)
# Append the cluster assignments to the original data
dd <- cbind(USArrests, cluster= km_res$cluster)
km_res$centers
km_res$size
jpeg('cluster_diagrama.jpg')
fviz_cluster(km_res, data =df, palette = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
             ellipse.type = "euclid",
             star.plot = TRUE,
             repel = TRUE,
             ggtheme = theme_minimal())
dev.off()
# Hierarchical clustering
# libraries
library("factoextra")
# load the data
data("USArrests")
df <- scale(USArrests)
#df <- df[1:10]
head(df)
# Similarity / dissimilarity (distance) measures on a toy 2-point example
x <- c(1,2)
y <- c(2,4)
dist_xy <- data.frame(x = x, y= y)
plot(x, y, col= "red", pch=15)
# different distance methods
dist_point <- dist(dist_xy, method = "euclidean")
dist_point <- dist(dist_xy, method = "maximum")
res_dist <- dist(df, method = "euclidean")
head(res_dist)
as.matrix(res_dist)[1:6,1:6]
res_hc <- hclust( d = res_dist, method = "single")
#jpeg("ddad.png")
fviz_dend(res_hc, cex=0.5)
#dev.off()
jpeg("ddad.png")
res_hc <- hclust( d = res_dist, method = "complete")
fviz_dend(res_hc, cex=0.5)
dev.off()
res_hc <- hclust( d = res_dist, method = "complete")
fviz_dend(res_hc, cex=0.5)
res_hc <- hclust( d = res_dist, method = "average")
fviz_dend(res_hc, cex=0.5)
# verify the cluster tree: cophenetic correlation against the original distances
res_coph <- cophenetic(res_hc)
cor(res_dist , res_coph)
# the distance metric can be changed via the 'method' argument
# cut the tree into four clusters
grp <- cutree (res_hc, k=4)
head(grp)
jpeg("dendograma_groups.png")
fviz_dend(res_hc, k=4,
          cex= 0.5,
          k_colors = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
          color_labels_by_k = TRUE,
          rect= TRUE
          )
dev.off()
| /scripts/kmeans_clustering.R | no_license | j-river1/Taller_SantaMaria | R | false | false | 2,029 | r | #kmeans- herchical methods
#librerias
library(factoextra)
data("USArrests")
df <- scale(USArrests)
#Choose number of cluster
jpeg('optimal_cluster.jpg')
fviz_nbclust(df, kmeans, method= "wss") + geom_vline(xintercept = 4, linetype=2)
dev.off()
km_res <- kmeans(df, 4, nstart = 25)
km_res
#Calcular la media por cada cluster
aggregate(USArrests, by = list(cluster=km_res$cluster), mean)
#adicionar al cluster original
dd <- cbind(USArrests, cluster= km_res$cluster)
km_res$centers
km_res$size
jpeg('cluster_diagrama.jpg')
fviz_cluster(km_res, data =df, palette = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
ellipse.type = "euclid",
star.plot = TRUE,
repel = TRUE,
ggtheme = theme_minimal())
dev.off()
#Clustering Hierarchical
#libraries
library("factoextra")
#load the data
data("USArrests")
df <- scale(USArrests)
#df <- df[1:10]
head(df)
#Similaridad o desiguales
x <- c(1,2)
y <- c(2,4)
dist_xy <- data.frame(x = x, y= y)
plot(x, y, col= "red", pch=15)
#diferent methods
dist_point <- dist(dist_xy, method = "euclidean")
dist_point <- dist(dist_xy, method = "maximum")
res_dist <- dist(df, method = "euclidean")
head(res_dist)
as.matrix(res_dist)[1:6,1:6]
res_hc <- hclust( d = res_dist, method = "single")
#jpeg("ddad.png")
fviz_dend(res_hc, cex=0.5)
#dev.off()
jpeg("ddad.png")
res_hc <- hclust( d = res_dist, method = "complete")
fviz_dend(res_hc, cex=0.5)
dev.off()
res_hc <- hclust( d = res_dist, method = "complete")
fviz_dend(res_hc, cex=0.5)
res_hc <- hclust( d = res_dist, method = "average")
fviz_dend(res_hc, cex=0.5)
#verify cluster tree
res_coph <- cophenetic(res_hc)
cor(res_dist , res_coph)
#podemos cambiar la distancia con method
#cortar el arbol en cuatro
grp <- cutree (res_hc, k=4)
head(grp)
jpeg("dendograma_groups.png")
fviz_dend(res_hc, k=4,
cex= 0.5,
k_colors = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
color_labels_by_k = TRUE,
rect= TRUE
)
dev.off()
|
# Numerically integrate a function g of two variables over the rectangle
# [lower[1], upper[1]] x [lower[2], upper[2]] using sparse-grid quadrature.
# The region is tiled into unit cells; a KPU sparse grid (accuracy k = 5)
# is laid in each cell, g is evaluated at every node, and the weighted sum
# of the evaluations is returned (a 1x1 matrix from %*%).
# NOTE(review): the dimension is hard-coded to 2, and require() is used for
# the SparseGrid dependency — library() would fail loudly if it is missing.
sg.int<-function(g,...,lower,upper){
  require("SparseGrid")
  # Snap the bounds to integers so the region splits into unit cells.
  lower<-floor(lower)
  upper<-ceiling(upper)
  if (any(lower>upper)) stop("lower must be smaller than upper")
  # Lower-left corners of every unit cell in the integration region.
  gridss<-as.matrix(expand.grid(seq(lower[1],upper[1]-1,by=1),seq(lower[2],upper[2]-1,by=1)))
  # Template nodes/weights for one unit cell (KPU rule, 2-D, accuracy 5).
  sp.grid <- createIntegrationGrid( 'KPU', dimension=2, k=5 )
  nodes<-gridss[1,]+sp.grid$nodes
  weights<-sp.grid$weights
  # Translate the template grid into each remaining cell.
  # NOTE(review): when there is only one cell, 2:nrow(gridss) yields c(2, 1)
  # and duplicates the first cell — seq_len()-style indexing would be safer.
  for (i in 2:nrow(gridss))
  {
    nodes<-rbind(nodes,gridss[i,]+sp.grid$nodes)
    weights<-c(weights,sp.grid$weights)
  }
  # Evaluate g at every node (one row of 'nodes' per call), then take the
  # weighted sum of the function values.
  gx.sp <- apply(nodes, 1, g,...)
  val.sp <- gx.sp %*%weights
  val.sp
} | /PS6.R | no_license | jeonghkim/PS6 | R | false | false | 631 | r | sg.int<-function(g,...,lower,upper){
require("SparseGrid")
lower<-floor(lower)
upper<-ceiling(upper)
if (any(lower>upper)) stop("lower must be smaller than upper")
gridss<-as.matrix(expand.grid(seq(lower[1],upper[1]-1,by=1),seq(lower[2],upper[2]-1,by=1)))
sp.grid <- createIntegrationGrid( 'KPU', dimension=2, k=5 )
nodes<-gridss[1,]+sp.grid$nodes
weights<-sp.grid$weights
for (i in 2:nrow(gridss))
{
nodes<-rbind(nodes,gridss[i,]+sp.grid$nodes)
weights<-c(weights,sp.grid$weights)
}
gx.sp <- apply(nodes, 1, g,...)
val.sp <- gx.sp %*%weights
val.sp
} |
library(shiny)
library(readr)
library(dplyr)
library(stringr)
library(ggplot2)
library(tidyr)
library(reshape2)
library(plotly)
library(shinyWidgets)
library(viridis)
library(shinyjs)
# Mass profiles: 2015 end-of-life tonnage by wasteshed / material / disposition.
m1 <- read_csv("imfoAppMassProfiles.csv")
mass <- m1 %>%
  # Landfilling is classed as "Disposal"; every other disposition as "Recovery".
  mutate(`Umbrella Disposition` = ifelse(disposition %in% "landfilling", "Disposal", "Recovery")) %>%
  mutate(Material = recode(material, "FoodWaste" = "Food Waste")) %>%
  # Any LCstage other than end-of-life transport is labeled "EOL" here.
  mutate(`Life Cycle Stage` = ifelse(LCstage %in% "endOfLifeTransport", "EOL Transport", "EOL")) %>%
  # Transport rows are dropped — only end-of-life tonnage feeds the mass table.
  filter(`Life Cycle Stage` != "EOL Transport") %>%
  # Round tonnage to the nearest hundred tons (digits = -2).
  mutate(`2015 Weight` = round(tons, digits = -2)) %>%
  rename(Wasteshed = wasteshed, Disposition = disposition) %>%
  select(Wasteshed, Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `2015 Weight`)
# Life-cycle impact factors per material / disposition / life-cycle stage;
# later joined to the mass table to compute impacts (weight * factor).
I <- read_csv("imfoAppImpactFactors.csv")
I1 <- I %>%
  mutate(Material = recode(material, "FoodWaste" = "Food Waste")) %>%
  # Recode LCstage codes to display labels; "other" catches anything unexpected.
  mutate(`Life Cycle Stage` = ifelse(LCstage %in% "endOfLifeTransport", "EOL Transport",
                                     ifelse(LCstage %in% "endOfLife", "EOL",
                                            ifelse(LCstage %in% "production", "Production",
                                                   "other")))
  ) %>%
  rename(Disposition = disposition, `Impact Category` = impactCategory,
         `Impact Units` = impactUnits, `Impact Factor` = impactFactor,
         `Implied Miles` = impliedMiles) %>%
  select(Material, Disposition, `Life Cycle Stage`, `Impact Category`,
         `Impact Units`, `Impact Factor`, `Implied Miles`)
# Choice lists for the UI selectors, derived from the prepared mass table.
Wastesheds <- sort(unique(mass$Wasteshed))
Materials <- sort(unique(mass$Material))
Dispositions <- sort(unique(mass$Disposition))
# Enables Shiny's reactive-graph log for debugging.
# NOTE(review): consider disabling this in a production deployment.
options(shiny.reactlog = TRUE)
# UI ----------------------------------------------------------------------
# Define UI for application that draws a histogram
# UI definition: navbar layout with an Introduction tab, the main
# "Visualize Impacts" page (wasteshed/material pickers plus per-material
# disposition sliders that are rendered server-side via uiOutput), a
# Glossary, and a "More" menu.
ui <- fluidPage(
  # theme = "bootstrap.css",
  chooseSliderSkin("Modern"),
  navbarPage("Material Impact Visualizer",
             # Introduction tab -------------------------------------------------------
             tabPanel("Introduction",
                      img(src = 'greenpic.jpeg', align = "center")),
             # User Input Tab ---------------------------------------------------------------
             tabPanel("Visualize Impacts",
                      sidebarLayout(
                        sidebarPanel(
                          selectInput(inputId = "selectedwasteshed",
                                      label = "Select a wasteshed:",
                                      choices = Wastesheds),
                          uiOutput("choose_materials"),
                          tags$div(class = "header",
                                   tags$p(tags$b("Sliders are set to the 2015 weights for each material. Move sliders to generate a new scenario for this wasteshed.")),
                                   tags$p("The overall weight for each material can be more or less than the 2015 amount, which would reflect a change in production.")),
                          #scrolling well panel
                          wellPanel(id = "tPanel",style = "overflow-y:scroll; max-height: 600px",
                                    # Sliders (UI) ------------------------------------------------------------
                                    # NOTE(review): conditionalPanel conditions are JavaScript
                                    # expressions, but these strings use R syntax
                                    # ("input$usermaterials %in% ..."), so they likely never
                                    # evaluate as intended; the req() calls in the server-side
                                    # renderUI blocks appear to do the real gating. Confirm.
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Cardboard'",
                                      uiOutput("cardboardsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Electronics'",
                                      uiOutput("electricsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Food'",
                                      uiOutput("foodsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Glass Containers'",
                                      uiOutput("glasssliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Nonrecyclables'",
                                      uiOutput("nonrecyclableslider")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Paper'",
                                      uiOutput("papersliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Rigid Plastic Cont.'",
                                      uiOutput("rigidplasticsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Scrap Metal'",
                                      uiOutput("metalsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Wood'",
                                      uiOutput("woodsliders")),
                                    conditionalPanel(
                                      condition = "input$usermaterials %in% 'Yard'",
                                      uiOutput("yardsliders"))
                          )
                        ),
                        # Main Panel --------------------------------------------------------------
                        # Show a plot of the generated distribution
                        mainPanel(
                          tabsetPanel(type = "tabs",
                                      tabPanel("Disposition weights in separate plots",
                                               tags$br(),
                                               tags$div(class = "header",
                                                        tags$p("Existing weights are shown in the
                                                               light colors. Moving the sliders generates
                                                               a new scenario, shown in the darker colors.")),
                                               plotOutput("weightsplot1")),
                                      tabPanel("Stacked by disposition",
                                               tags$br(),
                                               tags$div(class = "header",
                                                        tags$p("Existing weights are shown in the light colors.
                                                               Moving the sliders generates a new scenario,
                                                               shown in the darker colors.")),
                                               plotOutput("weightsplot2")),
                                      tabPanel("Impacts",
                                               plotOutput("impactplot")),
                                      tabPanel("ImpactsB",
                                               plotOutput("impactplot_b")),
                                      # tabPanel("impact2", plotOutput("impactplot2")),
                                      # Tables tab --------------------------------------------------------------
                                      tabPanel("Tables",
                                               hr(),
                                               tags$div(HTML("A reactive table created from the selected wasteshed
                                                             and selected materials:")),
                                               DT::dataTableOutput("table1"),
                                               hr(),
                                               tags$div(HTML("A reactive table created from the above table and
                                                             the sliders:")),
                                               DT::dataTableOutput("table2"),
                                               DT::dataTableOutput("table3"),
                                               DT::dataTableOutput("table4")),
                                      # Download button ---------------------------------------------------------
                                      tabPanel("Data export",
                                               selectInput("dataset",
                                                           "Choose a dataset:",
                                                           choices = c("Wide form", "Long form")),
                                               # Button
                                               downloadButton("downloadData", "Download")
                                      )
                          )
                          # plotOutput("weightsplot2"),
                          # plotOutput("impactplot"),
                          # # DT::dataTableOutput("table1"),
                          # DT::dataTableOutput("table2"),
                          # DT::dataTableOutput("table3"),
                          # DT::dataTableOutput("table4")
                          # plotlyOutput("plot1")
                        )
                      )
             ),
             # Glossary tab ------------------------------------------------------------
             tabPanel("Glossary",
                      navlistPanel(
                        widths = c(2, 6),
                        tabPanel("Materials"),
                        tabPanel("Dispositions"),
                        tabPanel("Impacts")
                      )),
             navbarMenu("More",
                        tabPanel("Resources"),
                        tabPanel("About"))
  )
)
# Server ------------------------------------------------------------------
server <- function(input, output, session) {
values <- reactiveValues(starting = TRUE)
session$onFlushed(function() {
values$starting <- FALSE
})
# userwasteshed <- reactive({
# # req(input$wasteshed)
# mass %>%
# filter(Wasteshed == input$selectedwasteshed)
# })
#widget that filters dataframe to display chosen materials
output$choose_materials <- renderUI({
selectInput(inputId = "usermaterials",
label = 'Select up to six materials:',
choices = Materials,
multiple = TRUE,
selectize = TRUE)
# selectizeInput(inputId = "usermaterials",
# label = "Select up to six materials:",
# choices = Materials,
# selected = NULL,
# options = list(placeholder = "Select materials",
# maxItems = 6))
# checkboxGroupInput(inputId = "usermaterials",
# label = "Choose materials:",
# choices = unique(userwasteshed()$material),
# selected = unique(userwasteshed()$material),
# inline = TRUE)
})
userwastemat <- reactive({
# req(input$wasteshed)
mass %>%
filter(Wasteshed == input$selectedwasteshed) %>%
filter(Material %in% input$usermaterials)
})
# Sliders ----------------------------------------------------------------
output$cardboardsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Cardboard/Kraft") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Cardboard/Kraft")
tagList(h4("Cardboard/Kraft"),
sliderInput(inputId = "slider1cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider1cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2],
step = 1000),
sliderInput(inputId = "slider1L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]),
sliderInput(inputId = "slider1R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4],
step = 1000))
})
output$electricsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Electronics") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Electronics")
tagList(h4("Electronics"),
sliderInput(inputId = "slider2L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider2R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2])
)
})
output$foodsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Food Waste") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Food Waste")
tagList(h4("Food Waste"),
sliderInput(inputId = "slider3AD",
label = "Anaerobic Digestion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider3cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider3cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]),
sliderInput(inputId = "slider3L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4])
)
})
output$glasssliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Glass Containers") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Glass Containers")
tagList(h4("Glass Containers"),
sliderInput(inputId = "slider4L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider4R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider4U",
label = "useAsAggregate",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3])
)
})
output$nonrecyclableslider <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Nonrecyclables") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Nonrecyclables")
tagList(h4("Nonrecylables"),
sliderInput(inputId = "slider5L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1])
)
})
output$papersliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Paper Fiber") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Paper Fiber")
tagList(h4("Paper Fiber"),
sliderInput(inputId = "slider6cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider6L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider6R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
)
)
})
output$rigidplasticsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Rigid Plastic Cont.") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Rigid Plastic Cont.")
tagList(h4("Rigid Plastic"),
sliderInput(inputId = "slider7cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider7L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider7R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3])
)
})
output$metalsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Scrap Metal - Other") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Scrap Metal - Other")
tagList(h4("Scrap Metal - Other"),
sliderInput(inputId = "slider8L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider8R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2])
)
})
output$woodsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Wood Waste") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Wood Waste")
tagList(h4("Wood Waste"),
sliderInput(inputId = "slider9cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider9cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider9L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
),
sliderInput(inputId = "slider9R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4]
)
)
})
output$yardsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Yard Debris") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Yard Debris")
tagList(h4("Yard Debris"),
sliderInput(inputId = "slider10AD",
label = "Anaerobic Digestion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider10cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider10cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
),
sliderInput(inputId = "slider10L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4]
)
)
})
# End Sliders -------------------------------------------------------------
# Dataframes and tables ---------------------------------------------------
output$table1 <- DT::renderDataTable({
if (is.null(input$selectedwasteshed))
return()
userwastemat()
})
  # Scenario weights: copies the 2015 weights for the selected wasteshed and
  # materials, then overwrites `New Weight` with the current slider values.
  newnew <- reactive({
    df <- mass %>%
      filter(Wasteshed == input$selectedwasteshed) %>%
      filter(Material %in% input$usermaterials) %>%
      mutate(`New Weight` = `2015 Weight`)
    # NOTE(review): this relies on two fragile assumptions -- (a) the row
    # order of df matches the fixed slider order below, and (b) sliders for
    # unselected materials return NULL and are dropped by c(), leaving
    # exactly nrow(df) values. Verify when the material selection changes.
    df$`New Weight` <- c(input$slider1cb, input$slider1cp,
                         input$slider1L, input$slider1R,
                         input$slider2L, input$slider2R,
                         input$slider3AD, input$slider3cb,
                         input$slider3cp, input$slider3L,
                         input$slider4L, input$slider4R, input$slider4U,
                         input$slider5L,
                         input$slider6cb, input$slider6L, input$slider6R,
                         input$slider7cb, input$slider7L, input$slider7R,
                         input$slider8L, input$slider8R,
                         input$slider9cb, input$slider9cp,
                         input$slider9L, input$slider9R,
                         input$slider10AD, input$slider10cb,
                         input$slider10cp, input$slider10L)
    df
  })
  # Debug view of the scenario-weight table.
  output$table2 <- DT::renderDataTable({
    newnew()
  })
# allLC <- reactive({
# nT <- newnew() %>%
# select(-`Life Cycle Stage`) %>%
# mutate(`Life Cycle Stage` = "EOL Transportation")
#
# nP <- newnew() %>%
# select(-`Life Cycle Stage`) %>%
# mutate(`Life Cycle Stage` = "Production")
#
# nn <- newnew() %>%
# rbind(nT) %>%
# rbind(nP)
#
# nn
# })
  # Joins the scenario weights to the impact factors (I1) across all three
  # life cycle stages and computes the 2015 and scenario impacts.
  newimpacts <- reactive({
    # duplicate every weight row for the transport stage
    nT <- newnew() %>%
      mutate(`Life Cycle Stage` = "EOL Transport")
    # and again for the production stage (single "production" disposition)
    nP <- newnew() %>%
      mutate(`Life Cycle Stage` = "Production",
             Disposition = "production")
    nn <- newnew() %>%
      rbind(nT) %>%
      rbind(nP) %>%
      left_join(I1, by = c("Material", "Disposition",
                           "Life Cycle Stage")) %>%
      mutate(`2015 Impact` = round(`2015 Weight`*`Impact Factor`),
             `New Impact` = round(`New Weight`*`Impact Factor`))
    nn
    # n <- newnew() %>%
    #   left_join(I1, by = c("Material", "Disposition",
    #                        "Life Cycle Stage")) %>%
    #   mutate(`2015 Impact` = round(`2015 Weight`*`Impact Factor`),
    #          `New Impact` = round(`New Weight`*`Impact Factor`))
    # n
  })
  # Debug view of the joined weight/impact table.
  output$table3 <- DT::renderDataTable({
    newimpacts()
  })
meltedusermass <- reactive({
test <- newnew() %>%
select(-c(Wasteshed, `Umbrella Disposition`, `Life Cycle Stage`)) %>%
melt(id.vars = c('Material', 'Disposition'))
# print(test)
})
# meltedimpacts <- reactive({
# t <- newimpacts() %>%
# select(-c(`2015 Weight`, `New Weight`, `Impact Factor`)) %>%
# melt(id.vars = c('Material', 'Disposition', `Impact Factor`, `Impact Units`)) %>%
# filter(!is.na(`Impact Factor`))
# t
# })
meltedimpacts <- reactive({
t <- newimpacts() %>%
select(Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `Impact Category`, `Impact Units`, `2015 Impact`, `New Impact`) %>%
gather(key = "Scenario", value = "Impact", -c(Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `Impact Category`, `Impact Units`))
})
  # Debug view of the long-form impact table.
  output$table4 <- DT::renderDataTable({
    meltedimpacts()
  })
# Impacts plot ------------------------------------------------------------
  # Dodged bars comparing 2015 vs scenario impacts per material, colored by
  # life cycle stage and faceted by impact category.
  output$impactplot <- renderPlot({
    pl <- ggplot(meltedimpacts(),
                 aes(y = Impact,
                     x = Material,
                     fill = `Life Cycle Stage`,
                     alpha = Scenario
                 )) +
      geom_bar(position = "dodge",
               stat = "identity") +
      theme_bw(base_size = 16) +
      facet_wrap(~`Impact Category`, ncol = 3, scales = "free_y"
      ) +
      scale_fill_viridis_d(begin = 0.5, direction = -1)
    # +
    # scale_alpha_discrete(range = c(0.5, 1))
    pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
    )) +
      # scale_y_continuous(limits = c(min(meltedimpacts()$Impact), max(meltedimpacts()$Impact))) +
      # baseline at zero so negative (avoided) impacts read clearly
      geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
                 na.rm = FALSE, show.legend = NA)
  }, height = 750, width = 1000)
  # Stacked bars of scenario ("New Impact") impacts only, colored by life
  # cycle stage and faceted by impact category.
  output$impactplot_b <- renderPlot({
    pl <- ggplot(meltedimpacts() %>%
                   filter(Scenario %in% "New Impact"),
                 aes(y = Impact,
                     x = Material,
                     fill = `Life Cycle Stage`)) +
      geom_bar(
        stat = "identity") +
      theme_bw(base_size = 16) +
      facet_wrap(~`Impact Category`, ncol = 3, scales = "free_y"
      ) +
      scale_fill_viridis_d(begin = 0.5, direction = -1)
    # +
    # scale_alpha_discrete(range = c(0.5, 1))
    pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
    )) +
      # scale_y_continuous(limits = c(min(meltedimpacts()$Impact), max(meltedimpacts()$Impact))) +
      # baseline at zero so negative (avoided) impacts read clearly
      geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
                 na.rm = FALSE, show.legend = NA)
  }, height = 750, width = 1000)
output$impactplot2 <- renderPlot({
pl <- ggplot(meltedimpacts(), aes(y = value, x = impactCategory, fill = impactCategory, alpha = variable)) +
geom_bar(position = "dodge", stat = "identity") +
theme_bw(base_size = 16) +
facet_wrap(~Material, ncol = 3, scales = "free_y"
) +
scale_fill_viridis_d(direction = -1, option = "A") +
scale_alpha_discrete(range = c(0.5, 1))
pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
)) +
geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
na.rm = FALSE, show.legend = NA)
}, height = 750, width = 1200)
# Weights plot ------------------------------------------------------------
# output$weightsplot2 <- renderPlot({
# if (values$starting)
# return(NULL)
# ggplot(meltedusermass(),
# aes(y = value,
# x = variable,
# fill = Material,
# alpha = variable)) +
# geom_bar(stat = "identity") +
# theme_bw(base_size = 16) +
# theme(axis.text.x = element_text(angle = 50, hjust = 1)) +
# facet_wrap(~Material, nrow = 2) +
# scale_fill_viridis_d(direction = -1) +
# scale_alpha_discrete(range = c(0.5, 1)) +
# scale_y_continuous(labels = scales::comma)
# })
  # Weights per disposition, 2015 (light) vs scenario (dark), faceted by
  # material.
  output$weightsplot1 <- renderPlot({
    req(meltedusermass())
    ggplot(meltedusermass(),
           aes(y = value,
               x = variable,
               fill = Disposition,
               alpha = variable)) +
      geom_bar(stat = "identity") +
      theme_minimal(base_size = 18) +
      theme(axis.title.x = element_blank(),
            axis.title.y = element_text(
              margin = margin(t = 0, r = 20, b = 0, l = 0),
              size = 16,
              vjust = -0.65),
            axis.text = element_text(size = 16),
            panel.grid.minor = element_blank(),
            panel.grid.major.x = element_blank(),
            axis.text.x = element_text(angle = 50, hjust = 1)) +
      labs(y = "Weight in Tons",
           alpha = "") +
      facet_wrap(~Material, nrow = 2) +
      scale_fill_viridis_d(direction = 1, end = 0.85) +
      scale_alpha_discrete(range = c(0.3, 1)) +
      # NOTE(review): the hard-coded upper limit silently drops bars above
      # 20,000 tons -- confirm this cap is intended for every wasteshed.
      scale_y_continuous(labels = scales::comma,
                         limits = c(0, 20000))
  }, height = 600, width = 1000)
  # Dodged weights per disposition, 2015 (light) vs scenario (dark),
  # faceted by material.
  output$weightsplot2 <- renderPlot({
    ggplot(meltedusermass(), aes(y = value, x = Disposition, fill = Material, alpha = variable)) +
      geom_bar(position = "dodge", stat = "identity") +
      theme_minimal(base_size = 20) +
      labs(y = "Weight in Tons") +
      theme(
        panel.grid.minor = element_blank(),
        axis.text.x = element_text(angle = 50, hjust = 1)) +
      facet_wrap(~Material, ncol = 3) +
      scale_fill_viridis_d(direction = 1) +
      scale_alpha_discrete(range = c(0.5, 1)) +
      scale_y_continuous(labels = scales::comma)
  }, height = 600, width = 1000)
# Download button ---------------------------------------------------------
  # Chooses which table the data-export tab previews and downloads.
  datasetInput <- reactive({
    switch(input$dataset,
           "Wide form" = newimpacts(),
           "Long form" = meltedimpacts())
  })
output$table <- renderTable({
datsetInput()
})
  # Streams the chosen dataset to a CSV named after the selection.
  output$downloadData <- downloadHandler(
    filename = function() {
      paste(input$dataset, ".csv", sep = "")
    },
    content = function(file) {
      write.csv(datasetInput(), file, row.names = FALSE)
    }
  )
}
shinyApp(ui = ui, server = server)
| /Archive/app.R | no_license | annawit/IMFOv5_0 | R | false | false | 27,185 | r |
library(shiny)
library(readr)
library(dplyr)
library(stringr)
library(ggplot2)
library(tidyr)
library(reshape2)
library(plotly)
library(shinyWidgets)
library(viridis)
library(shinyjs)
m1 <- read_csv("imfoAppMassProfiles.csv")
mass <- m1 %>%
mutate(`Umbrella Disposition` = ifelse(disposition %in% "landfilling", "Disposal", "Recovery")) %>%
mutate(Material = recode(material, "FoodWaste" = "Food Waste")) %>%
mutate(`Life Cycle Stage` = ifelse(LCstage %in% "endOfLifeTransport", "EOL Transport", "EOL")) %>%
filter(`Life Cycle Stage` != "EOL Transport") %>%
mutate(`2015 Weight` = round(tons, digits = -2)) %>%
rename(Wasteshed = wasteshed, Disposition = disposition) %>%
select(Wasteshed, Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `2015 Weight`)
I <- read_csv("imfoAppImpactFactors.csv")
I1 <- I %>%
mutate(Material = recode(material, "FoodWaste" = "Food Waste")) %>%
mutate(`Life Cycle Stage` = ifelse(LCstage %in% "endOfLifeTransport", "EOL Transport",
ifelse(LCstage %in% "endOfLife", "EOL",
ifelse(LCstage %in% "production", "Production",
"other")))
) %>%
rename(Disposition = disposition, `Impact Category` = impactCategory,
`Impact Units` = impactUnits, `Impact Factor` = impactFactor,
`Implied Miles` = impliedMiles) %>%
select(Material, Disposition, `Life Cycle Stage`, `Impact Category`,
`Impact Units`, `Impact Factor`, `Implied Miles`)
Wastesheds <- sort(unique(mass$Wasteshed))
Materials <- sort(unique(mass$Material))
Dispositions <- sort(unique(mass$Disposition))
options(shiny.reactlog = TRUE)
# UI ----------------------------------------------------------------------
# Define UI for application that draws a histogram
ui <- fluidPage(
# theme = "bootstrap.css",
chooseSliderSkin("Modern"),
navbarPage("Material Impact Visualizer",
# Introduction tab -------------------------------------------------------
tabPanel("Introduction",
img(src = 'greenpic.jpeg', align = "center")),
# User Input Tab ---------------------------------------------------------------
tabPanel("Visualize Impacts",
sidebarLayout(
sidebarPanel(
selectInput(inputId = "selectedwasteshed",
label = "Select a wasteshed:",
choices = Wastesheds),
uiOutput("choose_materials"),
tags$div(class = "header",
tags$p(tags$b("Sliders are set to the 2015 weights for each material. Move sliders to generate a new scenario for this wasteshed.")),
tags$p("The overall weight for each material can be more or less than the 2015 amount, which would reflect a change in production.")),
#scrolling well panel
wellPanel(id = "tPanel",style = "overflow-y:scroll; max-height: 600px",
# Sliders (UI) ------------------------------------------------------------
conditionalPanel(
condition = "input$usermaterials %in% 'Cardboard'",
uiOutput("cardboardsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Electronics'",
uiOutput("electricsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Food'",
uiOutput("foodsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Glass Containers'",
uiOutput("glasssliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Nonrecyclables'",
uiOutput("nonrecyclableslider")),
conditionalPanel(
condition = "input$usermaterials %in% 'Paper'",
uiOutput("papersliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Rigid Plastic Cont.'",
uiOutput("rigidplasticsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Scrap Metal'",
uiOutput("metalsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Wood'",
uiOutput("woodsliders")),
conditionalPanel(
condition = "input$usermaterials %in% 'Yard'",
uiOutput("yardsliders"))
)
),
# Main Panel --------------------------------------------------------------
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Disposition weights in separate plots",
tags$br(),
tags$div(class = "header",
tags$p("Existing weights are shown in the
light colors. Moving the sliders generates
a new scenario, shown in the darker colors.")),
plotOutput("weightsplot1")),
tabPanel("Stacked by disposition",
tags$br(),
tags$div(class = "header",
tags$p("Existing weights are shown in the light colors.
Moving the sliders generates a new scenario,
shown in the darker colors.")),
plotOutput("weightsplot2")),
tabPanel("Impacts",
plotOutput("impactplot")),
tabPanel("ImpactsB",
plotOutput("impactplot_b")),
# tabPanel("impact2", plotOutput("impactplot2")),
# Tables tab --------------------------------------------------------------
tabPanel("Tables",
hr(),
tags$div(HTML("A reactive table created from the selected wasteshed
and selected materials:")),
DT::dataTableOutput("table1"),
hr(),
tags$div(HTML("A reactive table created from the above table and
the sliders:")),
DT::dataTableOutput("table2"),
DT::dataTableOutput("table3"),
DT::dataTableOutput("table4")),
# Download button ---------------------------------------------------------
tabPanel("Data export",
selectInput("dataset",
"Choose a dataset:",
choices = c("Wide form", "Long form")),
# Button
downloadButton("downloadData", "Download")
)
)
# plotOutput("weightsplot2"),
# plotOutput("impactplot"),
# # DT::dataTableOutput("table1"),
# DT::dataTableOutput("table2"),
# DT::dataTableOutput("table3"),
# DT::dataTableOutput("table4")
# plotlyOutput("plot1")
)
)
),
# Glossary tab ------------------------------------------------------------
tabPanel("Glossary",
navlistPanel(
widths = c(2, 6),
tabPanel("Materials"),
tabPanel("Dispositions"),
tabPanel("Impacts")
)),
navbarMenu("More",
tabPanel("Resources"),
tabPanel("About"))
)
)
# Server ------------------------------------------------------------------
server <- function(input, output, session) {
values <- reactiveValues(starting = TRUE)
session$onFlushed(function() {
values$starting <- FALSE
})
# userwasteshed <- reactive({
# # req(input$wasteshed)
# mass %>%
# filter(Wasteshed == input$selectedwasteshed)
# })
#widget that filters dataframe to display chosen materials
output$choose_materials <- renderUI({
selectInput(inputId = "usermaterials",
label = 'Select up to six materials:',
choices = Materials,
multiple = TRUE,
selectize = TRUE)
# selectizeInput(inputId = "usermaterials",
# label = "Select up to six materials:",
# choices = Materials,
# selected = NULL,
# options = list(placeholder = "Select materials",
# maxItems = 6))
# checkboxGroupInput(inputId = "usermaterials",
# label = "Choose materials:",
# choices = unique(userwasteshed()$material),
# selected = unique(userwasteshed()$material),
# inline = TRUE)
})
userwastemat <- reactive({
# req(input$wasteshed)
mass %>%
filter(Wasteshed == input$selectedwasteshed) %>%
filter(Material %in% input$usermaterials)
})
# Sliders ----------------------------------------------------------------
output$cardboardsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Cardboard/Kraft") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Cardboard/Kraft")
tagList(h4("Cardboard/Kraft"),
sliderInput(inputId = "slider1cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider1cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2],
step = 1000),
sliderInput(inputId = "slider1L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]),
sliderInput(inputId = "slider1R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4],
step = 1000))
})
output$electricsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Electronics") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Electronics")
tagList(h4("Electronics"),
sliderInput(inputId = "slider2L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider2R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2])
)
})
output$foodsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Food Waste") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Food Waste")
tagList(h4("Food Waste"),
sliderInput(inputId = "slider3AD",
label = "Anaerobic Digestion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider3cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider3cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]),
sliderInput(inputId = "slider3L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4])
)
})
output$glasssliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Glass Containers") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Glass Containers")
tagList(h4("Glass Containers"),
sliderInput(inputId = "slider4L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider4R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider4U",
label = "useAsAggregate",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3])
)
})
output$nonrecyclableslider <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Nonrecyclables") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Nonrecyclables")
tagList(h4("Nonrecylables"),
sliderInput(inputId = "slider5L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1])
)
})
output$papersliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Paper Fiber") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Paper Fiber")
tagList(h4("Paper Fiber"),
sliderInput(inputId = "slider6cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider6L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider6R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
)
)
})
output$rigidplasticsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Rigid Plastic Cont.") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Rigid Plastic Cont.")
tagList(h4("Rigid Plastic"),
sliderInput(inputId = "slider7cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider7L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]),
sliderInput(inputId = "slider7R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3])
)
})
output$metalsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Scrap Metal - Other") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Scrap Metal - Other")
tagList(h4("Scrap Metal - Other"),
sliderInput(inputId = "slider8L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]),
sliderInput(inputId = "slider8R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2])
)
})
output$woodsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Wood Waste") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Wood Waste")
tagList(h4("Wood Waste"),
sliderInput(inputId = "slider9cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider9cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider9L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
),
sliderInput(inputId = "slider9R",
label = "Recycling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4]
)
)
})
output$yardsliders <- renderUI({
tweight <- userwastemat() %>%
filter(Material %in% "Yard Debris") %>%
pull(`2015 Weight`)
req(input$usermaterials == "Yard Debris")
tagList(h4("Yard Debris"),
sliderInput(inputId = "slider10AD",
label = "Anaerobic Digestion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[1]
),
sliderInput(inputId = "slider10cb",
label = "Combustion",
min = 0,
max = sum(tweight)*1.2,
value = tweight[2]
),
sliderInput(inputId = "slider10cp",
label = "Composting",
min = 0,
max = sum(tweight)*1.2,
value = tweight[3]
),
sliderInput(inputId = "slider10L",
label = "Landfilling",
min = 0,
max = sum(tweight)*1.2,
value = tweight[4]
)
)
})
# End Sliders -------------------------------------------------------------
# Dataframes and tables ---------------------------------------------------
output$table1 <- DT::renderDataTable({
if (is.null(input$selectedwasteshed))
return()
userwastemat()
})
newnew <- reactive({
df <- mass %>%
filter(Wasteshed == input$selectedwasteshed) %>%
filter(Material %in% input$usermaterials) %>%
mutate(`New Weight` = `2015 Weight`)
df$`New Weight` <- c(input$slider1cb, input$slider1cp,
input$slider1L, input$slider1R,
input$slider2L, input$slider2R,
input$slider3AD, input$slider3cb,
input$slider3cp, input$slider3L,
input$slider4L, input$slider4R, input$slider4U,
input$slider5L,
input$slider6cb, input$slider6L, input$slider6R,
input$slider7cb, input$slider7L, input$slider7R,
input$slider8L, input$slider8R,
input$slider9cb, input$slider9cp,
input$slider9L, input$slider9R,
input$slider10AD, input$slider10cb,
input$slider10cp, input$slider10L)
df
})
output$table2 <- DT::renderDataTable({
newnew()
})
# allLC <- reactive({
# nT <- newnew() %>%
# select(-`Life Cycle Stage`) %>%
# mutate(`Life Cycle Stage` = "EOL Transportation")
#
# nP <- newnew() %>%
# select(-`Life Cycle Stage`) %>%
# mutate(`Life Cycle Stage` = "Production")
#
# nn <- newnew() %>%
# rbind(nT) %>%
# rbind(nP)
#
# nn
# })
newimpacts <- reactive({
nT <- newnew() %>%
mutate(`Life Cycle Stage` = "EOL Transport")
nP <- newnew() %>%
mutate(`Life Cycle Stage` = "Production",
Disposition = "production")
nn <- newnew() %>%
rbind(nT) %>%
rbind(nP) %>%
left_join(I1, by = c("Material", "Disposition",
"Life Cycle Stage")) %>%
mutate(`2015 Impact` = round(`2015 Weight`*`Impact Factor`),
`New Impact` = round(`New Weight`*`Impact Factor`))
nn
# n <- newnew() %>%
# left_join(I1, by = c("Material", "Disposition",
# "Life Cycle Stage")) %>%
# mutate(`2015 Impact` = round(`2015 Weight`*`Impact Factor`),
# `New Impact` = round(`New Weight`*`Impact Factor`))
# n
})
output$table3 <- DT::renderDataTable({
newimpacts()
})
meltedusermass <- reactive({
test <- newnew() %>%
select(-c(Wasteshed, `Umbrella Disposition`, `Life Cycle Stage`)) %>%
melt(id.vars = c('Material', 'Disposition'))
# print(test)
})
# meltedimpacts <- reactive({
# t <- newimpacts() %>%
# select(-c(`2015 Weight`, `New Weight`, `Impact Factor`)) %>%
# melt(id.vars = c('Material', 'Disposition', `Impact Factor`, `Impact Units`)) %>%
# filter(!is.na(`Impact Factor`))
# t
# })
meltedimpacts <- reactive({
t <- newimpacts() %>%
select(Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `Impact Category`, `Impact Units`, `2015 Impact`, `New Impact`) %>%
gather(key = "Scenario", value = "Impact", -c(Material, Disposition, `Life Cycle Stage`, `Umbrella Disposition`, `Impact Category`, `Impact Units`))
})
output$table4 <- DT::renderDataTable({
meltedimpacts()
})
# Impacts plot ------------------------------------------------------------
output$impactplot <- renderPlot({
pl <- ggplot(meltedimpacts(),
aes(y = Impact,
x = Material,
fill = `Life Cycle Stage`,
alpha = Scenario
)) +
geom_bar(position = "dodge",
stat = "identity") +
theme_bw(base_size = 16) +
facet_wrap(~`Impact Category`, ncol = 3, scales = "free_y"
) +
scale_fill_viridis_d(begin = 0.5, direction = -1)
# +
# scale_alpha_discrete(range = c(0.5, 1))
pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
)) +
# scale_y_continuous(limits = c(min(meltedimpacts()$Impact), max(meltedimpacts()$Impact))) +
geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
na.rm = FALSE, show.legend = NA)
}, height = 750, width = 1000)
output$impactplot_b <- renderPlot({
pl <- ggplot(meltedimpacts() %>%
filter(Scenario %in% "New Impact"),
aes(y = Impact,
x = Material,
fill = `Life Cycle Stage`)) +
geom_bar(
stat = "identity") +
theme_bw(base_size = 16) +
facet_wrap(~`Impact Category`, ncol = 3, scales = "free_y"
) +
scale_fill_viridis_d(begin = 0.5, direction = -1)
# +
# scale_alpha_discrete(range = c(0.5, 1))
pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
)) +
# scale_y_continuous(limits = c(min(meltedimpacts()$Impact), max(meltedimpacts()$Impact))) +
geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
na.rm = FALSE, show.legend = NA)
}, height = 750, width = 1000)
output$impactplot2 <- renderPlot({
pl <- ggplot(meltedimpacts(), aes(y = value, x = impactCategory, fill = impactCategory, alpha = variable)) +
geom_bar(position = "dodge", stat = "identity") +
theme_bw(base_size = 16) +
facet_wrap(~Material, ncol = 3, scales = "free_y"
) +
scale_fill_viridis_d(direction = -1, option = "A") +
scale_alpha_discrete(range = c(0.5, 1))
pl + theme(axis.text.x = element_text(angle = 50, hjust = 1
)) +
geom_hline(mapping = NULL, data = NULL, size = 1, yintercept = 0,
na.rm = FALSE, show.legend = NA)
}, height = 750, width = 1200)
# Weights plot ------------------------------------------------------------
# output$weightsplot2 <- renderPlot({
# if (values$starting)
# return(NULL)
# ggplot(meltedusermass(),
# aes(y = value,
# x = variable,
# fill = Material,
# alpha = variable)) +
# geom_bar(stat = "identity") +
# theme_bw(base_size = 16) +
# theme(axis.text.x = element_text(angle = 50, hjust = 1)) +
# facet_wrap(~Material, nrow = 2) +
# scale_fill_viridis_d(direction = -1) +
# scale_alpha_discrete(range = c(0.5, 1)) +
# scale_y_continuous(labels = scales::comma)
# })
output$weightsplot1 <- renderPlot({
req(meltedusermass())
ggplot(meltedusermass(),
aes(y = value,
x = variable,
fill = Disposition,
alpha = variable)) +
geom_bar(stat = "identity") +
theme_minimal(base_size = 18) +
theme(axis.title.x = element_blank(),
axis.title.y = element_text(
margin = margin(t = 0, r = 20, b = 0, l = 0),
size = 16,
vjust = -0.65),
axis.text = element_text(size = 16),
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
axis.text.x = element_text(angle = 50, hjust = 1)) +
labs(y = "Weight in Tons",
alpha = "") +
facet_wrap(~Material, nrow = 2) +
scale_fill_viridis_d(direction = 1, end = 0.85) +
scale_alpha_discrete(range = c(0.3, 1)) +
scale_y_continuous(labels = scales::comma,
limits = c(0, 20000))
}, height = 600, width = 1000)
output$weightsplot2 <- renderPlot({
ggplot(meltedusermass(), aes(y = value, x = Disposition, fill = Material, alpha = variable)) +
geom_bar(position = "dodge", stat = "identity") +
theme_minimal(base_size = 20) +
labs(y = "Weight in Tons") +
theme(
panel.grid.minor = element_blank(),
axis.text.x = element_text(angle = 50, hjust = 1)) +
facet_wrap(~Material, ncol = 3) +
scale_fill_viridis_d(direction = 1) +
scale_alpha_discrete(range = c(0.5, 1)) +
scale_y_continuous(labels = scales::comma)
}, height = 600, width = 1000)
# Download button ---------------------------------------------------------
datasetInput <- reactive({
switch(input$dataset,
"Wide form" = newimpacts(),
"Long form" = meltedimpacts())
})
output$table <- renderTable({
datsetInput()
})
output$downloadData <- downloadHandler(
filename = function() {
paste(input$dataset, ".csv", sep = "")
},
content = function(file) {
write.csv(datasetInput(), file, row.names = FALSE)
}
)
}
shinyApp(ui = ui, server = server)
|
# Maximum-likelihood fit of the reduced BiChroM model from one uniform
# starting point (sample 101), run against the eudicot data set.
source("masternegloglikereduced1.R")
source("eudicottree.R")
library("expm")
source("Qmatrixwoodherb3.R")
source("Pruning2.R")

# Comma-separated data values per taxon.
bichrom.dataset <- read.table("eudicotvals.txt", header = FALSE, sep = ",",
                              stringsAsFactors = FALSE)
# Largest chromosome number tracked by the model.
last.state <- 50
# Pre-drawn uniform starting values for the optimiser.
uniform.samples <- read.csv("sample101.csv", header = FALSE)
a <- as.numeric(t(uniform.samples))
# Flat prior over the 2 * (last.state + 1) root states.
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))
results <- rep(0, 10)
# Nelder-Mead search; wrapped in try() so a failed optimisation is
# recorded as NA instead of aborting a batch of runs.
mle <- try(optim(par = a, fn = negloglikelihood.wh, method = "Nelder-Mead",
                 bichrom.phy = angiosperm.tree, bichrom.data = bichrom.dataset,
                 max.chromosome = last.state, pi.0 = p.0), silent = TRUE)
print(mle)
# BUG FIX: use inherits() instead of class(mle) == "try-error" -- the
# equality test is unreliable when an object carries multiple classes.
if (inherits(mle, "try-error")) {
  results <- rep(NA, 10)
} else {
  results[1:9] <- exp(mle$par)   # parameters were optimised on log scale
  results[10] <- mle$value       # attained objective value
}
write.table(results, file = "results101.csv", sep = ",")
| /Reduced model optimizations/explorelikereduced101.R | no_license | roszenil/Bichromdryad | R | false | false | 750 | r | source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample101.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results101.csv",sep=",")
|
# Monthly count of restaurants with at least one verified/rejected order,
# per prefecture, for July 2015.
# Load packages
library(RMySQL)
library(xlsx)
library(plyr)
library(dplyr)
# Set timer
ptm <- proc.time()
# Establish connection.
# SECURITY NOTE: credentials are hard-coded in source; move them to
# environment variables or a config file kept out of version control.
con <- dbConnect(RMySQL::MySQL(), host = '172.20.0.1', port = 3307, dbname = "beta",
                 user = "tantonakis", password = "2secret4usAll!")
# Send query
rs <- dbSendQuery(con,"
SELECT
`prefecture_detail`.`prefecture_name` as PREF,
COUNT(DISTINCT `restaurant_master`.`restaurant_id`) AS NUMBER_OF_RESTAURANTS,
YEAR(FROM_UNIXTIME(`order_master`.`i_date`)) AS YEAR,
MONTH(FROM_UNIXTIME(`order_master`.`i_date`)) AS MONTH
FROM `order_master`
JOIN `restaurant_master`
USING (`restaurant_id`)
JOIN `restaurant_detail`
ON (`restaurant_detail`.`restaurant_id` = `restaurant_master`.`restaurant_id` AND `restaurant_detail`.`language_id` = 1)
LEFT JOIN `city_detail`
ON (`restaurant_master`.`restaurant_city_id` = `city_detail`.`city_id` AND `city_detail`.`language_id` = 1)
LEFT JOIN `city_master`
ON ( `city_master`.`city_id` = `city_detail`.`city_id`)
LEFT JOIN `prefecture_detail`
ON (`prefecture_detail`.`language_id` = 1 AND `city_master`.`prefecture_id` = `prefecture_detail`.`prefecture_id`)
WHERE ((`order_master`.`is_deleted` = 'N') and ((`order_master`.`status` = 'VERIFIED') or (`order_master`.`status` = 'REJECTED')))
AND `order_master`.`i_date` >= UNIX_TIMESTAMP('2015-07-01')
AND `order_master`.`i_date` < UNIX_TIMESTAMP('2015-08-01')
GROUP BY year, month, pref
ORDER BY year ASC, month ASC, NUMBER_OF_RESTAURANTS DESC
")
# Fetch query results (n = -1 means all rows)
active_pref <- dbFetch(rs, n = -1)
# BUG FIX: release the result set before disconnecting; otherwise RMySQL
# leaks the open result and warns on dbDisconnect().
dbClearResult(rs)
# close connection
dbDisconnect(con)
# Stop timer
proc.time() - ptm
| /act_rest_pref_per_month.R | no_license | thomasantonakis/cdgr_mysql | R | false | false | 1,928 | r | # Load package
library(RMySQL)
library(xlsx)
library(plyr)
library(dplyr)
# Set timer
ptm <- proc.time()
# Establish connection
con <- dbConnect(RMySQL::MySQL(), host = '172.20.0.1', port = 3307, dbname = "beta",
user = "tantonakis", password = "2secret4usAll!")
# Send query
rs <- dbSendQuery(con,"
SELECT
`prefecture_detail`.`prefecture_name` as PREF,
COUNT(DISTINCT `restaurant_master`.`restaurant_id`) AS NUMBER_OF_RESTAURANTS,
YEAR(FROM_UNIXTIME(`order_master`.`i_date`)) AS YEAR,
MONTH(FROM_UNIXTIME(`order_master`.`i_date`)) AS MONTH
FROM `order_master`
JOIN `restaurant_master`
USING (`restaurant_id`)
JOIN `restaurant_detail`
ON (`restaurant_detail`.`restaurant_id` = `restaurant_master`.`restaurant_id` AND `restaurant_detail`.`language_id` = 1)
LEFT JOIN `city_detail`
ON (`restaurant_master`.`restaurant_city_id` = `city_detail`.`city_id` AND `city_detail`.`language_id` = 1)
LEFT JOIN `city_master`
ON ( `city_master`.`city_id` = `city_detail`.`city_id`)
LEFT JOIN `prefecture_detail`
ON (`prefecture_detail`.`language_id` = 1 AND `city_master`.`prefecture_id` = `prefecture_detail`.`prefecture_id`)
WHERE ((`order_master`.`is_deleted` = 'N') and ((`order_master`.`status` = 'VERIFIED') or (`order_master`.`status` = 'REJECTED')))
AND `order_master`.`i_date` >= UNIX_TIMESTAMP('2015-07-01')
AND `order_master`.`i_date` < UNIX_TIMESTAMP('2015-08-01')
GROUP BY year, month, pref
ORDER BY year ASC, month ASC, NUMBER_OF_RESTAURANTS DESC
")
# Fetch query results (n=-1) means all results
active_pref <- dbFetch(rs, n=-1)
# close connection
dbDisconnect(con)
# Stop timer
proc.time() - ptm
|
#' Computes the time-window variables (okres_min, okres_max, len)
#' @param dane data generated with the \code{\link{polacz_zus_zdau}}
#'   or \code{\link{agreguj_do_miesiecy}} function
#' @param okienko object describing the window, created with the
#'   \code{\link{okienko}} function
#' @param filtrZdau data frame containing an \code{id_zdau} column that
#'   restricts the observations included in the window
#' @return data.frame with the computed variables
#' @export
#' @import dplyr
oblicz_okienko = function(dane, okienko, filtrZdau = NULL){
  # scalar guard: filtrZdau must be NULL, or a data frame carrying id_zdau
  # (|| / && are the idiomatic scalar operators here)
  stopifnot(
    is.null(filtrZdau) || is.data.frame(filtrZdau) && 'id_zdau' %in% colnames(filtrZdau)
  )
  if (!is.null(filtrZdau)) {
    # restrict to observations listed in the filter; the explicit by=
    # keeps the join on id_zdau only and silences the natural-join message
    dane = dane %>%
      inner_join(
        filtrZdau %>%
          select(id_zdau) %>%
          distinct(),
        by = 'id_zdau'
      )
  }
  # window bounds expressed as period numbers
  dataMin = data2okres(okienko[['dataMin']])
  dataMax = data2okres(okienko[['dataMax']])
  # column names holding the window anchors come from the okienko object;
  # migrated from the defunct mutate_/filter_/select_ verbs (which built
  # expressions from pasted strings) to tidy evaluation via .data[[...]]
  zmMin = okienko[['zmiennaMin']]
  zmMax = okienko[['zmiennaMax']]
  dane = dane %>%
    mutate(
      okres_min = as.integer(.data[[zmMin]] + okienko[['offsetMin']]),
      okres_max = as.integer(.data[[zmMax]] + okienko[['offsetMax']])
    ) %>%
    mutate(
      # never extend past the record's end (koniec), when known
      okres_max = if_else(okres_max >= koniec & !is.na(koniec), koniec - 1L, okres_max)
    ) %>%
    mutate(
      # clamp to the global window bounds
      okres_min = if_else(okres_min < dataMin, dataMin, okres_min),
      okres_max = if_else(okres_max > dataMax, dataMax, okres_max)
    ) %>%
    mutate(
      len = okres_max - okres_min + 1L
    ) %>%
    # drop observations whose window is empty
    filter(len > 0L)
  return(dane)
}
#' @param dane dane wygenerowane za pomocą funkcji \code{\link{polacz_zus_zdau}},
#' lub \code{\link{agreguj_do_miesiecy}}
#' @param okienko obiekt opisujący okienko stworzony za pomocą funkcji
#' \code{\link{okienko}}
#' @param filtrZdau ramka danych zawierająca zmienną \code{id_zdau} ograniczająca
#' obserwacje, które mają się znaleźć w okienku
#' @return data.frame wyliczone zmienne
#' @export
#' @import dplyr
oblicz_okienko = function(dane, okienko, filtrZdau = NULL){
stopifnot(
is.null(filtrZdau) | is.data.frame(filtrZdau) & 'id_zdau' %in% colnames(filtrZdau)
)
if (!is.null(filtrZdau)) {
dane = dane %>%
inner_join(
filtrZdau %>%
select_('id_zdau') %>%
distinct()
)
}
dataMin = data2okres(okienko[['dataMin']])
dataMax = data2okres(okienko[['dataMax']])
dane = dane %>%
mutate_(
okres_min = paste('as.integer(', okienko[['zmiennaMin']], '+', okienko[['offsetMin']], ')'),
okres_max = paste('as.integer(', okienko[['zmiennaMax']], '+', okienko[['offsetMax']], ')')
) %>%
mutate_(
okres_max = ~if_else(okres_max >= koniec & !is.na(koniec), koniec - 1L, okres_max)
) %>%
mutate_(
okres_min = ~if_else(okres_min < dataMin, dataMin, okres_min),
okres_max = ~if_else(okres_max > dataMax, dataMax, okres_max)
) %>%
mutate_(
len = ~okres_max - okres_min + 1L
) %>%
filter_(~len > 0L)
return(dane)
} |
#' @section Resolving conflicts:
#'
#' To permanently resolve a conflict within a session, use assignment:
#'
#' \preformatted{
#' library(conflicted)
#' library(dplyr)
#'
#' filter <- dplyr::filter
#' }
#'
#' @keywords internal
#' @import rlang
"_PACKAGE"
| /R/conflicted.R | permissive | r-lib/conflicted | R | false | false | 257 | r | #' @section Resolving conflicts:
#'
#' To permanently resolve a conflict within a session, use assignment:
#'
#' \preformatted{
#' library(conflicted)
#' library(dplyr)
#'
#' filter <- dplyr::filter
#' }
#'
#' @keywords internal
#' @import rlang
"_PACKAGE"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PoisBG.R
\docType{methods}
\name{fitPoisBG_sp}
\alias{fitPoisBG_sp}
\alias{fitPoisBG_sp,matrix-method}
\title{Estimate Poisson background model for multiple slides}
\usage{
fitPoisBG_sp(object, ...)
\S4method{fitPoisBG_sp}{matrix}(
object,
id,
iterations = 10,
tol = 0.001,
size_scale = c("sum", "first")
)
}
\arguments{
\item{object}{count matrix with features in rows and samples in columns}
\item{...}{additional argument list that might be used}
\item{id}{character vector same size as sample size representing slide names of each sample}
\item{iterations}{maximum iterations to be run, default=10}
\item{tol}{tolerance to determine convergence, default = 1e-3}
\item{size_scale}{method to scale the sizefact, sum(sizefact)=1 when size_scale="sum", sizefact[1]=1 when size_scale="first"}
}
\value{
a list of following items
\itemize{
\item sizefact - estimated size factor
\item featfact - estimated feature factor matrix, column names the same as the slide id
\item countmat - the input count matrix
\item id - the input id
}
}
\description{
Estimate the Poisson background model jointly across multiple slides.
}
| /man/fitPoisBG_sp-methods.Rd | permissive | JasonWReeves/GeoDiff | R | false | true | 1,206 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PoisBG.R
\docType{methods}
\name{fitPoisBG_sp}
\alias{fitPoisBG_sp}
\alias{fitPoisBG_sp,matrix-method}
\title{Estimate Poisson background model for multiple slides}
\usage{
fitPoisBG_sp(object, ...)
\S4method{fitPoisBG_sp}{matrix}(
object,
id,
iterations = 10,
tol = 0.001,
size_scale = c("sum", "first")
)
}
\arguments{
\item{object}{count matrix with features in rows and samples in columns}
\item{...}{additional argument list that might be used}
\item{id}{character vector same size as sample size representing slide names of each sample}
\item{iterations}{maximum iterations to be run, default=10}
\item{tol}{tolerance to determine convergence, default = 1e-3}
\item{size_scale}{method to scale the sizefact, sum(sizefact)=1 when size_scale="sum", sizefact[1]=1 when size_scale="first"}
}
\value{
a list of following items
\itemize{
\item sizefact - estimated size factor
\item featfact - estimated feature factor matrix, column names the same as the slide id
\item countmat - the input count matrix
\item id - the input id
}
}
\description{
Estimate Poisson background model for multiple slides:
}
|
# -----------------------------------------------------------------
# Macro variables for Capital Bikeshare Analysis
# -----------------------------------------------------------------
# Working directory for the Capital Bikeshare analysis (placeholder value).
wd <- "working/directory/here"
# Google API key used by the analysis (placeholder value).
google_api_key <- "google-api-here"
# Macro variables for Capital Bikeshare Analysis
# -----------------------------------------------------------------
wd <- 'working/directory/here'
google_api_key <- 'google-api-here' |
# Load the Concrete data as concrete
# custom normalization function
# Min-max normalization: linearly rescales a numeric vector onto [0, 1].
#
# Args:
#   x: numeric vector to rescale.
#   na.rm: should NA values be ignored when computing the range? Defaults to
#     FALSE, which preserves the original behaviour (any NA makes the whole
#     result NA).
# Returns: a numeric vector the same length as x. When all values of x are
#   equal the range is zero and the result is NaN (division by zero), as in
#   the original implementation.
normalize <- function(x, na.rm = FALSE) {
  rng <- range(x, na.rm = na.rm)  # one pass instead of separate min()/max()
  (x - rng[1]) / (rng[2] - rng[1])
}
# NOTE(review): assumes `concrete` is already loaded in the session
# (presumably the Concrete Compressive Strength data, 1030 rows with 8
# predictors plus `strength` -- confirm against the loading step).
View(concrete)
# apply normalization to entire data frame
# (lapply works column-wise; as.data.frame restores the rectangular shape)
concrete_norm <- as.data.frame(lapply(concrete, normalize))
View(concrete_norm)
# create training and test data
# NOTE(review): the split below is positional (rows 1:773 vs 774:1030), not
# random -- verify the source rows are already shuffled.
concrete_train <- concrete_norm[1:773, ]
concrete_test <- concrete_norm[774:1030, ]
## Training a model on the data ----
# train the neuralnet model
# NOTE(review): install.packages() inside an analysis script reinstalls on
# every run; consider guarding with requireNamespace().
install.packages("neuralnet")
library(neuralnet)
# simple ANN with only a single hidden neuron
concrete_model <- neuralnet(formula = strength ~ cement + slag +
                              ash + water + superplastic +
                              coarseagg + fineagg + age,
                            data = concrete_train)
# visualize the network topology
plot(concrete_model)
## Evaluating model performance ----
# obtain model results
#results_model <- NULL
# columns 1:8 are the predictors; column 9 is the response `strength`
concrete_test[1:8]
# compute() is neuralnet's prediction function
results_model <- compute(concrete_model, concrete_test[1:8])
# obtain predicted strength values
str(results_model)
predicted_strength <- results_model$net.result
predicted_strength
# examine the correlation between predicted and actual values
cor(predicted_strength, concrete_test$strength)
## Improving model performance ----
# a more complex neural network topology with 10 hidden neurons
concrete_model2 <- neuralnet(strength ~ cement + slag +
                               ash + water + superplastic +
                               coarseagg + fineagg + age,
                             data = concrete_train, hidden = 10)
# plot the network
plot(concrete_model2)
# evaluate the results as we did before
model_results2 <- compute(concrete_model2, concrete_test[1:8])
predicted_strength2 <- model_results2$net.result
cor(predicted_strength2, concrete_test$strength)
# custom normalization function
normalize <- function(x) {
return((x - min(x)) / (max(x) - min(x)))
}
View(concrete)
# apply normalization to entire data frame
concrete_norm <- as.data.frame(lapply(concrete, normalize))
View(concrete_norm)
# create training and test data
concrete_train <- concrete_norm[1:773, ]
concrete_test <- concrete_norm[774:1030, ]
## Training a model on the data ----
# train the neuralnet model
install.packages("neuralnet")
library(neuralnet)
# simple ANN with only a single hidden neuron
concrete_model <- neuralnet(formula = strength ~ cement + slag +
ash + water + superplastic +
coarseagg + fineagg + age,
data = concrete_train)
# visualize the network topology
plot(concrete_model)
## Evaluating model performance
----
# obtain model results
#results_model <- NULL
concrete_test[1:8]
results_model <- compute(concrete_model, concrete_test[1:8])
# obtain predicted strength values
str(results_model)
predicted_strength <- results_model$net.result
predicted_strength
# examine the correlation between predicted and actual values
cor(predicted_strength, concrete_test$strength)
## Improving model performance ----
# a more complex neural network topology with 10 hidden neurons
concrete_model2 <- neuralnet(strength ~ cement + slag +
ash + water + superplastic +
coarseagg + fineagg + age,
data = concrete_train, hidden = 10)
# plot the network
plot(concrete_model2)
# evaluate the results as we did before
model_results2 <- compute(concrete_model2, concrete_test[1:8])
predicted_strength2 <- model_results2$net.result
cor(predicted_strength2, concrete_test$strength)
|
library(jsonlite)
# Example payload mirroring a model-metadata JSON document: scalar
# preprocessing/model fields plus a ranked list of pairwise interaction
# terms (var1, var2, coefficient).
x <- list(
  "Matrix of word-grams occurrences tokenizer" = "scikit-learn based tokenizer",
  "Matrix of word-grams occurrences binary" = TRUE,
  "Matrix of word-grams occurrences sublinear_tf" = FALSE,
  "Matrix of word-grams occurrences use_idf" = FALSE,
  "Matrix of word-grams occurrences norm" = "L2",
  "Total weights" = 447,
  "Intercept" = -1.20494260954,
  "Model precision" = "single",
  "Loss distribution" = "Binomial Deviance",
  "Link function" = "logit",
  "Pairwise interactions found" = list(
    list(var1 = "groups", var2 = "item_ids", cf = 0.0087981938365420381),
    list(var1 = "item_ids", var2 = "こんにちは", cf = 0.0073953000639295765),
    list(var1 = "tvh", var2 = "こんにちは", cf = 0.0062485787413982278),
    list(var1 = "dates_2 (Year)", var2 = "こんにちは", cf = 0.0039294478075596277),
    list(var1 = "dates (Year)", var2 = "groups", cf = 0.0035171332815932552),
    list(var1 = "dates_2 (Year)", var2 = "groups", cf = 0.0028919212094156216),
    # NOTE(review): "dates (Year" below is missing its closing parenthesis,
    # and the first and eighth cf values are identical -- reproduced
    # verbatim; verify against the upstream source.
    list(var1 = "dates (Year", var2 = "tvh", cf = 0.0027262013972543125),
    list(var1 = "dates (Year)", var2 = "item_ids", cf = 0.0087981938365420381),
    list(var1 = "dates (Year)", var2 = "こんにちは", cf = 0.0018645497034906681),
    list(var1 = "dates_2 (Year)", var2 = "tvh", cf = 0.0013746505632680347)
  )
)
toJSON(x, pretty=T, auto_unbox=T)
| /data-science-scripts/zach/tr_example.R | no_license | mcohenmcohen/DataRobot | R | false | false | 1,294 | r | library(jsonlite)
x = list(
'Matrix of word-grams occurrences tokenizer'= 'scikit-learn based tokenizer',
'Matrix of word-grams occurrences binary'= TRUE,
'Matrix of word-grams occurrences sublinear_tf'= FALSE,
'Matrix of word-grams occurrences use_idf'= FALSE,
'Matrix of word-grams occurrences norm'= 'L2',
'Total weights'= 447,
'Intercept'= -1.20494260954,
'Model precision'= 'single',
'Loss distribution'= 'Binomial Deviance',
'Link function'= 'logit',
'Pairwise interactions found'=list(
list(var1='groups', var2='item_ids', cf=0.0087981938365420381),
list(var1='item_ids', var2='こんにちは', cf=0.0073953000639295765),
list(var1='tvh', var2='こんにちは', cf=0.0062485787413982278),
list(var1='dates_2 (Year)', var2='こんにちは', cf=0.0039294478075596277),
list(var1='dates (Year)', var2='groups', cf=0.0035171332815932552),
list(var1='dates_2 (Year)', var2='groups', cf=0.0028919212094156216),
list(var1='dates (Year', var2='tvh', cf=0.0027262013972543125),
list(var1='dates (Year)', var2='item_ids', cf=0.0087981938365420381),
list(var1='dates (Year)', var2='こんにちは', cf=0.0018645497034906681),
list(var1='dates_2 (Year)', var2='tvh', cf=0.0013746505632680347)
)
)
toJSON(x, pretty=T, auto_unbox=T)
|
library('plyr')
library('dplyr')
library('stringr')
# Merge two data frames on the first column of `key1` and return, depending
# on `rvalue`, either the exact matches or the rows unmatched in one
# direction:
#   "EXACT"          -> inner merge of df1 and df2
#   "UNMATCH A TO B" -> rows of df1 with no partner in df2
#   "UNMATCH B TO A" -> rows of df2 with no partner in df1
# Any other rvalue falls through the if/else chain and returns NULL.
compare<-function(df1, df2, key1,key2,rvalue){
  #df1<-req_ls_data df2<-constituency_electoral_info key1<-ls_key key2<-const_eci_key
  ncol_df1=(ncol(df1))
  ncol_df2=(ncol(df2))
  #EXACT MATCH
  # NOTE(review): merges on key1's first column name for BOTH frames --
  # assumes df2 uses the same column name; confirm with callers.
  exact_match<-merge(df1,df2,by=colnames(key1)[1])
  #IN DF1 but not in DF2
  all_unmatch<-merge(df1,df2,by.x=colnames(key1)[1],by.y=colnames(key2)[1],all=TRUE)
  # NOTE(review): the next two lines look like leftovers -- the bare
  # expression has no effect and `c1` is never used afterwards.
  ncol_df1+2
  c1<-colnames(all_unmatch)[ncol_df1+2]
  # unmatched = rows where a df2-side column (index ncol_df1+2) came back NA
  # after the full outer join; NOTE(review): breaks if that column can
  # legitimately contain NA -- verify.
  all_unmatch<-all_unmatch[is.na(all_unmatch[,ncol_df1+2]),]
  #in DF2 but not in DF1
  # NOTE(review): unlike above, this merge uses the FULL key column-name
  # vectors, not just the first column -- confirm the asymmetry is intended.
  all_unmatch2<-merge(df2,df1,by.y = colnames(key1),by.x=colnames(key2),all=TRUE)
  all_unmatch2<-all_unmatch2[is.na(all_unmatch2[,ncol_df2+2]),]
  if(rvalue=="EXACT"){
    return(exact_match)
  }
  else if(rvalue=="UNMATCH A TO B"){
    return(all_unmatch)
  }
  else if(rvalue=="UNMATCH B TO A"){
    return(all_unmatch2)
  }
  #Match Outputs:
}
#Canonicalize for Constituency Names only
# indian name canonicalization
# Canonicalise an (Indian) constituency name into a matching key:
# upper-case, turn punctuation into spaces, drop the reservation /
# conjunction markers SC, ST and AND, run a fixed ordered sequence of
# phonetic substitutions over each token, and return the surviving tokens
# sorted alphabetically and joined by single spaces.
#
# Args:
#   s: character scalar holding the raw constituency name.
# Returns: the canonical key ("" when nothing survives).
canonicalize <- function(s) {
  # Ordered substitution table: phonetic di-graphs first, then doubled
  # letters. Order matters (e.g. SH -> S runs before KSH -> X), so the
  # pairs are applied strictly in sequence.
  subs <- c(
    "TH" = "T", "V" = "W", "GH" = "G", "BH" = "B", "DH" = "D",
    "JH" = "J", "KH" = "K", "MH" = "M", "PH" = "P", "SH" = "S",
    "ZH" = "S", "Z" = "S", "Y" = "I", "AU" = "OU", "OO" = "U",
    "EE" = "I", "KSH" = "X",
    # collapse doubled letters
    "AA" = "A", "BB" = "B", "CC" = "C", "DD" = "D", "FF" = "F",
    "GG" = "G", "JJ" = "J", "KK" = "K", "LL" = "L", "MM" = "M",
    "NN" = "N", "PP" = "P", "RR" = "R", "SS" = "S", "TT" = "T",
    "WW" = "W", "YY" = "Y", "ZZ" = "Z"
  )
  # punctuation becomes whitespace so tokenisation is preserved
  cleaned <- gsub("[^[:alnum:]]", " ", toupper(s))
  parts <- sort(strsplit(cleaned, " ")[[1]])
  kept <- character(0)
  for (part in parts) {
    tok <- gsub("[^[:alnum:]]", "", trimws(part))
    # reservation / conjunction markers are excluded from the key
    if (tok %in% c("SC", "AND", "ST")) {
      tok <- ""
    }
    if (nchar(tok) == 0) {
      next
    }
    for (k in seq_along(subs)) {
      tok <- gsub(names(subs)[k], subs[[k]], tok)
    }
    if (nchar(tok) > 0) {
      kept <- c(kept, tok)
    }
  }
  paste(kept, collapse = " ")
}
# Merge two data frames on a canonicalised version of their first columns.
# Trailing all-NA columns are stripped, canonicalize() (defined in this
# file) is applied to column 1 of each frame to build a CANON key, and the
# frames are merged on that key. `rvalue` selects the output: "MERGED",
# "UNMERGED A TO B" (rows of df1 with no canonical partner) or
# "UNMERGED B TO A"; any other value returns NULL.
# NOTE(review): `key2` is accepted but never used -- both unmatched outputs
# are renamed with key1's first column name; confirm both keys share it.
compare_canon<-function(df1, df2, key1,key2,rvalue){
  #REMOVE NA COLUMNS FROM DF1
  # NOTE(review): `num` is the index of the FIRST all-NA column and every
  # column from there on is dropped -- assumes all-NA columns form a
  # contiguous tail. If NO column is all-NA, `num` is never assigned and
  # the subsetting below errors. Also, c(1:num-1) parses as (1:num)-1,
  # i.e. 0:(num-1); the 0 index is silently ignored, so columns
  # 1..(num-1) are kept only by accident of R's indexing rules.
  remcols1=data.frame(df1)
  k=0
  for(i in 1:ncol(remcols1)){
    if(k==0){
      if(all(is.na(remcols1[,i]))){
        k=1
        num=i
      }
    }
  }
  new_df1<-remcols1[,c(1:num-1)]
  #REMOVE NA COLUMNS FROM DF2
  # (same caveats as for df1 above)
  remcols2=data.frame(df2)
  k=0
  for(i in 1:ncol(remcols2)){
    if(k==0){
      if(all(is.na(remcols2[,i]))){
        k=1
        num=i
      }
    }
  }
  new_df2<-remcols2[,c(1:num-1)]
  #add canonicalized values
  # build the CANON key column row by row from column 1
  numcol_df1=ncol(new_df1)
  for(i in 1:nrow(new_df1)){
    new_df1[i,numcol_df1+1]=canonicalize(new_df1[i,1])
  }
  colnames(new_df1)[numcol_df1+1]="CANON"
  numcol_df2=ncol(new_df2)
  for(i in 1:nrow(new_df2)){
    new_df2[i,numcol_df2+1]=canonicalize(new_df2[i,1])
  }
  colnames(new_df2)[numcol_df2+1]="CANON"
  #EXACT MERGE
  mer_canon=merge(new_df1,new_df2,by="CANON")
  #Remaining Unmerged from A to B
  # collect original (un-canonicalised) names whose key found no partner
  unmatched_AB=data.frame()
  j=1
  for(i in 1:nrow(new_df1)){
    if(!(new_df1$CANON[i] %in% mer_canon$CANON)){
      unmatched_AB[j,1]=new_df1[i,1]
      j=j+1
    }
  }
  colnames(unmatched_AB)[1]=colnames(key1)[1]
  mer_un_AB=unique(merge(unmatched_AB,df1,by=colnames(key1)[1]))
  #Remaining Unmerged from B to A
  unmatched_BA=data.frame()
  j=1
  for(i in 1:nrow(new_df2)){
    if(!(new_df2$CANON[i] %in% mer_canon$CANON)){
      unmatched_BA[j,1]=new_df2[i,1]
      j=j+1
    }
  }
  # NOTE(review): key1 (not key2) is used for the df2-side name as well --
  # presumably both frames share the key column name; verify.
  colnames(unmatched_BA)[1]=colnames(key1)[1]
  mer_un_BA=unique(merge(unmatched_BA,df2,by=colnames(key1)[1]))
  if(rvalue=="MERGED"){
    return(mer_canon)
  }
  else if(rvalue=="UNMERGED A TO B"){
    return(mer_un_AB)
  }
  else if(rvalue=="UNMERGED B TO A"){
    return(mer_un_BA)
  }
}
# Fuzzy-merge df1 and df2 on canonicalised first columns: two keys match
# when their tails (the part after the first space) are within edit
# distance 1-2 of each other AND their leading tokens agree exactly.
# `rvalue` selects the output: "MERGED", "UNMERGED A TO B" or
# "UNMERGED B TO A"; any other value returns NULL.
# Depends on canonicalize() (this file) and stringr::str_locate().
# NOTE(review): `key1` and `key2` are accepted but never used.
compare_edit_dist<-function(df1, df2, key1,key2,rvalue){
  #REMOVE NA COLUMNS FROM DF1
  # NOTE(review): same caveats as in compare_canon -- assumes all-NA
  # columns form a contiguous tail, errors if none exists, and c(1:num-1)
  # works only because the 0 index is silently ignored.
  remcols1=data.frame(df1)
  k=0
  for(i in 1:ncol(remcols1)){
    if(k==0){
      if(all(is.na(remcols1[,i]))){
        k=1
        num=i
      }
    }
  }
  new_df1<-remcols1[,c(1:num-1)]
  #REMOVE NA COLUMNS FROM DF2
  remcols2=data.frame(df2)
  k=0
  for(i in 1:ncol(remcols2)){
    if(k==0){
      if(all(is.na(remcols2[,i]))){
        k=1
        num=i
      }
    }
  }
  new_df2<-remcols2[,c(1:num-1)]
  #add canonicalized values
  numcol_df1=ncol(new_df1)
  for(i in 1:nrow(new_df1)){
    new_df1[i,numcol_df1+1]=canonicalize(new_df1[i,1])
  }
  colnames(new_df1)[numcol_df1+1]="CANON"
  numcol_df2=ncol(new_df2)
  for(i in 1:nrow(new_df2)){
    new_df2[i,numcol_df2+1]=canonicalize(new_df2[i,1])
  }
  colnames(new_df2)[numcol_df2+1]="CANON"
  for(i in 1:nrow(new_df1)){
    #Ignoring LS Year
    # NOTE(review): the next line is a no-op (bare expression).
    new_df1[i,numcol_df1+1]
    # str_locate returns a 1x2 matrix; substr uses its first element (for a
    # single-space pattern start == end). NOTE(review): if the key contains
    # no space, sp_f is NA and the comparisons below propagate NA -- verify
    # the keys always start with a leading token (e.g. a year).
    sp_f=str_locate(new_df1[i,numcol_df1+1]," ")
    temp=substr(new_df1[i,numcol_df1+1],sp_f+1,nchar(new_df1[i,numcol_df1+1]))
    for(j in 1:nrow(new_df2)){
      sp_f2=str_locate(new_df2[j,numcol_df2+1]," ")
      temp2=substr(new_df2[j,numcol_df2+1],sp_f2+1,nchar(new_df2[j,numcol_df2+1]))
      edist=adist(temp,temp2)
      if(edist==1 | edist==2){
        # leading tokens must agree exactly for a fuzzy tail match
        n_ls=substr(new_df1[i,numcol_df1+1],1,sp_f-1)
        n_eci=substr(new_df2[j,numcol_df2+1],1,sp_f2-1)
        if(n_ls==n_eci){
          new_df1[i,numcol_df1+2]="Matched"
          new_df1[i,numcol_df1+3]=new_df2[j,numcol_df2+1]
        }
      }
    }
    # NOTE(review): if no row has matched yet, column numcol_df1+2 does not
    # exist and this read errors ("undefined columns selected") -- confirm.
    if(is.na(new_df1[i,numcol_df1+2])){
      new_df1[i,numcol_df1+2]="Unmatched"
    }
  }
  colnames(new_df1)[numcol_df1+2]="MATCH_INDICATOR"
  colnames(new_df1)[numcol_df1+3]="KEY"
  df1_sub<-subset(new_df1, MATCH_INDICATOR=="Matched")
  mer_ed_dist=merge(df1_sub,new_df2,by.x="KEY",by.y="CANON")
  #to find unmerged from B To A
  unmerged_edist_ba=data.frame()
  j=1
  num=ncol(new_df2)
  for(i in 1:nrow(new_df2)){
    if(!(new_df2$CANON[i] %in% df1_sub$KEY)){
      unmerged_edist_ba[j,1]=new_df2$CANON[i]
      j=j+1
    }
  }
  colnames(unmerged_edist_ba)[1]=colnames(new_df2)[num]
  unm_ba=merge(unmerged_edist_ba,new_df2,by=colnames(new_df2)[num])
  if(rvalue=="MERGED"){
    return(mer_ed_dist)
  }
  else if(rvalue=="UNMERGED A TO B"){
    return(subset(new_df1, MATCH_INDICATOR=="Unmatched"))
  }
  else if(rvalue=="UNMERGED B TO A"){
    return(unm_ba)
  }
}
# Canonicalise an (Indian) person name into a phonetic matching key.
# Steps: upper-case; strip non-letters; drop honorific titles; apply a
# fixed ordered sequence of phonetic substitutions; expand common
# abbreviations (PD -> PRASAD; MD/MOHAMAD/MOHMED/MOHAMED -> MOHAMMAD);
# resolve KU by token position; strip familiar suffixes (-BHAI, -BEN,
# -BAI, -JI); return the surviving tokens sorted alphabetically, joined
# by single spaces.
#
# Args:
#   s: character scalar holding the raw name.
# Returns: the canonical key ("" when nothing survives).
canonicalize1 <- function(s) {
  trim <- function (s) { return (gsub ("\\s+$", "", gsub("^\\s+", "", s))) }
  # concats with space
  concat <- function(a, b) {
    return (paste(a, b))
  }
  x <- toupper (s)
  # convert special chars like , to space, to retain tokenization
  x<-gsub("[^[:alnum:]]"," ",x)
  tokens <- strsplit(x, " ")[[1]] # this syntax needed because strsplit returns a list (of length 1)
  tokens <- sort (tokens)
  tokenNumber <- 1
  result <- ""
  for (token in tokens) {
    # token is already in uppercase
    token <- trim (token)
    token <- gsub ("[^A-Z]", "", token)  # note: digits are dropped here too
    if (nchar(token) == 0) {
      next
    }
    # these titles are simply dropped
    if (token == "DR" | token == "MR" | token == "PROF" | token == "MRS" | token == "ENG" | token == "SHRI" | token == "SMT" | token == "SHRI" | token == "SARDAR" | token == "PANDIT" || token == "PT" || token == "THIRU")
      next
    # some common phonetic variations (order matters)
    token <- gsub ("TH", "T", token)
    token <- gsub ("V", "W", token)
    token <- gsub ("GH", "G", token)
    token <- gsub ("BH", "B", token)
    token <- gsub ("DH", "D", token)
    token <- gsub ("JH", "J", token)
    token <- gsub ("KH", "K", token)
    token <- gsub ("MH", "M", token)
    token <- gsub ("PH", "P", token)
    token <- gsub ("SH", "S", token)
    token <- gsub ("ZH", "S", token)
    token <- gsub ("Z", "S", token)
    token <- gsub ("Y", "I", token)
    token <- gsub ("AU", "OU", token)
    token <- gsub ("OO", "U", token)
    token <- gsub ("EE", "I", token)
    # NOTE(review): unreachable -- "SH" was already rewritten to "S" above,
    # so no "KSH" can survive to this point.
    token <- gsub ("KSH", "X", token)
    # collapse repeated letters
    token <- gsub ("AA", "A", token)
    token <- gsub ("BB", "B", token)
    token <- gsub ("CC", "C", token)
    token <- gsub ("DD", "D", token)
    token <- gsub ("FF", "F", token)
    token <- gsub ("GG", "G", token)
    token <- gsub ("JJ", "J", token)
    token <- gsub ("KK", "K", token)
    token <- gsub ("LL", "L", token)
    token <- gsub ("MM", "M", token)
    token <- gsub ("NN", "N", token)
    token <- gsub ("PP", "P", token)
    token <- gsub ("RR", "R", token)
    token <- gsub ("SS", "S", token)
    token <- gsub ("TT", "T", token)
    token <- gsub ("WW", "W", token)
    token <- gsub ("YY", "Y", token)
    token <- gsub ("ZZ", "Z", token)
    # now a bunch of rules that are mostly true -- change these per requirement
    token <- gsub ("PD", "PRASAD", token)
    token <- gsub ("MD", "MOHAMMAD", token)
    token <- gsub ("MOHAMAD", "MOHAMMAD", token)
    token <- gsub ("MOHMED", "MOHAMMAD", token)
    # BUG FIX: this line previously read gsub("MOHAMED", "MOHAMED", ...) --
    # a no-op that left MOHAMED unnormalised while every other spelling
    # mapped to MOHAMMAD.
    token <- gsub ("MOHAMED", "MOHAMMAD", token)
    # according to Gilles, Ku being the first token implies KUNWAR, otherwise KUMAR
    if (tokenNumber == 1) {
      token <- gsub("^KU$", "KUNWAR", token)
    } else {
      token <- gsub("^KU$", "KUMAR", token)
    }
    # replace suffixes at the end of the token (indicated by $)
    token <- gsub ("BHAI$", "", token)
    token <- gsub ("BEN$", "", token)
    token <- gsub ("BAI$", "", token)
    token <- gsub ("JI$", "", token)
    if (nchar(token) > 0) {
      result <- concat (result, token)
    }
    tokenNumber = tokenNumber + 1
  }
  return (trim(result)) # remove the unneeded space in the beginning
}
| /All_Functions.R | no_license | salonibhogale/LS_Scraping | R | false | false | 11,481 | r | library('plyr')
library('dplyr')
library('stringr')
compare<-function(df1, df2, key1,key2,rvalue){
#df1<-req_ls_data df2<-constituency_electoral_info key1<-ls_key key2<-const_eci_key
ncol_df1=(ncol(df1))
ncol_df2=(ncol(df2))
#EXACT MATCH
exact_match<-merge(df1,df2,by=colnames(key1)[1])
#IN DF1 but not in DF2
all_unmatch<-merge(df1,df2,by.x=colnames(key1)[1],by.y=colnames(key2)[1],all=TRUE)
ncol_df1+2
c1<-colnames(all_unmatch)[ncol_df1+2]
all_unmatch<-all_unmatch[is.na(all_unmatch[,ncol_df1+2]),]
#in DF2 but not in DF1
all_unmatch2<-merge(df2,df1,by.y = colnames(key1),by.x=colnames(key2),all=TRUE)
all_unmatch2<-all_unmatch2[is.na(all_unmatch2[,ncol_df2+2]),]
if(rvalue=="EXACT"){
return(exact_match)
}
else if(rvalue=="UNMATCH A TO B"){
return(all_unmatch)
}
else if(rvalue=="UNMATCH B TO A"){
return(all_unmatch2)
}
#Match Outputs:
}
#Canonicalize for Constituency Names only
# indian name canonicalization
canonicalize <- function(s) {
trim <- function (s) { return (gsub ("\\s+$", "", gsub("^\\s+", "", s))) }
# concats with space
concat <- function(a, b) {
return (paste(a, b))
}
x <- toupper (s)
# remove all chars except A-Z and space. convert special chars like , to space, to retain tokenization
#x <- gsub ("[^A-Z]", " ", x)
x<-gsub("[^[:alnum:]]"," ",x)
tokens <- strsplit(x, " ")[[1]] # this syntax needed because strsplit returns a list (of length 1)
tokens <- sort (tokens)
tokenNumber <- 1
result <- ""
for (token in tokens) {
# token is already in uppercase
token <- trim (token)
token <- gsub ("[^[:alnum:]]", "", token)
#Removing SC from canon name
if(token=="SC" | token=="AND" | token=="ST"){
token=""
}
if (nchar(token) == 0) {
next
}
# some common phonetic variations
token <- gsub ("TH", "T", token)
token <- gsub ("V", "W", token)
token <- gsub ("GH", "G", token)
token <- gsub ("BH", "B", token)
token <- gsub ("DH", "D", token)
token <- gsub ("JH", "J", token)
token <- gsub ("KH", "K", token)
token <- gsub ("MH", "M", token)
token <- gsub ("PH", "P", token)
token <- gsub ("SH", "S", token)
token <- gsub ("ZH", "S", token)
token <- gsub ("Z", "S", token)
token <- gsub ("Y", "I", token)
token <- gsub ("AU", "OU", token)
token <- gsub ("OO", "U", token)
token <- gsub ("EE", "I", token)
token <- gsub ("KSH", "X", token)
# repeated letters
token <- gsub ("AA", "A", token)
token <- gsub ("BB", "B", token)
token <- gsub ("CC", "C", token)
token <- gsub ("DD", "D", token)
token <- gsub ("FF", "F", token)
token <- gsub ("GG", "G", token)
token <- gsub ("JJ", "J", token)
token <- gsub ("KK", "K", token)
token <- gsub ("LL", "L", token)
token <- gsub ("MM", "M", token)
token <- gsub ("NN", "N", token)
token <- gsub ("PP", "P", token)
token <- gsub ("RR", "R", token)
token <- gsub ("SS", "S", token)
token <- gsub ("TT", "T", token)
token <- gsub ("WW", "W", token)
token <- gsub ("YY", "Y", token)
token <- gsub ("ZZ", "Z", token)
# now a bunch of rules that are mostly true -- change these per requirement
if (nchar(token) > 0) {
result <- concat (result, token)
}
tokenNumber = tokenNumber + 1
}
return (trim(result)) # remove the unneeded space in the beginning
}
compare_canon<-function(df1, df2, key1,key2,rvalue){
#REMOVE NA COLUMNS FROM DF1
remcols1=data.frame(df1)
k=0
for(i in 1:ncol(remcols1)){
if(k==0){
if(all(is.na(remcols1[,i]))){
k=1
num=i
}
}
}
new_df1<-remcols1[,c(1:num-1)]
#REMOVE NA COLUMNS FROM DF2
remcols2=data.frame(df2)
k=0
for(i in 1:ncol(remcols2)){
if(k==0){
if(all(is.na(remcols2[,i]))){
k=1
num=i
}
}
}
new_df2<-remcols2[,c(1:num-1)]
#add canonicalized values
numcol_df1=ncol(new_df1)
for(i in 1:nrow(new_df1)){
new_df1[i,numcol_df1+1]=canonicalize(new_df1[i,1])
}
colnames(new_df1)[numcol_df1+1]="CANON"
numcol_df2=ncol(new_df2)
for(i in 1:nrow(new_df2)){
new_df2[i,numcol_df2+1]=canonicalize(new_df2[i,1])
}
colnames(new_df2)[numcol_df2+1]="CANON"
#EXACT MERGE
mer_canon=merge(new_df1,new_df2,by="CANON")
#Remaining Unmerged from A to B
unmatched_AB=data.frame()
j=1
for(i in 1:nrow(new_df1)){
if(!(new_df1$CANON[i] %in% mer_canon$CANON)){
unmatched_AB[j,1]=new_df1[i,1]
j=j+1
}
}
colnames(unmatched_AB)[1]=colnames(key1)[1]
mer_un_AB=unique(merge(unmatched_AB,df1,by=colnames(key1)[1]))
#Remaining Unmerged from B to A
unmatched_BA=data.frame()
j=1
for(i in 1:nrow(new_df2)){
if(!(new_df2$CANON[i] %in% mer_canon$CANON)){
unmatched_BA[j,1]=new_df2[i,1]
j=j+1
}
}
colnames(unmatched_BA)[1]=colnames(key1)[1]
mer_un_BA=unique(merge(unmatched_BA,df2,by=colnames(key1)[1]))
if(rvalue=="MERGED"){
return(mer_canon)
}
else if(rvalue=="UNMERGED A TO B"){
return(mer_un_AB)
}
else if(rvalue=="UNMERGED B TO A"){
return(mer_un_BA)
}
}
compare_edit_dist<-function(df1, df2, key1,key2,rvalue){
#REMOVE NA COLUMNS FROM DF1
remcols1=data.frame(df1)
k=0
for(i in 1:ncol(remcols1)){
if(k==0){
if(all(is.na(remcols1[,i]))){
k=1
num=i
}
}
}
new_df1<-remcols1[,c(1:num-1)]
#REMOVE NA COLUMNS FROM DF2
remcols2=data.frame(df2)
k=0
for(i in 1:ncol(remcols2)){
if(k==0){
if(all(is.na(remcols2[,i]))){
k=1
num=i
}
}
}
new_df2<-remcols2[,c(1:num-1)]
#add canonicalized values
numcol_df1=ncol(new_df1)
for(i in 1:nrow(new_df1)){
new_df1[i,numcol_df1+1]=canonicalize(new_df1[i,1])
}
colnames(new_df1)[numcol_df1+1]="CANON"
numcol_df2=ncol(new_df2)
for(i in 1:nrow(new_df2)){
new_df2[i,numcol_df2+1]=canonicalize(new_df2[i,1])
}
colnames(new_df2)[numcol_df2+1]="CANON"
for(i in 1:nrow(new_df1)){
#Ignoring LS Year
new_df1[i,numcol_df1+1]
sp_f=str_locate(new_df1[i,numcol_df1+1]," ")
temp=substr(new_df1[i,numcol_df1+1],sp_f+1,nchar(new_df1[i,numcol_df1+1]))
for(j in 1:nrow(new_df2)){
sp_f2=str_locate(new_df2[j,numcol_df2+1]," ")
temp2=substr(new_df2[j,numcol_df2+1],sp_f2+1,nchar(new_df2[j,numcol_df2+1]))
edist=adist(temp,temp2)
if(edist==1 | edist==2){
n_ls=substr(new_df1[i,numcol_df1+1],1,sp_f-1)
n_eci=substr(new_df2[j,numcol_df2+1],1,sp_f2-1)
if(n_ls==n_eci){
new_df1[i,numcol_df1+2]="Matched"
new_df1[i,numcol_df1+3]=new_df2[j,numcol_df2+1]
}
}
}
if(is.na(new_df1[i,numcol_df1+2])){
new_df1[i,numcol_df1+2]="Unmatched"
}
}
colnames(new_df1)[numcol_df1+2]="MATCH_INDICATOR"
colnames(new_df1)[numcol_df1+3]="KEY"
df1_sub<-subset(new_df1, MATCH_INDICATOR=="Matched")
mer_ed_dist=merge(df1_sub,new_df2,by.x="KEY",by.y="CANON")
#to find unmerged from B To A
unmerged_edist_ba=data.frame()
j=1
num=ncol(new_df2)
for(i in 1:nrow(new_df2)){
if(!(new_df2$CANON[i] %in% df1_sub$KEY)){
unmerged_edist_ba[j,1]=new_df2$CANON[i]
j=j+1
}
}
colnames(unmerged_edist_ba)[1]=colnames(new_df2)[num]
unm_ba=merge(unmerged_edist_ba,new_df2,by=colnames(new_df2)[num])
if(rvalue=="MERGED"){
return(mer_ed_dist)
}
else if(rvalue=="UNMERGED A TO B"){
return(subset(new_df1, MATCH_INDICATOR=="Unmatched"))
}
else if(rvalue=="UNMERGED B TO A"){
return(unm_ba)
}
}
canonicalize1 <- function(s) {
trim <- function (s) { return (gsub ("\\s+$", "", gsub("^\\s+", "", s))) }
# concats with space
concat <- function(a, b) {
return (paste(a, b))
}
x <- toupper (s)
# remove all chars except A-Z and space. convert special chars like , to space, to retain tokenization
#x <- gsub ("[^A-Z]", " ", x)
x<-gsub("[^[:alnum:]]"," ",x)
tokens <- strsplit(x, " ")[[1]] # this syntax needed because strsplit returns a list (of length 1)
tokens <- sort (tokens)
tokenNumber <- 1
result <- ""
for (token in tokens) {
# token is already in uppercase
token <- trim (token)
token <- gsub ("[^A-Z]", "", token)
if (nchar(token) == 0) {
next
}
# these titles are simply dropped
if (token == "DR" | token == "MR" | token == "PROF" | token == "MRS" | token == "ENG" | token == "SHRI" | token == "SMT" | token == "SHRI" | token == "SARDAR" | token == "PANDIT" || token == "PT" || token == "THIRU")
next
# some common phonetic variations
token <- gsub ("TH", "T", token)
token <- gsub ("V", "W", token)
token <- gsub ("GH", "G", token)
token <- gsub ("BH", "B", token)
token <- gsub ("DH", "D", token)
token <- gsub ("JH", "J", token)
token <- gsub ("KH", "K", token)
token <- gsub ("MH", "M", token)
token <- gsub ("PH", "P", token)
token <- gsub ("SH", "S", token)
token <- gsub ("ZH", "S", token)
token <- gsub ("Z", "S", token)
token <- gsub ("Y", "I", token)
token <- gsub ("AU", "OU", token)
token <- gsub ("OO", "U", token)
token <- gsub ("EE", "I", token)
token <- gsub ("KSH", "X", token)
# repeated letters
token <- gsub ("AA", "A", token)
token <- gsub ("BB", "B", token)
token <- gsub ("CC", "C", token)
token <- gsub ("DD", "D", token)
token <- gsub ("FF", "F", token)
token <- gsub ("GG", "G", token)
token <- gsub ("JJ", "J", token)
token <- gsub ("KK", "K", token)
token <- gsub ("LL", "L", token)
token <- gsub ("MM", "M", token)
token <- gsub ("NN", "N", token)
token <- gsub ("PP", "P", token)
token <- gsub ("RR", "R", token)
token <- gsub ("SS", "S", token)
token <- gsub ("TT", "T", token)
token <- gsub ("WW", "W", token)
token <- gsub ("YY", "Y", token)
token <- gsub ("ZZ", "Z", token)
# now a bunch of rules that are mostly true -- change these per requirement
token <- gsub ("PD", "PRASAD", token)
token <- gsub ("MD", "MOHAMMAD", token)
token <- gsub ("MOHAMAD", "MOHAMMAD", token)
token <- gsub ("MOHMED", "MOHAMMAD", token)
token <- gsub ("MOHAMED", "MOHAMED", token)
# according to Gilles, Ku being the first token implies KUNWAR, otherwise KUMAR
if (tokenNumber == 1) {
token <- gsub("^KU$", "KUNWAR", token)
} else {
token <- gsub("^KU$", "KUMAR", token)
}
# replace suffixes at the end of the token (indicated by $)
token <- gsub ("BHAI$", "", token)
token <- gsub ("BEN$", "", token)
token <- gsub ("BAI$", "", token)
token <- gsub ("JI$", "", token)
if (nchar(token) > 0) {
result <- concat (result, token)
}
tokenNumber = tokenNumber + 1
}
return (trim(result)) # remove the unneeded space in the beginning
}
|
# ------------------------------------------------------------------------------
# BayesDiallel plots - see BayesDiallel vignette for specifics
# S. Turner
# 18 July 2016
# ------------------------------------------------------------------------------
library(psych)
library(ggplot2)
# Code to plot observed vs. expected values (TwoDiallelPlot), highest
# posterior density (HPD) intervals (PlotHPD), and strawplots for AFD objects
# ------------------------------------------------------------------------------
# load AFD objects
# ------------------------------------------------------------------------------
setwd("~/GitHub/carrot-diallel")
source("~/GitHub/carrot-diallel/07_read_AFD_objects.R")
# ------------------------------------------------------------------------------
# Observed vs. expected plots - expected shows predictions based on observed data
# ------------------------------------------------------------------------------
# e.g. height
results[["height"]]$AllDiallelObs[[1]]$TwoDiallelPlot(PlotObservedVersusExpectedAll = TRUE,
HeadTitle = "",
show.strain.names = TRUE,
LabelLeft = TRUE)
# loop to generate observed vs expected TwoDiallelPlots for all traits
for (i in varNames) {
setEPS()
postscript(paste0("~/GitHub/carrot-diallel/results/", i, "/OvsE", i, ".eps", sep = ""), width = 6,
height = 4)
results[[i]]$AllDiallelObs[[1]]$TwoDiallelPlot(PlotObservedVersusExpectedAll = TRUE,
HeadTitle = "",
show.strain.names = TRUE,
LabelLeft = TRUE)
dev.off()
}
# ------------------------------------------------------------------------------
# highest posterior density (HPD) plots for inheritance classes
# ------------------------------------------------------------------------------
# e.g. height
results[["height"]]$AllDiallelObs[[1]]$PlotHPD(UseDefaultWanted1 = TRUE,
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-10, 10), main = "height")
# loop to generate HPD plots for all traits
# (writes results/<trait>/HPD_main<trait>.eps, matching the OvsE loop above)
for (i in varNames) {
  setEPS()
  # BUG FIX: the original path lacked the "/" between the trait directory
  # and the file name, so files landed in results/ as
  # "<trait>HPD_main<trait>.eps". The stray `sep = ""` argument was a no-op
  # in paste0() and has been dropped.
  postscript(paste0("~/GitHub/carrot-diallel/results/", i, "/HPD_main", i, ".eps"),
             width = 10, height = 6)
  results[[i]]$AllDiallelObs[[1]]$PlotHPD(UseDefaultWanted1 = TRUE,
                                          EvenXlims = TRUE, DoMu = FALSE,
                                          xlim = c(-10, 10), main = paste(i))
  dev.off()
}
# ------------------------------------------------------------------------------
# HPD plots for random effects
# ------------------------------------------------------------------------------
# loop to generate HPD plots of random effects for all traits
for (i in varNames) {
  # CONSISTENCY FIX: every sibling export loop in this script calls setEPS()
  # before postscript(); without it this loop produced plain (non-EPS)
  # PostScript output. The no-op `sep = ""` in paste0() was also dropped.
  setEPS()
  postscript(paste0("~/GitHub/carrot-diallel/results/", i,
                    "/HPD_random", i, ".eps"), width = 5, height = 5)
  results[[i]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
                                          EvenXlims = TRUE, DoMu = FALSE,
                                          xlim = c(-10, 10), main = paste(i))
  dev.off()
}
# e.g. height & midHeight (for Fig 6)
setEPS()
postscript("~/GitHub/carrot-diallel/results/midheight_random_eff.eps", width = 6, height = 6)
#layout(matrix(c(1,2), 2, 2, byrow=TRUE), widths=c(1.8,1))
results[["midHeight"]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-15, 15), main = "midHeight")
dev.off()
setEPS()
postscript("~/GitHub/carrot-diallel/results/height_random_eff.eps", width = 3.465, height = 6)
results[["height"]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-15, 15), main = "height")
dev.off()
# ------------------------------------------------------------------------------
# straw plots (interactions among inheritance classes)
# see Lenarcic et al. (2012) Genetics and BayesDiallel documentation for details
# ------------------------------------------------------------------------------
# e.g. height
PlotStrawPlot(results[["height"]]$AllDiallelObs[[1]], DoMedian = FALSE, DoMu = FALSE,
GiveHeadings = TRUE, yline = 2, DoCC = FALSE, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2",
"#D55E00"), ylab = "height")
legend("topleft", c("A", "B", "C", "D", "E", "F"), lty = 1, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"),
title = "parent")
# loop to generate straw plots for all traits
for (i in varNames) {
PlotStrawPlot(results[[i]]$AllDiallelObs[[1]], DoMedian = FALSE, DoMu = FALSE,
GiveHeadings = TRUE, yline = 2, DoCC = FALSE, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2",
"#D55E00"), ylab = paste(i))
legend("topleft", c("A", "B", "C", "D", "E", "F"), lty = 1, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"),
title = "parent")
}
# ------------------------------------------------------------------------------
# plot correlations for effects
# Fig 2 B and C
summary(results[[1]]$AllDiallelObs[[1]]$cent.chains)
post_means <- list()
for(i in varNames){
AO <- results[[i]]$AllDiallelObs[[1]]
means <- summary(AO$cent.chains)
post_means[[i]] <- means[[1]][13:30,1]
}
post_df <- as.data.frame(post_means)
post_df <- post_df[,c(1:9)]
# rename variables
post_df <- rename(post_df, c("midHeight" = "height (80DAP)",
"midWidth" = "width (80DAP)",
"height" = "height (130DAP)",
"width" = "width (130DAP)",
"flw" = "shoot biomass (fresh)",
"dlw" = "shoot biomass (dry)",
"frw" = "root biomass (fresh)",
"drw" = "root biomass (dry)",
"ratio" = "shoot:root ratio"))
a_cor <- cor(post_df[1:6,]) # additive effect correlations
i_cor <- cor(post_df[13:18,]) # inbred deviation correlations
corrplot(i_cor)
# ------------------------------------------------------------------------------
# create correlation matrix for additive and inbred parameters
# ------------------------------------------------------------------------------
correlation_a <- cor(post_df[1:6,], method = "pearson")
correlation_i <- cor(post_df[13:18,], method = "pearson")
# ------------------------------------------------------------------------------
# function to calculate significance of correlations
# source: https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
# ------------------------------------------------------------------------------
cor.mtest <- function(mat, conf.level = 0.95) {
  # Pairwise correlation tests for the columns of `mat`.
  #
  # Args:
  #   mat: numeric matrix (or object coercible to one); columns are variables.
  #   conf.level: confidence level forwarded to cor.test().
  #
  # Returns a named list of three n x n symmetric matrices (n = ncol(mat)):
  #   [[1]] / $p.mat: p-values (diagonal fixed at 0),
  #   [[2]] / $lowCI: lower confidence bounds (diagonal fixed at 1),
  #   [[3]] / $uppCI: upper confidence bounds (diagonal fixed at 1).
  # Positional access (res[[1]], res[[2]], res[[3]]) is unchanged from the
  # original version; the names are additive.
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- lowCI.mat <- uppCI.mat <- matrix(NA_real_, n, n)
  diag(p.mat) <- 0
  diag(lowCI.mat) <- diag(uppCI.mat) <- 1
  # seq_len() (instead of 1:(n - 1)) makes the loop empty, not backwards,
  # when mat has a single column.
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      tmp <- cor.test(mat[, i], mat[, j], conf.level = conf.level)
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
      lowCI.mat[i, j] <- lowCI.mat[j, i] <- tmp$conf.int[1]
      uppCI.mat[i, j] <- uppCI.mat[j, i] <- tmp$conf.int[2]
    }
  }
  list(p.mat = p.mat, lowCI = lowCI.mat, uppCI = uppCI.mat)
}
# ------------------------------------------------------------------------------
# export matrix of p-values
# ------------------------------------------------------------------------------
p.mat_a <- cor.mtest(post_df[1:6,])
p.mat_i <- cor.mtest(post_df[13:18,])
# ------------------------------------------------------------------------------
# create matrix for significance notations
# * = P < 0.05, ** = P < 0.01, *** = P < 0.001
# ------------------------------------------------------------------------------
sigCodes_a <- ifelse(p.mat_a[[1]] > 0.05, "NS",
ifelse(p.mat_a[[1]] > 0.01, "*",
ifelse(p.mat_a[[1]] > 0.001, "**", "***")))
sigCodes_i <- ifelse(p.mat_i[[1]] > 0.05, "NS",
ifelse(p.mat_i[[1]] > 0.01, "*",
ifelse(p.mat_i[[1]] > 0.001, "**", "***")))
# set diagonal to NA
sigCodes_a[upper.tri(sigCodes_a)] <- NA
sigCodes_i[upper.tri(sigCodes_i)] <- NA
# ------------------------------------------------------------------------------
# combine correlation coefficients and significance codes into single matrix
# ------------------------------------------------------------------------------
combinedMat_a <- lowerUpper(upper = sigCodes_a, lower = round(correlation_a, 2),
diff = FALSE)
diag(combinedMat_a) <- ""
combinedMat_i <- lowerUpper(upper = sigCodes_i, lower = round(correlation_i, 2),
diff = FALSE)
diag(combinedMat_i) <- ""
# ------------------------------------------------------------------------------
# construct the plot!
# ------------------------------------------------------------------------------
# create labels as input for geom_text
labels_a <- melt(combinedMat_a, na.rm = TRUE)
labels_i <- melt(combinedMat_i, na.rm = TRUE)
# melt matrix into vector format for plotting
meltedCormat_a <- melt(correlation_a)
meltedCormat_i <- melt(correlation_i)
ggplot(data = meltedCormat_a, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "#ca0020", high = "#2166AC", mid = "white",
midpoint = 0, limit = c(-1, 1), space = "Lab",
name = "Pearson\nCorrelation") +
scale_y_discrete(name = "", limits = rev(levels(meltedCormat_a$Var1))) +
theme(axis.text = element_text(colour = "black", size = 10),
axis.text.x = element_text(angle = 60, hjust = 1),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank()) +
geom_text(aes(Var2, Var1, label = labels_a$value), colour = "black", size = 3.15) +
coord_fixed()
# NOTE(review): the original saved to "~/GitHub/results/", unlike every other
# output path in this script ("~/GitHub/carrot-diallel/results/"); aligned.
ggsave("~/GitHub/carrot-diallel/results/additive_corr.eps", height = 150, width = 150, units = "mm")
ggplot(data = meltedCormat_i, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "#ca0020", high = "#2166AC", mid = "white",
midpoint = 0, limit = c(-1, 1), space = "Lab",
name = "Pearson\nCorrelation") +
scale_y_discrete(name = "", limits = rev(levels(meltedCormat_i$Var1))) +
theme(axis.text = element_text(colour = "black", size = 10),
axis.text.x = element_text(angle = 60, hjust = 1),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank()) +
geom_text(aes(Var2, Var1, label = labels_i$value), colour = "black", size = 3.15) +
coord_fixed()
# NOTE(review): the original saved to "~/GitHub/results/", unlike every other
# output path in this script ("~/GitHub/carrot-diallel/results/"); aligned.
ggsave("~/GitHub/carrot-diallel/results/inbred_corr.eps", height = 150, width = 150, units = "mm")
| /09_BayesDiallel_plots.R | permissive | qcmeng/carrot-diallel | R | false | false | 11,692 | r | # ------------------------------------------------------------------------------
# BayesDiallel plots - see BayesDiallel vignette for specifics
# S. Turner
# 18 July 2016
# ------------------------------------------------------------------------------
library(psych)
library(ggplot2)
# Code to plot observed vs. expected values (TwoDiallelPlot), highest
# posterior density (HPD) intervals (PlotHPD), and strawplots for AFD objects
# ------------------------------------------------------------------------------
# load AFD objects
# ------------------------------------------------------------------------------
setwd("~/GitHub/carrot-diallel")
source("~/GitHub/carrot-diallel/07_read_AFD_objects.R")
# ------------------------------------------------------------------------------
# Observed vs. expected plots - expected shows predictions based on observed data
# ------------------------------------------------------------------------------
# e.g. height
results[["height"]]$AllDiallelObs[[1]]$TwoDiallelPlot(PlotObservedVersusExpectedAll = TRUE,
HeadTitle = "",
show.strain.names = TRUE,
LabelLeft = TRUE)
# loop to generate observed vs expected TwoDiallelPlots for all traits
# Writes results/<trait>/OvsE<trait>.eps for each trait in varNames.
for (i in varNames) {
  setEPS()
  # paste0() concatenates with no separator already; the original's stray
  # `sep = ""` argument (which paste0 just pastes as one more "" string)
  # has been dropped.
  postscript(paste0("~/GitHub/carrot-diallel/results/", i, "/OvsE", i, ".eps"),
             width = 6, height = 4)
  results[[i]]$AllDiallelObs[[1]]$TwoDiallelPlot(PlotObservedVersusExpectedAll = TRUE,
                                                 HeadTitle = "",
                                                 show.strain.names = TRUE,
                                                 LabelLeft = TRUE)
  dev.off()
}
# ------------------------------------------------------------------------------
# highest posterior density (HPD) plots for inheritance classes
# ------------------------------------------------------------------------------
# e.g. height
results[["height"]]$AllDiallelObs[[1]]$PlotHPD(UseDefaultWanted1 = TRUE,
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-10, 10), main = "height")
# loop to generate HPD plots for all traits
# Writes results/<trait>/HPD_main<trait>.eps, matching the per-trait output
# directories used by the OvsE and HPD_random loops.
for (i in varNames) {
  setEPS()
  # NOTE(review): the original path dropped the "/" after the trait folder
  # (".../results/<i>HPD_main<i>.eps"), so files landed beside results/
  # instead of inside results/<i>/ like every other loop; restored here.
  # The redundant `sep = ""` argument to paste0() was also dropped.
  postscript(paste0("~/GitHub/carrot-diallel/results/", i, "/HPD_main", i, ".eps"),
             width = 10, height = 6)
  results[[i]]$AllDiallelObs[[1]]$PlotHPD(UseDefaultWanted1 = TRUE,
                                          EvenXlims = TRUE, DoMu = FALSE,
                                          xlim = c(-10, 10), main = paste(i))
  dev.off()
}
# ------------------------------------------------------------------------------
# HPD plots for random effects
# ------------------------------------------------------------------------------
# loop to generate HPD plots of random effects for all traits
for (i in varNames) {
  # setEPS() added for consistency: both sibling loops call it before
  # postscript() so the device produces EPS-compatible output.
  setEPS()
  postscript(paste0("~/GitHub/carrot-diallel/results/", i,
                    "/HPD_random", i, ".eps"), width = 5, height = 5)
  results[[i]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
                                          EvenXlims = TRUE, DoMu = FALSE,
                                          xlim = c(-10, 10), main = paste(i))
  dev.off()
}
# e.g. height & midHeight (for Fig 6)
setEPS()
postscript("~/GitHub/carrot-diallel/results/midheight_random_eff.eps", width = 6, height = 6)
#layout(matrix(c(1,2), 2, 2, byrow=TRUE), widths=c(1.8,1))
results[["midHeight"]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-15, 15), main = "midHeight")
dev.off()
setEPS()
postscript("~/GitHub/carrot-diallel/results/height_random_eff.eps", width = 3.465, height = 6)
results[["height"]]$AllDiallelObs[[1]]$PlotHPD(wanted = c(5,4,6,3,7:12),
EvenXlims = TRUE, DoMu = FALSE,
xlim = c(-15, 15), main = "height")
dev.off()
# ------------------------------------------------------------------------------
# straw plots (interactions among inheritance classes)
# see Lenarcic et al. (2012) Genetics and BayesDiallel documentation for details
# ------------------------------------------------------------------------------
# e.g. height
PlotStrawPlot(results[["height"]]$AllDiallelObs[[1]], DoMedian = FALSE, DoMu = FALSE,
GiveHeadings = TRUE, yline = 2, DoCC = FALSE, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2",
"#D55E00"), ylab = "height")
legend("topleft", c("A", "B", "C", "D", "E", "F"), lty = 1, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"),
title = "parent")
# loop to generate straw plots for all traits
for (i in varNames) {
PlotStrawPlot(results[[i]]$AllDiallelObs[[1]], DoMedian = FALSE, DoMu = FALSE,
GiveHeadings = TRUE, yline = 2, DoCC = FALSE, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2",
"#D55E00"), ylab = paste(i))
legend("topleft", c("A", "B", "C", "D", "E", "F"), lty = 1, lwd = 4,
col = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"),
title = "parent")
}
# ------------------------------------------------------------------------------
# plot correlations for effects
# Fig 2 B and C
summary(results[[1]]$AllDiallelObs[[1]]$cent.chains)
post_means <- list()
for(i in varNames){
AO <- results[[i]]$AllDiallelObs[[1]]
means <- summary(AO$cent.chains)
post_means[[i]] <- means[[1]][13:30,1]
}
post_df <- as.data.frame(post_means)
post_df <- post_df[,c(1:9)]
# rename variables
post_df <- rename(post_df, c("midHeight" = "height (80DAP)",
"midWidth" = "width (80DAP)",
"height" = "height (130DAP)",
"width" = "width (130DAP)",
"flw" = "shoot biomass (fresh)",
"dlw" = "shoot biomass (dry)",
"frw" = "root biomass (fresh)",
"drw" = "root biomass (dry)",
"ratio" = "shoot:root ratio"))
a_cor <- cor(post_df[1:6,]) # additive effect correlations
i_cor <- cor(post_df[13:18,]) # inbred deviation correlations
corrplot(i_cor)
# ------------------------------------------------------------------------------
# create correlation matrix for additive and inbred parameters
# ------------------------------------------------------------------------------
correlation_a <- cor(post_df[1:6,], method = "pearson")
correlation_i <- cor(post_df[13:18,], method = "pearson")
# ------------------------------------------------------------------------------
# function to calculate significance of correlations
# source: https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
# ------------------------------------------------------------------------------
cor.mtest <- function(mat, conf.level = 0.95) {
  # Runs cor.test() on every pair of columns of `mat` and collects the
  # p-values and confidence-interval bounds into symmetric matrices.
  # Returns list(p-values, lower CI bounds, upper CI bounds), with the
  # diagonals fixed at 0 (p-values) and 1 (CI bounds).
  mat <- as.matrix(mat)
  nvar <- ncol(mat)
  pvals <- matrix(NA, nvar, nvar)
  ci.lo <- matrix(NA, nvar, nvar)
  ci.hi <- matrix(NA, nvar, nvar)
  diag(pvals) <- 0
  diag(ci.lo) <- 1
  diag(ci.hi) <- 1
  # Visit each unordered column pair once and mirror the result.
  for (pair in combn(nvar, 2, simplify = FALSE)) {
    i <- pair[[1]]
    j <- pair[[2]]
    ct <- cor.test(mat[, i], mat[, j], conf.level = conf.level)
    pvals[i, j] <- pvals[j, i] <- ct$p.value
    ci.lo[i, j] <- ci.lo[j, i] <- ct$conf.int[1]
    ci.hi[i, j] <- ci.hi[j, i] <- ct$conf.int[2]
  }
  return(list(pvals, ci.lo, ci.hi))
}
# ------------------------------------------------------------------------------
# export matrix of p-values
# ------------------------------------------------------------------------------
p.mat_a <- cor.mtest(post_df[1:6,])
p.mat_i <- cor.mtest(post_df[13:18,])
# ------------------------------------------------------------------------------
# create matrix for significance notations
# * = P < 0.05, ** = P < 0.01, *** = P < 0.001
# ------------------------------------------------------------------------------
sigCodes_a <- ifelse(p.mat_a[[1]] > 0.05, "NS",
ifelse(p.mat_a[[1]] > 0.01, "*",
ifelse(p.mat_a[[1]] > 0.001, "**", "***")))
sigCodes_i <- ifelse(p.mat_i[[1]] > 0.05, "NS",
ifelse(p.mat_i[[1]] > 0.01, "*",
ifelse(p.mat_i[[1]] > 0.001, "**", "***")))
# set diagonal to NA
sigCodes_a[upper.tri(sigCodes_a)] <- NA
sigCodes_i[upper.tri(sigCodes_i)] <- NA
# ------------------------------------------------------------------------------
# combine correlation coefficients and significance codes into single matrix
# ------------------------------------------------------------------------------
combinedMat_a <- lowerUpper(upper = sigCodes_a, lower = round(correlation_a, 2),
diff = FALSE)
diag(combinedMat_a) <- ""
combinedMat_i <- lowerUpper(upper = sigCodes_i, lower = round(correlation_i, 2),
diff = FALSE)
diag(combinedMat_i) <- ""
# ------------------------------------------------------------------------------
# construct the plot!
# ------------------------------------------------------------------------------
# create labels as input for geom_text
labels_a <- melt(combinedMat_a, na.rm = TRUE)
labels_i <- melt(combinedMat_i, na.rm = TRUE)
# melt matrix into vector format for plotting
meltedCormat_a <- melt(correlation_a)
meltedCormat_i <- melt(correlation_i)
ggplot(data = meltedCormat_a, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "#ca0020", high = "#2166AC", mid = "white",
midpoint = 0, limit = c(-1, 1), space = "Lab",
name = "Pearson\nCorrelation") +
scale_y_discrete(name = "", limits = rev(levels(meltedCormat_a$Var1))) +
theme(axis.text = element_text(colour = "black", size = 10),
axis.text.x = element_text(angle = 60, hjust = 1),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank()) +
geom_text(aes(Var2, Var1, label = labels_a$value), colour = "black", size = 3.15) +
coord_fixed()
# NOTE(review): the original saved to "~/GitHub/results/", unlike every other
# output path in this script ("~/GitHub/carrot-diallel/results/"); aligned.
ggsave("~/GitHub/carrot-diallel/results/additive_corr.eps", height = 150, width = 150, units = "mm")
ggplot(data = meltedCormat_i, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "#ca0020", high = "#2166AC", mid = "white",
midpoint = 0, limit = c(-1, 1), space = "Lab",
name = "Pearson\nCorrelation") +
scale_y_discrete(name = "", limits = rev(levels(meltedCormat_i$Var1))) +
theme(axis.text = element_text(colour = "black", size = 10),
axis.text.x = element_text(angle = 60, hjust = 1),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank()) +
geom_text(aes(Var2, Var1, label = labels_i$value), colour = "black", size = 3.15) +
coord_fixed()
# NOTE(review): the original saved to "~/GitHub/results/", unlike every other
# output path in this script ("~/GitHub/carrot-diallel/results/"); aligned.
ggsave("~/GitHub/carrot-diallel/results/inbred_corr.eps", height = 150, width = 150, units = "mm")
|
\name{stest}
\alias{stest}
\title{Computes a Few Stationarity Tests.}
\description{
This is a wrapper for three functions from \code{tseries} package. Augmented Dickey-Fuller (ADF, \code{\link[tseries]{adf.test}}), Phillips-Perron (PP, \code{\link[tseries]{pp.test}}) and Kwiatkowski-Phillips-Schmidt-Shin (KPSS, \code{\link[tseries]{kpss.test}}) tests for stationarity are performed.
}
\usage{
stest(data)
}
\arguments{
\item{data}{\code{\link[base]{matrix}} of variables, different columns correspond to different variables}
}
\value{\code{\link[base]{matrix}},
tests statistics and p-values are given by columns,
tests outcomes for different variables are ordered by rows}
\examples{
\dontrun{
wti <- crudeoil[-1,1]
drivers <- (lag(crudeoil[,-1],k=1))[-1,]
ld.wti <- (diff(log(wti)))[-1,]
ld.drivers <- (diff(log(drivers)))[-1,]
x <- cbind(ld.wti,ld.drivers)
stest(x)
}
}
| /man/stest.Rd | no_license | bailishuimo/fDMA | R | false | false | 884 | rd | \name{stest}
\alias{stest}
\title{Computes a Few Stationarity Tests.}
\description{
This is a wrapper for three functions from \code{tseries} package. Augmented Dickey-Fuller (ADF, \code{\link[tseries]{adf.test}}), Phillips-Perron (PP, \code{\link[tseries]{pp.test}}) and Kwiatkowski-Phillips-Schmidt-Shin (KPSS, \code{\link[tseries]{kpss.test}}) tests for stationarity are performed.
}
\usage{
stest(data)
}
\arguments{
\item{data}{\code{\link[base]{matrix}} of variables, different columns correspond to different variables}
}
\value{\code{\link[base]{matrix}},
tests statistics and p-values are given by columns,
tests outcomes for different variables are ordered by rows}
\examples{
\dontrun{
wti <- crudeoil[-1,1]
drivers <- (lag(crudeoil[,-1],k=1))[-1,]
ld.wti <- (diff(log(wti)))[-1,]
ld.drivers <- (diff(log(drivers)))[-1,]
x <- cbind(ld.wti,ld.drivers)
stest(x)
}
}
|
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(plyr)
library(dplyr)
library(RColorBrewer)
# Directories
datadir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/output"
tabledir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/tables"
# Read data
data <- read.csv(paste(datadir, "ramldb_v3.8_spsst_pella_cobe_lme.csv", sep="/"), as.is=T)
# Stats for manuscript
################################################################################
# Sample size info
n_distinct(data$stockid)
n_distinct(data$species)
n_distinct(data$lme_name)
# Plot data
################################################################################
# Build data
data1 <- data %>%
mutate(comm_name=revalue(comm_name, c("common European sole"="Common sole",
"Common seabream"="Red porgy",
"Hake"="European hake",
"Hawaiian morwong"="Tarakihi",
"Herring"="Atlantic herring",
"Walleye pollock"="Alaska pollock",
"Pollock"="Saithe")),
spp_name=paste0(comm_name, " (", species, ")"),
betaT=round(betaT,2)) %>%
filter(betaT_inf!="none") %>%
select(stockid, spp_name, area, betaT) %>%
arrange(desc(betaT))
# Export data
################################################################################
# Export table
write.csv(data1, paste(tabledir, "STable5_significant_stocks.csv", sep="/"), row.names=F)
| /code/figures/STable5_significant_stocks.R | no_license | cfree14/sst_productivity | R | false | false | 1,731 | r |
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(plyr)
library(dplyr)
library(RColorBrewer)
# Directories
datadir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/output"
tabledir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/tables"
# Read data
data <- read.csv(paste(datadir, "ramldb_v3.8_spsst_pella_cobe_lme.csv", sep="/"), as.is=T)
# Stats for manuscript
################################################################################
# Sample size info
n_distinct(data$stockid)
n_distinct(data$species)
n_distinct(data$lme_name)
# Plot data
################################################################################
# Build data
data1 <- data %>%
mutate(comm_name=revalue(comm_name, c("common European sole"="Common sole",
"Common seabream"="Red porgy",
"Hake"="European hake",
"Hawaiian morwong"="Tarakihi",
"Herring"="Atlantic herring",
"Walleye pollock"="Alaska pollock",
"Pollock"="Saithe")),
spp_name=paste0(comm_name, " (", species, ")"),
betaT=round(betaT,2)) %>%
filter(betaT_inf!="none") %>%
select(stockid, spp_name, area, betaT) %>%
arrange(desc(betaT))
# Export data
################################################################################
# Export table
write.csv(data1, paste(tabledir, "STable5_significant_stocks.csv", sep="/"), row.names=F)
|
/plugins/MacSignedAU/Console6Channel/Console6Channel.r | permissive | airwindows/airwindows | R | false | false | 3,304 | r | ||
library(lpSolveAPI)
### Name: name.lp
### Title: Name LP
### Aliases: name.lp
### Keywords: programming
### ** Examples
# Extracted example for name.lp(): build a small LP, then set and read back
# the model's name.
lps.model <- make.lp(0, 3)                 # 0 constraints (added below), 3 variables
xt <- c(6,2,4)
add.constraint(lps.model, xt, "<=", 150)   # 6x1 + 2x2 + 4x3 <= 150
xt <- c(1,1,6)
add.constraint(lps.model, xt, ">=", 0)     # x1 + x2 + 6x3 >= 0
xt <- c(4,5,4)
add.constraint(lps.model, xt, "=", 40)     # 4x1 + 5x2 + 4x3 = 40
set.objfn(lps.model, c(-3,-4,-3))          # objective: -3x1 - 4x2 - 3x3
name.lp(lps.model, "Simple LP")            # with a name argument: sets the name
name.lp(lps.model)                         # without: returns the current name
| /data/genthat_extracted_code/lpSolveAPI/examples/name.lp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 404 | r | library(lpSolveAPI)
### Name: name.lp
### Title: Name LP
### Aliases: name.lp
### Keywords: programming
### ** Examples
# Extracted example for name.lp(): build a small LP, then set and read back
# the model's name.
lps.model <- make.lp(0, 3)                 # 0 constraints (added below), 3 variables
xt <- c(6,2,4)
add.constraint(lps.model, xt, "<=", 150)   # 6x1 + 2x2 + 4x3 <= 150
xt <- c(1,1,6)
add.constraint(lps.model, xt, ">=", 0)     # x1 + x2 + 6x3 >= 0
xt <- c(4,5,4)
add.constraint(lps.model, xt, "=", 40)     # 4x1 + 5x2 + 4x3 = 40
set.objfn(lps.model, c(-3,-4,-3))          # objective: -3x1 - 4x2 - 3x3
name.lp(lps.model, "Simple LP")            # with a name argument: sets the name
name.lp(lps.model)                         # without: returns the current name
|
#import(LabTAT)
# Turnaround-time (TAT) comparison across four laboratories.
# NOTE(review): attach() puts the Laboratory.* columns on the search path and
# is never detach()ed; with(LabTAT, ...) or LabTAT$... would be safer.
attach(LabTAT)
summary(LabTAT)
#means
# Laboratory.1 Laboratory.2 Laboratory.3 Laboratory.4
#Mean :178.4 Mean :178.9 Mean :199.9 Mean :163.7
# ---- Normality tests (Shapiro-Wilk, one per laboratory) ----
#Ho <- data is normally distributed
#Ha <- data is not normally distributed
library(nortest)
shapiro.test(Laboratory.1)
#p-value = 0.5508 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.2)
#p-value = 0.8637 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.3)
#p-value = 0.4205 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.4)
#p-value = 0.6619 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
# ---- Variance tests (F test for equality of two variances) ----
#Ho <- the variance in the lab reports of lab A is equal to the variance of lab B
#Ha <- the variance in the lab reports of lab A is not equal to the variance of lab B
var.test(Laboratory.1,Laboratory.2)
#p-value = 0.1675 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 1 is equal to the variance of lab 2
var.test(Laboratory.1,Laboratory.4)
#p-value = 0.1408 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 1 is equal to the variance of lab 4
var.test(Laboratory.2,Laboratory.3)
#p-value = 0.2742 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 2 is equal to the variance of lab 3
var.test(Laboratory.3,Laboratory.4)
#p-value = 0.3168 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 3 is equal to the variance of lab 4
#var.test(Laboratory.3,Laboratory.1)
#p-value = 0.01366 < 0.05
#since the p-value is less than 0.05 we reject the null hypothesis:
#the variance in the lab reports of lab 3 is not equal to the variance of lab 1
#since there are more than 2 groups we perform an ANOVA test
#ANOVA 1 way test
#Ho <- the average of the TAT values is the same
#Ha <- the average of the TAT values is not the same
# NOTE(review): aov(Laboratory.1 ~ Laboratory.2) fits a regression of one
# lab's TAT on another's, not a one-way ANOVA across laboratories. A one-way
# ANOVA of lab means needs the data stacked long, e.g.
# aov(values ~ ind, data = stack(LabTAT)) -- confirm the intent.
test1<-aov(Laboratory.1~Laboratory.2,data = LabTAT)
summary(test1)
#p-value = 0.168 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test2<-aov(Laboratory.2~Laboratory.3,data = LabTAT)
summary(test2)
#p-value = 0.474 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test3<-aov(Laboratory.4~Laboratory.3,data = LabTAT)
summary(test3)
#p-value = 0.173 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test4<-aov(Laboratory.1~Laboratory.4,data = LabTAT)
summary(test4)
#p-value = 0.315 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
#test5<-aov(Laboratory.1~Laboratory.3,data = LabTAT)
#summary(test5)
#p-value = 0.243 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
#visualizations
boxplot(LabTAT)
| /hypothis testing-labtat.R | no_license | PATELVIMALV1/hypothesis-testing | R | false | false | 3,083 | r | #import(LabTAT)
# Turnaround-time (TAT) comparison across four laboratories.
# NOTE(review): attach() puts the Laboratory.* columns on the search path and
# is never detach()ed; with(LabTAT, ...) or LabTAT$... would be safer.
attach(LabTAT)
summary(LabTAT)
#means
# Laboratory.1 Laboratory.2 Laboratory.3 Laboratory.4
#Mean :178.4 Mean :178.9 Mean :199.9 Mean :163.7
# ---- Normality tests (Shapiro-Wilk, one per laboratory) ----
#Ho <- data is normally distributed
#Ha <- data is not normally distributed
library(nortest)
shapiro.test(Laboratory.1)
#p-value = 0.5508 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.2)
#p-value = 0.8637 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.3)
#p-value = 0.4205 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
shapiro.test(Laboratory.4)
#p-value = 0.6619 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis
# ---- Variance tests (F test for equality of two variances) ----
#Ho <- the variance in the lab reports of lab A is equal to the variance of lab B
#Ha <- the variance in the lab reports of lab A is not equal to the variance of lab B
var.test(Laboratory.1,Laboratory.2)
#p-value = 0.1675 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 1 is equal to the variance of lab 2
var.test(Laboratory.1,Laboratory.4)
#p-value = 0.1408 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 1 is equal to the variance of lab 4
var.test(Laboratory.2,Laboratory.3)
#p-value = 0.2742 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 2 is equal to the variance of lab 3
var.test(Laboratory.3,Laboratory.4)
#p-value = 0.3168 > 0.05
#since the p-value is greater than 0.05 we fail to reject the null hypothesis:
#the variance in the lab reports of lab 3 is equal to the variance of lab 4
#var.test(Laboratory.3,Laboratory.1)
#p-value = 0.01366 < 0.05
#since the p-value is less than 0.05 we reject the null hypothesis:
#the variance in the lab reports of lab 3 is not equal to the variance of lab 1
#since there are more than 2 groups we perform an ANOVA test
#ANOVA 1 way test
#Ho <- the average of the TAT values is the same
#Ha <- the average of the TAT values is not the same
# NOTE(review): aov(Laboratory.1 ~ Laboratory.2) fits a regression of one
# lab's TAT on another's, not a one-way ANOVA across laboratories. A one-way
# ANOVA of lab means needs the data stacked long, e.g.
# aov(values ~ ind, data = stack(LabTAT)) -- confirm the intent.
test1<-aov(Laboratory.1~Laboratory.2,data = LabTAT)
summary(test1)
#p-value = 0.168 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test2<-aov(Laboratory.2~Laboratory.3,data = LabTAT)
summary(test2)
#p-value = 0.474 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test3<-aov(Laboratory.4~Laboratory.3,data = LabTAT)
summary(test3)
#p-value = 0.173 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
test4<-aov(Laboratory.1~Laboratory.4,data = LabTAT)
summary(test4)
#p-value = 0.315 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
#test5<-aov(Laboratory.1~Laboratory.3,data = LabTAT)
#summary(test5)
#p-value = 0.243 > 0.05
#thus we fail to reject the null hypothesis:
#the average of the TAT values is the same
#visualizations
boxplot(LabTAT)
|
#' Plot forecasts (historical and future) for exponential smoothing models
#'
#' @param mod The result of fitting an exponential smoothing model with ses( ), holt( ) or hw( )
#' @param include A number of recent past values to show (default is NULL for all)
#' @param attach Attach the future forecasts (and bounds) to the historical "fits" (default), or historical "series", or "no" attaching
#' @param main Usual parameter to enter a title for the graph
#' @param lwd Control line width (default is lwd=2)
#'
#' @return A plot with the series, historical (smoothed) values, forecast (and bounds) for future values.
#'
#' @export
#'
#'
sesplot=function(mod,include=NULL,attach="fits",main="",lwd=2){
  # Components of the fitted model: observed series, one-step fitted values,
  # prediction-interval bounds, and point forecasts for the future periods.
  x=mod$x
  n=length(x)
  past=mod$fitted
  upper=mod$upper
  lower=mod$lower
  nextfit=mod$mean
  #adjust to show only the last "include" values of the historical series
  # NOTE(review): subset(..., start =) relies on the forecast package's
  # subset.ts method (index-based, not time-based) -- confirm forecast is loaded.
  if (!is.null(include)) {
    x=subset(x,start=n-include+1)
    past=subset(past,start=n-include+1)
  }
  #Below to attach future forecasts to the historical series:
  # prepend the last observed point so the future lines join the series.
  if (attach=="series"){
    nextfit=ts(c(tail(x,1),nextfit),start=end(x),freq=frequency(x))
    lower=ts(c(tail(x,1),lower),start=end(x),freq=frequency(x))
    upper=ts(c(tail(x,1),upper),start=end(x),freq=frequency(x))
  }
  #Below to attach future forecasts to the forecasts for historical series (default):
  # prepend the last fitted point instead. Any other value of `attach`
  # leaves the future lines unattached.
  if(attach=="fits"){
    nextfit=ts(c(tail(past,1),nextfit),start=end(past),freq=frequency(past))
    lower=ts(c(tail(past,1),lower),start=end(past),freq=frequency(past))
    upper=ts(c(tail(past,1),upper),start=end(past),freq=frequency(past))
  }
  # Series in black, fitted/forecast in blue, interval bounds in red.
  ts.plot(x,past,nextfit,lower,upper,
          col=c("black","blue","blue","red","red"),lwd=lwd,main=main)
}
| /R/sesplot.R | no_license | statmanrobin/ts343 | R | false | false | 1,706 | r | #' Plot forecasts (historical and future) for exponential smoothing models
#'
#' @param mod The result of ftting an exponential smoothing moel with ses( ), holt( ) or hw( )
#' @param include A number of recnt past values to show (default is NULL for all)
#' @param attach Attach the future forecasts (and bounds) the the historical "fits" (default), or historical "series", or "no" attaching
#' @param main Usual paramter to enter a title for the graph
#' @param lwd Control line width (default is lwd=2)
#'
#' @return A plot with the series, historical (smoothed) values, forecast (and bounds) for future values.
#'
#' @export
#'
#'
sesplot=function(mod,include=NULL,attach="fits",main="",lwd=2){
x=mod$x
n=length(x)
past=mod$fitted
upper=mod$upper
lower=mod$lower
nextfit=mod$mean
#adjust to show only the last "include" values of the historical series
if (!is.null(include)) {
x=subset(x,start=n-include+1)
past=subset(past,start=n-include+1)
}
#Below to attach future forecasts to the historical series
if (attach=="series"){
nextfit=ts(c(tail(x,1),nextfit),start=end(x),freq=frequency(x))
lower=ts(c(tail(x,1),lower),start=end(x),freq=frequency(x))
upper=ts(c(tail(x,1),upper),start=end(x),freq=frequency(x))
}
#Below to attach future forecasts to the forecasts for historical series (default)
if(attach=="fits"){
nextfit=ts(c(tail(past,1),nextfit),start=end(past),freq=frequency(past))
lower=ts(c(tail(past,1),lower),start=end(past),freq=frequency(past))
upper=ts(c(tail(past,1),upper),start=end(past),freq=frequency(past))
}
ts.plot(x,past,nextfit,lower,upper,
col=c("black","blue","blue","red","red"),lwd=lwd,main=main)
}
|
# This script gets data that is going to be used by the Vignettes. Add data to here where required.
# elo-ratings script to get data for vignette
devtools::install_github("jimmyday12/fitzRoy")
library(dplyr)
library(elo)
library(lubridate)
library(fitzRoy)
# Get data
results <- fitzRoy::get_match_results()
fixture <- fitzRoy::get_fixture(2019)
# mens-stats
#results <- get_match_results()
stats <- get_afltables_stats(start_date = "2018-01-01", end_date = "2018-06-01")
#fixture <- get_fixture()
stats_gf <- get_footywire_stats(ids = 9927)
ladder <- return_ladder(match_results_df = results)
ladder_round <- return_ladder(match_results_df = results, season_round = 15, season = 2018)
sources <- get_squiggle_data("sources")
tips <- get_squiggle_data("tips")
tips_round <- get_squiggle_data("tips", round = 1, year = 2018)
# womens-stats
cookie <- get_aflw_cookie()
# This is used by other parts so it's important to keep.
# First lets load afldata provided
load(here::here("data-raw", "afl_tables_playerstats", "afltables_playerstats_provided.rda"))
# Select out the columns we want
afldata <- afldata %>%
select(
-X, -year, -month, -day,
-Home.coach, -Home.coach.DOB, -Away.coach, -Away.coach.DOB,
-Height, -Weight, -DOB
)
# Save the names of the columns. Will be used internally by the package
afldata_cols <- names(afldata)
# Function to fix abbreviations
# Map AFL Tables stat abbreviations to their full column names.
#
# Unknown abbreviations map to "" (empty string), which the caller uses to
# filter out non-stat columns. Implemented as a base-R named-vector lookup
# instead of the original purrr::map_chr() + dplyr::case_when() chain: no
# attached-package dependency, and a single vectorised match() per call.
#
# @param x Character vector of column-name abbreviations (e.g. "KI", "HO").
# @return Unnamed character vector the same length as `x`.
fix_abbreviations <- function(x) {
  lookup <- c(
    KI = "Kicks",
    MK = "Marks",
    HB = "Handballs",
    GL = "Goals",
    BH = "Behinds",
    HO = "Hit.Outs",
    TK = "Tackles",
    RB = "Rebounds",
    IF = "Inside.50s",
    CL = "Clearances",
    CG = "Clangers",
    FF = "Frees.For",
    FA = "Frees.Against",
    BR = "Brownlow.Votes",
    CP = "Contested.Possessions",
    UP = "Uncontested.Possessions",
    CM = "Contested.Marks",
    MI = "Marks.Inside.50",
    One.Percenters = "One.Percenters",
    BO = "Bounces",
    GA = "Goal.Assists",
    TOG = "Time.on.Ground..",
    Jumper = "Jumper.No"
  )
  out <- unname(lookup[as.character(x)])
  # Preserve the original contract: anything not in the table becomes "".
  out[is.na(out)] <- ""
  out
}
# Let's get the stats
# match_urls <- get_afltables_urls("01/06/2018", "15/06/2018")
# dat <- scrape_afltables_match(match_urls)
load(here::here("data-raw", "afl_tables_playerstats", "afltables_raw.rda"))
abb <- fix_abbreviations(names(afltables_raw))
stat_abbr <- tibble(
stat = abb[abb != ""],
stat.abb = names(afltables_raw)[abb != ""]
)
## Write data for abbreviations Team and Stats to a data frame that can be used
team_abbr <- tibble(
Team = c(
"Adelaide", "Brisbane Lions", "Carlton", "Collingwood", "Essendon",
"Fremantle", "Gold Coast", "Geelong", "Greater Western Sydney", "Hawthorn",
"Melbourne", "North Melbourne", "Port Adelaide", "Richmond", "St Kilda",
"Sydney", "Western Bulldogs", "West Coast"
),
Team.abb = c(
"AD", "BL", "CA", "CW", "ES", "FR",
"GC", "GE", "GW", "HW", "ME", "NM",
"PA", "RI", "SK", "SY", "WB", "WC"
)
)
# Frizigg
fryzigg <- fitzRoy::get_fryzigg_stats(start = 2019, end = 2019)
usethis::use_data(stat_abbr, team_abbr, afldata_cols,
results, fixture, stats, stats_gf,
ladder, ladder_round, sources, tips, tips_round, fryzigg,
internal = TRUE, overwrite = TRUE)
| /data-raw/vignette_data.R | no_license | DTS098/fitzRoy | R | false | false | 3,664 | r | # This script gets data that is going to be used by the Vignettes. Add data to here where required.
# elo-ratings script to get data for vignette
devtools::install_github("jimmyday12/fitzRoy")
library(dplyr)
library(elo)
library(lubridate)
library(fitzRoy)
# Get data
results <- fitzRoy::get_match_results()
fixture <- fitzRoy::get_fixture(2019)
# mens-stats
#results <- get_match_results()
stats <- get_afltables_stats(start_date = "2018-01-01", end_date = "2018-06-01")
#fixture <- get_fixture()
stats_gf <- get_footywire_stats(ids = 9927)
ladder <- return_ladder(match_results_df = results)
ladder_round <- return_ladder(match_results_df = results, season_round = 15, season = 2018)
sources <- get_squiggle_data("sources")
tips <- get_squiggle_data("tips")
tips_round <- get_squiggle_data("tips", round = 1, year = 2018)
# womens-stats
cookie <- get_aflw_cookie()
# This is used by other parts so it's important to keep.
# First lets load afldata provided
load(here::here("data-raw", "afl_tables_playerstats", "afltables_playerstats_provided.rda"))
# Select out the columns we want
afldata <- afldata %>%
select(
-X, -year, -month, -day,
-Home.coach, -Home.coach.DOB, -Away.coach, -Away.coach.DOB,
-Height, -Weight, -DOB
)
# Save the names of the columns. Will be used internally by the package
afldata_cols <- names(afldata)
# Function to fix abbreviations
fix_abbreviations <- function(x) {
purrr::map_chr(x, ~
case_when(
. == "KI" ~ "Kicks",
. == "MK" ~ "Marks",
. == "HB" ~ "Handballs",
. == "GL" ~ "Goals",
. == "BH" ~ "Behinds",
. == "HO" ~ "Hit.Outs",
. == "TK" ~ "Tackles",
. == "RB" ~ "Rebounds",
. == "IF" ~ "Inside.50s",
. == "CL" ~ "Clearances",
. == "CG" ~ "Clangers",
. == "FF" ~ "Frees.For",
. == "FA" ~ "Frees.Against",
. == "BR" ~ "Brownlow.Votes",
. == "CP" ~ "Contested.Possessions",
. == "UP" ~ "Uncontested.Possessions",
. == "CM" ~ "Contested.Marks",
. == "MI" ~ "Marks.Inside.50",
. == "One.Percenters" ~ "One.Percenters",
. == "BO" ~ "Bounces",
. == "GA" ~ "Goal.Assists",
. == "TOG" ~ "Time.on.Ground..",
. == "Jumper" ~ "Jumper.No",
TRUE ~ ""
))
}
# Let's get the stats
# match_urls <- get_afltables_urls("01/06/2018", "15/06/2018")
# dat <- scrape_afltables_match(match_urls)
load(here::here("data-raw", "afl_tables_playerstats", "afltables_raw.rda"))
abb <- fix_abbreviations(names(afltables_raw))
stat_abbr <- tibble(
stat = abb[abb != ""],
stat.abb = names(afltables_raw)[abb != ""]
)
## Write data for abbreviations Team and Stats to a data frame that can be used
team_abbr <- tibble(
Team = c(
"Adelaide", "Brisbane Lions", "Carlton", "Collingwood", "Essendon",
"Fremantle", "Gold Coast", "Geelong", "Greater Western Sydney", "Hawthorn",
"Melbourne", "North Melbourne", "Port Adelaide", "Richmond", "St Kilda",
"Sydney", "Western Bulldogs", "West Coast"
),
Team.abb = c(
"AD", "BL", "CA", "CW", "ES", "FR",
"GC", "GE", "GW", "HW", "ME", "NM",
"PA", "RI", "SK", "SY", "WB", "WC"
)
)
# Frizigg
fryzigg <- fitzRoy::get_fryzigg_stats(start = 2019, end = 2019)
usethis::use_data(stat_abbr, team_abbr, afldata_cols,
results, fixture, stats, stats_gf,
ladder, ladder_round, sources, tips, tips_round, fryzigg,
internal = TRUE, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCommentsByMediaCode.R
\name{getCommentsByMediaCode}
\alias{getCommentsByMediaCode}
\title{Get Comments By Media Code}
\usage{
getCommentsByMediaCode(code, n = 10, maxID = "", ...)
}
\arguments{
\item{code}{An Instagram shortcode for a media post}
\item{n}{The number of comments to return}
\item{maxID}{An identifier for a comment that indicates where to start searching}
\item{...}{Additional options passed to a shinyAppDir}
}
\value{
n x 6 dataframe - id, text, created_at, owner.id, owner.profile_pic_url, owner.username
}
\description{
Gets the first n comments for a media with a given Instagram shortcode
}
\examples{
\dontrun{ getCommentsByMediaCode("W0IL2cujb3", 100)}
}
| /man/getCommentsByMediaCode.Rd | no_license | AFIT-R/instaExtract | R | false | true | 765 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCommentsByMediaCode.R
\name{getCommentsByMediaCode}
\alias{getCommentsByMediaCode}
\title{Get Comments By Media Code}
\usage{
getCommentsByMediaCode(code, n = 10, maxID = "", ...)
}
\arguments{
\item{code}{An Instagram shortcode for a media post}
\item{n}{The number of comments to return}
\item{maxID}{An identifier for a comment that indicates where to start searching}
\item{...}{Additional options passed to a shinyAppDir}
}
\value{
n x 6 dataframe - id, text, created_at, owner.id, owner.profile_pic_url, owner.username
}
\description{
Gets the first n comments for a media with a given Instagram shortcode
}
\examples{
\dontrun{ getCommentsByMediaCode("W0IL2cujb3", 100)}
}
|
pkgname <- "inmetwrangler"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
library('inmetwrangler')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
cleanEx()
nameEx("import_txt_files_inmet")
### * import_txt_files_inmet
flush(stderr()); flush(stdout())
### Name: import_txt_files_inmet
### Title: Import raw data files of automatic stations
### Aliases: import_txt_files_inmet
### ** Examples
library(dplyr); library(purrr); library(stringr); library(readr)
# missing columns problem example
myfile <- system.file("extdata", "A838.txt", package = "inmetwrangler")
myfile
A838_problems <- import_txt_files_inmet(files = myfile,
verbose = TRUE,
only.problems = TRUE)
A838_data <- import_txt_files_inmet(files = myfile,
verbose = TRUE,
only.problems = FALSE)
#looking at rows
for(irow in A838_problems$row) read_lines(myfile, skip = irow-2, n_max = irow+2)
# View(slice(A838_data, A838_problems$row)) # columns filled with NAs
### * <FOOTER>
###
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
| /inmetwrangler.Rcheck/inmetwrangler-Ex.R | no_license | lhmet/inmetwrangler | R | false | false | 1,415 | r | pkgname <- "inmetwrangler"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
library('inmetwrangler')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
cleanEx()
nameEx("import_txt_files_inmet")
### * import_txt_files_inmet
flush(stderr()); flush(stdout())
### Name: import_txt_files_inmet
### Title: Import raw data files of automatic stations
### Aliases: import_txt_files_inmet
### ** Examples
library(dplyr); library(purrr); library(stringr); library(readr)
# missing columns problem example
myfile <- system.file("extdata", "A838.txt", package = "inmetwrangler")
myfile
A838_problems <- import_txt_files_inmet(files = myfile,
verbose = TRUE,
only.problems = TRUE)
A838_data <- import_txt_files_inmet(files = myfile,
verbose = TRUE,
only.problems = FALSE)
#looking at rows
for(irow in A838_problems$row) read_lines(myfile, skip = irow-2, n_max = irow+2)
# View(slice(A838_data, A838_problems$row)) # columns filled with NAs
### * <FOOTER>
###
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
|
# all the way through
source('./code/FromTaiki/gridFunctions.R')
# EXAMPLE WORK FLOW
# 1) First read in gps, then you need to convert longitudes to 0-360,
# convert UTC to POSIXct format, and create a column called "effort" that is TRUE or FALSE
# Column names needed: Longitude, Latitude, UTC, effort
gpsAll <- read.csv('./tests/straightPathWeffort_1706.csv', stringsAsFactors = FALSE) #read.csv('./tests/SpermWhale_1706_0716_effort.csv', stringsAsFactors = FALSE)
# gpsAll <- gpsAll %>% mutate_if(is.character, str_trim) #white space after join.veffort!?!?
gpsAll$effort <- gpsAll$straight == TRUE & gpsAll$join.aeffort == "on" # & gpsAll$join.veffort =="on"
gpsAll$Longitude <- ifelse(gpsAll$Longitude <= 0, gpsAll$Longitude + 360, gpsAll$Longitude)
gpsAll$UTC <- lubridate::ymd_hms(gpsAll$UTC)
# 2) Read in detections, similar to above convert column names to Longitude, Latitude, UTC,
# and "distance" if you need to create a detection function using Distance package
# Longitude must be matching 0-360 like above
# pmDets <- read.csv('./tests/SpermWhale_1706_BEST_detections.csv', stringsAsFactors = FALSE)
pmDets <- filter(SwEnvFinal, loc == 1 | loc == 0 & sid < 999) # localized encs and sightings
pmDets <- rename(pmDets, Longitude = lon, Latitude=lat, distance = pdist)
pmDets$Longitude <- ifelse(pmDets$Longitude <= 0, pmDets$Longitude + 360, pmDets$Longitude)
pmDets$UTC <- lubridate::ymd_hms(pmDets$UTC)
pmDets$distance <- abs(pmDets$distance)*1000
pmDetsAc <- filter(pmDets, loc==1)#, distance < 15)
# 3) fit a detection function if you need one
dsm <- Distance::ds(pmDets, key='hr')
# dsm <- Distance::ds(pmDetsAc, key='unif')
# 4) Run grid stuff. This will take a while, and should have a couple progress bars showing you how long the slow parts
# are taking. Set "trunc_m" to whatever truncation distance you might want to use
# It will ask you what pixel size you want (this is the grid size), in the future you can specify this when you call it
#filter for just 1706 to test things
pmDetsub <- filter(pmDets, survey == 1706, loc == 1 | loc == 0 & sid < 999 ) # localized encs and sightings
gridEffort10 <- doAllGrid(gps = gpsAll,
bounds = NULL,
dets = pmDetsub,
trunc_m = 15e3, #METERS
dsmodel = dsm, #NULL or dsm
pixel = NULL,
grid = NULL,
plot = F)
# Result of this is a list with the gps you provided, the grid it created, the effort values for each grid, and
# the detections you provided (these have the effortArea attached to them, as well as the actualArea of that grid
# cell - actualArea is mostly there as a reference point for sanity check, prob not used for much)
# Theres also a plotty function, but it will look pretty messy for all your data. Trackline is (roughly) in blue,
# detections are red dots, amount of effort is shaded in gray, but for a big grid you wont really be able to see the shading
plotGridResult(gridEffort10)
saveRDS(gridEffort10, file = paste0(here::here('output'), '/', 'gridEffort10km.rda'))
| /code/FromTaiki/GridExample_forTHEHOMIE_YBEEZY.R | no_license | ybarkley/SpermWhales | R | false | false | 3,112 | r | # all the way through
source('./code/FromTaiki/gridFunctions.R')
# EXAMPLE WORK FLOW
# 1) First read in gps, then you need to convert longitudes to 0-360,
# convert UTC to POSIXct format, and create a column called "effort" that is TRUE or FALSE
# Column names needed: Longitude, Latitude, UTC, effort
gpsAll <- read.csv('./tests/straightPathWeffort_1706.csv', stringsAsFactors = FALSE) #read.csv('./tests/SpermWhale_1706_0716_effort.csv', stringsAsFactors = FALSE)
# gpsAll <- gpsAll %>% mutate_if(is.character, str_trim) #white space after join.veffort!?!?
gpsAll$effort <- gpsAll$straight == TRUE & gpsAll$join.aeffort == "on" # & gpsAll$join.veffort =="on"
gpsAll$Longitude <- ifelse(gpsAll$Longitude <= 0, gpsAll$Longitude + 360, gpsAll$Longitude)
gpsAll$UTC <- lubridate::ymd_hms(gpsAll$UTC)
# 2) Read in detections, similar to above convert column names to Longitude, Latitude, UTC,
# and "distance" if you need to create a detection function using Distance package
# Longitude must be matching 0-360 like above
# pmDets <- read.csv('./tests/SpermWhale_1706_BEST_detections.csv', stringsAsFactors = FALSE)
pmDets <- filter(SwEnvFinal, loc == 1 | loc == 0 & sid < 999) # localized encs and sightings
pmDets <- rename(pmDets, Longitude = lon, Latitude=lat, distance = pdist)
pmDets$Longitude <- ifelse(pmDets$Longitude <= 0, pmDets$Longitude + 360, pmDets$Longitude)
pmDets$UTC <- lubridate::ymd_hms(pmDets$UTC)
pmDets$distance <- abs(pmDets$distance)*1000
pmDetsAc <- filter(pmDets, loc==1)#, distance < 15)
# 3) fit a detection function if you need one
dsm <- Distance::ds(pmDets, key='hr')
# dsm <- Distance::ds(pmDetsAc, key='unif')
# 4) Run grid stuff. This will take a while, and should have a couple progress bars showing you how long the slow parts
# are taking. Set "trunc_m" to whatever truncation distance you might want to use
# It will ask you what pixel size you want (this is the grid size), in the future you can specify this when you call it
#filter for just 1706 to test things
pmDetsub <- filter(pmDets, survey == 1706, loc == 1 | loc == 0 & sid < 999 ) # localized encs and sightings
gridEffort10 <- doAllGrid(gps = gpsAll,
bounds = NULL,
dets = pmDetsub,
trunc_m = 15e3, #METERS
dsmodel = dsm, #NULL or dsm
pixel = NULL,
grid = NULL,
plot = F)
# Result of this is a list with the gps you provided, the grid it created, the effort values for each grid, and
# the detections you provided (these have the effortArea attached to them, as well as the actualArea of that grid
# cell - actualArea is mostly there as a reference point for sanity check, prob not used for much)
# Theres also a plotty function, but it will look pretty messy for all your data. Trackline is (roughly) in blue,
# detections are red dots, amount of effort is shaded in gray, but for a big grid you wont really be able to see the shading
plotGridResult(gridEffort10)
saveRDS(gridEffort10, file = paste0(here::here('output'), '/', 'gridEffort10km.rda'))
|
# Skeleton of the data preparation pipeline for model training.
# Still to define: which variables the training will be based on.
# Requires the "dataPreparation" package.
#install.packages("dataPreparation")
library(dataPreparation)
# `df` is expected to exist in the calling environment; work on a copy.
dft <- df
##
# Split the data set into 80% train and 20% test
##
train_index <- sample(1:nrow(df),0.8*nrow(df))
test_index <- setdiff(1:nrow(df),train_index)
# Column 15 is dropped from the predictors; "Promedio" (grade average) is the target.
X_train <- dft[train_index, -15]
y_train <- dft[train_index, "Promedio"]
X_test <- dft[test_index, -15]
y_test <- dft[test_index, "Promedio"]
###
# Variable filtering
###
constant_cols <- whichAreConstant(dft)
double_cols <- whichAreInDouble(dft)
bijections_cols <- whichAreBijection(dft)
# "Calificación x/100" is a bijection of "Calificación x/128" -> remove it.
X_train$`Calificación x/100` <- NULL
X_test$`Calificación x/100` <- NULL
##
# Scaling and normalisation
##
# Scales are learned on the training set only, then applied to both sets.
scales <- build_scales(dataSet = X_train, cols = c("Calificación x/128","Promedio"), verbose = TRUE)
X_train <- fastScale(dataSet = X_train,scales = scales, verbose = TRUE)
X_test <- fastScale(dataSet = X_test, scales = scales, verbose = TRUE)
#print(head(X_train[, c("Calificación x/128","Promedio")]))
##
# Discretisation
##
#bins <- build_bins(dataSet = X)
# NOTE(review): bins are applied to X_train only; X_test is never discretised -- confirm intended.
X_train <- fastDiscretization(dataSet = X_train, bins = list("Calificación x/128" = c(0, 30, 60, 70, 80, 90, 100, 105, 110, +Inf)))
##
# Encoding of categorical variables
##
encoding <- build_encoding(dataSet = X_train, cols = "auto", verbose = TRUE)
X_train <- one_hot_encoder(dataSet = X_train, encoding = encoding, drop = TRUE, verbose = TRUE)
X_test <- one_hot_encoder(dataSet = X_test, encoding = encoding, drop = TRUE, verbose = TRUE)
##
# Filter variables again, now that they are encoded
##
bijections <- whichAreBijection(dataSet = X_train, verbose = TRUE)
# The following columns were detected as bijections -> remove them.
X_train$`Calificación x.128..0. 30.`<-NULL
X_train$Situacion.NA <- NULL
X_train$MotivoEstudio.NA <- NULL
X_train$Faltar.NA <- NULL
X_train$Expectativas.NA <- NULL
X_train$Agua.NA <- NULL
X_train$RedSocial.NA <- NULL
X_train$Correo.0 <- NULL
X_train$Facebook.0 <- NULL
##
# Check that train and test have the same shape
##
# NOTE(review): each set is compared against *itself* (referenceSet = X_test /
# referenceSet = X_train), which is a no-op; referenceSet = X_train for the
# X_test call looks intended -- confirm.
X_test <- sameShape(X_test, referenceSet = X_test, verbose = TRUE)
X_train <- sameShape(X_train, referenceSet = X_train, verbose = TRUE)
| /pretraining_9v.R | no_license | DiegoNavaArsola/copernicus | R | false | false | 2,343 | r | # Esqueleto de la preparación de los datos para entrenamiento
# Falta definir en base a qué variables se va a hacer el entrenamiento
#Requiere paquete "dataPreparation"
#install.packages("dataPreparation")
library(dataPreparation)
dft <- df
##
# Dividir la base de datos en 80% train y 20% test
##
train_index <- sample(1:nrow(df),0.8*nrow(df))
test_index <- setdiff(1:nrow(df),train_index)
X_train <- dft[train_index, -15]
y_train <- dft[train_index, "Promedio"]
X_test <- dft[test_index, -15]
y_test <- dft[test_index, "Promedio"]
###
# Filtrado de variables
###
constant_cols <- whichAreConstant(dft)
double_cols <- whichAreInDouble(dft)
bijections_cols <- whichAreBijection(dft)
#Calificacion x/100 es biyección de Calificacionx/128 -> Remover
X_train$`Calificación x/100` <- NULL
X_test$`Calificación x/100` <- NULL
##
# Escalado y normalización
##
scales <- build_scales(dataSet = X_train, cols = c("Calificación x/128","Promedio"), verbose = TRUE)
X_train <- fastScale(dataSet = X_train,scales = scales, verbose = TRUE)
X_test <- fastScale(dataSet = X_test, scales = scales, verbose = TRUE)
#print(head(X_train[, c("Calificación x/128","Promedio")]))
##
# Discretizacion
##
#bins <- build_bins(dataSet = X)
X_train <- fastDiscretization(dataSet = X_train, bins = list("Calificación x/128" = c(0, 30, 60, 70, 80, 90, 100, 105, 110, +Inf)))
##
# Codificación de variables categóricas
##
encoding <- build_encoding(dataSet = X_train, cols = "auto", verbose = TRUE)
X_train <- one_hot_encoder(dataSet = X_train, encoding = encoding, drop = TRUE, verbose = TRUE)
X_test <- one_hot_encoder(dataSet = X_test, encoding = encoding, drop = TRUE, verbose = TRUE)
##
# Volver a filtrar variables, ahora que ya estás codificadas
##
bijections <- whichAreBijection(dataSet = X_train, verbose = TRUE)
#Las siguientes columnas fueron detectadas como biyecciones -> Remover
X_train$`Calificación x.128..0. 30.`<-NULL
X_train$Situacion.NA <- NULL
X_train$MotivoEstudio.NA <- NULL
X_train$Faltar.NA <- NULL
X_train$Expectativas.NA <- NULL
X_train$Agua.NA <- NULL
X_train$RedSocial.NA <- NULL
X_train$Correo.0 <- NULL
X_train$Facebook.0 <- NULL
##
# Control de la forma de Train y Test
##
X_test <- sameShape(X_test, referenceSet = X_test, verbose = TRUE)
X_train <- sameShape(X_train, referenceSet = X_train, verbose = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmail_functions.R
\docType{package}
\name{gmail_googleAuthR}
\alias{gmail_googleAuthR}
\alias{gmail_googleAuthR-package}
\title{Gmail API
Access Gmail mailboxes including sending user email.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:54:07
filename: /Users/mark/dev/R/autoGoogleAPI/googlegmailv1.auto/R/gmail_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://mail.google.com/
\item https://www.googleapis.com/auth/gmail.compose
\item https://www.googleapis.com/auth/gmail.insert
\item https://www.googleapis.com/auth/gmail.labels
\item https://www.googleapis.com/auth/gmail.metadata
\item https://www.googleapis.com/auth/gmail.modify
\item https://www.googleapis.com/auth/gmail.readonly
\item https://www.googleapis.com/auth/gmail.send
\item https://www.googleapis.com/auth/gmail.settings.basic
\item https://www.googleapis.com/auth/gmail.settings.sharing
}
}
| /googlegmailv1.auto/man/gmail_googleAuthR.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 1,039 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmail_functions.R
\docType{package}
\name{gmail_googleAuthR}
\alias{gmail_googleAuthR}
\alias{gmail_googleAuthR-package}
\title{Gmail API
Access Gmail mailboxes including sending user email.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:54:07
filename: /Users/mark/dev/R/autoGoogleAPI/googlegmailv1.auto/R/gmail_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://mail.google.com/
\item https://www.googleapis.com/auth/gmail.compose
\item https://www.googleapis.com/auth/gmail.insert
\item https://www.googleapis.com/auth/gmail.labels
\item https://www.googleapis.com/auth/gmail.metadata
\item https://www.googleapis.com/auth/gmail.modify
\item https://www.googleapis.com/auth/gmail.readonly
\item https://www.googleapis.com/auth/gmail.send
\item https://www.googleapis.com/auth/gmail.settings.basic
\item https://www.googleapis.com/auth/gmail.settings.sharing
}
}
|
# CAR - head
# CDR - tail
# CONS - create cell with head and tail
# EQ - equality
# ATOM
# Minimal interactive REPL ("psil") for a toy Lisp.
#
# Prompts with "psil> " until the user types "quit". Each input line is
# stripped of parentheses and split on spaces; when the head token is one
# of the recognised names, it is called with the second token coerced to
# numeric.
#
# NOTE(review): `symbols` is built but never consulted, so arithmetic
# operator forms always hit the "I do not understand" branch -- confirm
# whether that is intended.
psil <- function() {
  namespace <- list('cdr', 'car', 'x', 'define', 'print')
  symbols <- list('+', '-', '*', '/', '%')
  # Loop flag; flipped to FALSE on "quit".
  x <- TRUE
  while(x) {
    y <- readline(prompt = "psil> ")
    if ( y == 'quit') {
      x <- FALSE; print('I have enjoyed our time together.')
    } else {
      # Strip parentheses, then tokenise on single spaces.
      y <- gsub('(', '', y, fixed = TRUE)
      y <- gsub(')', '', y, fixed = TRUE)
      y <- strsplit(y, ' ')[[1]]
      if (y[1] %in% namespace) {
        # try() keeps the REPL alive if the evaluated call errors.
        try(print(eval(call(y[1], as.numeric(y[2])))))
      } else {
        print('I do not understand what you are asking')
      }
    }
  }
}
# Need to tokenize, spaces and s expressions
# Tokenise a raw s-expression string.
# Parentheses are padded with spaces so they become standalone tokens,
# then the whole string is split on single spaces.
token <- function(a) {
  padded <- gsub(")", " )", gsub("(", "( ", a, fixed = TRUE), fixed = TRUE)
  strsplit(padded, " ")[[1]]
}
# Locate the token positions of opening and closing parentheses in `a`.
# Returns a list with integer-index vectors `open` and `close`.
sexp <- function(a) {
  toks <- token(a)
  list(open = which(toks == "("), close = which(toks == ")"))
}
# Evaluate a flat arithmetic s-expression such as "(+ 1 2 3)".
#
# Tokenises `sxp`, takes token 2 as the operator name and tokens 3..(n-1)
# as operands, and left-folds the operator over them starting from 0.
#
# NOTE(review): each step applies op(operand, accumulator) -- operand
# first, as in the original -- so non-commutative operators like "-" do
# not follow Lisp semantics; confirm intended.
#
# @param sxp A string holding one flat s-expression.
# @return Numeric result of the fold (0 when there are no operands).
evl <- function(sxp) {
  x <- token(sxp)
  l <- length(x)
  ret <- 0
  # seq_len() avoids the 3:(l - 1) trap, which yields c(3, 2) and indexes
  # out of order for an operand-less expression like "(+)".
  for (i in seq_len(max(l - 3, 0)) + 2) {
    # Coerce both sides to numeric: the original passed character strings,
    # making "+" et al. fail with "non-numeric argument to binary operator".
    ret <- eval(call(x[2], as.numeric(x[i]), as.numeric(ret)))
  }
  ret
}
# what are the atomic units
# Numbers
# math operations
# nil - ()
# cons car cdr etc
# Does each element of `x` look like a number: an integer, a decimal, an
# R integer literal such as "2L", or scientific notation?
# Returns a logical vector. grepl() performs a substring match, so any
# string *containing* a digit run also passes.
numb <- function(x) {
  number_pattern <- "[-]?[0-9]+[.]?[0-9]*|[-]?[0-9]+[L]?|[-]?[0-9]+[.]?[0-9]*[eE][0-9]+"
  grepl(number_pattern, x)
}
# TRUE when `x` is one of the supported arithmetic operator symbols.
# match() mirrors %in% exactly (including coercion of non-character input).
op <- function(x) {
  operators <- c("+", "-", "*", "/", "%")
  !is.na(match(x, operators))
}
# TRUE for the empty-list literal, written either "nil" or "()".
# Uses elementwise `==`/`|`, so an NA input propagates NA (unlike %in%).
nil <- function(x) x == 'nil' | x == '()'
# TRUE for the built-in list primitives handled by the evaluator.
base <- function(x) x %in% c('cdr', 'car')
# An atom is any token the toy Lisp treats as a terminal value:
# a number, an operator symbol, nil, or a primitive function name.
is.atom <- function(x) numb(x) | op(x) | nil(x) | base(x)
# Classify a single atom as "operation", "nil", "numeric", or "primitive";
# non-atoms yield the string "NULL". The check order matters: operator and
# nil tests run before the (substring-matching) numeric regex.
atomic.type <- function(a) {
  if (!is.atom(a)) return("NULL")
  if (op(a)) return("operation")
  if (nil(a)) return("nil")
  if (numb(a)) return("numeric")
  "primitive"
}
library(testthat)
# Test cases
n('29')
n('2.1')
n('-12.311')
n('2.')
n('.32')
op('+')
op('--')
op(4)
op('f%')
nil('nil')
nil('()')
nil(5)
show_condition <- function(code) {
tryCatch(code,
error = function(c) "error",
warning = function(c) "warning",
message = function(c) "message"
)
}
f <- function() stop("!")
tryCatch(f(), error = function(e) 1)
#> [1] 1
withCallingHandlers(f(), error = function(e) 1)
show_condition(as.numeric(1))
show_condition(as.numeric('a'))
show_condition(as.numeric(|))
show_condition(stop("!"))
options(show.error.messages = T)
options(show.error.messages = F)
stop('k')
y <- '(as.numeric, |)'
y <- gsub('(', '', y, fixed = TRUE)
y <- gsub(')', '', y, fixed = TRUE)
y <- strsplit(y, ', ')[[1]]
xx <- try(eval(call(y[1], y[2])), silent = T))
suppressWarnings(try(log('a'), silent = T))
geterrmessage()[1]
tt <- suppressWarnings(tryCatch(eval(call(y[1], y[2])),error=function(e) e, warning=function(w) w))
if (any(class(tt) == "simpleWarning")) {
print(tt[1])
} else if ()
t2 <- eval(call(y[1], y[2]))
geterrmessage()[1]
#
withCallingHandlers(5, message = function(e) stop(e))
tt <- suppressWarnings(tryCatch(as.numeric('a'),error=function(e) e, warning=function(w) w))
options(show.error.messages = T)
suppressWarnings()
f2 <- function(x) {
try(log(x), silent = T)
10
}
f2("a")
x <- try(as.numeric(||), silent = TRUE)
geterrmessage()
stop('k')
geterrmessage()[1]
| /R/lisp.R | no_license | darrkj/clio | R | false | false | 3,167 | r |
# CAR - head
# CDR - tail
# CONS - create cell with head and tail
# EQ - equality
# ATOM
psil <- function() {
namespace <- list('cdr', 'car', 'x', 'define', 'print')
symbols <- list('+', '-', '*', '/', '%')
x <- TRUE
while(x) {
y <- readline(prompt = "psil> ")
if ( y == 'quit') {
x <- FALSE; print('I have enjoyed our time together.')
} else {
y <- gsub('(', '', y, fixed = TRUE)
y <- gsub(')', '', y, fixed = TRUE)
y <- strsplit(y, ' ')[[1]]
if (y[1] %in% namespace) {
try(print(eval(call(y[1], as.numeric(y[2])))))
} else {
print('I do not understand what you are asking')
}
}
}
}
# Need to tokenize, spaces and s expressions
token <- function(a) {
a <- gsub('(', '( ', a, fixed = TRUE)
a <- gsub(')', ' )', a, fixed = TRUE)
strsplit(a, ' ')[[1]]
}
sexp <- function(a) {
x <- which(token(a) == '(')
y <- which(token(a) == ')')
list(open = x, close = y)
}
evl <- function(sxp) {
x <- token(sxp)
l <- length(x)
ret <- 0
for (i in 3:(l-1)) {
ret <- eval(call(x[2], x[i], as.character(ret)))
}
ret
}
# what are the atomic units
# Numbers
# math operations
# nil - ()
# cons car cdr etc
numb <- function(x) {
grepl("[-]?[0-9]+[.]?[0-9]*|[-]?[0-9]+[L]?|[-]?[0-9]+[.]?[0-9]*[eE][0-9]+", x)
}
op <- function(x) x %in% c('+', '-', '*', '/', '%')
nil <- function(x) x == 'nil' | x == '()'
base <- function(x) x %in% c('cdr', 'car')
is.atom <- function(x) numb(x) | op(x) | nil(x) | base(x)
atomic.type <- function(a) {
if (is.atom(a)) {
if (op(a)) 'operation'
else if (nil(a)) 'nil'
else if (numb(a)) 'numeric'
else 'primitive'
} else 'NULL'
}
library(testthat)
# Test cases
n('29')
n('2.1')
n('-12.311')
n('2.')
n('.32')
op('+')
op('--')
op(4)
op('f%')
nil('nil')
nil('()')
nil(5)
show_condition <- function(code) {
tryCatch(code,
error = function(c) "error",
warning = function(c) "warning",
message = function(c) "message"
)
}
f <- function() stop("!")
tryCatch(f(), error = function(e) 1)
#> [1] 1
withCallingHandlers(f(), error = function(e) 1)
show_condition(as.numeric(1))
show_condition(as.numeric('a'))
show_condition(as.numeric(|))
show_condition(stop("!"))
options(show.error.messages = T)
options(show.error.messages = F)
stop('k')
y <- '(as.numeric, |)'
y <- gsub('(', '', y, fixed = TRUE)
y <- gsub(')', '', y, fixed = TRUE)
y <- strsplit(y, ', ')[[1]]
xx <- try(eval(call(y[1], y[2])), silent = T))
suppressWarnings(try(log('a'), silent = T))
geterrmessage()[1]
tt <- suppressWarnings(tryCatch(eval(call(y[1], y[2])),error=function(e) e, warning=function(w) w))
if (any(class(tt) == "simpleWarning")) {
print(tt[1])
} else if ()
t2 <- eval(call(y[1], y[2]))
geterrmessage()[1]
#
withCallingHandlers(5, message = function(e) stop(e))
tt <- suppressWarnings(tryCatch(as.numeric('a'),error=function(e) e, warning=function(w) w))
options(show.error.messages = T)
suppressWarnings()
f2 <- function(x) {
try(log(x), silent = T)
10
}
f2("a")
x <- try(as.numeric(||), silent = TRUE)
geterrmessage()
stop('k')
geterrmessage()[1]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timing_general.R
\name{GetTimeUnits}
\alias{GetTimeUnits}
\title{Gets time units}
\usage{
GetTimeUnits(fid, timevarname)
}
\description{
Gets time units
}
| /man/GetTimeUnits.Rd | no_license | aukkola/FluxnetLSM | R | false | true | 233 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timing_general.R
\name{GetTimeUnits}
\alias{GetTimeUnits}
\title{Gets time units}
\usage{
GetTimeUnits(fid, timevarname)
}
\description{
Gets time units
}
|
#' @param quantiles conditional quantiles of y to calculate and display
#' @param formula formula relating y variables to x variables
#' @param method Quantile regression method to use. Available options are `"rq"` (for
#' [`quantreg::rq()`]) and `"rqss"` (for [`quantreg::rqss()`]).
#' @inheritParams layer
#' @inheritParams geom_point
#' @eval rd_computed_vars(
#' quantile = "Quantile of distribution."
#' )
#' @export
#' @rdname geom_quantile
stat_quantile <- function(mapping = NULL, data = NULL,
                          geom = "quantile", position = "identity",
                          ...,
                          quantiles = c(0.25, 0.5, 0.75),
                          formula = NULL,
                          method = "rq",
                          method.args = list(),
                          na.rm = FALSE,
                          show.legend = NA,
                          inherit.aes = TRUE) {
  # Thin constructor: all computation happens in StatQuantile$compute_group.
  # This only bundles the user-facing parameters into a ggplot2 layer.
  layer(
    data = data,
    mapping = mapping,
    stat = StatQuantile,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    # list2() allows the trailing ... params to be spliced in dynamically.
    params = list2(
      quantiles = quantiles,
      formula = formula,
      method = method,
      method.args = method.args,
      na.rm = na.rm,
      ...
    )
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatQuantile <- ggproto("StatQuantile", Stat,
  required_aes = c("x", "y"),
  # Fit one quantile-regression model per requested quantile and return the
  # predicted lines, stacked row-wise, for the geom to draw.
  compute_group = function(data, scales, quantiles = c(0.25, 0.5, 0.75),
                           formula = NULL, xseq = NULL, method = "rq",
                           method.args = list(), lambda = 1, na.rm = FALSE) {
    check_installed("quantreg", reason = "for `stat_quantile()`")
    # Default formula: a qss() smooth for "rqss", plain linear y ~ x otherwise.
    if (is.null(formula)) {
      if (method == "rqss") {
        # Bake the lambda value into the formula via substitute() so the
        # fitted model does not depend on this function's environment.
        formula <- eval(
          substitute(y ~ qss(x, lambda = lambda)),
          list(lambda = lambda)
        )
        # make qss function available in case it is needed;
        # works around limitation in quantreg
        qss <- quantreg::qss
      } else {
        formula <- y ~ x
      }
      cli::cli_inform("Smoothing formula not specified. Using: {deparse(formula)}")
    }
    if (is.null(data$weight)) data$weight <- 1
    # Predict on an evenly spaced grid of 100 x values across the data
    # range unless the caller supplied xseq explicitly.
    if (is.null(xseq)) {
      xmin <- min(data$x, na.rm = TRUE)
      xmax <- max(data$x, na.rm = TRUE)
      xseq <- seq(xmin, xmax, length.out = 100)
    }
    grid <- data_frame0(x = xseq, .size = length(xseq))
    # if method was specified as a character string, replace with
    # the corresponding function
    if (identical(method, "rq")) {
      method <- quantreg::rq
    } else if (identical(method, "rqss")) {
      method <- quantreg::rqss
    } else {
      method <- match.fun(method) # allow users to supply their own methods
    }
    # One model per quantile; quant_pred() tags each result with its quantile.
    result <- lapply(
      quantiles,
      quant_pred,
      data = data,
      method = method,
      formula = formula,
      weight = weight,
      grid = grid,
      method.args = method.args
    )
    vec_rbind0(!!!result)
  },
  # weight is no longer available after transformation
  dropped_aes = "weight"
)
# Fit a single quantile regression and evaluate it over `grid`.
#
# @param quantile Single tau value handed to the quantreg fitter.
# @param data Data frame holding x, y and the group id for one panel group.
# @param method Fitting function (quantreg::rq, quantreg::rqss, or user-supplied).
# @param formula Model formula relating y to x.
# @param weight Observation weights, one per row of `data`.
# @param method.args Extra arguments spliced into the fitter call.
# @return `grid` augmented with predicted `y`, the `quantile`, and a
#   per-quantile `group` id so each quantile draws as a separate line.
quant_pred <- function(quantile, data, method, formula, weight, grid,
                       method.args = list()) {
  # BUG FIX: the default used to be `method.args = method.args`, a
  # self-referential promise that errors if ever evaluated. The internal
  # caller always passes it explicitly, so an empty list is a strictly
  # safer, behavior-compatible default.
  model <- inject(method(
    formula,
    data = data,
    tau = quantile,
    weights = weight,
    !!!method.args
  ))

  grid$y <- stats::predict(model, newdata = grid)
  grid$quantile <- quantile
  # One group per (input group, quantile) pair.
  grid$group <- paste(data$group[1], quantile, sep = "-")

  grid
}
| /R/stat-quantilemethods.R | no_license | cran/ggplot2 | R | false | false | 3,594 | r | #' @param quantiles conditional quantiles of y to calculate and display
#' @param formula formula relating y variables to x variables
#' @param method Quantile regression method to use. Available options are `"rq"` (for
#' [`quantreg::rq()`]) and `"rqss"` (for [`quantreg::rqss()`]).
#' @inheritParams layer
#' @inheritParams geom_point
#' @eval rd_computed_vars(
#' quantile = "Quantile of distribution."
#' )
#' @export
#' @rdname geom_quantile
stat_quantile <- function(mapping = NULL, data = NULL,
geom = "quantile", position = "identity",
...,
quantiles = c(0.25, 0.5, 0.75),
formula = NULL,
method = "rq",
method.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = StatQuantile,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list2(
quantiles = quantiles,
formula = formula,
method = method,
method.args = method.args,
na.rm = na.rm,
...
)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatQuantile <- ggproto("StatQuantile", Stat,
  required_aes = c("x", "y"),

  # Fits one quantile regression per requested quantile (via quant_pred)
  # and evaluates each fit over an even grid of x values.
  compute_group = function(data, scales, quantiles = c(0.25, 0.5, 0.75),
                           formula = NULL, xseq = NULL, method = "rq",
                           method.args = list(), lambda = 1, na.rm = FALSE) {
    check_installed("quantreg", reason = "for `stat_quantile()`")

    if (is.null(formula)) {
      if (method == "rqss") {
        # Bake the lambda value into the default smoothing formula.
        formula <- eval(
          substitute(y ~ qss(x, lambda = lambda)),
          list(lambda = lambda)
        )
        # make qss function available in case it is needed;
        # works around limitation in quantreg
        qss <- quantreg::qss
      } else {
        formula <- y ~ x
      }
      cli::cli_inform("Smoothing formula not specified. Using: {deparse(formula)}")
    }

    if (is.null(data$weight)) data$weight <- 1

    # Default prediction grid: 100 evenly spaced points over the data range.
    if (is.null(xseq)) {
      xmin <- min(data$x, na.rm = TRUE)
      xmax <- max(data$x, na.rm = TRUE)
      xseq <- seq(xmin, xmax, length.out = 100)
    }
    grid <- data_frame0(x = xseq, .size = length(xseq))

    # if method was specified as a character string, replace with
    # the corresponding function
    if (identical(method, "rq")) {
      method <- quantreg::rq
    } else if (identical(method, "rqss")) {
      method <- quantreg::rqss
    } else {
      method <- match.fun(method) # allow users to supply their own methods
    }

    result <- lapply(
      quantiles,
      quant_pred,
      data = data,
      method = method,
      formula = formula,
      # BUG FIX: was `weight = weight`, but no object `weight` exists in this
      # scope -- the observation weights live in `data$weight` (defaulted to
      # 1 above).
      weight = data$weight,
      grid = grid,
      method.args = method.args
    )
    vec_rbind0(!!!result)
  },

  # weight is no longer available after transformation
  dropped_aes = "weight"
)
# Fit a single quantile regression and evaluate it over `grid`.
# Returns `grid` augmented with predicted `y`, the `quantile`, and a
# per-quantile `group` id so each quantile draws as a separate line.
quant_pred <- function(quantile, data, method, formula, weight, grid,
                       method.args = list()) {
  # BUG FIX: the default used to be `method.args = method.args`, a
  # self-referential promise that errors if ever evaluated; an empty
  # list is a strictly safer, behavior-compatible default.
  model <- inject(method(
    formula,
    data = data,
    tau = quantile,
    weights = weight,
    !!!method.args
  ))

  grid$y <- stats::predict(model, newdata = grid)
  grid$quantile <- quantile
  grid$group <- paste(data$group[1], quantile, sep = "-")

  grid
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maars-lm.R
\name{print.summary.maars_lm}
\alias{print.summary.maars_lm}
\title{Print summary of \code{maars_lm} object}
\usage{
\method{print}{summary.maars_lm}(x, ...)
}
\arguments{
\item{x}{A \code{maars_lm} object.}
\item{...}{Additional arguments}
}
\description{
Prints the \code{summary} of a \code{maars_lm} object.
}
| /man/print.summary.maars_lm.Rd | permissive | shamindras/maars | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maars-lm.R
\name{print.summary.maars_lm}
\alias{print.summary.maars_lm}
\title{Print summary of \code{maars_lm} object}
\usage{
\method{print}{summary.maars_lm}(x, ...)
}
\arguments{
\item{x}{A \code{maars_lm} object.}
\item{...}{Additional arguments}
}
\description{
Prints the \code{summary} of a \code{maars_lm} object.
}
|
# Load Libraries ----
library(plotly)
library(shiny)
library(htmlwidgets)
# Prepare Data ---
mtcars$name = rownames(mtcars)
ui <- fluidPage(
plotlyOutput('myplot'),
textOutput('hover')
)
# Render helper that wires a plotly htmlwidget expression into Shiny's
# widget-rendering machinery, paired with plotlyOutput() in the UI.
renderPlotly2 <- function (expr, env = parent.frame(), quoted = FALSE){
  # Capture the caller's expression unevaluated unless it arrived pre-quoted,
  # then hand the quoted expression to shinyRenderWidget.
  if (!quoted) expr <- substitute(expr)
  shinyRenderWidget(expr, plotlyOutput, env, quoted = TRUE)
}
addHoverBehavior <- "function(el, x){
el.on('plotly_hover', function(data){
var infotext = data.points.map(function(d){
console.log(d)
return (d.data.name[d.pointNumber]+': x= '+d.x+', y= '+d.y.toPrecision(3));
});
console.log(infotext)
Shiny.onInputChange('hover_data', infotext)
})
}"
# Shiny server: the JS hover handler (addHoverBehavior) pushes hover text
# into `input$hover_data` via Shiny.onInputChange; this server echoes that
# text and renders the scatter plot with the handler attached.
server <- function(input, output){
  # Echo whatever the JS hover callback last reported.
  output$hover <- renderText({
    input$hover_data
  })
  output$myplot <- renderPlotly2({
    # NOTE(review): bare `mpg`/`wt`/`gear`/`name` rely on plotly's legacy
    # NSE argument API; current plotly releases expect formula syntax
    # (`x = ~mpg`) and drop `as.widget()` -- confirm the pinned plotly version.
    p <- plot_ly(mtcars, x = mpg, y = wt, color = gear, name = name, mode = "markers")
    as.widget(p) %>% onRender(addHoverBehavior)
  })
}
shinyApp(ui = ui, server = server)
library(leaflet)
leaflet() %>% addTiles() %>%
onRender("
function(el, x) {
// Navigate the map to the user's location
this.locate({setView: true});
}
") | /test_plotly_shiny.R | no_license | bbest/consmap-prep | R | false | false | 1,218 | r | # Load Libraries ----
library(plotly)
library(shiny)
library(htmlwidgets)
# Prepare Data ---
mtcars$name = rownames(mtcars)
ui <- fluidPage(
plotlyOutput('myplot'),
textOutput('hover')
)
renderPlotly2 <- function (expr, env = parent.frame(), quoted = FALSE){
if (!quoted) {
expr <- substitute(expr)
}
shinyRenderWidget(expr, plotlyOutput, env, quoted = TRUE)
}
addHoverBehavior <- "function(el, x){
el.on('plotly_hover', function(data){
var infotext = data.points.map(function(d){
console.log(d)
return (d.data.name[d.pointNumber]+': x= '+d.x+', y= '+d.y.toPrecision(3));
});
console.log(infotext)
Shiny.onInputChange('hover_data', infotext)
})
}"
server <- function(input, output){
output$hover <- renderText({
input$hover_data
})
output$myplot <- renderPlotly2({
p <- plot_ly(mtcars, x = mpg, y = wt, color = gear, name = name, mode = "markers")
as.widget(p) %>% onRender(addHoverBehavior)
})
}
shinyApp(ui = ui, server = server)
library(leaflet)
leaflet() %>% addTiles() %>%
onRender("
function(el, x) {
// Navigate the map to the user's location
this.locate({setView: true});
}
") |
# C. McClintock
# Brightest Research
# Yale Climate Change Opinion Maps
# -------------------------------- set up ----------------------------------------
# load libraries
library(tidyverse)
library(usmap)
library(maps)
library(mapdata)
# read in the data
al <- read_csv("alliance.csv")
# ------------------------- build climate -------------------------------
# states in climate alliance
plot_usmap(data = al, values = "alliance", lines = "white") +
guides(fill=F)
| /stories/impactvsaction/alliance/alliance.R | no_license | charlottemcclintock/DataforClimate | R | false | false | 478 | r |
# C. McClintock
# Brightest Research
# Yale Climate Change Opinion Maps
# -------------------------------- set up ----------------------------------------
# load libraries
library(tidyverse)
library(usmap)
library(maps)
library(mapdata)
# read in the data
al <- read_csv("alliance.csv")
# ------------------------- build climate -------------------------------
# states in climate alliance
plot_usmap(data = al, values = "alliance", lines = "white") +
guides(fill=F)
|
#' date_from_decimal_day
#'
#' converts DOY.decimalday into a POSIXct object.
#'
#' @param decimal_day date stored as DOY.decimalday
#' @param date_origin origin of decimal_day as YYYY/MM/DD
#'
#' @return decimal_day as POSIXct
#'
#' @export
date_from_decimal_day = function(decimal_day,date_origin,tz = "UTC"){
  # Split DOY.decimalday into the whole day-of-year and the fractional day.
  doy <- floor(decimal_day)
  day_fraction <- decimal_day - doy
  # as.Date() is zero-indexed relative to the origin, so Jan 1st is day 0.
  calendar_day <- as.Date(doy - 1, origin = date_origin)
  clock_time <- chron::times(day_fraction)
  # Join the date and time strings into a single POSIXct timestamp.
  as.POSIXct(paste(calendar_day, clock_time), format = "%Y-%m-%d %H:%M:%OS", tz = tz)
}
#'
#' converts DOY.decimalday into a POSIXct object.
#'
#' @param decimal_day date stored as DOY.decimalday
#' @param date_origin origin of decimal_day as YYYY/MM/DD
#'
#' @return decimal_day as POSIXct
#'
#' @export
date_from_decimal_day = function(decimal_day,date_origin,tz = "UTC"){
DOY = floor(decimal_day)
decimal = decimal_day - DOY
DOY = DOY-1 # as.Date uses zero indexing, therfore Jan 1st is day 0
day = as.Date(DOY, origin = date_origin )
time = chron::times(decimal)
date = as.POSIXct(paste(day,time),format = "%Y-%m-%d %H:%M:%OS",tz = tz)
#return
date
} |
#' Count Network Statistics
#'
#' This function is similar to what [ergm::summary_formula] does, but it provides
#' a fast wrapper suited for matrix class objects (see benchmark in the examples).
#' @param X List of square matrices. (networks)
#' @param terms Character vector with the names of the statistics to calculate.
#' Currently, the only available statistics are: '\Sexpr{paste(ergmito::AVAILABLE_STATS(), collapse="', '")}'.
#' @param ... Passed to the method.
#' @param attrs A list of vectors. This is used when `term` has a nodal attribute
#' such as `nodeicov(attrname="")`.
#' @export
#' @return A matrix of size `length(X) * length(terms)` with the corresponding
#' counts of statistics.
#' @examples
#' # DGP
#' set.seed(123199)
#' x <- rbernoulli(rep(5, 10))
#' ans0 <- count_stats(x, c("mutual", "edges"))
#'
#' # Calculating using summary_formula
#' ans1 <- lapply(x, function(i) {
#' ergm::summary_formula(i ~ mutual + edges)
#' })
#'
#' ans1 <- do.call(rbind, ans1)
#'
#' # Comparing
#' all.equal(unname(ans0), unname(ans1))
#'
#' # count_stats is vectorized (and so faster)
#' bm <- benchmarkito(
#' count_stats = count_stats(x, c("mutual", "edges")),
#' lapply = lapply(x, function(i) {
#' ergm::summary_formula(i ~ mutual + edges)
#' }), times = 50
#' )
#'
#' plot(bm)
#'
count_stats <- function(X, ...) UseMethod("count_stats")
#' @export
#' @rdname count_stats
AVAILABLE_STATS <- function() count_available()
#' @export
#' @rdname count_stats
count_stats.formula <- function(X, ...) {

  # Pull the networks off the left-hand side of the formula, normalising a
  # single network/matrix to a one-element list.
  LHS <- eval(X[[2]], envir = environment(X))
  if (inherits(LHS, "matrix") || inherits(LHS, "network"))
    LHS <- list(LHS)

  # Undirected graphs are not supported by the counters yet.
  are_undirected <- which(!is_directed(LHS))
  if (length(are_undirected))
    stop(
      "Counting statistics with count_stats in undirected networks is not ",
      "supported yet. The following networks in the formula are undirected: ",
      paste(are_undirected, collapse = ", "), ".", call. = FALSE
    )

  # Analyzing the formula (with network as a reference)
  ergm_model <- analyze_formula(X, LHS)

  # Every requested term must have a counter implemented.
  available <- which(!(ergm_model$term_names %in% count_available()))
  if (length(available))
    stop(
      "The following term(s) are not available in count_stats: ",
      # BUG FIX: this read `ergm_model$names[available]`, a field that is
      # never set (NULL), so the offending terms were silently omitted from
      # the error message. `term_names` is the field used everywhere else.
      paste(ergm_model$term_names[available], collapse = ", "),
      ".", call. = FALSE
    )

  # Capture nodal attributes for terms such as nodeicov(attrname = ...).
  for (a in seq_along(ergm_model$term_attrs)) {

    ergm_model$attrs[[a]] <- if (!length(ergm_model$term_attrs[[a]]))
      double(0)
    else {

      # This check is important, for now. Future versions may include more
      # complex terms that hold more than one attribute.
      if (length(ergm_model$term_attrs[[a]]) > 1L)
        stop(
          "For now, terms with more than one attribute are not supported on. ",
          "The current model you are trying to fit uses the term: ",
          ergm_model$term_passed[a],
          " which includes the following attributes: ",
          paste(ergm_model$term_attrs[[a]], collapse=", "),
          call. = FALSE
        )

      lapply(LHS, function(net) {
        network::get.vertex.attribute(net, attrname = ergm_model$term_attrs[[a]])
      })

    }

  }

  # Coercion is later since we need to check for arguments
  LHS <- as_adjmat(LHS)

  # NOTE(review): after the as_adjmat() call above, LHS is expected to be a
  # list of matrices already, so the network checks below look unreachable --
  # kept as-is pending confirmation of as_adjmat()'s possible return types.
  # Coercing into the appropiate type
  if (network::is.network(LHS))
    LHS <- list(as_adjmat(LHS))
  else if (is.list(LHS)) {

    is_net <- sapply(LHS, network::is.network)

    # Coercing into a net
    for (i in which(is_net))
      LHS[[i]] <- as_adjmat(LHS[[i]])

  }

  # One column of counts per term, columns labelled with the term as passed.
  out <- matrix(nrow = nnets(LHS), ncol = length(ergm_model$term_names),
                dimnames = list(NULL, ergm_model$term_passed))

  for (j in seq_len(ncol(out))) {
    out[, j] <- count_stats(
      X = LHS,
      terms = ergm_model$term_names[j],
      attrs = ergm_model$attrs[[j]]
    )
  }

  out

}
#' @export
#' @rdname count_stats
count_stats.list <- function(X, terms, attrs = NULL, ...) {

  # Process the networks in bounded-size chunks.
  chunks <- make_chunks(length(X), 2e5)

  # Every element must be a bare adjacency matrix.
  non_matrix <- which(!sapply(X, inherits, what = "matrix"))
  if (length(non_matrix))
    stop("When `X` is a list, it must be a list of matrices. There are ",
         "some objects that are not: ", paste(non_matrix, collapse = ", "), ".",
         call. = FALSE)

  ans <- matrix(NA, nrow = length(X), ncol = length(terms))

  # A single attribute vector is shared across every network.
  shared_attrs <- length(attrs) == 1L

  for (chunk in seq_along(chunks$from)) {

    lo <- chunks$from[chunk]
    hi <- chunks$to[chunk]

    for (k in seq_along(terms)) {
      # Select the attribute payload for this chunk: none, the shared
      # vector, or the per-network slice.
      chunk_attrs <- if (!length(attrs)) {
        list(double(0L))
      } else if (shared_attrs) {
        attrs
      } else {
        attrs[lo:hi]
      }
      ans[lo:hi, k] <- count_stats_cpp(X[lo:hi], terms[k], chunk_attrs)
    }

  }

  ans

}
#' Geodesic distance matrix (all pairs)
#'
#' Calculates the shortest path between all pairs of vertices in a network.
#' This uses the power matrices to do so, which makes it efficient only for
#' small networks.
#'
#' @param x Either a list of networks (or square integer matrices), an integer
#' matrix, a network, or an ergmito.
#' @param force Logical scalar. If `force = FALSE` (the default) and `nvertex(x) > 100`
#' it returns with an error. To force computation use `force = TRUE`.
#' @param ... Further arguments passed to the method.
#' @param simplify Logical scalar. When `TRUE` it returns a matrix, otherwise,
#' a list of length `nnets(x)`.
#'
#' @export
#' @examples
#' data(fivenets)
#' geodesic(fivenets)
#'
#' # Comparing with sna
#' if (require("sna")) {
#' net0 <- fivenets[[1]]
#' net <- network::network(fivenets[[1]])
#' benchmarkito(
#' ergmito = ergmito::geodesic(net0),
#' sna = sna::geodist(net), times = 1000
#' )
#' }
geodesic <- function(x, force = FALSE, ...) UseMethod("geodesic")
#' @export
#' @rdname geodesic
geodesita <- geodesic
#' @export
# @rdname geodesic
geodesic.list <- function(x, force = FALSE, ...) {
  # Coerce every element to an adjacency matrix before the C++ computation.
  geodesic_cpp(as_adjmat(x), force = force)
}
#' @export
#' @rdname geodesic
geodesic.matrix <- function(x, force = FALSE, simplify = FALSE, ...) {
  # Wrap the single matrix in a list for the vectorized C++ routine.
  paths <- geodesic_cpp(list(x), force = force)
  # With simplify = TRUE, unwrap and return the lone distance matrix.
  if (simplify) paths[[1]] else paths
}
#' @export
#' @rdname geodesic
geodesic.network <- function(x, force = FALSE, simplify = FALSE, ...) {
  # Convert the network to an adjacency matrix, then follow the matrix path.
  paths <- geodesic_cpp(list(as_adjmat(x)), force = force)
  # With simplify = TRUE, unwrap and return the lone distance matrix.
  if (simplify) paths[[1]] else paths
}
| /R/count_stats.R | permissive | muriteams/ergmito | R | false | false | 6,730 | r | #' Count Network Statistics
#'
#' This function is similar to what [ergm::summary_formula] does, but it provides
#' a fast wrapper suited for matrix class objects (see benchmark in the examples).
#' @param X List of square matrices. (networks)
#' @param terms Character vector with the names of the statistics to calculate.
#' Currently, the only available statistics are: '\Sexpr{paste(ergmito::AVAILABLE_STATS(), collapse="', '")}'.
#' @param ... Passed to the method.
#' @param attrs A list of vectors. This is used when `term` has a nodal attribute
#' such as `nodeicov(attrname="")`.
#' @export
#' @return A matrix of size `length(X) * length(terms)` with the corresponding
#' counts of statistics.
#' @examples
#' # DGP
#' set.seed(123199)
#' x <- rbernoulli(rep(5, 10))
#' ans0 <- count_stats(x, c("mutual", "edges"))
#'
#' # Calculating using summary_formula
#' ans1 <- lapply(x, function(i) {
#' ergm::summary_formula(i ~ mutual + edges)
#' })
#'
#' ans1 <- do.call(rbind, ans1)
#'
#' # Comparing
#' all.equal(unname(ans0), unname(ans1))
#'
#' # count_stats is vectorized (and so faster)
#' bm <- benchmarkito(
#' count_stats = count_stats(x, c("mutual", "edges")),
#' lapply = lapply(x, function(i) {
#' ergm::summary_formula(i ~ mutual + edges)
#' }), times = 50
#' )
#'
#' plot(bm)
#'
count_stats <- function(X, ...) UseMethod("count_stats")
#' @export
#' @rdname count_stats
AVAILABLE_STATS <- function() count_available()
#' @export
#' @rdname count_stats
count_stats.formula <- function(X, ...) {

  # Pull the networks off the left-hand side of the formula, normalising a
  # single network/matrix to a one-element list.
  LHS <- eval(X[[2]], envir = environment(X))
  if (inherits(LHS, "matrix") || inherits(LHS, "network"))
    LHS <- list(LHS)

  # Undirected graphs are not supported by the counters yet.
  are_undirected <- which(!is_directed(LHS))
  if (length(are_undirected))
    stop(
      "Counting statistics with count_stats in undirected networks is not ",
      "supported yet. The following networks in the formula are undirected: ",
      paste(are_undirected, collapse = ", "), ".", call. = FALSE
    )

  # Analyzing the formula (with network as a reference)
  ergm_model <- analyze_formula(X, LHS)

  # Every requested term must have a counter implemented.
  available <- which(!(ergm_model$term_names %in% count_available()))
  if (length(available))
    stop(
      "The following term(s) are not available in count_stats: ",
      # BUG FIX: this read `ergm_model$names[available]`, a field that is
      # never set (NULL), so the offending terms were silently omitted from
      # the error message. `term_names` is the field used everywhere else.
      paste(ergm_model$term_names[available], collapse = ", "),
      ".", call. = FALSE
    )

  # Capture nodal attributes for terms such as nodeicov(attrname = ...).
  for (a in seq_along(ergm_model$term_attrs)) {

    ergm_model$attrs[[a]] <- if (!length(ergm_model$term_attrs[[a]]))
      double(0)
    else {

      # This check is important, for now. Future versions may include more
      # complex terms that hold more than one attribute.
      if (length(ergm_model$term_attrs[[a]]) > 1L)
        stop(
          "For now, terms with more than one attribute are not supported on. ",
          "The current model you are trying to fit uses the term: ",
          ergm_model$term_passed[a],
          " which includes the following attributes: ",
          paste(ergm_model$term_attrs[[a]], collapse=", "),
          call. = FALSE
        )

      lapply(LHS, function(net) {
        network::get.vertex.attribute(net, attrname = ergm_model$term_attrs[[a]])
      })

    }

  }

  # Coercion is later since we need to check for arguments
  LHS <- as_adjmat(LHS)

  # NOTE(review): after the as_adjmat() call above, LHS is expected to be a
  # list of matrices already, so the network checks below look unreachable --
  # kept as-is pending confirmation of as_adjmat()'s possible return types.
  # Coercing into the appropiate type
  if (network::is.network(LHS))
    LHS <- list(as_adjmat(LHS))
  else if (is.list(LHS)) {

    is_net <- sapply(LHS, network::is.network)

    # Coercing into a net
    for (i in which(is_net))
      LHS[[i]] <- as_adjmat(LHS[[i]])

  }

  # One column of counts per term, columns labelled with the term as passed.
  out <- matrix(nrow = nnets(LHS), ncol = length(ergm_model$term_names),
                dimnames = list(NULL, ergm_model$term_passed))

  for (j in seq_len(ncol(out))) {
    out[, j] <- count_stats(
      X = LHS,
      terms = ergm_model$term_names[j],
      attrs = ergm_model$attrs[[j]]
    )
  }

  out

}
#' @export
#' @rdname count_stats
count_stats.list <- function(X, terms, attrs = NULL, ...) {
chunks <- make_chunks(length(X), 2e5)
# if (!length(attrs))
# attrs <- replicate(length(X), numeric(0), simplify = FALSE)
# Checking the types of objects
test <- which(!sapply(X, inherits, what = "matrix"))
if (length(test))
stop("When `X` is a list, it must be a list of matrices. There are ",
"some objects that are not: ", paste(test, collapse = ", "), ".",
call. = FALSE)
ans <- matrix(NA, nrow = length(X), ncol=length(terms))
all_same_attr <- length(attrs) == 1L
for (s in seq_along(chunks$from)) {
i <- chunks$from[s]
j <- chunks$to[s]
for (k in seq_along(terms)) {
if (!length(attrs))
ans[i:j, k] <- count_stats_cpp(X[i:j], terms[k], list(double(0L)))
else if (all_same_attr)
ans[i:j, k] <- count_stats_cpp(X[i:j], terms[k], attrs)
else
ans[i:j, k] <- count_stats_cpp(X[i:j], terms[k], attrs[i:j])
}
}
ans
}
#' Geodesic distance matrix (all pairs)
#'
#' Calculates the shortest path between all pairs of vertices in a network.
#' This uses the power matrices to do so, which makes it efficient only for
#' small networks.
#'
#' @param x Either a list of networks (or square integer matrices), an integer
#' matrix, a network, or an ergmito.
#' @param force Logical scalar. If `force = FALSE` (the default) and `nvertex(x) > 100`
#' it returns with an error. To force computation use `force = TRUE`.
#' @param ... Further arguments passed to the method.
#' @param simplify Logical scalar. When `TRUE` it returns a matrix, otherwise,
#' a list of length `nnets(x)`.
#'
#' @export
#' @examples
#' data(fivenets)
#' geodesic(fivenets)
#'
#' # Comparing with sna
#' if (require("sna")) {
#' net0 <- fivenets[[1]]
#' net <- network::network(fivenets[[1]])
#' benchmarkito(
#' ergmito = ergmito::geodesic(net0),
#' sna = sna::geodist(net), times = 1000
#' )
#' }
geodesic <- function(x, force = FALSE, ...) UseMethod("geodesic")
#' @export
#' @rdname geodesic
geodesita <- geodesic
#' @export
# @rdname geodesic
geodesic.list <- function(x, force = FALSE, ...) {
geodesic_cpp(as_adjmat(x), force = force)
}
#' @export
#' @rdname geodesic
geodesic.matrix <- function(x, force = FALSE, simplify = FALSE, ...) {
ans <- geodesic_cpp(list(x), force = force)
if (simplify)
return(ans[[1]])
ans
}
#' @export
#' @rdname geodesic
geodesic.network <- function(x, force = FALSE, simplify = FALSE, ...) {
ans <- geodesic_cpp(list(as_adjmat(x)), force = force)
if (simplify)
return(ans[[1]])
ans
}
|
suppressMessages(require(sva))
suppressMessages(require(limma))
suppressMessages(require(dplyr))
suppressMessages(require(corrplot))
suppressMessages(require(reshape2))
suppressMessages(require(ggplot2))
suppressMessages(require(gplots))
suppressMessages(require(ggfortify))
suppressMessages(require(argparse))
suppressMessages(library(umap))
# Command-line interface: output label, input RDS file, and output directory.
parser <- ArgumentParser()
parser$add_argument("--value", action="store")
parser$add_argument("--infile", action="store")
parser$add_argument("--outdir", action="store")
args <- parser$parse_args()
value <- args$value
outdir <- args$outdir
# NOTE(review): hard-coded setwd() ties this script to one cluster path;
# consider making the working directory an argument.
setwd('/hpf/largeprojects/adam/projects/lfs/lfs_germline/methyl_data/')
source('Scripts/util_functions.R')
###################################################################################################################
# Combat batch correction
###################################################################################################################
cat("[ Reading in data ]","\n")
# Load the combined clinical + methylation table; drop flagged/incomplete rows.
data <- readRDS(paste0('rds/',args$infile))
data <- data[data$Meth != "Problem",]
data <- data[!is.na(data$Project),]
# Columns 1-44 are treated as clinical covariates and 45+ as beta values
# throughout this script -- assumed layout, TODO confirm against the RDS.
pc <- prcomp(as.matrix(data[45:length(data)]), scale = TRUE)
pc_clin <- cbind(data[1:44], pc$x)
# Drop PCA outliers beyond 3 SD (remove_outliers from util_functions.R).
keep_ids <- remove_outliers(pc_clin,3)
data <- data[data$SentrixID %in% keep_ids,]
clin <- data[1:44] ; beta <- data.frame(data[45:length(data)])
# Transpose to probes x samples, the orientation ComBat expects.
beta <- as.matrix(t(sapply( beta, as.numeric )))
cat("[ Calling ComBat ]","\n")
# Intercept-only model: correct for Project batches without protecting covariates.
modcombat <- model.matrix(~1, data = data)
batches <- as.numeric(data$Project)
beta_ComBat <- ComBat(dat=beta,batch=batches,mod = modcombat, par.prior=TRUE, prior.plots=FALSE)
# Back to samples x probes and re-attach the clinical columns.
beta_ComBat <- data.frame(t(beta_ComBat))
data_ComBat <- cbind(clin,beta_ComBat)
saveRDS(data_ComBat,paste0('rds/',value,'.rds'))
cat("[ PCA after batch correction with ComBat ]","\n")
pc <- prcomp(as.matrix(beta_ComBat), scale = TRUE)
pc_clin <- cbind(clin, pc$x)
write.csv(pc_clin, paste0('Output/',value,'_PCA.csv'),quote=F,row.names=F)
generate_pcsummary(pc_clin,paste0(value,'_PCA_summary.csv'),outdir)
generate_pcplots(pc_clin,value,outdir)
###################################################################################################################
# Visualize differences before and after correction
###################################################################################################################
cat("[ Plotting technical replicates ]","\n")
# Concordance of technical replicate pairs after correction
# (helpers come from util_functions.R).
duplicated_data <- get_technicalreplicates(data_ComBat)
plot_concordance(duplicated_data,value)
# PCA restricted to the technical replicates, rows labelled "id (array)".
pc_beta <- data.frame(duplicated_data[45:length(duplicated_data)], row.names = paste0(duplicated_data$ids," (",duplicated_data$array,")"))
pc <- prcomp(as.matrix(pc_beta), scale = TRUE)
pc_clin <- cbind(duplicated_data[1:44],pc$x)
write.csv(pc_clin,paste0('Output/',value,'_TechnicalReplicates_PCA.csv'),quote=F,row.names=F)
generate_pcsummary(pc_clin,paste0(value,'_TechnicalReplicates_PCA_summary.csv'),outdir)
generate_pcplots(pc_clin,paste0(value,'_TechnicalReplicates'),outdir)
# UMAP embedding of the corrected betas.
# NOTE(review): `data_ComBat[1:45]` disagrees with the 44-column clinical
# block used everywhere else -- this pulls the first beta column in as if it
# were clinical; confirm whether 1:44 was intended.
u <- umap(beta_ComBat) ; ud <- cbind(data_ComBat[1:45],u$layout)
write.csv(ud,paste0('Output/Umap_',value,'.csv'))
| /cisCSCE/batch_correction_ComBat.R | no_license | vsubasri/LFS-Germline | R | false | false | 3,083 | r | suppressMessages(require(sva))
suppressMessages(require(limma))
suppressMessages(require(dplyr))
suppressMessages(require(corrplot))
suppressMessages(require(reshape2))
suppressMessages(require(ggplot2))
suppressMessages(require(gplots))
suppressMessages(require(ggfortify))
suppressMessages(require(argparse))
suppressMessages(library(umap))
parser <- ArgumentParser()
parser$add_argument("--value", action="store")
parser$add_argument("--infile", action="store")
parser$add_argument("--outdir", action="store")
args <- parser$parse_args()
value <- args$value
outdir <- args$outdir
setwd('/hpf/largeprojects/adam/projects/lfs/lfs_germline/methyl_data/')
source('Scripts/util_functions.R')
###################################################################################################################
# Combat batch correction
###################################################################################################################
cat("[ Reading in data ]","\n")
data <- readRDS(paste0('rds/',args$infile))
data <- data[data$Meth != "Problem",]
data <- data[!is.na(data$Project),]
pc <- prcomp(as.matrix(data[45:length(data)]), scale = TRUE)
pc_clin <- cbind(data[1:44], pc$x)
keep_ids <- remove_outliers(pc_clin,3)
data <- data[data$SentrixID %in% keep_ids,]
clin <- data[1:44] ; beta <- data.frame(data[45:length(data)])
beta <- as.matrix(t(sapply( beta, as.numeric )))
cat("[ Calling ComBat ]","\n")
modcombat <- model.matrix(~1, data = data)
batches <- as.numeric(data$Project)
beta_ComBat <- ComBat(dat=beta,batch=batches,mod = modcombat, par.prior=TRUE, prior.plots=FALSE)
beta_ComBat <- data.frame(t(beta_ComBat))
data_ComBat <- cbind(clin,beta_ComBat)
saveRDS(data_ComBat,paste0('rds/',value,'.rds'))
cat("[ PCA after batch correction with ComBat ]","\n")
pc <- prcomp(as.matrix(beta_ComBat), scale = TRUE)
pc_clin <- cbind(clin, pc$x)
write.csv(pc_clin, paste0('Output/',value,'_PCA.csv'),quote=F,row.names=F)
generate_pcsummary(pc_clin,paste0(value,'_PCA_summary.csv'),outdir)
generate_pcplots(pc_clin,value,outdir)
###################################################################################################################
# Visualize differences before and after correction
###################################################################################################################
cat("[ Plotting technical replicates ]","\n")
duplicated_data <- get_technicalreplicates(data_ComBat)
plot_concordance(duplicated_data,value)
pc_beta <- data.frame(duplicated_data[45:length(duplicated_data)], row.names = paste0(duplicated_data$ids," (",duplicated_data$array,")"))
pc <- prcomp(as.matrix(pc_beta), scale = TRUE)
pc_clin <- cbind(duplicated_data[1:44],pc$x)
write.csv(pc_clin,paste0('Output/',value,'_TechnicalReplicates_PCA.csv'),quote=F,row.names=F)
generate_pcsummary(pc_clin,paste0(value,'_TechnicalReplicates_PCA_summary.csv'),outdir)
generate_pcplots(pc_clin,paste0(value,'_TechnicalReplicates'),outdir)
u <- umap(beta_ComBat) ; ud <- cbind(data_ComBat[1:45],u$layout)
write.csv(ud,paste0('Output/Umap_',value,'.csv'))
|
\name{astrowrap_boot.array}
\alias{astrowrap_boot.array}
\title{Bootstrap Samples}
\description{
Returns a matrix indicating either the frequency or index of original sample observations in each of the bootstrap samples.
}
\usage{
astrowrap("boot.array", ...)
}
\arguments{
\item{...}{expressions evaluated in the context of the \code{\link[boot]{boot.array}}}
}
\references{
Faber, S. M., & Jackson, R. E. (1976). \emph{Velocity dispersions and mass-to-light ratios for elliptical galaxies}.
The Astrophysical Journal, 204, 668-683. \href{http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1976ApJ...204..668F&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf}{PDF}
}
\seealso{
\code{\link{astrowrap_boot}}, \code{\link{astrowrap_boot.ci}}
}
\examples{
## Faber-Jackson relationship
## For further details on the data, see ?astrowrap_boot
data(fab_jack)
summary(fab_jack)
fab_jack_lm <- function(data, indices=NULL, x_var, y_var) {
data <- data[indices,]
lm_form <- as.formula(paste(y_var, "~", x_var))
lm_sum <- summary(lm(lm_form, data=data))
lm_sum$coefficients[c(1,2), 1]
}
fab_boot <- astrowrap("boot", fab_jack, fab_jack_lm, R=1000,
x_var="Mag", y_var="Vel")
# Frequency of every observation in each resample
astrowrap("boot.array", fab_boot)
# Indices of original observations as they appear in a resample
astrowrap("boot.array", fab_boot, TRUE)
}
\keyword{astrowrap} | /man/astrowrap_boot.array.Rd | no_license | chengjiun/astrowrap | R | false | false | 1,439 | rd | \name{astrowrap_boot.array}
\alias{astrowrap_boot.array}
\title{Bootstrap Samples}
\description{
Returns a matrix indicating either the frequency or index of original sample observations in each of the bootstrap samples.
}
\usage{
astrowrap("boot.array", ...)
}
\arguments{
\item{...}{expressions evaluated in the context of the \code{\link[boot]{boot.array}}}
}
\references{
Faber, S. M., & Jackson, R. E. (1976). \emph{Velocity dispersions and mass-to-light ratios for elliptical galaxies}.
The Astrophysical Journal, 204, 668-683. \href{http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1976ApJ...204..668F&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf}{PDF}
}
\seealso{
\code{\link{astrowrap_boot}}, \code{\link{astrowrap_boot.ci}}
}
\examples{
## Faber-Jackson relationship
## For further details on the data, see ?astrowrap_boot
data(fab_jack)
summary(fab_jack)
fab_jack_lm <- function(data, indices=NULL, x_var, y_var) {
data <- data[indices,]
lm_form <- as.formula(paste(y_var, "~", x_var))
lm_sum <- summary(lm(lm_form, data=data))
lm_sum$coefficients[c(1,2), 1]
}
fab_boot <- astrowrap("boot", fab_jack, fab_jack_lm, R=1000,
x_var="Mag", y_var="Vel")
# Frequency of every observation in each resample
astrowrap("boot.array", fab_boot)
# Indices of original observations as they appear in a resample
astrowrap("boot.array", fab_boot, TRUE)
}
\keyword{astrowrap} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOFeatureAttribute.R
\docType{class}
\name{ISOFeatureAttribute}
\alias{ISOFeatureAttribute}
\title{ISOFeatureAttribute}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} for modelling an ISOFeatureAttribute
}
\description{
ISOFeatureAttribute
ISOFeatureAttribute
}
\examples{
md <- ISOFeatureAttribute$new()
md$setMemberName("name")
md$setDefinition("definition")
md$setCardinality(lower=1,upper=1)
md$setCode("code")
gml <- GMLBaseUnit$new(id = "ID")
gml$setDescriptionReference("someref")
gml$setIdentifier("identifier", "codespace")
gml$addName("name1", "codespace")
gml$addName("name2", "codespace")
gml$setQuantityTypeReference("someref")
gml$setCatalogSymbol("symbol")
gml$setUnitsSystem("somelink")
md$setValueMeasurementUnit(gml)
val1 <- ISOListedValue$new()
val1$setCode("code1")
val1$setLabel("label1")
val1$setDefinition("definition1")
md$addListedValue(val1)
val2 <- ISOListedValue$new()
val2$setCode("code2")
val2$setLabel("label2")
val2$setDefinition("definition2")
md$addListedValue(val2)
md$setValueType("typeName")
}
\references{
ISO 19110:2005 Methodology for Feature cataloguing
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{feature}
\keyword{operation}
\section{Super classes}{
\code{\link[geometa:geometaLogger]{geometa::geometaLogger}} -> \code{\link[geometa:ISOAbstractObject]{geometa::ISOAbstractObject}} -> \code{\link[geometa:ISOAbstractCarrierOfCharacteristics]{geometa::ISOAbstractCarrierOfCharacteristics}} -> \code{\link[geometa:ISOAbstractPropertyType]{geometa::ISOAbstractPropertyType}} -> \code{\link[geometa:ISOPropertyType]{geometa::ISOPropertyType}} -> \code{ISOFeatureAttribute}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{code}}{code [0..1]: character}
\item{\code{valueMeasurementUnit}}{valueMeasurementUnit [0..1]: GMLUnitDefinition}
\item{\code{valueType}}{valueType [0..1]: ISOTypeName}
\item{\code{listedValue}}{listedValue [0..*]: ISOListedValue}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-ISOFeatureAttribute-new}{\code{ISOFeatureAttribute$new()}}
\item \href{#method-ISOFeatureAttribute-setCode}{\code{ISOFeatureAttribute$setCode()}}
\item \href{#method-ISOFeatureAttribute-setValueMeasurementUnit}{\code{ISOFeatureAttribute$setValueMeasurementUnit()}}
\item \href{#method-ISOFeatureAttribute-setValueType}{\code{ISOFeatureAttribute$setValueType()}}
\item \href{#method-ISOFeatureAttribute-addListedValue}{\code{ISOFeatureAttribute$addListedValue()}}
\item \href{#method-ISOFeatureAttribute-delListedValue}{\code{ISOFeatureAttribute$delListedValue()}}
\item \href{#method-ISOFeatureAttribute-clone}{\code{ISOFeatureAttribute$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="ERROR"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-ERROR'><code>geometa::geometaLogger$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="INFO"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-INFO'><code>geometa::geometaLogger$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="WARN"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-WARN'><code>geometa::geometaLogger$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="addFieldAttrs"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-addFieldAttrs'><code>geometa::ISOAbstractObject$addFieldAttrs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="addListElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-addListElement'><code>geometa::ISOAbstractObject$addListElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="contains"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-contains'><code>geometa::ISOAbstractObject$contains()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="createLocalisedProperty"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-createLocalisedProperty'><code>geometa::ISOAbstractObject$createLocalisedProperty()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="decode"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-decode'><code>geometa::ISOAbstractObject$decode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="delListElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-delListElement'><code>geometa::ISOAbstractObject$delListElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="encode"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-encode'><code>geometa::ISOAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getClass"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getClass'><code>geometa::ISOAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getClassName"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getClassName'><code>geometa::ISOAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getNamespaceDefinition"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getNamespaceDefinition'><code>geometa::ISOAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="isDocument"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-isDocument'><code>geometa::ISOAbstractObject$isDocument()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="isFieldInheritedFrom"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-isFieldInheritedFrom'><code>geometa::ISOAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="print"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-print'><code>geometa::ISOAbstractObject$print()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="save"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-save'><code>geometa::ISOAbstractObject$save()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setAttr"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setAttr'><code>geometa::ISOAbstractObject$setAttr()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeList"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeList'><code>geometa::ISOAbstractObject$setCodeList()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeListValue"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeListValue'><code>geometa::ISOAbstractObject$setCodeListValue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeSpace"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeSpace'><code>geometa::ISOAbstractObject$setCodeSpace()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setHref"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setHref'><code>geometa::ISOAbstractObject$setHref()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setId"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setId'><code>geometa::ISOAbstractObject$setId()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setIsNull"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setIsNull'><code>geometa::ISOAbstractObject$setIsNull()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setValue"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setValue'><code>geometa::ISOAbstractObject$setValue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="validate"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-validate'><code>geometa::ISOAbstractObject$validate()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="wrapBaseElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-wrapBaseElement'><code>geometa::ISOAbstractObject$wrapBaseElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="addConstraint"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-addConstraint'><code>geometa::ISOAbstractCarrierOfCharacteristics$addConstraint()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="delConstraint"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-delConstraint'><code>geometa::ISOAbstractCarrierOfCharacteristics$delConstraint()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="setFeatureType"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-setFeatureType'><code>geometa::ISOAbstractCarrierOfCharacteristics$setFeatureType()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setCardinality"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setCardinality'><code>geometa::ISOAbstractPropertyType$setCardinality()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setDefinition"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setDefinition'><code>geometa::ISOAbstractPropertyType$setDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setDefinitionReference"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setDefinitionReference'><code>geometa::ISOAbstractPropertyType$setDefinitionReference()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setFeatureCatalogue"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setFeatureCatalogue'><code>geometa::ISOAbstractPropertyType$setFeatureCatalogue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setMemberName"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setMemberName'><code>geometa::ISOAbstractPropertyType$setMemberName()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-new"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-new}{}}}
\subsection{Method \code{new()}}{
Initializes object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$new(xml = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setCode"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setCode}{}}}
\subsection{Method \code{setCode()}}{
Set code
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setCode(code, locales = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{code}}{code}
\item{\code{locales}}{list of localized codes. Default is \code{NULL}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setValueMeasurementUnit"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setValueMeasurementUnit}{}}}
\subsection{Method \code{setValueMeasurementUnit()}}{
Set value measurement unit
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setValueMeasurementUnit(uom)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{uom}}{uom, object of class \link{GMLUnitDefinition}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setValueType"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setValueType}{}}}
\subsection{Method \code{setValueType()}}{
Set type name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setValueType(typeName, locales = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{typeName}}{typeName}
\item{\code{locales}}{list of localized typeNames. Default is \code{NULL}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-addListedValue"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-addListedValue}{}}}
\subsection{Method \code{addListedValue()}}{
Adds listed value
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$addListedValue(value)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{value}}{value, object of class \link{ISOListedValue}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{TRUE} if added, \code{FALSE} otherwise
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-delListedValue"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-delListedValue}{}}}
\subsection{Method \code{delListedValue()}}{
Deletes listed value
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$delListedValue(value)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{value}}{value, object of class \link{ISOListedValue}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{TRUE} if deleted, \code{FALSE} otherwise
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-clone"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/ISOFeatureAttribute.Rd | no_license | cran/geometa | R | false | true | 16,581 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOFeatureAttribute.R
\docType{class}
\name{ISOFeatureAttribute}
\alias{ISOFeatureAttribute}
\title{ISOFeatureAttribute}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} for modelling an ISOFeatureAttribute
}
\description{
R6 class for modelling an ISO feature attribute, as defined by the
ISO 19110 methodology for feature cataloguing.
}
\examples{
md <- ISOFeatureAttribute$new()
md$setMemberName("name")
md$setDefinition("definition")
md$setCardinality(lower=1,upper=1)
md$setCode("code")
gml <- GMLBaseUnit$new(id = "ID")
gml$setDescriptionReference("someref")
gml$setIdentifier("identifier", "codespace")
gml$addName("name1", "codespace")
gml$addName("name2", "codespace")
gml$setQuantityTypeReference("someref")
gml$setCatalogSymbol("symbol")
gml$setUnitsSystem("somelink")
md$setValueMeasurementUnit(gml)
val1 <- ISOListedValue$new()
val1$setCode("code1")
val1$setLabel("label1")
val1$setDefinition("definition1")
md$addListedValue(val1)
val2 <- ISOListedValue$new()
val2$setCode("code2")
val2$setLabel("label2")
val2$setDefinition("definition2")
md$addListedValue(val2)
md$setValueType("typeName")
}
\references{
ISO 19110:2005 Methodology for Feature cataloguing
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{feature}
\keyword{operation}
\section{Super classes}{
\code{\link[geometa:geometaLogger]{geometa::geometaLogger}} -> \code{\link[geometa:ISOAbstractObject]{geometa::ISOAbstractObject}} -> \code{\link[geometa:ISOAbstractCarrierOfCharacteristics]{geometa::ISOAbstractCarrierOfCharacteristics}} -> \code{\link[geometa:ISOAbstractPropertyType]{geometa::ISOAbstractPropertyType}} -> \code{\link[geometa:ISOPropertyType]{geometa::ISOPropertyType}} -> \code{ISOFeatureAttribute}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{code}}{code [0..1]: character}
\item{\code{valueMeasurementUnit}}{valueMeasurementUnit [0..1]: GMLUnitDefinition}
\item{\code{valueType}}{valueType [0..1]: ISOTypeName}
\item{\code{listedValue}}{listedValue [0..*]: ISOListedValue}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-ISOFeatureAttribute-new}{\code{ISOFeatureAttribute$new()}}
\item \href{#method-ISOFeatureAttribute-setCode}{\code{ISOFeatureAttribute$setCode()}}
\item \href{#method-ISOFeatureAttribute-setValueMeasurementUnit}{\code{ISOFeatureAttribute$setValueMeasurementUnit()}}
\item \href{#method-ISOFeatureAttribute-setValueType}{\code{ISOFeatureAttribute$setValueType()}}
\item \href{#method-ISOFeatureAttribute-addListedValue}{\code{ISOFeatureAttribute$addListedValue()}}
\item \href{#method-ISOFeatureAttribute-delListedValue}{\code{ISOFeatureAttribute$delListedValue()}}
\item \href{#method-ISOFeatureAttribute-clone}{\code{ISOFeatureAttribute$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="ERROR"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-ERROR'><code>geometa::geometaLogger$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="INFO"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-INFO'><code>geometa::geometaLogger$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="geometaLogger" data-id="WARN"><a href='../../geometa/html/geometaLogger.html#method-geometaLogger-WARN'><code>geometa::geometaLogger$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="addFieldAttrs"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-addFieldAttrs'><code>geometa::ISOAbstractObject$addFieldAttrs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="addListElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-addListElement'><code>geometa::ISOAbstractObject$addListElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="contains"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-contains'><code>geometa::ISOAbstractObject$contains()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="createLocalisedProperty"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-createLocalisedProperty'><code>geometa::ISOAbstractObject$createLocalisedProperty()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="decode"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-decode'><code>geometa::ISOAbstractObject$decode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="delListElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-delListElement'><code>geometa::ISOAbstractObject$delListElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="encode"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-encode'><code>geometa::ISOAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getClass"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getClass'><code>geometa::ISOAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getClassName"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getClassName'><code>geometa::ISOAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="getNamespaceDefinition"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-getNamespaceDefinition'><code>geometa::ISOAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="isDocument"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-isDocument'><code>geometa::ISOAbstractObject$isDocument()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="isFieldInheritedFrom"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-isFieldInheritedFrom'><code>geometa::ISOAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="print"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-print'><code>geometa::ISOAbstractObject$print()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="save"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-save'><code>geometa::ISOAbstractObject$save()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setAttr"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setAttr'><code>geometa::ISOAbstractObject$setAttr()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeList"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeList'><code>geometa::ISOAbstractObject$setCodeList()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeListValue"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeListValue'><code>geometa::ISOAbstractObject$setCodeListValue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setCodeSpace"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setCodeSpace'><code>geometa::ISOAbstractObject$setCodeSpace()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setHref"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setHref'><code>geometa::ISOAbstractObject$setHref()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setId"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setId'><code>geometa::ISOAbstractObject$setId()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setIsNull"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setIsNull'><code>geometa::ISOAbstractObject$setIsNull()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="setValue"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-setValue'><code>geometa::ISOAbstractObject$setValue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="validate"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-validate'><code>geometa::ISOAbstractObject$validate()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractObject" data-id="wrapBaseElement"><a href='../../geometa/html/ISOAbstractObject.html#method-ISOAbstractObject-wrapBaseElement'><code>geometa::ISOAbstractObject$wrapBaseElement()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="addConstraint"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-addConstraint'><code>geometa::ISOAbstractCarrierOfCharacteristics$addConstraint()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="delConstraint"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-delConstraint'><code>geometa::ISOAbstractCarrierOfCharacteristics$delConstraint()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractCarrierOfCharacteristics" data-id="setFeatureType"><a href='../../geometa/html/ISOAbstractCarrierOfCharacteristics.html#method-ISOAbstractCarrierOfCharacteristics-setFeatureType'><code>geometa::ISOAbstractCarrierOfCharacteristics$setFeatureType()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setCardinality"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setCardinality'><code>geometa::ISOAbstractPropertyType$setCardinality()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setDefinition"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setDefinition'><code>geometa::ISOAbstractPropertyType$setDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setDefinitionReference"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setDefinitionReference'><code>geometa::ISOAbstractPropertyType$setDefinitionReference()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setFeatureCatalogue"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setFeatureCatalogue'><code>geometa::ISOAbstractPropertyType$setFeatureCatalogue()</code></a></span></li>
<li><span class="pkg-link" data-pkg="geometa" data-topic="ISOAbstractPropertyType" data-id="setMemberName"><a href='../../geometa/html/ISOAbstractPropertyType.html#method-ISOAbstractPropertyType-setMemberName'><code>geometa::ISOAbstractPropertyType$setMemberName()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-new"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-new}{}}}
\subsection{Method \code{new()}}{
Initializes object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$new(xml = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setCode"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setCode}{}}}
\subsection{Method \code{setCode()}}{
Set code
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setCode(code, locales = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{code}}{code}
\item{\code{locales}}{list of localized codes. Default is \code{NULL}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setValueMeasurementUnit"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setValueMeasurementUnit}{}}}
\subsection{Method \code{setValueMeasurementUnit()}}{
Set value measurement unit
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setValueMeasurementUnit(uom)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{uom}}{uom, object of class \link{GMLUnitDefinition}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-setValueType"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-setValueType}{}}}
\subsection{Method \code{setValueType()}}{
Set type name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$setValueType(typeName, locales = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{typeName}}{typeName}
\item{\code{locales}}{list of localized typeNames. Default is \code{NULL}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-addListedValue"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-addListedValue}{}}}
\subsection{Method \code{addListedValue()}}{
Adds listed value
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$addListedValue(value)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{value}}{value, object of class \link{ISOListedValue}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{TRUE} if added, \code{FALSE} otherwise
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-delListedValue"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-delListedValue}{}}}
\subsection{Method \code{delListedValue()}}{
Deletes listed value
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$delListedValue(value)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{value}}{value, object of class \link{ISOListedValue}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{TRUE} if deleted, \code{FALSE} otherwise
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ISOFeatureAttribute-clone"></a>}}
\if{latex}{\out{\hypertarget{method-ISOFeatureAttribute-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ISOFeatureAttribute$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# These functions are tested indirectly when the models are used. Since this
# function is executed on package startup, you can't execute them to test since
# they are already in the parsnip model database. We'll exclude them from
# coverage stats for this reason.
# nocov start
make_sarima_reg <- function() {
  # Register the model type with parsnip and declare its only mode.
  parsnip::set_new_model("sarima_reg")
  parsnip::set_model_mode("sarima_reg", "regression")
  # Engine and runtime dependencies ----
  parsnip::set_model_engine("sarima_reg", mode = "regression", eng = "stan")
  parsnip::set_dependency("sarima_reg", "stan", "bayesforecast")
  parsnip::set_dependency("sarima_reg", "stan", "bayesmodels")
  # Tunable arguments ----
  # Each entry pairs the parsnip-facing argument name with the argument name
  # expected by the engine function. The dials-style tuning function exported
  # from bayesmodels always shares the parsnip name, so it is not repeated.
  arg_map <- list(
    c("seasonal_period",          "period"),
    c("non_seasonal_ar",          "p"),
    c("non_seasonal_differences", "d"),
    c("non_seasonal_ma",          "q"),
    c("seasonal_ar",              "P"),
    c("seasonal_differences",     "D"),
    c("seasonal_ma",              "Q"),
    c("markov_chains",            "chains"),
    c("chain_iter",               "iter"),
    c("warmup_iter",              "warmup"),
    c("adapt_delta",              "adapt.delta"),
    c("tree_depth",               "tree.depth"),
    c("pred_seed",                "seed")
  )
  for (arg in arg_map) {
    parsnip::set_model_arg(
      model        = "sarima_reg",
      eng          = "stan",
      parsnip      = arg[[1]],
      original     = arg[[2]],
      func         = list(pkg = "bayesmodels", fun = arg[[1]]),
      has_submodel = FALSE
    )
  }
  # Predictor encoding ----
  # No indicator expansion and no intercept handling: the engine receives the
  # predictors as-is.
  parsnip::set_encoding(
    model = "sarima_reg",
    eng = "stan",
    mode = "regression",
    options = list(
      predictor_indicators = "none",
      compute_intercept = FALSE,
      remove_intercept = FALSE,
      allow_sparse_x = FALSE
    )
  )
  # Fit interface ----
  # Fitting goes through the package's own wrapper around the Stan backend.
  parsnip::set_fit(
    model = "sarima_reg",
    eng = "stan",
    mode = "regression",
    value = list(
      interface = "data.frame",
      protect = c("x", "y"),
      func = c(fun = "Sarima_stan_fit_impl"),
      defaults = list()
    )
  )
  # Prediction interface ----
  parsnip::set_pred(
    model = "sarima_reg",
    eng = "stan",
    mode = "regression",
    type = "numeric",
    value = list(
      pre = NULL,
      post = NULL,
      func = c(fun = "predict"),
      args = list(
        object = rlang::expr(object$fit),
        new_data = rlang::expr(new_data)
      )
    )
  )
}
# nocov end | /R/parsnip-sarima_reg_data.R | no_license | cran/bayesmodels | R | false | false | 5,777 | r | # These functions are tested indirectly when the models are used. Since this
# function is executed on package startup, you can't execute them to test since
# they are already in the parsnip model database. We'll exclude them from
# coverage stats for this reason.
# nocov start
make_sarima_reg <- function() {
parsnip::set_new_model("sarima_reg")
parsnip::set_model_mode("sarima_reg", "regression")
# arima ----
# * Model ----
parsnip::set_model_engine("sarima_reg", mode = "regression", eng = "stan")
parsnip::set_dependency("sarima_reg", "stan", "bayesforecast")
parsnip::set_dependency("sarima_reg", "stan", "bayesmodels")
# * Args ----
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "seasonal_period",
original = "period",
func = list(pkg = "bayesmodels", fun = "seasonal_period"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "non_seasonal_ar",
original = "p",
func = list(pkg = "bayesmodels", fun = "non_seasonal_ar"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "non_seasonal_differences",
original = "d",
func = list(pkg = "bayesmodels", fun = "non_seasonal_differences"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "non_seasonal_ma",
original = "q",
func = list(pkg = "bayesmodels", fun = "non_seasonal_ma"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "seasonal_ar",
original = "P",
func = list(pkg = "bayesmodels", fun = "seasonal_ar"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "seasonal_differences",
original = "D",
func = list(pkg = "bayesmodels", fun = "seasonal_differences"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "seasonal_ma",
original = "Q",
func = list(pkg = "bayesmodels", fun = "seasonal_ma"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "markov_chains",
original = "chains",
func = list(pkg = "bayesmodels", fun = "markov_chains"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "chain_iter",
original = "iter",
func = list(pkg = "bayesmodels", fun = "chain_iter"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "warmup_iter",
original = "warmup",
func = list(pkg = "bayesmodels", fun = "warmup_iter"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "adapt_delta",
original = "adapt.delta",
func = list(pkg = "bayesmodels", fun = "adapt_delta"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "tree_depth",
original = "tree.depth",
func = list(pkg = "bayesmodels", fun = "tree_depth"),
has_submodel = FALSE
)
parsnip::set_model_arg(
model = "sarima_reg",
eng = "stan",
parsnip = "pred_seed",
original = "seed",
func = list(pkg = "bayesmodels", fun = "pred_seed"),
has_submodel = FALSE
)
# * Encoding ----
parsnip::set_encoding(
model = "sarima_reg",
eng = "stan",
mode = "regression",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
# * Fit ----
parsnip::set_fit(
model = "sarima_reg",
eng = "stan",
mode = "regression",
value = list(
interface = "data.frame",
protect = c("x", "y"),
func = c(fun = "Sarima_stan_fit_impl"),
defaults = list()
)
)
# * Predict ----
parsnip::set_pred(
model = "sarima_reg",
eng = "stan",
mode = "regression",
type = "numeric",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(
object = rlang::expr(object$fit),
new_data = rlang::expr(new_data)
)
)
)
}
# nocov end |
library(Biobase)
library(biomaRt)
uams.5.eset <- function(eset, already.log2.transformed = FALSE) {
  # UAMS-5 signature probesets and the published risk cutoff on the
  # mean log2-expression scale.
  probesets <- c("204033_at", "200916_at", "204023_at", "202345_s_at", "201231_s_at")
  cutoff <- 10.68
  # Expression values for the signature probesets only; log2-transform unless
  # the caller states the data are already on the log2 scale.
  expr.vals <- exprs(eset[featureNames(eset) %in% probesets, ])
  if (!already.log2.transformed) {
    expr.vals <- log2(expr.vals)
  }
  # Per-sample raw score = mean log2 expression across the five probesets.
  raw.score <- apply(expr.vals, 2, mean)
  # high.risk compares the score rounded to 2 decimals against the cutoff.
  data.frame(
    ID = sampleNames(eset),
    raw.score = raw.score,
    high.risk = round(raw.score, digits = 2) > cutoff
  )
}
# assumes that mapping is a data.frame with 2 columns: INDEX and GENE
# INDEX = any type of ID (e.g., ENTREZID)
# GENE = Official gene symbol
# Compute the UAMS-5 risk score from a gene-level expression matrix.
#
# Args:
#   inmatrix: expression matrix (rows = features, columns = samples); row
#     names are matched against mapping$INDEX.
#   mapping: data.frame with columns INDEX (any feature ID) and GENE
#     (official gene symbol); assumed row-aligned with inmatrix when the
#     row counts match.
#   already.log2.transformed: TRUE if inmatrix is already on the log2 scale.
#
# Returns: data.frame(ID, raw.score, high.risk) from uams.5.eset().
uams.5.gene <- function(inmatrix,mapping,already.log2.transformed=FALSE) {
  # Row counts differ: align matrix and mapping on their shared IDs.
  if(nrow(inmatrix) != nrow(mapping)) {
    inter <- intersect(rownames(inmatrix),mapping$INDEX);
    inmatrix <- inmatrix[match(inter,rownames(inmatrix)),];
    mapping <- mapping[match(inter,mapping$INDEX),]
  }
  # No overlap between rownames and INDEX: assume mapping is row-aligned and
  # impose its INDEX values as rownames.
  if(length(intersect(rownames(inmatrix),mapping$INDEX)) == 0) {
    rownames(inmatrix) <- mapping$INDEX
  }
  # The five UAMS-5 signature genes and their Affymetrix probesets, in the
  # same order (genes[i] corresponds to probesets[i]).
  genes <- c('TRIP13','TAGLN2','RFC4','FABP5','ENO1')
  probesets <- c('204033_at','200916_at','204023_at','202345_s_at','201231_s_at')
  # Feature ID carrying each signature gene (NA where the gene is absent);
  # matching is case-insensitive on the gene symbol.
  available <- mapping[['INDEX']][match(toupper(genes),toupper(mapping[['GENE']]))];
  if(any(is.na(available))) {
    warning(paste('The following genes are missing from the supplied dataset: ',paste(genes[is.na(available)],collapse=', ')))
  }
  # Keep only the rows for available signature genes and relabel them with
  # the probeset IDs so that uams.5.eset() can find them by name.
  inmatrix <- inmatrix[na.exclude(available),]
  rownames(inmatrix) <- probesets[!is.na(available)];
  inmat <- apply(inmatrix,c(1:2),as.numeric)
  # Drop columns whose name is NA -- presumably samples mangled upstream;
  # TODO(review): confirm this is the intended filter.
  inmat <- inmat[,!is.na(colnames(inmat))]
  eset <- ExpressionSet(assayData = inmat)
  uams.5.eset(eset=eset,already.log2.transformed=already.log2.transformed)
}
# Compute the UAMS-5 risk score from a matrix keyed by Entrez gene IDs.
# Maps Entrez IDs to HGNC symbols via the Ensembl BioMart web service, then
# delegates to uams.5.gene(). Requires network access.
uams.5.entrez <- function(inmatrix,already.log2.transformed=FALSE) {
  ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
  # NOTE(review): newer biomaRt releases renamed this attribute/filter to
  # 'entrezgene_id'; 'entrezgene' may fail there -- confirm against the
  # installed biomaRt version.
  bm <- getBM(attributes=c('entrezgene', 'hgnc_symbol'),
              filters = 'entrezgene',
              values = rownames(inmatrix),
              mart = ensembl)
  # Rename to the column contract expected by uams.5.gene().
  names(bm) <- c('INDEX','GENE');
  uams.5.gene(inmatrix,bm,already.log2.transformed=already.log2.transformed)
}
library(biomaRt)
uams.5.eset <- function(eset,already.log2.transformed=FALSE) {
probesets <- c('204033_at','200916_at','204023_at','202345_s_at','201231_s_at')
cutoff <- 10.68
if(already.log2.transformed) {
raw.score <- apply(exprs(eset[featureNames(eset) %in% probesets,]),2,mean)
} else {
raw.score <- apply(log2(exprs(eset[featureNames(eset) %in% probesets,])),2,mean)
}
data.frame(ID=sampleNames(eset),raw.score=raw.score,high.risk=round(raw.score,digits=2) > cutoff)
}
# assumes that mapping is a data.frame with 2 columns: INDEX and GENE
# INDEX = any type of ID (e.g., ENTREZID)
# GENE = Official gene symbol
uams.5.gene <- function(inmatrix,mapping,already.log2.transformed=FALSE) {
if(nrow(inmatrix) != nrow(mapping)) {
inter <- intersect(rownames(inmatrix),mapping$INDEX);
inmatrix <- inmatrix[match(inter,rownames(inmatrix)),];
mapping <- mapping[match(inter,mapping$INDEX),]
}
if(length(intersect(rownames(inmatrix),mapping$INDEX)) == 0) {
rownames(inmatrix) <- mapping$INDEX
}
genes <- c('TRIP13','TAGLN2','RFC4','FABP5','ENO1')
probesets <- c('204033_at','200916_at','204023_at','202345_s_at','201231_s_at')
available <- mapping[['INDEX']][match(toupper(genes),toupper(mapping[['GENE']]))];
if(any(is.na(available))) {
warning(paste('The following genes are missing from the supplied dataset: ',paste(genes[is.na(available)],collapse=', ')))
}
inmatrix <- inmatrix[na.exclude(available),]
rownames(inmatrix) <- probesets[!is.na(available)];
inmat <- apply(inmatrix,c(1:2),as.numeric)
inmat <- inmat[,!is.na(colnames(inmat))]
eset <- ExpressionSet(assayData = inmat)
uams.5.eset(eset=eset,already.log2.transformed=already.log2.transformed)
}
uams.5.entrez <- function(inmatrix,already.log2.transformed=FALSE) {
ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
bm <- getBM(attributes=c('entrezgene', 'hgnc_symbol'),
filters = 'entrezgene',
values = rownames(inmatrix),
mart = ensembl)
names(bm) <- c('INDEX','GENE');
uams.5.gene(inmatrix,bm,already.log2.transformed=already.log2.transformed)
} |
# Rank hospitals within every state on 30-day mortality for a given outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num: rank to report per state -- "best", "worst", or a positive integer
#        (states with fewer hospitals than `num` yield NA).
#
# Returns: data.frame with character columns `hospital` and `state`,
#          one row per state, row names = state abbreviations.
rankall <- function(outcome, num = 'best') {
  # Column indices of the three supported outcomes in the raw CSV.
  outcomes <- c("heart failure" = 17, "heart attack" = 11, "pneumonia" = 23)
  # Validate *before* using outcomes[outcome] to index columns; otherwise an
  # unknown outcome yields an NA column index and an unrelated error instead
  # of the required "invalid outcome" message.
  if (!(outcome %in% names(outcomes))) {
    stop("invalid outcome")
  }
  # NOTE: hard-coded data location kept from the original script.
  hospitals <- read.csv('/Users/tobiamartens/Desktop/rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv',
                        stringsAsFactors = FALSE, na.strings = "Not Available")
  hospitals <- hospitals[, c(2, 7, outcomes[outcome])]
  names(hospitals) <- c('hospital', 'state', 'outcome')
  # Order by state, then mortality, then hospital name (alphabetical
  # tie-break); rows with missing values are dropped (na.last = NA).
  hospitals <- hospitals[order(hospitals$state, hospitals$outcome, hospitals$hospital, na.last = NA), ]
  split_data <- split(hospitals, hospitals$state)
  # Pick the hospital at the requested rank within one state's sorted rows.
  pickRank <- function(state_df, index = num) {
    if (index == 'best') {
      state_df$hospital[1]
    } else if (index == 'worst') {
      state_df$hospital[nrow(state_df)]
    } else {
      state_df$hospital[index]  # NA when index exceeds the state's count
    }
  }
  # Compute the ranked hospital once per state (the original ran the whole
  # lapply() twice, once for hospitals and once just for the names).
  hospital <- vapply(split_data, pickRank, character(1))
  data.frame(hospital = hospital, state = names(split_data),
             row.names = names(split_data), stringsAsFactors = FALSE)
}
| /rankall.R | no_license | paolomarco/coursera_rprogramming_2015_Assignment1 | R | false | false | 964 | r | rankall<-function(outcome,num='best'){
hospitals<-read.csv('/Users/tobiamartens/Desktop/rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv',stringsAsFactors = FALSE,na.strings = "Not Available")
outcomes<-c("heart failure"=17,"heart attack"=11,"pneumonia"=23)
hospitals<-hospitals[,c(2,7,outcomes[outcome])]
if (!(outcome %in% names(outcomes))){
stop("invalid outcome")
}
names(hospitals)<-c('hospital','state','outcome')
hospitals<-hospitals[order(hospitals$state,hospitals$outcome,hospitals$hospital,na.last = NA),]
split_data<-split(hospitals,hospitals$state)
pickRank<-function(list_of_Dfs,index=num){
if (index == 'best'){
list_of_Dfs$hospital[1]
} else if (index == 'worst') {
list_of_Dfs$hospital[nrow(list_of_Dfs)]
} else list_of_Dfs$hospital[index]
}
hospital<-lapply(split_data,pickRank)
state<-names(lapply(split_data,pickRank))
as.data.frame(cbind(hospital,state))
}
|
# Auto-generated fuzz-test case (AFL) for the internal C++ entry point
# CNull:::communities_individual_based_sampling_alpha.
# NOTE(review): `m` is NULL, `repetitions` is a large negative integer, and
# `in_m` holds extreme doubles -- this input probes crash/error handling,
# not a meaningful result.
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536852496e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Per-chromosome allele-frequency plots for two pools with loess smoothing.
# Output: one JPEG with 18 panels (3 plots per chromosome x 6 chromosomes),
# filled column-wise into a 9x2 grid.
jpeg("~/Desktop/comb_fredii.jpg", width=3600, height=2000, units="px")
chr=c(1,2,3,4,5,6)
max_chr_length=67000000
par(mfcol=c(9,2), mar=c(2,2,0.5,0.5), oma=c(1,2,1,1))
# NOTE: hard-coded input path kept from the original script.
# Columns used: V1 = chromosome, V2 = position, V11/V12 = pool 1/pool 2 values.
tmp <- read.table('/Users/sua/Documents/Stig_aarhus/Lab_lists/Journal_files/2011_08_20_qtl/Lotus_markers/gph5_6_r_out.txt',header=F, sep="\t")
head(tmp)
names(tmp)
for(i in seq_along(chr)) {
  tmp_chr <- tmp[which(chr[i]==tmp$V1),]
  pool1 <- cbind(tmp_chr$V2,tmp_chr$V11)
  pool1_smooth <- loess.smooth(tmp_chr$V2,tmp_chr$V11,span=1/10)
  # Panel 1: pool 1 alone (blue). `ann` must be the logical FALSE, not the
  # string "FALSE" (which only worked through implicit coercion).
  plot(tmp_chr$V2,tmp_chr$V11, type="p", main="this", xlim=c(0,max_chr_length),col="blue", las=1, ann=FALSE, cex=0.3, cex.axis=0.6)
  abline(h=0, lwd=2)
  lines(pool1_smooth, lwd=10, col="blue")
  # Panel 2: pool 2 alone (red).
  pool2_smooth <- loess.smooth(tmp_chr$V2,tmp_chr$V12,span=1/10)
  plot(tmp_chr$V2,tmp_chr$V12, type="p", main="this", xlim=c(0,max_chr_length), col="red", las=1, ann=FALSE, cex=0.3, cex.axis=0.6)
  abline(h=0, lwd=2)
  lines(pool2_smooth, lwd=10, col="red")  # was " red": invalid colour name, errors at draw time
  # Panel 3: both pools overlaid.
  plot(tmp_chr$V2,tmp_chr$V11, type="p", main="this", xlim=c(0,max_chr_length), col="blue", las=1, ann=FALSE, cex=0.3, cex.axis=0.6)
  abline(h=0, lwd=2)
  points(tmp_chr$V2,tmp_chr$V12, col="red", cex=0.3)  # cex.axis removed: not a points() parameter
  lines(pool1_smooth, lwd=10, col="blue")
  lines(pool2_smooth, lwd=10, col="red")
  #points(tmp_chr$position,tmp_chr$difference_percentage_parentA_pool1.pool2,col="forestgreen")
  #points(tmp_chr$position,tmp_chr$p_value_Fisher_s_exact_test,col="black")
}
dev.off() | /09_plotByChr_no_headers.r | no_license | raramayo/R_vikas0633 | R | false | false | 1,454 | r | jpeg("~/Desktop/comb_fredii.jpg", width=3600, height=2000, units="px")
chr=c(1,2,3,4,5,6)
max_chr_length=67000000
par(mfcol=c(9,2), mar=c(2,2,0.5,0.5), oma=c(1,2,1,1))
tmp <- read.table('/Users/sua/Documents/Stig_aarhus/Lab_lists/Journal_files/2011_08_20_qtl/Lotus_markers/gph5_6_r_out.txt',header=F, sep="\t")
head(tmp)
names(tmp)
for(i in 1:length(chr)) {
tmp_chr <- tmp[which(chr[i]==tmp$V1),]
pool1 <- cbind(tmp_chr$V2,tmp_chr$V11)
pool1_smooth <- loess.smooth(tmp_chr$V2,tmp_chr$V11,span=1/10)
plot(tmp_chr$V2,tmp_chr$V11, type="p", main="this", xlim=c(0,max_chr_length),col="blue", las=1, ann="FALSE", cex=0.3, cex.axis=0.6)
abline(h=0, lwd=2)
lines(pool1_smooth, lwd=10, col="blue")
pool2_smooth <- loess.smooth(tmp_chr$V2,tmp_chr$V12,span=1/10)
plot(tmp_chr$V2,tmp_chr$V12, type="p", main="this", xlim=c(0,max_chr_length), col="red", las=1, ann="FALSE", cex=0.3, cex.axis=0.6)
abline(h=0, lwd=2)
lines(pool2_smooth, lwd=10, col=" red")
plot(tmp_chr$V2,tmp_chr$V11, type="p", main="this", xlim=c(0,max_chr_length), col="blue", las=1, ann="FALSE", cex=0.3, cex.axis=0.6)
abline(h=0, lwd=2)
points(tmp_chr$V2,tmp_chr$V12, col="red", cex=0.3, cex.axis=0.6)
lines(pool1_smooth, lwd=10, col="blue")
lines(pool2_smooth, lwd=10, col=" red")
#points(tmp_chr$position,tmp_chr$difference_percentage_parentA_pool1.pool2,col="forestgreen")
#points(tmp_chr$position,tmp_chr$p_value_Fisher_s_exact_test,col="black")
}
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{sel_proxy}
\alias{sel_proxy}
\title{Set Selenium proxy configuration}
\usage{
sel_proxy(proxy, eCaps = NULL, browser = c("firefox", "chrome"),
bmpIPaddress = NULL)
}
\arguments{
\item{proxy}{An object of class "proxy". A proxy object see
\code{\link{proxy}}.}
\item{eCaps}{A list of existing extra capabilities.}
\item{browser}{The browser type to set the config for. Can use firefox
or chrome. The default is firefox. If left NULL firefox default is
used.}
\item{bmpIPaddress}{Stipulate an alternative BMP ip address. The
Selenium server may for example be running in a docker container
as may the BMP server. Defaults to NULL and the ip address implied
by proxy is used}
}
\value{
Returns an extra capabilities list that can be passed to
Selenium
}
\description{
Sets the proxy configuration for use with Selenium
}
\examples{
\dontrun{
prxy <- proxy(bmpPort = 9090L, port = 39500L)
eCap <- sel_proxy(prxy, browser = "chrome")
}
}
| /man/sel_proxy.Rd | no_license | johndharrison/bmproxy | R | false | true | 1,040 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{sel_proxy}
\alias{sel_proxy}
\title{Set Selenium proxy configuration}
\usage{
sel_proxy(proxy, eCaps = NULL, browser = c("firefox", "chrome"),
bmpIPaddress = NULL)
}
\arguments{
\item{proxy}{An object of class "proxy". A proxy object see
\code{\link{proxy}}.}
\item{eCaps}{A list of existing extra capabilities.}
\item{browser}{The browser type to set the config for. Can use firefox
or chrome. The default is firefox. If left NULL firefox default is
used.}
\item{bmpIPaddress}{Stipulate an alternative BMP ip address. The
Selenium server may for example be running in a docker container
as may the BMP server. Defaults to NULL and the ip address implied
by proxy is used}
}
\value{
Returns an extra capabilities list that can be passed to
Selenium
}
\description{
Sets the proxy configuration for use with Selenium
}
\examples{
\dontrun{
prxy <- proxy(bmpPort = 9090L, port = 39500L)
eCap <- sel_proxy(prxy, browser = "chrome")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{targetPools.getHealth}
\alias{targetPools.getHealth}
\title{Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.}
\usage{
targetPools.getHealth(InstanceReference, project, region, targetPool)
}
\arguments{
\item{InstanceReference}{The \link{InstanceReference} object to pass to this method}
\item{project}{Project ID for this request}
\item{region}{Name of the region scoping this request}
\item{targetPool}{Name of the TargetPool resource to which the queried instance belongs}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/compute.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
Other InstanceReference functions: \code{\link{InstanceReference}}
}
| /googlecomputev1.auto/man/targetPools.getHealth.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{targetPools.getHealth}
\alias{targetPools.getHealth}
\title{Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.}
\usage{
targetPools.getHealth(InstanceReference, project, region, targetPool)
}
\arguments{
\item{InstanceReference}{The \link{InstanceReference} object to pass to this method}
\item{project}{Project ID for this request}
\item{region}{Name of the region scoping this request}
\item{targetPool}{Name of the TargetPool resource to which the queried instance belongs}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/compute.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
Other InstanceReference functions: \code{\link{InstanceReference}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise_latent_trawl.R
\name{PairwiseOneZero}
\alias{PairwiseOneZero}
\title{Computes a term in the latent trawl pairwise likelihood for a pair
\code{(x,0)} with positive \code{x}, under an exponential trawl function.}
\usage{
PairwiseOneZero(x1, alpha, beta, kappa, B1, B2, B3, transformation = F,
n.moments = 4)
}
\arguments{
\item{x1}{Positive value corresponding to \code{t1}.}
\item{alpha}{Shape parameter. Should be positive.}
\item{beta}{Latent Gamma scale parameter. Should be positive.}
\item{kappa}{Exceedance probability parameter. Should be positive.}
\item{B1}{Difference area between \code{t1} and \code{t2} (in this order).}
\item{B2}{intersection area between \code{t1} and \code{t2} (in this order).}
\item{B3}{Difference area between \code{t2} and \code{t1} (in this order).}
\item{transformation}{Boolean to use the Marginal Transform (MT) method.}
\item{n.moments}{Number of moments achieved by transformed GPD marginals, if used.}
}
\value{
Second term in latent trawl pairwise likelihood with \code{(x,0)} where \code{x} positive
with exponential trawl function.
}
\description{
Computes term in latent trawl pairwise likelihood with \code{(x,0)} where \code{x} positive
with exponential trawl function.
}
\examples{
PairwiseOneZero(x1=0.5, alpha=0.3, beta=2, kappa=3, B1=0.3, B2=0.7, B3=0.3)
}
| /man/PairwiseOneZero.Rd | permissive | valcourgeau/ev-trawl | R | false | true | 1,391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise_latent_trawl.R
\name{PairwiseOneZero}
\alias{PairwiseOneZero}
\title{Computes term in latent trawl pairwise likelihood with \code{(x,0)} where \code{x} positive
with exponential trawl function.}
\usage{
PairwiseOneZero(x1, alpha, beta, kappa, B1, B2, B3, transformation = F,
n.moments = 4)
}
\arguments{
\item{x1}{Positive value corresponding to \code{t1}.}
\item{alpha}{Shape parameter. Should be positive.}
\item{beta}{Latent Gamma scale parameter. Should be positive.}
\item{kappa}{Exceedance probability parameter. Should be positive.}
\item{B1}{Difference area between \code{t1} and \code{t2} (in this order).}
\item{B2}{intersection area between \code{t1} and \code{t2} (in this order).}
\item{B3}{Difference area between \code{t2} and \code{t1} (in this order).}
\item{transformation}{Boolean to use the Marginal Transform (MT) method.}
\item{n.moments}{Number of moments achieved by transformed GPD marginals, if used.}
}
\value{
Second term in latent trawl pairwise likelihood with \code{(x,0)} where \code{x} positive
with exponential trawl function.
}
\description{
Computes term in latent trawl pairwise likelihood with \code{(x,0)} where \code{x} positive
with exponential trawl function.
}
\examples{
PairwiseOneZero(x1=0.5, alpha=0.3, beta=2, kappa=3, B1=0.3, B2=0.7, B3=0.3)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/benchmarkTest.R
\name{friedmanTestBMR}
\alias{friedmanTestBMR}
\title{Perform overall Friedman test for a BenchmarkResult.}
\usage{
friedmanTestBMR(bmr, measure = NULL, aggregation = "default")
}
\arguments{
\item{bmr}{[\code{\link{BenchmarkResult}}]\cr
Benchmark result.}
\item{measure}{[\code{\link{Measure}}]\cr
Performance measure.
Default is the default measure for the task, see here \code{\link{getDefaultMeasure}}.}
\item{aggregation}{[\code{character(1)}] \cr
\dQuote{mean} or \dQuote{default}. See \code{\link{getBMRAggrPerformances}}
for details on \dQuote{default}.}
}
\value{
A list of class \code{htest}. \cr
See \code{\link{friedman.test}} for details.\cr
}
\description{
Performs a \code{\link{friedman.test}} for a selected measure. \cr
The null hypothesis is that, apart from an effect of the different
\code{tasks}, the location parameter (aggregated performance measure)
is the same for each \code{learner}.
}
\examples{
lrns = list(makeLearner("classif.nnet"), makeLearner("classif.rpart"))
tasks = list(iris.task, sonar.task)
rdesc = makeResampleDesc("CV", iters = 2L)
res = benchmark(lrns, tasks, rdesc, acc)
friedmanTestBMR(res)
}
\seealso{
Other benchmark: \code{\link{BenchmarkResult}};
\code{\link{benchmark}};
\code{\link{convertBMRToRankMatrix}};
\code{\link{friedmanPostHocTestBMR}};
\code{\link{generateBenchmarkSummaryData}};
\code{\link{generateCritDifferencesData}};
\code{\link{generateRankMatrixAsBarData}};
\code{\link{getBMRAggrPerformances}};
\code{\link{getBMRFeatSelResults}};
\code{\link{getBMRFilteredFeatures}};
\code{\link{getBMRLearnerIds}};
\code{\link{getBMRLearners}};
\code{\link{getBMRMeasureIds}};
\code{\link{getBMRMeasures}};
\code{\link{getBMRPerformances}};
\code{\link{getBMRPredictions}};
\code{\link{getBMRTaskIds}};
\code{\link{getBMRTuneResults}};
\code{\link{plotBenchmarkResult}};
\code{\link{plotBenchmarkSummary}};
\code{\link{plotCritDifferences}};
\code{\link{plotRankMatrixAsBar}}
}
| /man/friedmanTestBMR.Rd | no_license | gragusa/mlr | R | false | false | 2,083 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/benchmarkTest.R
\name{friedmanTestBMR}
\alias{friedmanTestBMR}
\title{Perform overall Friedman test for a BenchmarkResult.}
\usage{
friedmanTestBMR(bmr, measure = NULL, aggregation = "default")
}
\arguments{
\item{bmr}{[\code{\link{BenchmarkResult}}]\cr
Benchmark result.}
\item{measure}{[\code{\link{Measure}}]\cr
Performance measure.
Default is the default measure for the task, see here \code{\link{getDefaultMeasure}}.}
\item{aggregation}{[\code{character(1)}] \cr
\dQuote{mean} or \dQuote{default}. See \code{\link{getBMRAggrPerformances}}
for details on \dQuote{default}.}
}
\value{
A list of class \code{htest}. \cr
See \code{\link{friedman.test}} for details.\cr
}
\description{
Performs a \code{\link{friedman.test}} for a selected measure. \cr
The null hypothesis is that apart from an effect of the different
[\code{tasks}], the location parameter (aggregated performance-measure)
is the same for each \code{learner}.
}
\examples{
lrns = list(makeLearner("classif.nnet"), makeLearner("classif.rpart"))
tasks = list(iris.task, sonar.task)
rdesc = makeResampleDesc("CV", iters = 2L)
res = benchmark(lrns, tasks, rdesc, acc)
friedmanTestBMR(res)
}
\seealso{
Other benchmark: \code{\link{BenchmarkResult}};
\code{\link{benchmark}};
\code{\link{convertBMRToRankMatrix}};
\code{\link{friedmanPostHocTestBMR}};
\code{\link{generateBenchmarkSummaryData}};
\code{\link{generateCritDifferencesData}};
\code{\link{generateRankMatrixAsBarData}};
\code{\link{getBMRAggrPerformances}};
\code{\link{getBMRFeatSelResults}};
\code{\link{getBMRFilteredFeatures}};
\code{\link{getBMRLearnerIds}};
\code{\link{getBMRLearners}};
\code{\link{getBMRMeasureIds}};
\code{\link{getBMRMeasures}};
\code{\link{getBMRPerformances}};
\code{\link{getBMRPredictions}};
\code{\link{getBMRTaskIds}};
\code{\link{getBMRTuneResults}};
\code{\link{plotBenchmarkResult}};
\code{\link{plotBenchmarkSummary}};
\code{\link{plotCritDifferences}};
\code{\link{plotRankMatrixAsBar}}
}
|
# Register the linear_reg model type with parsnip and declare its only mode.
set_new_model("linear_reg")
set_model_mode("linear_reg", "regression")
# ------------------------------------------------------------------------------
# lm engine: ordinary least squares via stats::lm().
set_model_engine("linear_reg", "regression", "lm")
set_dependency("linear_reg", "lm", "stats")
# Fit uses the formula interface; formula/data/weights are "protected" so
# users cannot override them through engine arguments.
set_fit(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  value = list(
    interface = "formula",
    protect = c("formula", "data", "weights"),
    func = c(pkg = "stats", fun = "lm"),
    defaults = list()
  )
)
# Predictor encoding: traditional indicator (dummy) variables; parsnip
# computes an intercept and removes it before handing data to the engine.
set_encoding(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
# Numeric (point) predictions: predict(<lm>, type = "response").
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        type = "response"
      )
  )
)
# Confidence intervals: predict(interval = "confidence") returns fit/lwr/upr;
# the post hook drops the point estimate and renames the bounds to parsnip's
# .pred_lower/.pred_upper convention.
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "conf_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      tibble::as_tibble(results) %>%
        dplyr::select(-fit) %>%
        setNames(c(".pred_lower", ".pred_upper"))
    },
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        interval = "confidence",
        level = expr(level),
        type = "response"
      )
  )
)
# Prediction intervals: identical shape to conf_int with interval = "prediction".
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "pred_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      tibble::as_tibble(results) %>%
        dplyr::select(-fit) %>%
        setNames(c(".pred_lower", ".pred_upper"))
    },
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        interval = "prediction",
        level = expr(level),
        type = "response"
      )
  )
)
# Raw predictions: pass through whatever predict() returns, unmodified.
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# glmnet engine: penalized (ridge / lasso / elastic-net) linear regression.
set_model_engine("linear_reg", "regression", "glmnet")
set_dependency("linear_reg", "glmnet", "glmnet")
# glmnet uses the matrix (x/y) interface; the gaussian family is fixed for
# the regression mode.
set_fit(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  value = list(
    interface = "matrix",
    protect = c("x", "y", "weights"),
    func = c(pkg = "glmnet", fun = "glmnet"),
    defaults = list(family = "gaussian")
  )
)
set_encoding(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = TRUE
  )
)
# penalty maps to glmnet's lambda; has_submodel = TRUE because one glmnet
# fit can serve predictions for many lambda values.
set_model_arg(
  model = "linear_reg",
  eng = "glmnet",
  parsnip = "penalty",
  original = "lambda",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = TRUE
)
# mixture maps to glmnet's alpha.
set_model_arg(
  model = "linear_reg",
  eng = "glmnet",
  parsnip = "mixture",
  original = "alpha",
  func = list(pkg = "dials", fun = "mixture"),
  has_submodel = FALSE
)
# Numeric predictions: subset new_data to the columns seen at fit time
# (rownames of the coefficient matrix), predict at the stored penalty, then
# tidy with .organize_glmnet_pred.
set_pred(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = .organize_glmnet_pred,
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newx = expr(as.matrix(new_data[, rownames(object$fit$beta), drop = FALSE])),
        type = "response",
        s = expr(object$spec$args$penalty)
      )
  )
)
# Raw predictions: hand the full matrix to predict() unmodified.
set_pred(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args =
      list(object = expr(object$fit),
           newx = expr(as.matrix(new_data)))
  )
)
# ------------------------------------------------------------------------------
# stan engine: Bayesian linear regression via rstanarm::stan_glm.
set_model_engine("linear_reg", "regression", "stan")
set_dependency("linear_reg", "stan", "rstanarm")
# refresh = 0 silences the sampler's per-iteration progress output.
set_fit(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  value = list(
    interface = "formula",
    protect = c("formula", "data", "weights"),
    func = c(pkg = "rstanarm", fun = "stan_glm"),
    defaults = list(family = expr(stats::gaussian), refresh = 0)
  )
)
set_encoding(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# Credible intervals: posterior draws are summarized into lower/upper bounds by
# convert_stan_interval(); the requested level is read back from the spec's
# stored prediction options. Optionally adds per-observation posterior sd.
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "conf_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      res <-
        tibble(
          .pred_lower =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$conf_int$extras$level
            ),
          .pred_upper =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$conf_int$extras$level,
              lower = FALSE
            ),
        )
      if (object$spec$method$pred$conf_int$extras$std_error)
        res$.std_error <- apply(results, 2, sd, na.rm = TRUE)
      res
    },
    func = c(pkg = "parsnip", fun = "stan_conf_int"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data)
      )
  )
)
# Posterior predictive intervals: same summarization applied to
# rstanarm::posterior_predict draws; the seed expression is evaluated at
# prediction time so each call draws with a fresh random seed.
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "pred_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      res <-
        tibble(
          .pred_lower =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$pred_int$extras$level
            ),
          .pred_upper =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$pred_int$extras$level,
              lower = FALSE
            ),
          )
      if (object$spec$method$pred$pred_int$extras$std_error)
        res$.std_error <- apply(results, 2, sd, na.rm = TRUE)
      res
    },
    func = c(pkg = "rstanarm", fun = "posterior_predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        seed = expr(sample.int(10^5, 1))
      )
  )
)
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# spark engine: distributed linear regression via sparklyr.
set_model_engine("linear_reg", "regression", "spark")
set_dependency("linear_reg", "spark", "sparklyr")
# sparklyr takes the data as `x` and the formula separately; the `data` element
# remaps parsnip's formula/data slots onto those argument names.
set_fit(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  value = list(
    interface = "formula",
    data = c(formula = "formula", data = "x"),
    protect = c("x", "formula", "weight_col"),
    func = c(pkg = "sparklyr", fun = "ml_linear_regression"),
    defaults = list()
  )
)
set_encoding(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
# Spark ML's elastic-net parameterization: reg_param = penalty,
# elastic_net_param = mixture.
set_model_arg(
  model = "linear_reg",
  eng = "spark",
  parsnip = "penalty",
  original = "reg_param",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = FALSE
)
set_model_arg(
  model = "linear_reg",
  eng = "spark",
  parsnip = "mixture",
  original = "elastic_net_param",
  func = list(pkg = "dials", fun = "mixture"),
  has_submodel = FALSE
)
# ml_predict() returns the input table plus a `prediction` column; keep only
# that column, renamed to `pred`.
set_pred(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = function(results, object) {
      results <- dplyr::rename(results, pred = prediction)
      results <- dplyr::select(results, pred)
      results
    },
    func = c(pkg = "sparklyr", fun = "ml_predict"),
    args = list(x = expr(object$fit), dataset = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# keras engine: a single-unit, linear-activation MLP, i.e. a (penalized)
# linear regression fitted by gradient descent.
set_model_engine("linear_reg", "regression", "keras")
set_dependency("linear_reg", "keras", "keras")
set_dependency("linear_reg", "keras", "magrittr")
set_fit(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  value = list(
    interface = "matrix",
    protect = c("x", "y"),
    func = c(pkg = "parsnip", fun = "keras_mlp"),
    defaults = list(hidden_units = 1, act = "linear")
  )
)
set_encoding(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
set_model_arg(
  model = "linear_reg",
  eng = "keras",
  parsnip = "penalty",
  original = "penalty",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = FALSE
)
# Consistency fix: every other engine in this file quotes prediction-call
# arguments with rlang::expr(); this block used base quote(). No unquoting is
# involved, so the constructed calls — and therefore behavior — are identical.
set_pred(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = maybe_multivariate,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), x = expr(as.matrix(new_data)))
  )
)
| /R/linear_reg_data.R | permissive | mgaldame/parsnip | R | false | false | 9,622 | r | set_new_model("linear_reg")
set_model_mode("linear_reg", "regression")
# ------------------------------------------------------------------------------
# lm engine: ordinary least squares via stats::lm.
set_model_engine("linear_reg", "regression", "lm")
set_dependency("linear_reg", "lm", "stats")
set_fit(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  value = list(
    interface = "formula",
    protect = c("formula", "data", "weights"),
    func = c(pkg = "stats", fun = "lm"),
    defaults = list()
  )
)
set_encoding(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
# Point predictions: plain predict(type = "response") on the lm fit.
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        type = "response"
      )
  )
)
# Confidence intervals: predict(interval = "confidence") returns fit/lwr/upr;
# the post hook drops the point estimate and renames the bounds to parsnip's
# standard .pred_lower/.pred_upper columns.
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "conf_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      tibble::as_tibble(results) %>%
        dplyr::select(-fit) %>%
        setNames(c(".pred_lower", ".pred_upper"))
    },
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        interval = "confidence",
        level = expr(level),
        type = "response"
      )
  )
)
# Prediction intervals: same shape as conf_int but with interval = "prediction".
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "pred_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      tibble::as_tibble(results) %>%
        dplyr::select(-fit) %>%
        setNames(c(".pred_lower", ".pred_upper"))
    },
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        interval = "prediction",
        level = expr(level),
        type = "response"
      )
  )
)
set_pred(
  model = "linear_reg",
  eng = "lm",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# glmnet engine: regularized (ridge/lasso/elastic-net) linear regression.
set_model_engine("linear_reg", "regression", "glmnet")
set_dependency("linear_reg", "glmnet", "glmnet")
# glmnet uses an x/y matrix interface; family is fixed to gaussian for regression.
set_fit(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  value = list(
    interface = "matrix",
    protect = c("x", "y", "weights"),
    func = c(pkg = "glmnet", fun = "glmnet"),
    defaults = list(family = "gaussian")
  )
)
# glmnet is the only engine in this file that accepts sparse predictor matrices.
set_encoding(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = TRUE
  )
)
# Map parsnip's tuning args to glmnet's names; `penalty` has sub-models because
# a single glmnet fit yields predictions for the whole lambda path.
set_model_arg(
  model = "linear_reg",
  eng = "glmnet",
  parsnip = "penalty",
  original = "lambda",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = TRUE
)
set_model_arg(
  model = "linear_reg",
  eng = "glmnet",
  parsnip = "mixture",
  original = "alpha",
  func = list(pkg = "dials", fun = "mixture"),
  has_submodel = FALSE
)
# Numeric predictions: columns of new_data are reordered to match the fitted
# coefficient matrix (rownames of beta) before calling predict().
set_pred(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = .organize_glmnet_pred,
    func = c(fun = "predict"),
    args =
      list(
        object = expr(object$fit),
        newx = expr(as.matrix(new_data[, rownames(object$fit$beta), drop = FALSE])),
        type = "response",
        s = expr(object$spec$args$penalty)
      )
  )
)
set_pred(
  model = "linear_reg",
  eng = "glmnet",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args =
      list(object = expr(object$fit),
           newx = expr(as.matrix(new_data)))
  )
)
# ------------------------------------------------------------------------------
# stan engine: Bayesian linear regression via rstanarm::stan_glm.
set_model_engine("linear_reg", "regression", "stan")
set_dependency("linear_reg", "stan", "rstanarm")
# refresh = 0 silences the sampler's per-iteration progress output.
set_fit(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  value = list(
    interface = "formula",
    protect = c("formula", "data", "weights"),
    func = c(pkg = "rstanarm", fun = "stan_glm"),
    defaults = list(family = expr(stats::gaussian), refresh = 0)
  )
)
set_encoding(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# Credible intervals: posterior draws are summarized into lower/upper bounds by
# convert_stan_interval(); the requested level is read back from the spec's
# stored prediction options. Optionally adds per-observation posterior sd.
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "conf_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      res <-
        tibble(
          .pred_lower =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$conf_int$extras$level
            ),
          .pred_upper =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$conf_int$extras$level,
              lower = FALSE
            ),
        )
      if (object$spec$method$pred$conf_int$extras$std_error)
        res$.std_error <- apply(results, 2, sd, na.rm = TRUE)
      res
    },
    func = c(pkg = "parsnip", fun = "stan_conf_int"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data)
      )
  )
)
# Posterior predictive intervals: same summarization applied to
# rstanarm::posterior_predict draws; the seed expression is evaluated at
# prediction time so each call draws with a fresh random seed.
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "pred_int",
  value = list(
    pre = NULL,
    post = function(results, object) {
      res <-
        tibble(
          .pred_lower =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$pred_int$extras$level
            ),
          .pred_upper =
            convert_stan_interval(
              results,
              level = object$spec$method$pred$pred_int$extras$level,
              lower = FALSE
            ),
        )
      if (object$spec$method$pred$pred_int$extras$std_error)
        res$.std_error <- apply(results, 2, sd, na.rm = TRUE)
      res
    },
    func = c(pkg = "rstanarm", fun = "posterior_predict"),
    args =
      list(
        object = expr(object$fit),
        newdata = expr(new_data),
        seed = expr(sample.int(10^5, 1))
      )
  )
)
set_pred(
  model = "linear_reg",
  eng = "stan",
  mode = "regression",
  type = "raw",
  value = list(
    pre = NULL,
    post = NULL,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), newdata = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# spark engine: distributed linear regression via sparklyr.
set_model_engine("linear_reg", "regression", "spark")
set_dependency("linear_reg", "spark", "sparklyr")
# sparklyr takes the data as `x` and the formula separately; the `data` element
# remaps parsnip's formula/data slots onto those argument names.
set_fit(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  value = list(
    interface = "formula",
    data = c(formula = "formula", data = "x"),
    protect = c("x", "formula", "weight_col"),
    func = c(pkg = "sparklyr", fun = "ml_linear_regression"),
    defaults = list()
  )
)
set_encoding(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
# Spark ML's elastic-net parameterization: reg_param = penalty,
# elastic_net_param = mixture.
set_model_arg(
  model = "linear_reg",
  eng = "spark",
  parsnip = "penalty",
  original = "reg_param",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = FALSE
)
set_model_arg(
  model = "linear_reg",
  eng = "spark",
  parsnip = "mixture",
  original = "elastic_net_param",
  func = list(pkg = "dials", fun = "mixture"),
  has_submodel = FALSE
)
# ml_predict() returns the input table plus a `prediction` column; keep only
# that column, renamed to `pred`.
set_pred(
  model = "linear_reg",
  eng = "spark",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = function(results, object) {
      results <- dplyr::rename(results, pred = prediction)
      results <- dplyr::select(results, pred)
      results
    },
    func = c(pkg = "sparklyr", fun = "ml_predict"),
    args = list(x = expr(object$fit), dataset = expr(new_data))
  )
)
# ------------------------------------------------------------------------------
# keras engine: a single-unit, linear-activation MLP, i.e. a (penalized)
# linear regression fitted by gradient descent.
set_model_engine("linear_reg", "regression", "keras")
set_dependency("linear_reg", "keras", "keras")
set_dependency("linear_reg", "keras", "magrittr")
set_fit(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  value = list(
    interface = "matrix",
    protect = c("x", "y"),
    func = c(pkg = "parsnip", fun = "keras_mlp"),
    defaults = list(hidden_units = 1, act = "linear")
  )
)
set_encoding(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  options = list(
    predictor_indicators = "traditional",
    compute_intercept = TRUE,
    remove_intercept = TRUE,
    allow_sparse_x = FALSE
  )
)
set_model_arg(
  model = "linear_reg",
  eng = "keras",
  parsnip = "penalty",
  original = "penalty",
  func = list(pkg = "dials", fun = "penalty"),
  has_submodel = FALSE
)
# Consistency fix: every other engine in this file quotes prediction-call
# arguments with rlang::expr(); this block used base quote(). No unquoting is
# involved, so the constructed calls — and therefore behavior — are identical.
set_pred(
  model = "linear_reg",
  eng = "keras",
  mode = "regression",
  type = "numeric",
  value = list(
    pre = NULL,
    post = maybe_multivariate,
    func = c(fun = "predict"),
    args = list(object = expr(object$fit), x = expr(as.matrix(new_data)))
  )
)
|
# Build a special "matrix" wrapper that can cache its inverse.
#
# Args:
#   x: an invertible matrix.
# Returns:
#   A list of accessor closures sharing the same environment:
#     get()/set(y)       read / replace the stored matrix (set() clears the cache)
#     getinv()/setinv(i) read / store the cached inverse
# Inspect the backing environment with as.list(environment(<result>$get)).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinv() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(get = get, set = set, getinv = getinv, setinv = setinv)
}
# Return the inverse of a makeCacheMatrix() object, computing it at most once.
# A repeated call on the same object emits a message and returns the cache.
#
# Args:
#   x:   object created by makeCacheMatrix()
#   ...: extra arguments forwarded to solve()
# Returns: the inverse of the matrix held in x.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("inverse is cached")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$setinv(computed)
  computed
}
#Example
#Use a non-singular matrix so solve() succeeds
m <- matrix(c(1, 2, 3, 0, 5, 6, 7, 8, 107), nrow = 3, ncol = 3, byrow = TRUE)
m2 <- makeCacheMatrix(m)
cacheSolve(m2)
#[,1] [,2] [,3]
#[1,] 1.04506438 -0.40772532 -0.006437768
#[2,] 0.09012876 0.18454936 -0.012875536
#[3,] -0.07510730 0.01287554 0.010729614
cacheSolve(m2)
# prints "inverse is cached" (the line was pasted console output without a
# leading '#', which made the file fail to source — now commented out)
#[,1] [,2] [,3]
#[1,] 1.04506438 -0.40772532 -0.006437768
#[2,] 0.09012876 0.18454936 -0.012875536
#[3,] -0.07510730 0.01287554 0.010729614
#Creates a matrix that can cache it's inverse
#Args:
#x: A matrix
#Returns:
#A matrix with functions to get/set value & get/set inverse
makeCacheMatrix <- function(x = matrix()) {
#cached inverse of matrix
inv <- NULL
#getter/setter for matrix
get <- function() x
set <- function(y) {
x <<- y
inv <<- NULL
}
#getter/setter for matrix inverse
getinv <- function() inv
setinv <- function(inverse) inv <<- inverse
#return list of functions for matrix
list(get=get, set=set, getinv=getinv, setinv=setinv)
}
# Return the inverse of a makeCacheMatrix() object, computing it at most once.
# A repeated call on the same object emits a message and returns the cache.
#
# Args:
#   x:   object created by makeCacheMatrix()
#   ...: extra arguments forwarded to solve()
# Returns: the inverse of the matrix held in x.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("inverse is cached")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$setinv(computed)
  computed
}
#Example
#Use a non-singular matrix so solve() succeeds
m <- matrix(c(1, 2, 3, 0, 5, 6, 7, 8, 107), nrow = 3, ncol = 3, byrow = TRUE)
m2 <- makeCacheMatrix(m)
cacheSolve(m2)
#[,1] [,2] [,3]
#[1,] 1.04506438 -0.40772532 -0.006437768
#[2,] 0.09012876 0.18454936 -0.012875536
#[3,] -0.07510730 0.01287554 0.010729614
cacheSolve(m2)
# prints "inverse is cached" (the line was pasted console output without a
# leading '#', which made the file fail to source — now commented out)
#[,1] [,2] [,3]
#[1,] 1.04506438 -0.40772532 -0.006437768
#[2,] 0.09012876 0.18454936 -0.012875536
#[3,] -0.07510730 0.01287554 0.010729614
\name{tornado-package}
\alias{tornado-package}
\alias{tornado}
\docType{package}
\title{
Tornado
}
\description{
Differential expression analysis of per-nucleotide coverage tables.
}
\details{
\tabular{ll}{
Package: \tab tornado\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-08-22\cr
License: \tab What license is it under?\cr
}
Creates an SQLite database of per-nucleotide coverage files (\code{makeDb}), fits a linear model to each nucleotide to determine differential expression (\code{getLimmaInput}, \code{getTstats}), fits a Hidden Markov Model that uses the moderated t statistics from those linear models as state emissions to obtain a list of differentially expressed regions (\code{getRegions}), and connects the regions found with known annotation (\code{getAnnotation}, \code{plotRegion}, \code{plotExon}, \code{plotGene}).
}
\author{
Alyssa Frazee <afrazee@jhsph.edu>
}
\references{
Paper coming soon.
}
\keyword{ package }
| /Rpackage-21Aug/tornado/man/tornado-package.Rd | no_license | Libardo1/tornado | R | false | false | 920 | rd | \name{tornado-package}
\alias{tornado-package}
\alias{tornado}
\docType{package}
\title{
Tornado
}
\description{
Differential expression analysis of per-nucleotide coverage tables.
}
\details{
\tabular{ll}{
Package: \tab tornado\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-08-22\cr
License: \tab What license is it under?\cr
}
Creates an SQLite database of per-nucleotide coverage files (\code{makeDb}), fits a linear model to each nucleotide to determine differential expression (\code{getLimmaInput}, \code{getTstats}), fits a Hidden Markov Model that uses the moderated t statistics from those linear models as state emissions to obtain a list of differentially expressed regions (\code{getRegions}), and connects the regions found with known annotation (\code{getAnnotation}, \code{plotRegion}, \code{plotExon}, \code{plotGene}).
}
\author{
Alyssa Frazee <afrazee@jhsph.edu>
}
\references{
Paper coming soon.
}
\keyword{ package }
|
#########################################################################################
# Drop columns whose contents are identical to another column, keeping one
# representative per group of duplicates.
#
# Args:
#   df_func:              data frame to deduplicate.
#   return_list_col_supr: when TRUE, also return which removed column matched
#                         which kept column (via column_comparator()).
# Returns: either the deduplicated data frame, or a list(df =, colonne_suprime =).
distinc_col <- function(df_func,return_list_col_supr = TRUE){
  # Transpose so columns become rows, then keep rows with distinct content.
  column_unique_df <- df_func %>% t.df() %>% distinct_at(vars(-key),.keep_all = TRUE)
  df_col_unique <- df_func %>% select(all_of(column_unique_df$key))
  if (return_list_col_supr) {
    # Columns that were dropped, and which kept column each one duplicates.
    col_supr <- colnames(df_func)[colnames(df_func) %notin% column_unique_df$key]
    df_col_supr <- df_func %>% select(all_of(col_supr))
    liste_col_supr <- column_comparator(df_col_supr,df_col_unique)
    res <- list(df = df_col_unique, colonne_suprime = liste_col_supr)
  } else {res <- df_col_unique}
  return(res)
}
#########################################################################################
# List groups of identical columns: for each column of df_compar_func, find the
# columns of col_a_compar with exactly the same content (NA-aware) and return
# character vectors of the form c(kept_column, matching_removed_columns...).
# NOTE: a kept column equal to two removed columns therefore yields one vector
# containing all of them; groups with no match are dropped.
column_comparator <- function(col_a_compar,df_compar_func){ # search identical columns and group them as: removed column name(s) matched to the remaining column name
  liste_colum_identique <- lapply(seq_along(df_compar_func),function(x) {
    col_a_compar_temp <- df_compar_func[,x] %>% unlist( use.names = FALSE)
    # Elementwise equality, treating NA == NA as a match.
    column_compared <- (col_a_compar_temp == col_a_compar ) |
      (is.na(col_a_compar_temp) & is.na(col_a_compar ))
    matching_col <- c(names(df_compar_func[,x]),names(which(apply(column_compared,2,all))))
  })
  # Keep only groups that actually found at least one duplicate.
  liste_colum_identique <- lapply(liste_colum_identique, function(x) x[length(x) > 1]) %>% compact()
  return(liste_colum_identique)
}
##########################################################################################
# Count missing values per variable, with individuals grouped by `group_col`.
# Returns a wide table: one column per variable in `colonnes`, one row per
# distinct NA count, with the number of groups showing that count.
compte_na_par_var_par_grp <- function(df,group_col,colonnes){
  df_NA_var_fonc <- df %>% select(all_of(group_col),all_of(colonnes)) %>% setDT
  nb_val_manq_par_var <- df_NA_var_fonc %>%
    # data.table: per-group NA count for every selected column.
    .[, lapply(.SD, function(x) sum(is.na(x))), group_col] %>%
    select(-all_of(group_col)) %>%
    gather(name,presence_na) %>% # reshape dataset to long form
    count(name, presence_na) %>% # count combinations of variable x NA-count
    pivot_wider(names_from = name,
                values_from = n,
                values_fill = list(n = 0))
  return(nb_val_manq_par_var)
}
##########################################################################################
# Helper for hard-coded choices between pairs/groups of collinear variables:
# checks that exactly one element was manually chosen from each candidate group,
# then returns every *non-chosen* candidate (i.e. the variables to drop).
#
# Args:
#   fuc_list_manu: list of candidate groups (character vectors).
#   choose_elem:   one manually chosen element per group, same length/order.
# Raises: an error when lengths differ or a chosen element is not in its group.
select_manuel_verif <- function(fuc_list_manu,choose_elem){
  # This function only performs verification/bookkeeping.
  if(length(fuc_list_manu) != length(choose_elem)) {
    print("number of element choose differ with list length")
    if(length(fuc_list_manu) > length(choose_elem)) {
      stop("not enougth manual choosen elements")
    } else { stop("to many manual choosen elements") }
  } else if(length(fuc_list_manu) == length(choose_elem)){
    # Verify each chosen element really belongs to its candidate group.
    bool_in_list <- sapply(seq_along(choose_elem), function(x) choose_elem[x] %in% fuc_list_manu[[x]])
    if (all(bool_in_list)) {
      # Select, from every group, all elements other than the chosen one.
      res <- sapply(seq_along(choose_elem), function(x) fuc_list_manu[[x]][fuc_list_manu[[x]] %notin% choose_elem[x]]) %>%
        compact %>% unlist
    } else {
      stop(paste0("choose elements ",choose_elem[!bool_in_list], " not in list"))
    }
  }
  return(res)
}
##########################################################################################
# Transpose a data frame using tidyr pivots.
#
# Args:
#   df:    data frame to transpose.
#   pivot: name of the column to use as the new header row; when NULL a
#          synthetic "row_id" column (col_1, col_2, ...) is created first.
# Returns: the transposed data frame, with original column names in `key`.
t.df <- function(df,pivot=NULL){
  if (is.null(pivot)){
    pivot <- "row_id"
    df <- df %>% mutate(row_id=paste0("col_",1:nrow(df) ))
  }
  res <- df %>% pivot_longer(cols = -!!pivot,"key","value") %>%
    pivot_wider(names_from = !!pivot,values_from = value)
  return(res)
}
##########################################################################################
# Negation of %in%: TRUE for each element of x that is absent from `table`.
`%notin%` <- function(x, table) !(x %in% table)
##########################################################################################
## Imputation, redone only if the data changed ##
####################################
#####################################################
# Impute missing values with mice, reusing a cached result from `path` when the
# input still has the same columns and row count.
#
# Args:
#   data_frame_a_verif: data frame with missing values to impute.
#   path:               RDS file holding a previously imputed data frame.
#   reimputation:       force a fresh imputation even when the cache matches.
# Returns: the completed (imputed) data frame.
# Side effects: writes data/genere/imputation_object.rds and
#               data/genere/data_impute.rds when re-imputing.
impute_si_changement <- function(data_frame_a_verif, path, reimputation = FALSE) {
  # Previously cached imputation; fall back to an empty data.frame when the
  # cache file is missing or unreadable.
  df_impute_ex <- tryCatch(read_rds(path),
                           error = function(e) data.frame())
  # Fix: the original compared sort(colnames(a)) == sort(colnames(b))
  # elementwise, which warns and recycles when the column counts differ;
  # setequal() handles unequal lengths correctly. (An unused local that listed
  # columns containing NAs was also removed.)
  same_columns <- setequal(colnames(data_frame_a_verif), colnames(df_impute_ex))
  same_rows <- nrow(data_frame_a_verif) == nrow(df_impute_ex)
  if (same_columns && same_rows && !reimputation) {
    res <- df_impute_ex
  } else {
    # Cache miss (or forced rerun): impute from scratch. The two-branch test of
    # the original was exhaustive, so its trailing stop() was unreachable.
    cat("nécessité de réimputer les données
    ça va être long + ou - une heure")
    imputed_Data_func <- mice::mice(data_frame_a_verif,
                                    m = 10,
                                    maxit = 50,
                                    method = 'pmm',
                                    printFlag = FALSE)
    saveRDS(imputed_Data_func, file = paste0("data/genere/imputation_object.rds"))
    if (!is.null(imputed_Data_func$loggedEvents)) {
      print("imputation avec warning")
      print(imputed_Data_func$loggedEvents)}
    res <- mice::complete(imputed_Data_func)
    saveRDS(res, file = paste0("data/genere/data_impute.rds"))
  }
  return(res)
}
#########################################################################################
# Univariate variable selection on a held-out 20% split (stratified by exposure):
# fits one glm of the outcome on each candidate variable and keeps those whose
# p-value is below seuil_pval[1]. TODO: generalize beyond this analysis.
model_selection <- function(data_frame ,name_expo_fonc , name_outcome_fonc, seuil_pval = c(0.2,0.2), # p-value thresholds treated as significant: first = outcome, second = weight
                            rerun = TRUE) {
  if (rerun){
    # 20% sample per exposure group, used only for variable selection.
    df_model_select_var <- data_frame %>% group_by_at(name_expo_fonc) %>% slice_sample(prop= 0.2) %>% ungroup %>% as.data.frame()
    # Remaining patients form the analysis set.
    df_analyse_final <- data_frame %>% anti_join(df_model_select_var , by = "id_patient")
    df_select_var_base <- df_model_select_var %>% select(-id_patient)
    var_to_select_from <- df_select_var_base %>% select(-{{name_expo_fonc}},{{name_outcome_fonc}}) %>% colnames()
    # One univariate glm per candidate variable (long format + nest/map).
    # NOTE(review): `family` and `na.action` here are extra arguments to map(),
    # not to glm(); with a numeric outcome glm's defaults coincide, so behavior
    # is unchanged — but the intent should be confirmed.
    model_outcome <- df_select_var_base %>%
      select(-all_of(name_expo_fonc)) %>%
      gather(measure, value, -all_of(name_outcome_fonc)) %>%
      mutate(value = as.numeric(value)) %>%
      group_by(measure) %>%
      nest() %>%
      ungroup() %>%
      mutate(fit = map(data, ~ glm(paste0(name_outcome_fonc,"~ value"), data = .x), family = gaussian(link = "identity"),na.action = na.omit),
             tidied = map(fit, broom::tidy)) %>%
      unnest(tidied) %>%
      filter(term != "(Intercept)") %>% # drop the intercept rows
      select(-data,-fit,-term) # drop the nested data and fitted models
    # Variables passing the outcome-model p-value threshold.
    P_val_out_come_colnames <- model_outcome %>%
      select(measure,contains("p.value")) %>%
      filter(p.value < seuil_pval[1]) %>%
      select(measure) %>%
      unlist(use.names = FALSE) %>%
      sort
    Var_model_outcome <- var_to_select_from[var_to_select_from %in% P_val_out_come_colnames]
    res <- list(data_frame = list(df_analyse = df_analyse_final,
                                  df_var_selec = df_model_select_var ),
                var_select = list(model_outcome = Var_model_outcome))
    saveRDS(res, file = paste0("data/genere/select_var.rds"))
  } else {
    # Reuse the previously saved selection.
    res <- readRDS("data/genere/select_var.rds")
  }
  return(res)
}
##########################################################################################
# Home-made type detection based on the number of distinct (non-NA) values per
# column: 2 distinct values -> boolean, >= nb_moda_max_fact -> numeric,
# in between -> factor (multinomial). Distinguishing int vs float is a later goal.
#
# Args:
#   df:               data frame to inspect.
#   nb_moda_max_fact: distinct-value count from which a column is treated as
#                     numeric; when NULL, prints candidates and stops (French
#                     message asking the caller to supply the threshold).
# Returns: list(colonne_type = list(facteur, booleen, numerique), data_frame_tot).
typages_function <- function(df,nb_moda_max_fact = NULL ){
  if (is.null(nb_moda_max_fact)) {
    # Show columns with more than two modalities to help pick the threshold.
    df %>%
      summarise_all(list(~n_distinct(na.omit(.)))) %>%
      t.df() %>% filter(col_1 > 2) %>%
      arrange(col_1) %>%
      print
    stop('Si la liste des facteurs n\'est pas fournis le nombre de modalité à partir
    duquel un facteur doit etre considéré comme un numéric avec `nb_moda_max_fact = `,
    \n pour vous aidez dans le choix du nombre de modalité la liste des variables
    avec plus de deux modalité différente est présenté au dessus')}
  else {
    # Distinct-value count per column, flagged into the three categories.
    temp_moda_par_var <- df %>% summarise_all(list(~n_distinct(na.omit(.)))) %>%
      t.df() %>%
      mutate(binaire = col_1 == 2, numeric = col_1 >= nb_moda_max_fact,
             multinomial = (col_1 < nb_moda_max_fact & col_1 > 2)) %>%
      arrange(col_1)
    list_factor <- temp_moda_par_var %>%
      filter(multinomial) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    liste_booleen <- temp_moda_par_var %>%
      filter(binaire) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    liste_numeric <- temp_moda_par_var %>%
      filter(numeric) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    res <- list(colonne_type = list(facteur = list_factor,
                                    booleen = liste_booleen,
                                    numerique = liste_numeric),
                data_frame_tot = temp_moda_par_var)
  }
  return(res)
}
##########################################################################################
# Home-made one-hot encoding via caret::dummyVars.
#
# Args:
#   df:               data frame to encode.
#   nb_moda_max_fact: threshold forwarded to typages_function() to auto-detect
#                     factors when list_factor is not given.
#   list_factor:      explicit factor column names (suppresses the cat() notice).
# Returns: df with the factor columns replaced by indicator columns.
one_hot_fb <- function(df, nb_moda_max_fact = NULL, list_factor = NULL){
  if (is.null(list_factor)) {
    # Auto-detect factor columns, then print the explicit list_factor call that
    # would silence this notice on the next run.
    list_factor <- typages_function(df, nb_moda_max_fact)$colonne_type$facteur
    cat("Le nombre maximum de modalité par facteur est de ",
        nb_moda_max_fact,
        "\n pour supprimer ce warning utilisez `list_factor = ", "c( ",
        paste0("\"",list_factor,"\"",collapse = " , "),
        " )` \n au lieu de `nb_moda_max_fact = ", nb_moda_max_fact,"`")
  }
  if (all(is.na(list_factor))) { # case where there is no factor at all
    res <- df
  } else {
    df <- df %>% mutate_at( list_factor,as.factor)
    dmy <- dummyVars(paste0(" ~ ", paste0(list_factor,collapse = " + ")), data = df)
    trsf <- data.frame(predict(dmy, newdata = df))
    res <- df %>% select(-all_of(list_factor)) %>% cbind(trsf) # all_of() added; revert if it breaks
  }
  return(res)
  # TODO: add a step that renames the generated indicator variables more nicely
}
# Weight truncation helper: clamp a numeric vector to its
# [.probs, 1 - .probs] quantile range (winsorization).
#########################################################################################
fun_trunc <- function(x, .probs) {
  lower_bound <- quantile(x, probs = .probs)
  upper_bound <- quantile(x, probs = 1 - .probs)
  pmin(pmax(x, lower_bound), upper_bound)
}
## Weight truncation over several thresholds:
## takes a vector of weights and truncates it at each requested percentile.
#########################################################################################
Trucature_pds_function <- function(vect_poids, percentile_tronc) {
  # One truncated copy of the weights per threshold.
  truncated_list <- lapply(percentile_tronc, function(p) fun_trunc(vect_poids, p))
  res <- as.data.frame(truncated_list)
  # Label each column with its truncation interval, e.g. "(0.01; 0.99)".
  names(res) <- paste0("(",
                       as.character(percentile_tronc),
                       "; ",
                       as.character(1 - percentile_tronc),
                       ")")
  return(res)
}
## Build the base result table of the weighting model, before results insertion ##
#########################################################################################
# Args:
#   donne:    source data frame.
#   expo:     exposure column name (symbol/character, coerced with as.character).
#   Id_data:  vector of subject identifiers.
#   PS_table: one row per subject, one column per exposure level, holding the
#             predicted probability of each level.
# Returns: tibble with ID, exposition, PS, numerator, SWeight (stabilized) and
#          Weight (1/PS) columns.
Table_base_result_weighting <- function(donne,expo,Id_data,PS_table){
  exposure <- donne[,as.character(expo)]
  res <- data.frame(ID = Id_data,exposition = exposure , PS = NA) %>%
    setNames(c("ID","exposition", "PS"))
  res <- res %>%
    group_by(exposition) %>%
    mutate(n = n(),numerator = n / nrow(.)) %>% select(-n) # numerator for stabilisation
  # Probability of being exposed to the exposure actually observed: pick, for
  # each row, the PS_table column matching that subject's exposure level.
  # NOTE(review): 1:nrow(PS_table) misbehaves on an empty table (seq_len safer).
  res$PS <- sapply(1:nrow(PS_table),
                   function(x) PS_table[x,as.character(as.numeric(res$exposition[x]))])
  res <- res %>% mutate(SWeight = numerator/PS, Weight = 1/PS)
  return(res)
}
## Weighting via multinomial regression
#########################################################################################
# First function of its kind: takes a data matrix and returns the probability
# of belonging to each exposure group, together with the fitted model.
# Intended to be used inside calcule_pds_stage().
#
# Args:
#   donne:   data frame holding exposure and confounders.
#   expo:    exposure column name (symbol/character; pasted into the formula).
#   covar:   character vector of confounder names (entered additively).
#   Id_data: vector of subject identifiers, bound as the ID column.
# Returns: list(matrice_proba = ID + one probability column per group,
#               model_ponderation = the fitted multinom model).
multinomial_IPTW <- function(donne,expo,covar,Id_data){
  # Multinomial regression of group membership on the confounders.
  # Fix: the formula was built with eval(parse(text = ...)); as.formula() on the
  # pasted string produces the same formula without the parse/eval anti-pattern.
  ps_formula <- as.formula(
    paste0("as.numeric(", expo, ") ~ ", paste0(covar, collapse = " + "))
  )
  mod1 <- multinom(formula = ps_formula,
                   data = donne, na.action = na.fail , # fail fast on missing data
                   trace = FALSE)
  # Predicted membership probabilities; the first column is renamed "1" so it
  # can be looked up by exposure level downstream.
  proba_tps_inv <- cbind(ID = Id_data,predict(mod1, type = "probs") %>%
                           as.data.frame() %>%
                           rename("1" = names(.)[1]))
  return(list(matrice_proba = proba_tps_inv , model_ponderation = mod1))
}
## Main function of the internship project ##
#########################################################################################
# Personal note: execution time is > 90% dominated by the regression step.
## Main function of the internship project ##
#########################################################################################
# Compute IPTW weights with a pluggable weighting method, then truncate them.
#
# Args:
#   donne:               data frame with exposure, confounders and IDs.
#   weighting_function:  function(donne, expo, covar, Id_data, ...) returning
#                        list(matrice_proba, model_ponderation), e.g.
#                        multinomial_IPTW.
#   supplementary_weighting_function_params: named list of extra parameters
#                        forwarded to weighting_function.
#   Id_data, expo, covar, out_come: column names (captured via match.call()).
#   percentile_tronc:    truncation thresholds for the weights.
#   talbe_and_plot:      [sic] flag for tables/plots (currently unused here).
# Returns: list(df = weight table, res_intermediaire = truncated weights and
#          the weighting model with its probability matrix).
calcule_pds_stage <- function(donne,
                              weighting_function,
                              supplementary_weighting_function_params = NULL, # add here a name list of named parameters
                              Id_data = NULL,
                              expo,
                              covar,
                              out_come,
                              percentile_tronc = c(0,1,5,10,25,50)/100 ,
                              talbe_and_plot = TRUE){
  # fetch function parameters (unevaluated, so column names can be passed bare)
  tempcall <- match.call()
  # TODO split the IPTW weight computation into its own function
  # TODO overall this function should become mostly calls to functions like that
  # TODO plan tests for the various functions
  # Compute weights with method X, then proceed.
  # Method X must take as input a table with...
  # (probably to rethink; candidate for deletion)
  exposure <- donne[,as.character(tempcall$expo)]
  ID <- donne[,as.character(tempcall$Id_data)]
  weighting_function_params <- list(donne = donne,
                                    expo = tempcall$expo,
                                    covar = covar ,
                                    Id_data = ID)
  if (!is.null(supplementary_weighting_function_params)){
    weighting_function_params <- append(weighting_function_params,supplementary_weighting_function_params)
  }
  # Delegate the propensity-score estimation to the supplied method.
  ponderation <- do.call(weighting_function,weighting_function_params)
  proba_tps_inv <- ponderation$matrice_proba %>% as.data.frame()
  res <- Table_base_result_weighting(donne,tempcall$expo,ID,proba_tps_inv)
  # weight truncation (plain and stabilized weights)
  poid_trunc_df <- Trucature_pds_function(res$Weight,percentile_tronc)
  poid_trunc_stab_df <- Trucature_pds_function(res$SWeight,percentile_tronc)
  ### graphics and other explanatory tables
  return(list(df = res,
              res_intermediaire = list(
                poids = list(
                  poids_tronc = list(poids_trunc = poid_trunc_df,
                                     poids_trunc_stab = poid_trunc_stab_df)),
                regression_modele_pds = list(
                  regression_temps_ind = ponderation$model_ponderation,
                  data_frame_coef = proba_tps_inv)
              )
  )
  )
}
#########################################################################################
#########################################################################################
# Bootstrap wrapper: draw `nb_iter` resamples of df's row indices (with
# replacement) and apply `fonction(df, row_indices, params_fun)` to each draw.
boot_strap_fun <- function(df, fonction, nb_iter, params_fun) {
  sapply(seq_len(nb_iter), function(iteration) {
    resampled_rows <- sample(1:nrow(df), size = nrow(df), replace = TRUE)
    fonction(df, resampled_rows, params_fun)
  })
}
###############################################################################
# RESULT FUNCTION
##########################################################################################
# Convert decimal hours into an "HH h MM min" string (vectorized via sprintf).
#
# Bug fix: the original rounded hours (floor) and minutes (round) independently,
# so e.g. 1.999 hours produced "01 h 60 min". Rounding the total number of
# minutes first keeps minutes in [0, 59] and carries into the hour.
roud_hour <- function(decimal_hour){
  total_minutes <- round(decimal_hour * 60)
  heure <- total_minutes %/% 60
  minutes <- total_minutes %% 60
  res <- sprintf("%02d h %02d min", heure, minutes)
  return(res)
}
#
################################################################################
#to lower first char
# Lower-case the first character of each string (vectorised; empty
# strings and the rest of each word are left untouched).
firstlow <- function(x) {
  first_char <- substr(x, 1, 1)
  substr(x, 1, 1) <- tolower(first_char)
  x
}
##########################################################################################
# fait des arrondie + notation scientifique pour les nombres a virugules dans les DF en
# conservant les integer telquel
# Round/format the numeric columns of a data frame for display, keeping
# integer-valued entries as plain integers and switching to scientific
# notation (1 significant digit) for very large or very small magnitudes.
#
# Element-wise rules for numeric columns:
#   * whole numbers with |x| < 1000      -> printed as integers
#   * |x| > 1000 or 0 < |x| < 0.001      -> scientific notation "x.xe+yy"
#   * everything else                    -> rounded to `digit_to_round`
# abs() is used in both magnitude tests so that large/small *negative*
# values take the same branch as their positive counterparts (previously
# e.g. -5000.5 escaped the scientific branch because -5000.5 > 10^3 is
# FALSE). Row names are preserved via the rownames_to_column()/
# column_to_rownames() round trip, which tibble verbs would otherwise drop.
#
# @param df_func        Data frame to format.
# @param digit_to_round Decimals used by the default rounding branch.
# @return Data frame with numeric columns converted to character.
arrondie_df <- function(df_func,digit_to_round = 3){
  res <- df_func %>%
    rownames_to_column() %>%
    mutate_if(is.numeric,
              ~ifelse(.%%1==0 & abs(.) < 10^3,as.character(round(.,0)),ifelse((abs(.) > 10^3| 1/abs(.) > 10^3 ),
                                                                              formatC(., format = "e", digits = 1), # scientific for extreme magnitudes
                                                                              as.character(round(.,digit_to_round))
              )
              )
    ) %>%
    column_to_rownames()
  return(res)
}
##########################################################################################
# Fonction qui sert a rename les colonnes depuis un fichier
# Rename the columns of a table with human-readable labels read from an
# Excel file (columns `Variable` / `Label`).
#
# Column names produced by the pipeline may carry a prefix ("Vis_init_",
# "Mesure_unique_"), an "INS_" marker for hand-made instrumental
# variables, and a ".<digit>" suffix left by one-hot encoding. Those
# decorations are stripped before looking the base name up in the label
# file, then the suffix is re-applied and "Vis_init_" labels are prefixed
# with "diagnosis ". Instrumental ("INS_") variables have no entry in the
# file, so their display names must be supplied via `var_instrum_name`
# as c("variable_name" = "new name"); the function stops with an
# explanatory message when names are missing or superfluous.
#
# @param table_func       Table whose columns should be renamed.
# @param var_instrum_name Named character vector mapping instrumental
#                         column names to display labels.
# @param path_to_var_lab  Path to the Excel label file.
# @return list(table_rename = renamed table,
#              complete_table = the mapping data frame that was used).
rename_variables <- function(table_func, var_instrum_name = NULL, path_to_var_lab = "data/List_of_variables.xls") {
  # Read the label file; inside each label, lower-case words that contain a
  # single capital letter (acronyms with several capitals are kept as-is)
  label_var <- readxl::read_excel(path_to_var_lab) %>% select(Variable,Label) %>% mutate(Label = unlist(lapply(strsplit(Label, " "),function(x)
    paste0(ifelse(str_count(x,"[A-Z]") == 1,firstlow(x),x),collapse = " ")
  )))
  # (a dead lapply() recomputing the same thing with a discarded result was removed here)
  # Decompose the actual column names into prefix / base name / suffix
  actual_name <- data.frame(var_name = colnames(table_func)) %>%
    mutate(suffix_factor = str_extract(var_name,"\\.+\\d$"), # ".<d>" suffix left by one-hot encoding
           preffix_all = str_extract(var_name,"^Vis_init_|^Mesure_unique_"), # pipeline-created prefixes
           var_ins = str_extract(var_name,"INS_"), # hand-made instrumental variables, named by the caller
           real_name = str_remove_all(var_name,"INS_|^Vis_init_|^Mesure_unique_|\\.+\\d$") # strip everything detected
    )
  join_table <- actual_name %>% left_join(label_var, by = c("real_name" = "Variable")) # join with the labels
  # Instrumental variables: their labels must be supplied by the caller
  name_need_supply <- join_table %>%
    filter(!is.na(var_ins)) %>%
    select(var_name) %>%
    distinct() %>%
    unlist(use.names = FALSE)
  if(is.null(var_instrum_name)){
    cat("You must supply name for variable you create \n variables you must name are \n")
    print(name_need_supply)
    cat("\n for that ` var_instrum_name = c(\"variable_name\" = \"new name\")`\n")
    stop()
  } else if (length(name_need_supply) != length(var_instrum_name)) {
    # report missing or superfluous caller-supplied names
    out_temp <- if_else(condition = length(name_need_supply) > length(var_instrum_name),
                        true = paste0("Not enought name provide, missing name for \n ",
                                      paste(name_need_supply[name_need_supply%notin%names(var_instrum_name)], collapse = " ,")),
                        false = paste0("to many name provide, don't need name for \n ",
                                       paste(var_instrum_name[names(var_instrum_name)%notin%name_need_supply], collapse = " ,"))
    )
    cat(out_temp)
    stop()
  } else {
    instrum_name <- data.frame(var_name = names(var_instrum_name) , Label = var_instrum_name)
    complete_name_table <- join_table %>%
      left_join(instrum_name,by = "var_name") %>%
      mutate(Label = coalesce(Label.x, Label.y)) %>%
      select(-contains("Label.")) %>%
      mutate(Label = ifelse(!is.na(suffix_factor),
                            paste0(Label, suffix_factor),
                            Label),# restore the one-hot factor suffix
             Label = ifelse((!is.na(preffix_all)&preffix_all == "Vis_init_"),
                            paste0("diagnosis ",Label),
                            Label)
      )
    short_name_table <- complete_name_table$var_name
    names(short_name_table) <- complete_name_table$Label
    res <- table_func %>% rename(all_of(short_name_table))
  }
  return(list(table_rename = res,
              complete_table = complete_name_table ))
}
# Half-width of a Student-t confidence interval for the mean of `x`:
# the t quantile at `con_int` (df = n - 1) times the standard error
# sd(x) / sqrt(n).
ci_fb <- function(con_int,x) {
  n <- length(x)
  qt(con_int, df = n - 1) * (sd(x) / sqrt(n))
}
#########################################################################################
# Fonction rapide pour écrire dans un fichier
# Append a chunk of text (typically LaTeX) to the given file, one element
# per line.
ecriture_fichier <- function(table_latex,path_to_file) {
  write(x = table_latex, file = path_to_file, append = TRUE)
}
##########################################################################################
# table descriptive des variables
# df_one_hot_encode_fonc généré avec one_hot_fb
# Version prévue pour échapper les caracter latex
# Build a LaTeX-ready descriptive table of all variables, overall and per
# exposure group.
#
# For each variable the cell is "n(pct%)" when the variable is binary
# (<= 2 distinct values) and "mean(sd)" otherwise, with an "NA:k" suffix
# when values are missing. When p_val = TRUE, pairwise group comparisons
# (Welch t-test for numeric variables, prop.test for binary ones) are run
# and the indices of significantly different groups are appended as LaTeX
# \textsubscript{} annotations, at threshold alpha Bonferroni-corrected by
# the number of pairwise comparisons; "%" and "_" are LaTeX-escaped.
#
# @param df_one_hot_encode_fonc Data frame already one-hot encoded (see
#        one_hot_fb), containing the exposure column.
# @param name_expo_fonc Name of the exposure / grouping column.
# @param nom_grp     Display prefix used for the per-group column names.
# @param p_val       Add pairwise-significance subscripts?
# @param arrondie    NOTE(review): currently unused — the arrondie_df()
#                    call below is commented out.
# @param alpha       Significance level before Bonferroni correction.
# @param ponderation Optional weight vector applied to every non-exposure
#                    column (weighted counts / means).
# @return Data frame (rownames = variable names) with an "all <nom_grp>"
#         column plus one column per group; first row is the patient
#         count per group.
table_resume_latex <- function(df_one_hot_encode_fonc,name_expo_fonc,nom_grp = "clusters", p_val = FALSE, arrondie = TRUE, alpha = 0.05,ponderation = NULL) {
  # Classify each variable as boolean (<= 2 distinct values) or numeric;
  # the df is expected to be the pre-one-hot-encode layout
  table_bool_var <- df_one_hot_encode_fonc %>%
    select(-all_of(name_expo_fonc)) %>%
    summarise_all(list(~n_distinct(., na.rm = TRUE))) %>%
    t.df %>%
    rename(booleen = col_1) %>%
    {temp_verif_bool <<- .} %>% # NB: global assignment, reused by the sanity check just below
    mutate(booleen = booleen <= 2)
  if (any(temp_verif_bool$booleen < 2)) {
    print(temp_verif_bool$key[temp_verif_bool$booleen < 2])
    stop("moins de deux valeurs distinct pour une variables")}
  if (!is.null(ponderation)){
    # Weighted summary: scale every non-exposure column by the weights
    df_one_hot_encode_fonc <- df_one_hot_encode_fonc %>% mutate_at(vars(-name_expo_fonc),funs(.*ponderation)) #mutate_at(vars(-name_expo_fonc),funs(if(is_whole(.)){round(.*ponderation,0)} else{.*ponderation}))
  }
  name_all_grp <- paste0("all ",nom_grp)
  # Summary statistics pooled over all groups
  all_cluster_descript_var <- df_one_hot_encode_fonc %>%
    recup_var_table_res(name_expo_fonc) %>%
    #{ifelse(arrondie ,arrondie_df(.), . )} %>% print %>%
    mutate(nb_NA = ifelse(nb_NA == 0,"",paste0("NA:", nb_NA )))
  # Sorted list of observed exposure levels
  nb_grp <- df_one_hot_encode_fonc %>% select(all_of(name_expo_fonc)) %>%
    unique() %>%
    unlist(use.names = FALSE) %>%
    sort
  # One formatted summary column per exposure group
  group_cluster_descript_var <- lapply(nb_grp, function(x) {
    col_name <- paste0(nom_grp,"_",x)
    res <- df_one_hot_encode_fonc %>%
      filter(!!sym(name_expo_fonc) == x) %>%
      recup_var_table_res(name_expo_fonc) %>%
      mutate(nb_NA = ifelse(nb_NA == 0,"",paste0("NA:", nb_NA ))) %>%
      inner_join(table_bool_var,by = "key") %>%
      mutate({{col_name}} := ifelse( booleen
                                    , paste0(round(n,0),"(",round(pourcent,1), "%)", nb_NA),
                                    paste0(round(moy,1) ,"(",
                                           round(std,1), ")", nb_NA))) %>%
      select(all_of(col_name))
    return(res)
  }
  )
  # Assemble: pooled column first, then the per-group columns
  table_res <- all_cluster_descript_var %>%
    inner_join(table_bool_var,by = "key") %>%
    mutate( {{name_all_grp}} := ifelse(booleen
                                       , paste0(round(n,0),"(",round(pourcent,1), "%)", nb_NA),
                                       paste0(round(moy,1) ,"(",
                                              round(std,1), ")", nb_NA) ) ) %>%
    select(key,all_of(name_all_grp)) %>%
    cbind(bind_cols(group_cluster_descript_var)) %>%
    data.frame(., row.names = 1)
  # rename_at(vars(contains(".grp")),funs(str_replace(.,"\\."," ")))
  table_res <- table_res %>% rename_all(list(~str_replace_all(.,"\\."," ")))
  # If pairwise p-values were requested
  if (p_val) {
    # one extra pseudo-group representing all categories pooled
    nb_group_pval <- as.character(c(as.numeric(nb_grp),max(as.numeric(nb_grp)) + 1 ))
    # every pairwise combination of groups to test
    combin_grp <- nb_group_pval %>% combn(2)
    # duplicate the data with the exposure recoded to the pooled pseudo-group
    df_pval <- df_one_hot_encode_fonc %>%
      mutate(!!sym(name_expo_fonc) := max(as.numeric(nb_group_pval))) %>%
      rbind(df_one_hot_encode_fonc)
    non_boolean_var <- table_bool_var %>% filter(!booleen) %>% select(key) %>% unlist(use.names = FALSE)
    boolean_var <- table_bool_var %>% filter(booleen) %>% select(key) %>% unlist(use.names = FALSE)
    # one p-value column per group pair, named "a_b"
    combin_ttest_pval <- apply(combin_grp, 2, function(x)
      df_pval %>%
        select(sym(name_expo_fonc),all_of(non_boolean_var)) %>%
        #summarise_at(vars(-(sym(name_expo_fonc))),list(~t.test(.[!!sym(name_expo_fonc) == x[1]], .[!!sym(name_expo_fonc) == x[2]])$p.value)) %>%
        summarise_at(vars(-(sym(name_expo_fonc))),list(~t.test(.[!!sym(name_expo_fonc) == x[1]], .[!!sym(name_expo_fonc) == x[2]])$p.value)) %>%
        t.df %>%
        rename_at("col_1",list( ~paste0(x[1],"_",x[2])))
    )
    if (length(boolean_var) != 0){
      # binary variables: two-sample proportion test, skipped (NA) when a
      # group has fewer than 8 successes
      combin_chisq_pval <- apply(combin_grp, 2, function(x)
        df_pval %>%
          select(sym(name_expo_fonc),all_of(boolean_var)) %>%
          dplyr::summarise_at(vars(-sym(name_expo_fonc)),
                              list(~ifelse(sum(.[!!sym(name_expo_fonc) == x[1]],na.rm = TRUE) < 8| sum(.[!!sym(name_expo_fonc) == x[2]],na.rm = TRUE) < 8,
                                           NA,
                                           prop.test(
                                             x = c(sum(.[!!sym(name_expo_fonc) == x[1]],na.rm = TRUE), sum(.[!!sym(name_expo_fonc) == x[2]],na.rm = TRUE)), # compute number of success
                                             n = c(sum(!is.na(.[!!sym(name_expo_fonc) == x[1]])), sum(!is.na(.[!!sym(name_expo_fonc) == x[2]])))
                                           )$p.value))
          ) %>%
          t.df %>%
          rename_at("col_1",list( ~paste0(x[1],"_",x[2])))
      )
      combin_total_pval <- mapply(rbind,combin_chisq_pval,combin_ttest_pval,SIMPLIFY=FALSE)
    } else combin_total_pval <-combin_ttest_pval
    # Turn p-values into significance booleans at threshold alpha with a
    # Bonferroni correction over the number of pairwise tests
    result_pval <- bind_cols(combin_total_pval) %>%
      rename(key = key...1) %>%
      select(key,contains("_")) %>%
      mutate_at(vars(-key),list(~(. < (alpha / ncol(combin_grp))
      ))
      ) %>% # Bonferroni-corrected decision
      mutate_at(vars(-key), function(x) {
        x_var <- rlang::enquo(x)
        ifelse(x , rlang::quo_name(x_var), "non") # significant -> pair label, otherwise the "non" placeholder
      }) %>%
      mutate_at(vars(-key), function(x) {
        x_var <- rlang::enquo(x)
        ifelse(is.na(x) , paste0(rlang::quo_name(x_var),"*"),x) # untestable pairs (NA) flagged with a trailing "*"
      })
    # For each group, gather the pair columns mentioning it and drop the
    # "non" placeholders
    df_pval_final <- lapply(nb_group_pval, function(x) {
      result_pval %>% select(key,contains(x)) %>%
        mutate_at(vars(contains(x)),list(~str_remove_all(.,paste(c("_",x), collapse = "|")))) %>%
        unite(!!sym(paste0(nom_grp,"_",x)) ,contains(x),sep = ",")
    }
    ) %>% bind_cols() %>%
      rename(key = key...1) %>%
      select(key,contains(nom_grp)) %>%
      rename(!!sym(name_all_grp) := paste0(nom_grp,"_",max(as.numeric(nb_group_pval)))) %>%
      mutate_all(list(~str_remove_all(.,"non,|,non"))) %>%
      mutate_all(list(~str_remove_all(.,"non"))) %>%
      mutate_all(list(~str_remove_all(.,paste0(",",as.character(length(nb_grp) +1),"|",as.character(length(nb_grp) +1)) ))) # strip the pooled pseudo-group index from the labels
    if(df_pval_final %>% transmute_at(vars(-key),list(~str_detect(.,"non"))) %>% as.matrix() %>% any) {
      stop("il reste des p-val non traité")}
    # Build the LaTeX cells: significant group indices go into
    # \textsubscript, "%" and "_" are escaped for LaTeX
    table_res_pval <- table_res %>%
      rownames_to_column() %>%
      pivot_longer(-rowname,values_to = "valeur") %>%
      inner_join((df_pval_final %>% pivot_longer(-key,values_to = "pvalue") ), by = c("rowname" = "key", "name" = "name")) %>%
      mutate(combin = paste0(valeur,"\\textsubscript{",pvalue, "}")) %>%
      select(rowname,name,combin) %>%
      pivot_wider(names_from = name, values_from = combin) %>%
      column_to_rownames()
    table_res_pval <- table_res_pval %>%
      rownames_to_column() %>%
      mutate_at(vars(-rowname),list(~str_replace_all(.,"%","\\\\%"))) %>%
      column_to_rownames() %>%
      #mutate_all(funs(str_replace_all(.,"%","\\\\%"))) %>%
      select(all_of(name_all_grp),sort(tidyselect::peek_vars()))
    rownames(table_res_pval) <- str_replace_all(rownames(table_res_pval),"_","\\\\_")
    colnames(table_res_pval) <- str_replace_all(colnames(table_res_pval),"_","\\\\_")
  } else {table_res_pval <- table_res %>%
    select(all_of(name_all_grp),sort(tidyselect::peek_vars())) }
  # First row: number (and share) of patients, overall then per group
  tbl_nb_ind_grp <- table(df_one_hot_encode_fonc[,name_expo_fonc])
  nb_pat_par_grp <- c(nrow(df_one_hot_encode_fonc),
                      tbl_nb_ind_grp[order(as.numeric(names(tbl_nb_ind_grp)))] )
  nb_pat_par_grp <- paste0(nb_pat_par_grp , " (" , round(nb_pat_par_grp*100/nrow(df_one_hot_encode_fonc),1) ," \\%)")
  res <- rbind(`Number of patient` = nb_pat_par_grp,table_res_pval)
  return(res)
}
##########################################################################################
# function récupérant les variables pour table descriptive des variables
# df_one_hot_encode_fonc généré avec one_hot_fb
# Compute per-variable summary statistics for the descriptive table
# (mean, sd, median, quartiles, count, percentage, NA count), dropping the
# exposure column. Returns one row per variable (column `key`) and one
# column per statistic, obtained by reshaping the summarise_all() output.
recup_var_table_res <- function(df_one_hot_encode_fonc,name_expo_fonc){
  res <- df_one_hot_encode_fonc %>%
    select(-all_of(name_expo_fonc)) %>%
    mutate_if(is.factor, ~as.numeric(as.character(.))) %>% # caveat: this converts boolean factors to numeric via their labels — keep an eye on it
    summarise_all(list(fonc_moy = ~mean(.,na.rm = TRUE),
                       fonc_std =~sd(.,na.rm = TRUE),
                       fonc_med = ~median(.,na.rm = TRUE),
                       fonc_quart1 = ~quantile(.,0.25,na.rm = TRUE),
                       fonc_quart2 = ~quantile(.,0.75,na.rm = TRUE),
                       fonc_n = ~sum(.,na.rm = TRUE),
                       fonc_pourcent = ~mean(.,na.rm = TRUE)*100,
                       fonc_nb_NA = ~sum(is.na(.))
    )
    ) %>%
    # "<var>_fonc_<stat>" column names are split back into one column per stat
    pivot_longer(cols = everything(),
                 names_to = c(".value", "level"),
                 names_pattern = "(.*)_fonc_(.*)") %>%
    t.df(.,"level")
  return(res)
}
| /Source_papier_prett.R | no_license | fbettega/OSFP_IPTW | R | false | false | 33,402 | r | #########################################################################################
# retire les colonnes iodentiques
# Remove duplicated columns (columns whose entire content equals another
# column's). Uses t.df() so that distinct_at() can deduplicate on rows.
#
# @param df_func              Data frame to deduplicate.
# @param return_list_col_supr When TRUE, also return via
#        column_comparator() the mapping kept column -> identical removed
#        column(s).
# @return Either the deduplicated data frame, or
#         list(df = ..., colonne_suprime = <list of matches>).
distinc_col <- function(df_func,return_list_col_supr = TRUE){
  column_unique_df <- df_func %>% t.df() %>% distinct_at(vars(-key),.keep_all = TRUE)
  df_col_unique <- df_func %>% select(all_of(column_unique_df$key))
  if (return_list_col_supr) {
    # columns that were dropped because a duplicate was kept
    col_supr <- colnames(df_func)[colnames(df_func) %notin% column_unique_df$key]
    df_col_supr <- df_func %>% select(all_of(col_supr))
    liste_col_supr <- column_comparator(df_col_supr,df_col_unique)
    res <- list(df = df_col_unique, colonne_suprime = liste_col_supr)
  } else {res <- df_col_unique}
  return(res)
}
#########################################################################################
# liste les colonnes iodentiques
# For each column of df_compar_func (the kept columns), list the columns
# of col_a_compar (the removed columns) that are element-wise identical to
# it, treating NA == NA as a match. Each result element is
# c(kept_name, matching_removed_names...); groups with no match are
# dropped. A kept column matching two removed columns yields one group
# containing all three names.
# NOTE(review): names(df_compar_func[,x]) yields the column name only with
# tibble-style `[ , x]` extraction (no drop); with a base data.frame it
# would be NULL — confirm the inputs are tibbles.
column_comparator <- function(col_a_compar,df_compar_func){ # find identical columns and group them as: removed-column name(s) matching a kept-column name
  liste_colum_identique <- lapply(seq_along(df_compar_func),function(x) {
    col_a_compar_temp <- df_compar_func[,x] %>% unlist( use.names = FALSE)
    # n x k matrix: one column per removed column, TRUE where values (or NAs) agree
    column_compared <- (col_a_compar_temp == col_a_compar ) |
      (is.na(col_a_compar_temp) & is.na(col_a_compar ))
    matching_col <- c(names(df_compar_func[,x]),names(which(apply(column_compared,2,all))))
  })
  # keep only the groups with at least one match (length > 1)
  liste_colum_identique <- lapply(liste_colum_identique, function(x) x[length(x) > 1]) %>% compact()
  return(liste_colum_identique)
}
##########################################################################################
# count number of NA per variables avec les individus groupé par une variables
# Cross-tabulate missingness per variable across groups: first count NAs
# per variable within each level of `group_col` (data.table), then, for
# each variable, count how many groups share each NA count. The result has
# one row per observed NA count (`presence_na`) and one column per
# variable, cells holding the number of groups with that count (0-filled).
compte_na_par_var_par_grp <- function(df,group_col,colonnes){
  df_NA_var_fonc <- df %>% select(all_of(group_col),all_of(colonnes)) %>% setDT
  nb_val_manq_par_var <- df_NA_var_fonc %>%
    .[, lapply(.SD, function(x) sum(is.na(x))), group_col] %>%  # NA count per variable, by group
    select(-all_of(group_col)) %>%
    gather(name,presence_na) %>% # reshape dataset
    count(name, presence_na) %>% # count combinations
    pivot_wider(names_from = name,
                values_from = n,
                values_fill = list(n = 0))
  return(nb_val_manq_par_var)
}
##########################################################################################
# Fonction usuful when i need to hard code choose beetween to colinear variables check that i choose all nedd varaibles and no more
# Safety helper for hand-coded choices between colinear variables: given a
# list of candidate sets (fuc_list_manu) and one manually chosen element
# per set (choose_elem), check that exactly one choice was supplied per
# set and that each choice belongs to its set, then return all the
# NON-chosen elements (i.e. the variables to drop).
select_manuel_verif <- function(fuc_list_manu,choose_elem){
  # this function only performs verifications
  if(length(fuc_list_manu) != length(choose_elem)) {
    print("number of element choose differ with list length")
    if(length(fuc_list_manu) > length(choose_elem)) {
      stop("not enougth manual choosen elements")
    } else { stop("to many manual choosen elements") }
  } else if(length(fuc_list_manu) == length(choose_elem)){
    # check that every chosen element belongs to its candidate set
    bool_in_list <- sapply(seq_along(choose_elem), function(x) choose_elem[x] %in% fuc_list_manu[[x]])
    if (all(bool_in_list)) {
      # keep everything except the manually chosen element of each set
      res <- sapply(seq_along(choose_elem), function(x) fuc_list_manu[[x]][fuc_list_manu[[x]] %notin% choose_elem[x]]) %>%
        compact %>% unlist
    } else {
      stop(paste0("choose elements ",choose_elem[!bool_in_list], " not in list"))
    }
  }
  return(res)
}
##########################################################################################
# transposé dataframe
# Transpose a data frame. The `pivot` column provides the new column
# names (when NULL, synthetic "col_i" row ids are created first); the old
# column names end up in a `key` column.
# NOTE(review): in pivot_longer() the positional "key" binds to names_to
# but "value" binds to names_prefix, NOT values_to. It works because
# values_to defaults to "value", yet a column whose name starts with
# "value" would be silently truncated — safer would be
# pivot_longer(cols = -!!pivot, names_to = "key", values_to = "value").
t.df <- function(df,pivot=NULL){
  if (is.null(pivot)){
    pivot <- "row_id"
    df <- df %>% mutate(row_id=paste0("col_",1:nrow(df) ))
  }
  res <- df %>% pivot_longer(cols = -!!pivot,"key","value") %>%
    pivot_wider(names_from = !!pivot,values_from = value)
  return(res)
}
##########################################################################################
# Negation of %in%: TRUE where `x` is absent from `table` (vectorised).
`%notin%` <- function(x, table) !(x %in% table)
##########################################################################################
##Imputation si changment de données##
####################################
#####################################################
# Return cached imputed data from `path` when it still matches
# data_frame_a_verif (same column names and same row count); otherwise
# re-run a mice 'pmm' imputation (m = 10, maxit = 50, slow: roughly an
# hour) and cache both the mice object and the completed data under
# data/genere/. Set reimputation = TRUE to force a fresh run.
# NOTE(review): `colanmes_manq_func` is computed but never used.
impute_si_changement <- function(data_frame_a_verif,path,reimputation = FALSE){
  # previously cached imputation; empty data.frame when the file is absent
  df_impute_ex <- tryCatch(read_rds(path),
                           error = function(e) data.frame())
  colanmes_manq_func <- data_frame_a_verif %>%
    select_if(data_frame_a_verif %>%
                summarise_all(list(~sum(is.na(.)))) != 0) %>%
    colnames()
  if ((all(sort(colnames(data_frame_a_verif)) == sort(colnames(df_impute_ex)) ) &
       (nrow(data_frame_a_verif) == nrow(df_impute_ex))) & !reimputation) {
    # cache is still valid: reuse it
    res <- df_impute_ex
  } else if (any(sort(colnames(data_frame_a_verif)) != sort(colnames(df_impute_ex))) | reimputation | nrow(df_impute_ex) == 0 | nrow(df_impute_ex) != nrow(data_frame_a_verif)) {
    cat("nécessité de réimputer les données
    ça va être long + ou - une heure")
    imputed_Data_func <- mice::mice(data_frame_a_verif,
                                    m = 10,
                                    maxit = 50,
                                    method = 'pmm',
                                    printFlag = FALSE)
    saveRDS(imputed_Data_func, file = paste0("data/genere/imputation_object.rds"))
    # surface any events mice logged during the run (collinearity, etc.)
    if (!is.null(imputed_Data_func$loggedEvents)) {
      print("imputation avec warning")
      print(imputed_Data_func$loggedEvents)}
    res <- mice::complete(imputed_Data_func)
    saveRDS(res, file = paste0("data/genere/data_impute.rds"))
  } else {stop("Condition non remplie problème fonction")}
  return(res)
}
#########################################################################################
# selection des variables en univarié
# a améliorer pour rendre généraliste
# Univariate variable screening on a held-out 20% split.
#
# Draws a stratified (by exposure) 20% sample, fits one univariate
# gaussian glm of the outcome on each candidate variable, and keeps the
# variables whose coefficient p-value is below seuil_pval[1]. Returns the
# remaining 80% for the main analysis, the 20% used for selection, and the
# selected variable names; results are cached in
# data/genere/select_var.rds (rerun = FALSE reloads the cache).
#
# @param data_frame       Input data with an `id_patient` column.
# @param name_expo_fonc   Exposure column name (stratification variable).
# @param name_outcome_fonc Outcome column name.
# @param seuil_pval       Vector of p-value thresholds; [1] is used for the
#                         outcome screen ([2] reserved for the weight model).
# @param rerun            Recompute (TRUE) or reload the cached result.
# @return list(data_frame = list(df_analyse, df_var_selec),
#              var_select = list(model_outcome = <selected names>)).
model_selection <- function(data_frame ,name_expo_fonc , name_outcome_fonc, seuil_pval = c(0.2,0.2), # p-value thresholds: first for the outcome model, second for the weight model
                            rerun = TRUE) {
  if (rerun){
    # stratified 20% sample used only for variable selection
    df_model_select_var <- data_frame %>% group_by_at(name_expo_fonc) %>% slice_sample(prop= 0.2) %>% ungroup %>% as.data.frame()
    # remaining patients, kept for the main analysis
    df_analyse_final <- data_frame %>% anti_join(df_model_select_var , by = "id_patient")
    df_select_var_base <- df_model_select_var %>% select(-id_patient)
    var_to_select_from <- df_select_var_base %>% select(-{{name_expo_fonc}},{{name_outcome_fonc}}) %>% colnames()
    # one univariate glm per candidate variable
    model_outcome <- df_select_var_base %>%
      select(-all_of(name_expo_fonc)) %>%
      gather(measure, value, -all_of(name_outcome_fonc)) %>%
      mutate(value = as.numeric(value)) %>%
      group_by(measure) %>%
      nest() %>%
      ungroup() %>%
      # BUGFIX: family/na.action were previously placed outside glm() and
      # silently swallowed by map(); they now reach glm() as intended
      mutate(fit = map(data, ~ glm(paste0(name_outcome_fonc,"~ value"), data = .x,
                                   family = gaussian(link = "identity"),
                                   na.action = na.omit)),
             tidied = map(fit, broom::tidy)) %>%
      unnest(tidied) %>%
      filter(term != "(Intercept)") %>% # drop the intercept rows
      select(-data,-fit,-term) # drop the nested data and model objects
    # variables significant at the screening threshold
    P_val_out_come_colnames <- model_outcome %>%
      select(measure,contains("p.value")) %>%
      filter(p.value < seuil_pval[1]) %>%
      select(measure) %>%
      unlist(use.names = FALSE) %>%
      sort
    Var_model_outcome <- var_to_select_from[var_to_select_from %in% P_val_out_come_colnames]
    res <- list(data_frame = list(df_analyse = df_analyse_final,
                                  df_var_selec = df_model_select_var ),
                var_select = list(model_outcome = Var_model_outcome))
    saveRDS(res, file = paste0("data/genere/select_var.rds"))
  } else {
    res <- readRDS("data/genere/select_var.rds")
  }
  return(res)
}
##########################################################################################
# fonction maison pour rechercher le type des données
## objectifs suivant différence entre int et float
# Heuristic typing of data-frame columns from the number of distinct
# non-missing values: <= 2 -> boolean, >= nb_moda_max_fact -> numeric,
# strictly in between -> (multinomial) factor. When nb_moda_max_fact is
# NULL, prints the columns with more than two modalities and stops so the
# caller can choose a threshold.
#
# @param df               Data frame to type.
# @param nb_moda_max_fact Modality count at and above which a column is
#                         treated as numeric rather than factor.
# @return list(colonne_type = list(facteur, booleen, numerique),
#              data_frame_tot = per-column modality counts and flags).
typages_function <- function(df,nb_moda_max_fact = NULL ){
  if (is.null(nb_moda_max_fact)) {
    # help the caller pick a threshold: show columns with > 2 modalities
    df %>%
      summarise_all(list(~n_distinct(na.omit(.)))) %>%
      t.df() %>% filter(col_1 > 2) %>%
      arrange(col_1) %>%
      print
    stop('Si la liste des facteurs n\'est pas fournis le nombre de modalité à partir
    duquel un facteur doit etre considéré comme un numéric avec `nb_moda_max_fact = `,
    \n pour vous aidez dans le choix du nombre de modalité la liste des variables
    avec plus de deux modalité différente est présenté au dessus')}
  else {
    # per-column distinct counts and the three mutually exclusive flags
    temp_moda_par_var <- df %>% summarise_all(list(~n_distinct(na.omit(.)))) %>%
      t.df() %>%
      mutate(binaire = col_1 == 2, numeric = col_1 >= nb_moda_max_fact,
             multinomial = (col_1 < nb_moda_max_fact & col_1 > 2)) %>%
      arrange(col_1)
    list_factor <- temp_moda_par_var %>%
      filter(multinomial) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    liste_booleen <- temp_moda_par_var %>%
      filter(binaire) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    liste_numeric <- temp_moda_par_var %>%
      filter(numeric) %>%
      select(key) %>%
      unlist(use.names = FALSE)
    res <- list(colonne_type = list(facteur = list_factor,
                                    booleen = liste_booleen,
                                    numerique = liste_numeric),
                data_frame_tot = temp_moda_par_var)
  }
  return(res)
}
##########################################################################################
# fonction maison pour le one hot encoding
# One-hot encode the factor columns of a data frame using
# caret::dummyVars. The factor columns are either supplied explicitly
# (list_factor) or detected with typages_function(nb_moda_max_fact); all
# other columns pass through unchanged.
#
# @param df               Data frame to encode.
# @param nb_moda_max_fact Threshold forwarded to typages_function when
#                         list_factor is NULL (emits a how-to message).
# @param list_factor      Explicit character vector of factor columns.
# @return Data frame with each factor replaced by its dummy columns.
one_hot_fb <- function(df, nb_moda_max_fact = NULL, list_factor = NULL){
  if (is.null(list_factor)) {
    list_factor <- typages_function(df, nb_moda_max_fact)$colonne_type$facteur
    cat("Le nombre maximum de modalité par facteur est de ",
        nb_moda_max_fact,
        "\n pour supprimer ce warning utilisez `list_factor = ", "c( ",
        paste0("\"",list_factor,"\"",collapse = " , "),
        " )` \n au lieu de `nb_moda_max_fact = ", nb_moda_max_fact,"`")
  }
  if (all(is.na(list_factor))) { # no factor at all: nothing to encode
    res <- df
  } else {
    df <- df %>% mutate_at( list_factor,as.factor)
    # caret builds one indicator column per factor level
    dmy <- dummyVars(paste0(" ~ ", paste0(list_factor,collapse = " + ")), data = df)
    trsf <- data.frame(predict(dmy, newdata = df))
    res <- df %>% select(-all_of(list_factor)) %>% cbind(trsf) # all_of added, remove if it breaks
  }
  return(res)
  # remains to add a part that renames the variables better
}
# fonction servant à la troncature des poids
#########################################################################################
# Winsorise a numeric vector: clamp every value between the quantile at
# `.probs` and the quantile at `1 - .probs`.
fun_trunc <- function(x,.probs) {
  lower_bound <- quantile(x, probs = .probs)
  upper_bound <- quantile(x, probs = 1 - .probs)
  pmin(pmax(x, lower_bound), upper_bound)
}
##function Truncature des poids
## prend un vecteur de poids et le trunc selon un vecteur de percentile
#########################################################################################
# Build a data frame of truncated weights: one column per truncation
# level, column i holding `vect_poids` winsorised at percentile_tronc[i]
# (via fun_trunc) and labelled "(p; 1-p)".
Trucature_pds_function <- function(vect_poids,percentile_tronc) {
  truncated_cols <- lapply(percentile_tronc,
                           function(p) fun_trunc(vect_poids, p))
  col_labels <- paste0("(",
                       as.character(percentile_tronc),
                       "; ",
                       as.character(1 - percentile_tronc),
                       ")")
  res <- structure(as.data.frame(truncated_cols), names = col_labels)
  return(res)
}
##function générant la table de base du modèle de poids pré insertion des résultat##
#########################################################################################
# Build the base per-subject result table of a weighting run: ID,
# exposure, propensity score PS (probability of the exposure actually
# received, looked up in PS_table), unstabilised weight 1/PS and
# stabilised weight (marginal exposure prevalence)/PS.
# NOTE(review): the PS lookup indexes PS_table columns by
# as.character(as.numeric(exposure)) — assumes PS_table column names are
# the exposure levels as character; confirm against the weighting
# function's output.
Table_base_result_weighting <- function(donne,expo,Id_data,PS_table){
  exposure <- donne[,as.character(expo)]
  res <- data.frame(ID = Id_data,exposition = exposure , PS = NA) %>%
    setNames(c("ID","exposition", "PS"))
  res <- res %>%
    group_by(exposition) %>%
    mutate(n = n(),numerator = n / nrow(.)) %>% select(-n) # numerator for stabilisation
  # Probability of being expose to observed outcomes
  res$PS <- sapply(1:nrow(PS_table),
                   function(x) PS_table[x,as.character(as.numeric(res$exposition[x]))])
  res <- res %>% mutate(SWeight = numerator/PS, Weight = 1/PS)
  return(res)
}
##Ponderation via regression multinomial
#########################################################################################
# Premiere fonction du genre le but est de prendre une matrice de donne
#et retourne une probabilité d'appartenance à chacun des groupes
# ainsi que le modele en question
# destiné à etre utilsier deans calcules pds_stage
# Fit a multinomial logistic regression of the exposure on the
# confounders and return each subject's predicted probability of
# belonging to every exposure group (the propensity scores for IPTW),
# together with the fitted model. Meant to be used inside
# calcule_pds_stage().
#
# @param donne   Data frame holding the exposure and the confounders.
# @param expo    Exposure column name (character or symbol).
# @param covar   Character vector of confounder column names.
# @param Id_data Subject identifiers, same order as `donne`.
# @return list(matrice_proba = data.frame of ID plus one probability
#              column per exposure level,
#              model_ponderation = the fitted multinom object).
multinomial_IPTW <- function(donne,expo,covar,Id_data){
  # Additive propensity model by default; edit the RHS to change the
  # specification. as.formula(paste0(...)) replaces the former
  # eval(parse(text = ...)) construction with the same formula.
  ps_formula <- as.formula(paste0("as.numeric(", expo, ") ~ ",
                                  paste0(covar, collapse = " + ")))
  mod1 <- multinom(formula = ps_formula,
                   data = donne, na.action = na.fail , # fail loudly on missing data
                   trace = FALSE)
  # Predicted group-membership probabilities for every subject; the first
  # probability column is relabelled "1" (exposure-level lookup key).
  # NOTE(review): with a binary exposure predict() returns a vector, so
  # the single resulting column gets renamed "1" — confirm this matches
  # the lookup done in Table_base_result_weighting().
  proba_tps_inv <- cbind(ID = Id_data,predict(mod1, type = "probs") %>%
                           as.data.frame() %>%
                           rename("1" = names(.)[1]))
  return(list(matrice_proba = proba_tps_inv , model_ponderation = mod1))
}
##function principale du stage##
#########################################################################################
# Note perso le temps d'execution et très majoritairent liée à la regression > 90 %
##function principale du stage##
#########################################################################################
# Note perso le temps d'execution et très majoritairent liée à la regression > 90 %
# Main weighting pipeline: compute propensity-based weights for `donne`
# with the supplied `weighting_function` (e.g. multinomial_IPTW), build
# the per-subject weight table, then derive truncated versions of both
# the raw and stabilised weights.
#
# @param donne              Input data frame.
# @param weighting_function Function(donne, expo, covar, Id_data, ...)
#        returning list(matrice_proba, model_ponderation).
# @param supplementary_weighting_function_params Named list of extra
#        arguments appended to the weighting-function call.
# @param Id_data            Column name of the subject identifier.
# @param expo               Exposure column name.
# @param covar              Character vector of confounder names.
# @param out_come           NOTE(review): currently unused.
# @param percentile_tronc   Truncation percentiles for the weights.
# @param talbe_and_plot     NOTE(review): currently unused (typo kept for
#                           compatibility).
# @return list(df = per-subject weight table,
#              res_intermediaire = truncated weights and the propensity
#              model with its probability table).
calcule_pds_stage <- function(donne,
                              weighting_function,
                              supplementary_weighting_function_params = NULL, # add here a name list of named parameters
                              Id_data = NULL,
                              expo,
                              covar,
                              out_come,
                              percentile_tronc = c(0,1,5,10,25,50)/100 ,
                              talbe_and_plot = TRUE){
  # fetch function parameters
  tempcall <- match.call()
  # TODO split the IPTW weight computation into a separate function
  # TODO overall this function should become mostly calls to typed sub-functions
  # TODO add tests for the various functions
  # Compute the weights with method X then unroll the results
  # Method X must take as input a table with (donne, expo, covar, Id_data)
  # NOTE(review): `exposure` below is computed but unused — probably to delete
  exposure <- donne[,as.character(tempcall$expo)]
  ID <- donne[,as.character(tempcall$Id_data)]
  weighting_function_params <- list(donne = donne,
                                    expo = tempcall$expo,
                                    covar = covar ,
                                    Id_data = ID)
  if (!is.null(supplementary_weighting_function_params)){
    weighting_function_params <- append(weighting_function_params,supplementary_weighting_function_params)
  }
  # delegate the propensity-score estimation to the supplied method
  ponderation <- do.call(weighting_function,weighting_function_params)
  proba_tps_inv <- ponderation$matrice_proba %>% as.data.frame()
  res <- Table_base_result_weighting(donne,tempcall$expo,ID,proba_tps_inv)
  # weight truncation (raw and stabilised)
  poid_trunc_df <- Trucature_pds_function(res$Weight,percentile_tronc)
  poid_trunc_stab_df <- Trucature_pds_function(res$SWeight,percentile_tronc)
  ### plots and other explanatory tables
  return(list(df = res,
              res_intermediaire = list(
                poids = list(
                  poids_tronc = list(poids_trunc = poid_trunc_df,
                                     poids_trunc_stab = poid_trunc_stab_df)),
                regression_modele_pds = list(
                  regression_temps_ind = ponderation$model_ponderation,
                  data_frame_coef = proba_tps_inv)
              )
  )
  )
}
#########################################################################################
#########################################################################################
# Boot strap other a function
# Bootstrap a statistic over the rows of a data frame.
#
# Runs `fonction` `nb_iter` times; each call receives the data frame, a
# vector of row indices drawn with replacement (one bootstrap resample)
# and `params_fun`, mirroring the (data, indices, ...) convention of
# boot::boot().
#
# @param df         Data frame whose rows are resampled.
# @param fonction   function(df, indices, params) computing the statistic.
# @param nb_iter    Number of bootstrap replicates.
# @param params_fun Extra parameters forwarded untouched to `fonction`.
# @return The replicates, simplified by replicate() (vector / matrix /
#         list depending on what `fonction` returns).
boot_strap_fun <- function(df,fonction, nb_iter,params_fun){
  # seq_len() is safe for nrow(df) == 0, unlike 1:nrow(df) which yields c(1, 0)
  replicate(nb_iter,fonction(df,sample(seq_len(nrow(df)), size = nrow(df), replace = TRUE),params_fun))
}
###############################################################################
# RESULT FUNCTION
##########################################################################################
# convertie les heures avec virgule en heure et minute
# Convert a decimal number of hours into an "HH h MM min" string.
#
# Vectorised. Rounds to the nearest minute and carries minute overflow
# into the hour, so 1.999 becomes "02 h 00 min" instead of the incorrect
# "01 h 60 min" produced by the former floor(hour)/round(minutes) split.
#
# @param decimal_hour Numeric vector of hours (e.g. 2.5 = 2 h 30 min).
# @return Character vector formatted "%02d h %02d min".
roud_hour <- function(decimal_hour){
  total_min <- round(decimal_hour * 60)   # round once, at minute precision
  heure <- as.integer(total_min %/% 60)
  minutes <- as.integer(total_min %% 60)
  res <- sprintf("%02d h %02d min", heure, minutes)
  return(res)
}
#
################################################################################
#to lower first char
# Lower-case the first character of each string (vectorised; empty
# strings and the rest of each word are left untouched).
firstlow <- function(x) {
  first_char <- substr(x, 1, 1)
  substr(x, 1, 1) <- tolower(first_char)
  x
}
##########################################################################################
# fait des arrondie + notation scientifique pour les nombres a virugules dans les DF en
# conservant les integer telquel
# Round/format the numeric columns of a data frame for display, keeping
# integer-valued entries as plain integers and switching to scientific
# notation (1 significant digit) for very large or very small magnitudes.
#
# Element-wise rules for numeric columns:
#   * whole numbers with |x| < 1000      -> printed as integers
#   * |x| > 1000 or 0 < |x| < 0.001      -> scientific notation "x.xe+yy"
#   * everything else                    -> rounded to `digit_to_round`
# abs() is used in both magnitude tests so that large/small *negative*
# values take the same branch as their positive counterparts (previously
# e.g. -5000.5 escaped the scientific branch because -5000.5 > 10^3 is
# FALSE). Row names are preserved via the rownames_to_column()/
# column_to_rownames() round trip, which tibble verbs would otherwise drop.
#
# @param df_func        Data frame to format.
# @param digit_to_round Decimals used by the default rounding branch.
# @return Data frame with numeric columns converted to character.
arrondie_df <- function(df_func,digit_to_round = 3){
  res <- df_func %>%
    rownames_to_column() %>%
    mutate_if(is.numeric,
              ~ifelse(.%%1==0 & abs(.) < 10^3,as.character(round(.,0)),ifelse((abs(.) > 10^3| 1/abs(.) > 10^3 ),
                                                                              formatC(., format = "e", digits = 1), # scientific for extreme magnitudes
                                                                              as.character(round(.,digit_to_round))
              )
              )
    ) %>%
    column_to_rownames()
  return(res)
}
##########################################################################################
# Fonction qui sert a rename les colonnes depuis un fichier
# Rename the columns of a table with human-readable labels read from an
# Excel file (columns `Variable` / `Label`).
#
# Column names produced by the pipeline may carry a prefix ("Vis_init_",
# "Mesure_unique_"), an "INS_" marker for hand-made instrumental
# variables, and a ".<digit>" suffix left by one-hot encoding. Those
# decorations are stripped before looking the base name up in the label
# file, then the suffix is re-applied and "Vis_init_" labels are prefixed
# with "diagnosis ". Instrumental ("INS_") variables have no entry in the
# file, so their display names must be supplied via `var_instrum_name`
# as c("variable_name" = "new name"); the function stops with an
# explanatory message when names are missing or superfluous.
#
# @param table_func       Table whose columns should be renamed.
# @param var_instrum_name Named character vector mapping instrumental
#                         column names to display labels.
# @param path_to_var_lab  Path to the Excel label file.
# @return list(table_rename = renamed table,
#              complete_table = the mapping data frame that was used).
rename_variables <- function(table_func, var_instrum_name = NULL, path_to_var_lab = "data/List_of_variables.xls") {
  # Read the label file; inside each label, lower-case words that contain a
  # single capital letter (acronyms with several capitals are kept as-is)
  label_var <- readxl::read_excel(path_to_var_lab) %>% select(Variable,Label) %>% mutate(Label = unlist(lapply(strsplit(Label, " "),function(x)
    paste0(ifelse(str_count(x,"[A-Z]") == 1,firstlow(x),x),collapse = " ")
  )))
  # (a dead lapply() recomputing the same thing with a discarded result was removed here)
  # Decompose the actual column names into prefix / base name / suffix
  actual_name <- data.frame(var_name = colnames(table_func)) %>%
    mutate(suffix_factor = str_extract(var_name,"\\.+\\d$"), # ".<d>" suffix left by one-hot encoding
           preffix_all = str_extract(var_name,"^Vis_init_|^Mesure_unique_"), # pipeline-created prefixes
           var_ins = str_extract(var_name,"INS_"), # hand-made instrumental variables, named by the caller
           real_name = str_remove_all(var_name,"INS_|^Vis_init_|^Mesure_unique_|\\.+\\d$") # strip everything detected
    )
  join_table <- actual_name %>% left_join(label_var, by = c("real_name" = "Variable")) # join with the labels
  # Instrumental variables: their labels must be supplied by the caller
  name_need_supply <- join_table %>%
    filter(!is.na(var_ins)) %>%
    select(var_name) %>%
    distinct() %>%
    unlist(use.names = FALSE)
  if(is.null(var_instrum_name)){
    cat("You must supply name for variable you create \n variables you must name are \n")
    print(name_need_supply)
    cat("\n for that ` var_instrum_name = c(\"variable_name\" = \"new name\")`\n")
    stop()
  } else if (length(name_need_supply) != length(var_instrum_name)) {
    # report missing or superfluous caller-supplied names
    out_temp <- if_else(condition = length(name_need_supply) > length(var_instrum_name),
                        true = paste0("Not enought name provide, missing name for \n ",
                                      paste(name_need_supply[name_need_supply%notin%names(var_instrum_name)], collapse = " ,")),
                        false = paste0("to many name provide, don't need name for \n ",
                                       paste(var_instrum_name[names(var_instrum_name)%notin%name_need_supply], collapse = " ,"))
    )
    cat(out_temp)
    stop()
  } else {
    instrum_name <- data.frame(var_name = names(var_instrum_name) , Label = var_instrum_name)
    complete_name_table <- join_table %>%
      left_join(instrum_name,by = "var_name") %>%
      mutate(Label = coalesce(Label.x, Label.y)) %>%
      select(-contains("Label.")) %>%
      mutate(Label = ifelse(!is.na(suffix_factor),
                            paste0(Label, suffix_factor),
                            Label),# restore the one-hot factor suffix
             Label = ifelse((!is.na(preffix_all)&preffix_all == "Vis_init_"),
                            paste0("diagnosis ",Label),
                            Label)
      )
    short_name_table <- complete_name_table$var_name
    names(short_name_table) <- complete_name_table$Label
    res <- table_func %>% rename(all_of(short_name_table))
  }
  return(list(table_rename = res,
              complete_table = complete_name_table ))
}
# Half-width of a Student-t confidence interval for the mean of `x`:
# the t quantile at `con_int` (df = n - 1) times the standard error
# sd(x) / sqrt(n).
ci_fb <- function(con_int,x) {
  n <- length(x)
  qt(con_int, df = n - 1) * (sd(x) / sqrt(n))
}
#########################################################################################
# Quick helper: append a LaTeX table (or any character vector) to a file,
# one element per line.
ecriture_fichier <- function(table_latex, path_to_file) {
  write(x = table_latex, file = path_to_file, append = TRUE)
}
##########################################################################################
# Descriptive summary table of all variables, one column per group plus an
# "all groups" column, formatted for LaTeX output (%, _ are escaped).
#
# df_one_hot_encode_fonc: data frame produced by one_hot_fb (one-hot encoded).
# name_expo_fonc: name of the grouping (exposure) column.
# nom_grp: label used to build the per-group column names (default "clusters").
# p_val: if TRUE, pairwise tests (t-test for numeric, prop.test for boolean
#        variables) are run and significant pairs are shown as LaTeX subscripts.
# arrondie: rounding flag (currently only referenced by commented-out code).
# alpha: significance level; a Bonferroni correction is applied on top of it.
# ponderation: optional weight vector multiplied into every non-grouping column.
#
# Returns a data frame (row names = variable labels) with one formatted
# character column per group, headed by a "Number of patient" row.
table_resume_latex <- function(df_one_hot_encode_fonc,name_expo_fonc,nom_grp = "clusters", p_val = FALSE, arrondie = TRUE, alpha = 0.05,ponderation = NULL) {
  # Classify each variable to summarise as boolean (<= 2 distinct values)
  # or numeric, since the two kinds are formatted differently below.
  table_bool_var <- df_one_hot_encode_fonc %>%
    select(-all_of(name_expo_fonc)) %>%
    summarise_all(list(~n_distinct(., na.rm = TRUE))) %>%
    t.df %>%   # t.df: project helper — presumably transposes into key/col_1 columns; TODO confirm
    rename(booleen = col_1) %>%
    {temp_verif_bool <<- .} %>%  # deliberately stash the RAW distinct counts in a global BEFORE they are turned into logicals; the sanity check below reads them
    mutate(booleen = booleen <= 2)
  # Sanity check on the raw counts: a variable with fewer than 2 distinct
  # values cannot be summarised meaningfully.
  if (any(temp_verif_bool$booleen < 2)) {
    print(temp_verif_bool$key[temp_verif_bool$booleen < 2])
    stop("moins de deux valeurs distinct pour une variables")}
  # Optional weighting of every non-grouping column.
  if (!is.null(ponderation)){
    df_one_hot_encode_fonc <- df_one_hot_encode_fonc %>% mutate_at(vars(-name_expo_fonc),funs(.*ponderation)) #mutate_at(vars(-name_expo_fonc),funs(if(is_whole(.)){round(.*ponderation,0)} else{.*ponderation}))
  }
  name_all_grp <- paste0("all ",nom_grp)
  # Summary statistics over ALL rows (the "all groups" column).
  all_cluster_descript_var <- df_one_hot_encode_fonc %>%
    recup_var_table_res(name_expo_fonc) %>%
    #{ifelse(arrondie ,arrondie_df(.), . )} %>% print %>%
    mutate(nb_NA = ifelse(nb_NA == 0,"",paste0("NA:", nb_NA )))
  # Sorted vector of group identifiers.
  nb_grp <- df_one_hot_encode_fonc %>% select(all_of(name_expo_fonc)) %>%
    unique() %>%
    unlist(use.names = FALSE) %>%
    sort
  # One formatted column per group: "n(pct%)" for booleans, "mean(sd)"
  # for numeric variables, with an NA count suffix when relevant.
  group_cluster_descript_var <- lapply(nb_grp, function(x) {
    col_name <- paste0(nom_grp,"_",x)
    res <- df_one_hot_encode_fonc %>%
      filter(!!sym(name_expo_fonc) == x) %>%
      recup_var_table_res(name_expo_fonc) %>%
      mutate(nb_NA = ifelse(nb_NA == 0,"",paste0("NA:", nb_NA ))) %>%
      inner_join(table_bool_var,by = "key") %>%
      mutate({{col_name}} := ifelse( booleen
                                     , paste0(round(n,0),"(",round(pourcent,1), "%)", nb_NA),
                                     paste0(round(moy,1) ,"(",
                                            round(std,1), ")", nb_NA))) %>%
      select(all_of(col_name))
    return(res)
  }
  )
  # Assemble: "all groups" column first, then one column per group.
  table_res <- all_cluster_descript_var %>%
    inner_join(table_bool_var,by = "key") %>%
    mutate( {{name_all_grp}} := ifelse(booleen
                                       , paste0(round(n,0),"(",round(pourcent,1), "%)", nb_NA),
                                       paste0(round(moy,1) ,"(",
                                              round(std,1), ")", nb_NA) ) ) %>%
    select(key,all_of(name_all_grp)) %>%
    cbind(bind_cols(group_cluster_descript_var)) %>%
    data.frame(., row.names = 1)
  # rename_at(vars(contains(".grp")),funs(str_replace(.,"\\."," ")))
  table_res <- table_res %>% rename_all(list(~str_replace_all(.,"\\."," ")))
  # Optional pairwise significance testing.
  if (p_val) {
    # Add one pseudo-group (max + 1) representing "all groups" so the
    # all-vs-each comparisons come out of the same combination machinery.
    nb_group_pval <- as.character(c(as.numeric(nb_grp),max(as.numeric(nb_grp)) + 1 ))
    # All pairwise combinations of groups to test.
    combin_grp <- nb_group_pval %>% combn(2)
    # Duplicate the data with the pseudo-group label stacked on top, so the
    # pseudo-group contains every observation.
    df_pval <- df_one_hot_encode_fonc %>%
      mutate(!!sym(name_expo_fonc) := max(as.numeric(nb_group_pval))) %>%
      rbind(df_one_hot_encode_fonc)
    non_boolean_var <- table_bool_var %>% filter(!booleen) %>% select(key) %>% unlist(use.names = FALSE)
    boolean_var <- table_bool_var %>% filter(booleen) %>% select(key) %>% unlist(use.names = FALSE)
    # t-test p-values for every numeric variable and every group pair;
    # each result column is renamed "grpA_grpB".
    combin_ttest_pval <- apply(combin_grp, 2, function(x)
      df_pval %>%
        select(sym(name_expo_fonc),all_of(non_boolean_var)) %>%
        #summarise_at(vars(-(sym(name_expo_fonc))),list(~t.test(.[!!sym(name_expo_fonc) == x[1]], .[!!sym(name_expo_fonc) == x[2]])$p.value)) %>%
        summarise_at(vars(-(sym(name_expo_fonc))),list(~t.test(.[!!sym(name_expo_fonc) == x[1]], .[!!sym(name_expo_fonc) == x[2]])$p.value)) %>%
        t.df %>%
        rename_at("col_1",list( ~paste0(x[1],"_",x[2])))
    )
    # Proportion tests for boolean variables; skipped (NA) when either group
    # has fewer than 8 successes, to avoid unreliable approximations.
    if (length(boolean_var) != 0){
      combin_chisq_pval <- apply(combin_grp, 2, function(x)
        df_pval %>%
          select(sym(name_expo_fonc),all_of(boolean_var)) %>%
          dplyr::summarise_at(vars(-sym(name_expo_fonc)),
                              list(~ifelse(sum(.[!!sym(name_expo_fonc) == x[1]],na.rm = TRUE) < 8| sum(.[!!sym(name_expo_fonc) == x[2]],na.rm = TRUE) < 8,
                                           NA,
                                           prop.test(
                                             x = c(sum(.[!!sym(name_expo_fonc) == x[1]],na.rm = TRUE), sum(.[!!sym(name_expo_fonc) == x[2]],na.rm = TRUE)), # compute number of success
                                             n = c(sum(!is.na(.[!!sym(name_expo_fonc) == x[1]])), sum(!is.na(.[!!sym(name_expo_fonc) == x[2]])))
                                           )$p.value))
          ) %>%
          t.df %>%
          rename_at("col_1",list( ~paste0(x[1],"_",x[2])))
      )
      combin_total_pval <- mapply(rbind,combin_chisq_pval,combin_ttest_pval,SIMPLIFY=FALSE)
    } else combin_total_pval <-combin_ttest_pval
    # Turn p-values into significance flags using alpha with a Bonferroni
    # correction over the number of pairwise comparisons; significant cells
    # keep their "grpA_grpB" label, non-significant ones become "non",
    # skipped tests (NA) get a trailing "*".
    result_pval <- bind_cols(combin_total_pval) %>%
      rename(key = key...1) %>%
      select(key,contains("_")) %>%
      mutate_at(vars(-key),list(~(. < (alpha / ncol(combin_grp))
      ))
      ) %>% # significance test with Bonferroni correction
      mutate_at(vars(-key), function(x) {
        x_var <- rlang::enquo(x)
        ifelse(x , rlang::quo_name(x_var), "non") # non-significant cells become the sentinel string "non"
      }) %>%
      mutate_at(vars(-key), function(x) {
        x_var <- rlang::enquo(x)
        ifelse(is.na(x) , paste0(rlang::quo_name(x_var),"*"),x) # skipped tests are flagged with "*"
      })
    # For each group, gather the partners it differs from into a single
    # comma-separated cell, stripping the sentinel "non" strings and the
    # pseudo-group index.
    df_pval_final <- lapply(nb_group_pval, function(x) {
      result_pval %>% select(key,contains(x)) %>%
        mutate_at(vars(contains(x)),list(~str_remove_all(.,paste(c("_",x), collapse = "|")))) %>%
        unite(!!sym(paste0(nom_grp,"_",x)) ,contains(x),sep = ",")
    }
    ) %>% bind_cols() %>%
      rename(key = key...1) %>%
      select(key,contains(nom_grp)) %>%
      rename(!!sym(name_all_grp) := paste0(nom_grp,"_",max(as.numeric(nb_group_pval)))) %>%
      mutate_all(list(~str_remove_all(.,"non,|,non"))) %>%
      mutate_all(list(~str_remove_all(.,"non"))) %>%
      mutate_all(list(~str_remove_all(.,paste0(",",as.character(length(nb_grp) +1),"|",as.character(length(nb_grp) +1)) ))) # drop the pseudo-group index used for the all-groups column
    # Defensive check: no sentinel string may survive the cleanup above.
    if(df_pval_final %>% transmute_at(vars(-key),list(~str_detect(.,"non"))) %>% as.matrix() %>% any) {
      stop("il reste des p-val non traité")}
    # Merge the formatted statistics with the significance labels, rendering
    # the labels as LaTeX subscripts, and escape LaTeX special characters.
    table_res_pval <- table_res %>%
      rownames_to_column() %>%
      pivot_longer(-rowname,values_to = "valeur") %>%
      inner_join((df_pval_final %>% pivot_longer(-key,values_to = "pvalue") ), by = c("rowname" = "key", "name" = "name")) %>%
      mutate(combin = paste0(valeur,"\\textsubscript{",pvalue, "}")) %>%
      select(rowname,name,combin) %>%
      pivot_wider(names_from = name, values_from = combin) %>%
      column_to_rownames()
    table_res_pval <- table_res_pval %>%
      rownames_to_column() %>%
      mutate_at(vars(-rowname),list(~str_replace_all(.,"%","\\\\%"))) %>%
      column_to_rownames() %>%
      #mutate_all(funs(str_replace_all(.,"%","\\\\%"))) %>%
      select(all_of(name_all_grp),sort(tidyselect::peek_vars()))
    rownames(table_res_pval) <- str_replace_all(rownames(table_res_pval),"_","\\\\_")
    colnames(table_res_pval) <- str_replace_all(colnames(table_res_pval),"_","\\\\_")
  } else {table_res_pval <- table_res %>%
    select(all_of(name_all_grp),sort(tidyselect::peek_vars())) }
  # Header row: number (and share) of patients overall and per group.
  tbl_nb_ind_grp <- table(df_one_hot_encode_fonc[,name_expo_fonc])
  nb_pat_par_grp <- c(nrow(df_one_hot_encode_fonc),
                      tbl_nb_ind_grp[order(as.numeric(names(tbl_nb_ind_grp)))] )
  nb_pat_par_grp <- paste0(nb_pat_par_grp , " (" , round(nb_pat_par_grp*100/nrow(df_one_hot_encode_fonc),1) ," \\%)")
  res <- rbind(`Number of patient` = nb_pat_par_grp,table_res_pval)
  return(res)
}
##########################################################################################
# Helper for the descriptive table: computes per-variable summary statistics.
# df_one_hot_encode_fonc: data frame generated with one_hot_fb.
# name_expo_fonc: grouping column to exclude from the summaries.
# Returns one row per variable (key) with columns moy, std, med, quart1,
# quart2, n, pourcent and nb_NA, transposed via the project helper t.df.
recup_var_table_res <- function(df_one_hot_encode_fonc,name_expo_fonc){
  res <- df_one_hot_encode_fonc %>%
    select(-all_of(name_expo_fonc)) %>%
    mutate_if(is.factor, ~as.numeric(as.character(.))) %>% # author-flagged caveat: boolean factors are coerced to numeric this way — keep an eye on it
    summarise_all(list(fonc_moy = ~mean(.,na.rm = TRUE),
                       fonc_std =~sd(.,na.rm = TRUE),
                       fonc_med = ~median(.,na.rm = TRUE),
                       fonc_quart1 = ~quantile(.,0.25,na.rm = TRUE),
                       fonc_quart2 = ~quantile(.,0.75,na.rm = TRUE),
                       fonc_n = ~sum(.,na.rm = TRUE),
                       fonc_pourcent = ~mean(.,na.rm = TRUE)*100,
                       fonc_nb_NA = ~sum(is.na(.))
    )
    ) %>%
    # The "_fonc_" infix in the generated names separates the variable name
    # from the statistic name; names_pattern splits them back apart.
    pivot_longer(cols = everything(),
                 names_to = c(".value", "level"),
                 names_pattern = "(.*)_fonc_(.*)") %>%
    t.df(.,"level")
  return(res)
}
|
# Dependencies should be installed with renv ------------------------------
# https://blog.rstudio.com/2019/11/06/renv-project-environments-for-r/

# Make sure renv itself is available BEFORE any renv:: call.
# (Previously renv::snapshot() ran first and failed on a fresh machine
# where renv was not yet installed.)
if (!requireNamespace("renv", quietly = TRUE)) install.packages("renv")
library(renv)

# renv::init() # Initialize a project (one-time setup)
renv::snapshot() # Create snapshot of packages
# If you are trying to run the scripts here for the first time:
renv::restore()

# Manually install dependencies -------------------------------------------
# **** REVIEW ------------------------------------------------------
# If needed, you can manually install the dependencies
# if (!require('dplyr')) install.packages('dplyr'); library('dplyr')
# if (!require('readr')) install.packages('readr'); library('readr')
# if (!require('tidyr')) install.packages('tidyr'); library('tidyr')
# if (!require('patchwork')) install.packages('patchwork'); library('patchwork')
# if (!require('ggalluvial')) install.packages('ggalluvial'); library('ggalluvial')
# if (!require('lme4')) install.packages('lme4'); library('lme4')
# if (!require('sjPlot')) install.packages('sjPlot'); library('sjPlot')
# devtools::install_github(c("easystats/insight",
#                            "easystats/bayestestR",
#                            "easystats/performance",
#                            "easystats/parameters",
#                            "easystats/correlation",
#                            "easystats/estimate",
#                            "easystats/see",
# "easystats/report")) | /R/0.install-dependencies.R | no_license | gorkang/JDM_brochures | R | false | false | 1,564 | r |
# Dependencies should be installed with renv ------------------------------
# https://blog.rstudio.com/2019/11/06/renv-project-environments-for-r/
# renv::init() # Initialize a project
renv::snapshot() # Create snapshot of packages
# If you are trying to run the scripts here for the first time:
if (!require('renv')) install.packages('renv'); library('renv')
renv::restore()
# Manually install dependencies -------------------------------------------
# **** REVIEW ------------------------------------------------------
# If needed, you can manually install the dependencies
# if (!require('dplyr')) install.packages('dplyr'); library('dplyr')
# if (!require('readr')) install.packages('readr'); library('readr')
# if (!require('tidyr')) install.packages('tidyr'); library('tidyr')
# if (!require('patchwork')) install.packages('patchwork'); library('patchwork')
# if (!require('ggalluvial')) install.packages('ggalluvial'); library('ggalluvial')
# if (!require('lme4')) install.packages('lme4'); library('lme4')
# if (!require('sjPlot')) install.packages('sjPlot'); library('sjPlot')
# devtools::install_github(c("easystats/insight",
# "easystats/bayestestR",
# "easystats/performance",
# "easystats/parameters",
# "easystats/correlation",
# "easystats/estimate",
# "easystats/see",
# "easystats/report")) |
\alias{gSocketListen}
\name{gSocketListen}
\title{gSocketListen}
\description{Marks the socket as a server socket, i.e. a socket that is used
to accept incoming requests using \code{\link{gSocketAccept}}.}
\usage{gSocketListen(object, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{a \code{\link{GSocket}}.}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Before calling this function, the socket must be bound to a local address using
\code{\link{gSocketBind}}.
To set the maximum amount of outstanding clients, use
\code{\link{gSocketSetListenBacklog}}.
Since 2.22}
\value{
A list containing the following elements:
\item{retval}{[logical] \code{TRUE} on success, \code{FALSE} on error.}
\item{\verb{error}}{\code{\link{GError}} for error reporting, or \code{NULL} to ignore.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gSocketListen.Rd | no_license | lawremi/RGtk2 | R | false | false | 884 | rd | \alias{gSocketListen}
\name{gSocketListen}
\title{gSocketListen}
\description{Marks the socket as a server socket, i.e. a socket that is used
to accept incoming requests using \code{\link{gSocketAccept}}.}
\usage{gSocketListen(object, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{a \code{\link{GSocket}}.}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Before calling this the socket must be bound to a local address using
\code{\link{gSocketBind}}.
To set the maximum amount of outstanding clients, use
\code{\link{gSocketSetListenBacklog}}.
Since 2.22}
\value{
A list containing the following elements:
\item{retval}{[logical] \code{TRUE} on success, \code{FALSE} on error.}
\item{\verb{error}}{\code{\link{GError}} for error reporting, or \code{NULL} to ignore.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
## Load data ------------------------------------------------------------
## Download the zip once (kept on disk so the file.exists() guard is
## meaningful), then unzip and read the measurements.
## BUG FIX: previously `file` was only assigned inside the if-block, so the
## script crashed with "object 'file' not found" whenever the zip already
## existed; the download also went to a tempfile that was unlinked, which
## meant the file.exists() guard could never become TRUE.
zip_name <- "exdata-data-household_power_consumption.zip"
if (!file.exists(zip_name)) {
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zip_name)
}
file <- unzip(zip_name)

data <- read.table(file, sep = ";", header = T, na.strings = "?")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## Keep only the two days of interest (comparing Date to a string works via
## as.Date coercion).
feb <- data[(data$Date=="2007-02-01") | (data$Date=="2007-02-02"),]

## Plot 1: histogram of Global Active Power, copied to plot1.png (480x480).
plot1 <- function() {
    hist(feb$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
    dev.copy(png, file="plot1.png", width=480, height=480)
    dev.off()
}
}
plot1() | /plot1.R | no_license | lcalima/ExData_Plotting1 | R | false | false | 661 | r | ## Load data
if(!file.exists("exdata-data-household_power_consumption.zip")) {
tmp <- tempfile()
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",tmp)
file <- unzip(tmp)
unlink(tmp)
}
data <- read.table(file, sep = ";", header = T, na.strings = "?")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
feb <- data[(data$Date=="2007-02-01") | (data$Date=="2007-02-02"),]
## Plot 1
plot1 <- function() {
hist(feb$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
}
plot1() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mgcv.R
\name{mgcv}
\alias{mgcv}
\title{Model module:mgcv}
\usage{
mgcv(.df, k = -1, bs = "tp")
}
\arguments{
\item{.df}{\strong{Internal parameter, do not use in the workflow function}.
\code{.df} is data frame that combines the occurrence}
\item{k}{the dimension of the basis used to represent the smooth term.
The default depends on the number of variables that the smooth is a
function of. k should not be less than the dimension of the null space
of the penalty for the term (see \code{\link[mgcv]{null.space.dimension}}),
but will be reset if it is. See \code{\link[mgcv]{choose.k}} for further
information}
\item{bs}{a two letter character string indicating the (penalized)
smoothing basis to use. (eg "tp" for thin plate regression spline,
"cr" for cubic regression spline). See \code{\link{mgcv::smooth.terms}}
for an overview of what is available.}
}
\description{
Model module to fit a generalized additive model using generalized
cross-validation via the mgcv R package.
}
\section{Version}{
1.0
}
\section{Date submitted}{
2015-11-13
}
\section{Data type}{
presence/absence
}
\author{
ZOON Developers, \email{zoonproject@gmail.com}
}
\seealso{
Other model: \code{\link{BiomodModel}}, \code{\link{GBM}},
\code{\link{LogisticRegression}},
\code{\link{MachineLearn}}, \code{\link{MaxEnt}},
\code{\link{OptGRaF}}, \code{\link{QuickGRaF}},
\code{\link{RandomForest}}
}
| /man/mgcv.Rd | no_license | samuelbosch/modules | R | false | true | 1,478 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mgcv.R
\name{mgcv}
\alias{mgcv}
\title{Model module:mgcv}
\usage{
mgcv(.df, k = -1, bs = "tp")
}
\arguments{
\item{.df}{\strong{Internal parameter, do not use in the workflow function}.
\code{.df} is data frame that combines the occurrence}
\item{k}{the dimension of the basis used to represent the smooth term.
The default depends on the number of variables that the smooth is a
function of. k should not be less than the dimension of the null space
of the penalty for the term (see \code{\link{mgcv::null.space.dimension}}),
but will be reset if it is. See \code{\link{mgcv::choose.k}} for further
information}
\item{bs}{a two letter character string indicating the (penalized)
smoothing basis to use. (eg "tp" for thin plate regression spline,
"cr" for cubic regression spline). See \code{\link{mgcv::smooth.terms}}
for an overview of what is available.}
}
\description{
Model module to fit a generalized additive model using generalized
crossvalidation via the mgcv R package.
}
\section{Version}{
1.0
}
\section{Date submitted}{
2015-11-13
}
\section{Data type}{
presence/absence
}
\author{
ZOON Developers, \email{zoonproject@gmail.com}
}
\seealso{
Other model: \code{\link{BiomodModel}}, \code{\link{GBM}},
\code{\link{LogisticRegression}},
\code{\link{MachineLearn}}, \code{\link{MaxEnt}},
\code{\link{OptGRaF}}, \code{\link{QuickGRaF}},
\code{\link{RandomForest}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructors.R
\name{cattonum_df2}
\alias{cattonum_df2}
\title{Constructor for class cattonum_df2}
\usage{
cattonum_df2(train = NULL, test = NULL)
}
\arguments{
\item{train}{\code{NULL} (the default), or a tibble or data.frame.}
\item{test}{\code{NULL} (the default), or a tibble or data.frame
with the same names as \code{train}.}
}
\value{
A list of class \code{cattonum_df2} with names "train"
and "test".
}
\description{
Constructor for class cattonum_df2
}
\examples{
cattonum_df2()
}
| /man/cattonum_df2.Rd | permissive | bfgray3/cattonum | R | false | true | 569 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructors.R
\name{cattonum_df2}
\alias{cattonum_df2}
\title{Constructor for class cattonum_df2}
\usage{
cattonum_df2(train = NULL, test = NULL)
}
\arguments{
\item{train}{\code{NULL} (the default), or a tibble or data.frame.}
\item{test}{\code{NULL} (the default), or a tibble or data.frame
with the same names as \code{train}.}
}
\value{
A list of class \code{cattonum_df2} with names "train"
and "test".
}
\description{
Constructor for class cattonum_df2
}
\examples{
cattonum_df2()
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 171
c
c Performing A1-Autarky iteration.
c Running Lingeling ...
c
c Remaining clauses count after A-Reduction: 171
c
c Input Parameter (command line, file):
c input filename dqbf18//scholl_z4ml.blif_0.60_1.00_7_2_henkin.dqdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 65
c no.of clauses 171
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 171
c
c dqbf18//scholl_z4ml.blif_0.60_1.00_7_2_henkin.dqdimacs 65 171 E1+A1 [] 0 14 51 NONE
| /code/dcnf-ankit-optimized/Results/DQBF-TRACK-2018/E1+A1/Experiments/scholl_z4ml.blif_0.60_1.00_7_2_henkin/scholl_z4ml.blif_0.60_1.00_7_2_henkin.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 714 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 171
c
c Performing A1-Autarky iteration.
c Running Lingeling ...
c
c Remaining clauses count after A-Reduction: 171
c
c Input Parameter (command line, file):
c input filename dqbf18//scholl_z4ml.blif_0.60_1.00_7_2_henkin.dqdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 65
c no.of clauses 171
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 171
c
c dqbf18//scholl_z4ml.blif_0.60_1.00_7_2_henkin.dqdimacs 65 171 E1+A1 [] 0 14 51 NONE
|
# load library
library(tidyverse)
# read data in: one row per token, with sentence/token numbering and lemmas
annotated_files <- read_csv("processed_corpus/micusp_engl_subset.csv")
# get concordance lines for this (regex) search expression over lemmas
search_expression <- "(argue)"
# simplest way: whole sentences containing the lemma, with the match
# prefixed by "**" for visual scanning
annotated_files %>%
  filter(grepl(search_expression, lemma)) %>%
  select(sentence_number, sentence) %>%
  mutate(sentence = gsub(search_expression, "**\\1", sentence))
# kwic way: keyword in context, with a 3-token window on each side
# NOTE(review): lag()/lead() windows ignore sentence boundaries, so the
# context can bleed into neighbouring sentences — confirm this is intended.
annotated_files %>%
  mutate(kwic = ifelse(grepl(search_expression, lemma),
                       TRUE, FALSE)) %>%
  mutate(before = gsub("NA\\s", "", paste(lag(token, 3), lag(token, 2), lag(token))),
         after = gsub("NA\\s", "", paste(lead(token), lead(token, 2), lead(token, 3)))
  ) %>%
  filter(kwic) %>%
  select(before, token, after) %>%
  view()
# another way, which gets the whole sentence
search_results <- annotated_files %>%
  mutate(kwic = ifelse(grepl(search_expression, lemma),
                       TRUE, FALSE))

# Build one concordance line (context before / keyword / context after) per
# kwic hit. Rows are collected in a pre-allocated list and bound once at the
# end — growing a data frame with bind_rows() inside the loop is O(n^2).
# seq_len()/seq_along() are used instead of 1:nrow(), which misbehaves on
# empty input.
kwic_rows <- which(search_results$kwic)
concordance_list <- vector("list", length(kwic_rows))
for (j in seq_along(kwic_rows)) {
  i <- kwic_rows[j]
  # all tokens of the sentence containing this kwic hit
  selected_sentence_number <- search_results$sentence_number[i]
  selected_sentence <- search_results %>%
    filter(sentence_number == selected_sentence_number)
  kwic_token_number <- search_results$token_number[i]
  # tokens before the keyword, collapsed into a single string
  context_before <- selected_sentence %>%
    filter(token_number < kwic_token_number) %>%
    mutate(context_before = paste(token, collapse = " ")) %>%
    distinct(context_before) %>%
    pull(context_before)
  # tokens after the keyword, collapsed into a single string
  context_after <- selected_sentence %>%
    filter(token_number > kwic_token_number) %>%
    mutate(context_after = paste(token, collapse = " ")) %>%
    distinct(context_after) %>%
    pull(context_after)
  concordance_list[[j]] <- data.frame(context_before = context_before,
                                      kwic = search_results$token[i],
                                      context_after = context_after)
}
concordance_lines <- bind_rows(concordance_list)
############# INCLUDE PART OF SPEECH IN SEARCH #############################
# read pos_description data (maps POS tags to human-readable descriptions)
pos_descriptions <- read_csv("auxiliary_data/pos_descriptions.csv")
# add pos_description to data (joined on the shared POS-tag column)
annotated_files <- left_join(annotated_files,
                             pos_descriptions)
# simplest way -- with lemma and pos: restrict matches to verb forms
# ("^V" matches verb POS tags in the tagset used here — TODO confirm tagset)
annotated_files %>%
  filter(grepl(search_expression, lemma)) %>%
  filter(grepl("^V", pos)) %>%
  select(pos, pos_description, lemma, token, sentence_number, sentence) %>%
  mutate(sentence = gsub(search_expression, "**\\1", sentence)) %>%
  view()
# simplest way -- with just pos: every verb token, keyword still marked
annotated_files %>%
  filter(grepl("^V", pos)) %>%
  select(pos, pos_description, lemma, token, sentence_number, sentence) %>%
  mutate(sentence = gsub(search_expression, "**\\1", sentence)) %>%
  view()
| /02-corpus-searches.R | no_license | picoral/crow_workshop_02 | R | false | false | 2,896 | r | # load library
library(tidyverse)
# read data in
annotated_files <- read_csv("processed_corpus/micusp_engl_subset.csv")
# get concordance lines
search_expression <- "(argue)"
# simplest way
annotated_files %>%
filter(grepl(search_expression, lemma)) %>%
select(sentence_number, sentence) %>%
mutate(sentence = gsub(search_expression, "**\\1", sentence))
# kwic way
annotated_files %>%
mutate(kwic = ifelse(grepl(search_expression, lemma),
TRUE, FALSE)) %>%
mutate(before = gsub("NA\\s", "", paste(lag(token, 3), lag(token, 2), lag(token))),
after = gsub("NA\\s", "", paste(lead(token), lead(token, 2), lead(token, 3)))
) %>%
filter(kwic) %>%
select(before, token, after) %>%
view()
# another way, which gets the whole sentence
search_results <- annotated_files %>%
mutate(kwic = ifelse(grepl(search_expression, lemma),
TRUE, FALSE))
concordance_lines <- data.frame()
for (i in 1:nrow(search_results)) {
if (search_results$kwic[i]) {
# get sentence for this kwic
selected_sentence_number <- search_results$sentence_number[i]
selected_sentence <- search_results %>%
filter(sentence_number == selected_sentence_number)
kwic_token_number <- search_results$token_number[i]
context_before <- selected_sentence %>%
filter(token_number < kwic_token_number) %>%
mutate(context_before = paste(token, collapse = " ")) %>%
distinct(context_before) %>%
pull(context_before)
context_after <- selected_sentence %>%
filter(token_number > kwic_token_number) %>%
mutate(context_after = paste(token, collapse = " ")) %>%
distinct(context_after) %>%
pull(context_after)
this_concordance_line <- data.frame(context_before = context_before,
kwic = search_results$token[i],
context_after = context_after)
concordance_lines <- bind_rows(concordance_lines,
this_concordance_line)
}
}
############# INCLUDE PART OF SPEECH IN SEARCH #############################
# read pos_description data
pos_descriptions <- read_csv("auxiliary_data/pos_descriptions.csv")
# add pos_description to data
annotated_files <- left_join(annotated_files,
pos_descriptions)
# simplest way -- with lemma and pos
annotated_files %>%
filter(grepl(search_expression, lemma)) %>%
filter(grepl("^V", pos)) %>%
select(pos, pos_description, lemma, token, sentence_number, sentence) %>%
mutate(sentence = gsub(search_expression, "**\\1", sentence)) %>%
view()
# simplest way -- with just pos
annotated_files %>%
filter(grepl("^V", pos)) %>%
select(pos, pos_description, lemma, token, sentence_number, sentence) %>%
mutate(sentence = gsub(search_expression, "**\\1", sentence)) %>%
view()
|
#------------------------------------------------------------------------------#
#### SERVER SCRIPT ####
#------------------------------------------------------------------------------#
library(shiny)
#------------------------------------------------------------------------------#
#### Define SERVER ####
#------------------------------------------------------------------------------#
# load("./dane.RData")
# source("./data_exploration/plot_histogram.R")
source("./plot_histogram.R")
shinyServer(function(input, output, session) {
#### REACTIVES ####
  ## df_raw: raw upload. Read when the user clicks "upload"; fread() returns
  ## a data.table. As a side effect, the target/predictor select inputs are
  ## refreshed with the new column names.
  df_raw <- eventReactive(input$upload, {
    req(input$file_input, input$separator)
    df_0 <- data.table::fread(file = input$file_input$datapath,
                              stringsAsFactors = F,
                              sep = input$separator)
    vars <- names(df_0)
    updateSelectInput(session,
                      inputId = "target",
                      label = "Select target variable",
                      choices = vars)
    updateSelectInput(session,
                      inputId = "var_name",
                      label = "Select predictor variable",
                      choices = vars)
    df_0
  })
  ## df_base: two-column slice of the upload — predictor first, target
  ## second (downstream code relies on this column order).
  df_base <- reactive({
    req(input$var_name, input$target)
    df_raw() %>%
      select(rlang::UQ(as.name(input$var_name)),
             rlang::UQ(as.name(input$target)))
  })
## df
df <- reactive({
# req(input$log_dummy, input$outlier_dummy, input$outlier_def)
## calculate min to enable applying logarithm
df_base() %>%
select(rlang::UQ(as.name(input$var_name))) %>%
min(na.rm = T) -> min_x
df_temp <- df_base()
## change names to simplify code
names(df_temp) <- c("x", "y")
df_temp$x <- as.numeric(df_temp$x)
## if log_dummy == T then apply logarithm
if(input$log_dummy){
if(min_x <= 0){
df_temp %>%
mutate(x = log(x + min_x + 1)) %>%
select(x, y) -> df_temp
} else {
df_temp %>%
mutate(x = log(x)) %>%
select(x, y) -> df_temp
}
}
## if outlier_dummy == T then remove outliers according to definition
if(input$outlier_dummy){
df_temp %>%
mutate(x_scaled = as.numeric(scale(x))) %>%
filter(abs(x_scaled) <= input$outlier_def) %>%
select(- x_scaled) -> df_temp
}
#####
# if(input$target_type == "numeric") {
#
# ## calculate min to enable applying logarithm
# df_base() %>%
# select(rlang::UQ(as.name(input$target))) %>%
# min(na.rm = T) -> min_y
#
# df_temp$y <- as.numeric(df_temp$y)
#
# ## if log_dummy_tar == T then apply logarithm
# if(input$log_dummy_tar){
# if(min_y <= 0){
# df_temp %>%
# mutate(y = log(y + min_y + 1)) %>%
# select(x, y) -> df_temp
# } else {
# df_temp %>%
# mutate(y = log(y)) %>%
# select(x, y) -> df_temp
# }
# }
#
# ## if outlier_dummy_tar == T then remove outliers according to definition
# if(input$outlier_dummy_tar){
# df_temp %>%
# mutate(y_scaled = as.numeric(scale(y))) %>%
# filter(abs(y_scaled) <= 3) %>%
# select(- y_scaled) -> df_temp
# }
#
# }
#####
## fix the names
names(df_temp) <- c(input$var_name, input$target)
df_temp
})
df_y <- reactive({
df() %>%
select(rlang::UQ(as.name(input$target))) %>%
.[[1]] %>%
class() -> target_class
if(input$target_type == "numeric") {
## calculate min to enable applying logarithm
df() %>%
select(rlang::UQ(as.name(input$target))) %>%
min(na.rm = T) -> min_y
df_temp <- df()
## change names to simplify code
names(df_temp) <- c("x", "y")
df_temp$x <- as.numeric(df_temp$x)
df_temp$y <- as.numeric(df_temp$y)
## if log_dummy_tar == T then apply logarithm
if(input$log_dummy_tar){
if(min_y <= 0){
df_temp %>%
mutate(y = log(y + min_y + 1)) %>%
select(x, y) -> df_temp
} else {
df_temp %>%
mutate(y = log(y)) %>%
select(x, y) -> df_temp
}
}
## if outlier_dummy_tar == T then remove outliers according to definition
if(input$outlier_dummy_tar){
df_temp %>%
mutate(y_scaled = as.numeric(scale(y))) %>%
filter(abs(y_scaled) <= 3) %>%
select(- y_scaled) -> df_temp
}
## fix the names
names(df_temp) <- c(input$var_name, input$target)
df_temp
} else {
if(target_class != "factor") {
df_temp <- df()
names(df_temp)[2] <- "y"
df_temp %>%
mutate(y = as.factor(y)) -> df_temp
names(df_temp)[2] <- input$target
df_temp
}
}
})
  #### OUTPUTS ####
  ## Preview of the raw upload (first 100 rows only, to keep rendering fast).
  output$dane <- renderDataTable({
    df_raw() %>% head(100)
  })
  ## Textual summary of the (preprocessed) predictor via DescTools::Desc.
  output$summary <- renderPrint({
    df() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      DescTools::Desc(plotit = F)
  })
  ## Histogram of the predictor; mean and sd are passed to the project
  ## helper plot_histogram() (sourced from plot_histogram.R).
  output$hist_plot <- renderPlotly({
    df() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      summarise_all(mean) %>%
      .[[1]] -> mean_x
    df() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      summarise_all(sd) %>%
      .[[1]] -> sd_x
    plot_histogram(df = df(),
                   var_name = input$var_name,
                   bins_num = input$bins,
                   mean_x = mean_x,
                   sd_x = sd_x,
                   title = paste0("Histogram of ", input$var_name)
    ) -> plot_1
    plotly::ggplotly(plot_1)
  })
  ## Single boxplot of the predictor, outliers highlighted in red.
  output$box_plot <- renderPlotly({
    df() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      ggplot(aes_string(x = factor(0), y = input$var_name)) +
      geom_boxplot(fill = RColorBrewer::brewer.pal(n = 3, name = "Set1")[2],
                   na.rm = TRUE,
                   outlier.color = "red",
                   outlier.fill = "red") +
      scale_x_discrete(breaks = NULL) +
      labs(title = paste0("Boxplot of ", input$var_name)) +
      xlab(NULL) +
      coord_fixed(ratio = 0.05) +
      theme_bw() -> plot_2
    plotly::ggplotly(plot_2)
  })
  ## Lilliefors (Kolmogorov-Smirnov) normality test of the predictor.
  output$norm_test <- renderPrint({
    df() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      .[[1]] %>%
      nortest::lillie.test()
  })
  ## Scatter plot of target vs. predictor with a smoothed trend line
  ## (uses df_y(), i.e. both variables after preprocessing).
  output$scatter_plot <- renderPlotly({
    df_y() %>%
      ## define plot
      ggplot(aes_string(x = input$var_name, y = input$target)) +
      geom_point(fill = "black", alpha = 0.5) +
      geom_smooth(color = "red", fill = "darkblue") +
      ## layout
      labs(x = paste(input$var_name),
           y = paste(input$target),
           title = paste0(input$target, " vs. ", input$var_name)) +
      theme_bw() -> plot_2
    plotly::ggplotly(plot_2)
  })
  ## Pearson correlation between the two columns of df_y(), rounded to 3
  ## decimals (complete observations only).
  output$correlation <- renderText({
    df_y() %>%
      cor(use = "complete.obs") %>%
      .[1,2] -> cor_coef
    paste0("Correlation coefficient = ",
           round(cor_coef, digits = 3))
  })
  ## Density of the predictor, conditional on the (categorical) target.
  output$dens_plot <- renderPlotly({
    ## NOTE(review): the palette size is based on the number of distinct
    ## values of the *predictor*, while the fill maps the *target* — this
    ## looks like it should count levels of input$target instead; confirm.
    df_y() %>%
      select(rlang::UQ(as.name(input$var_name))) %>%
      unique() %>%
      dim () %>%
      .[1] -> n_val
    if(n_val == 2) {
      color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[2:1]
    } else {
      color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[1:n_val]
    }
    df_y() %>%
      ggplot(aes_string(x = input$var_name, fill = input$target)) +
      # geom_density(alpha = 0.5, size = 1) +
      geom_density(alpha = 0.5) +
      ## layout
      scale_fill_manual(
        name = paste0("Levels of ", input$target),
        values = color_palette
      ) +
      labs(x = paste(input$var_name),
           y = "Density",
           title = "Conditional densities comparison"
      ) +
      scale_y_continuous(labels = scales::comma) +
      theme_bw() +
      theme(legend.position = "bottom") -> plot_3
    plotly::ggplotly(plot_3)
  })
output$box_plot_2 <- renderPlotly({
df_y() %>%
select(rlang::UQ(as.name(input$var_name))) %>%
unique() %>%
dim () %>%
.[1] -> n_val
if(n_val == 2) {
color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[2:1]
} else {
color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[1:n_val]
}
df_y() %>%
ggplot(aes_string(y = input$var_name, x = input$target,
fill = input$target)) +
geom_boxplot() +
## layout
scale_fill_manual(
name = paste0("Levels of ", input$target),
values = color_palette
) +
labs(x = paste(input$target),
y = paste(input$var_name),
title = paste0(input$target, " vs. ", input$var_name)
) +
scale_y_continuous(labels = scales::comma) +
theme_bw() +
theme(legend.position = "bottom") -> plot_4
plotly::ggplotly(plot_4)
})
output$t_test <- renderPrint({
t.test(
as.formula(paste(input$var_name, " ~ ", input$target)),
data = df_y())
})
output$wilcoxon_test <- renderPrint({
wilcox.test(
as.formula(paste(input$var_name, " ~ ", input$target)),
data = df_y())
})
})
| /data_exploration/server.R | no_license | kubasmolik/shiny_EDA | R | false | false | 11,452 | r | #------------------------------------------------------------------------------#
#### SERVER SCRIPT ####
#------------------------------------------------------------------------------#
library(shiny)
#------------------------------------------------------------------------------#
#### Define SERVER ####
#------------------------------------------------------------------------------#
# load("./dane.RData")
# source("./data_exploration/plot_histogram.R")
source("./plot_histogram.R")
shinyServer(function(input, output, session) {
    ## Server for the EDA app: reads a user-uploaded delimited file, lets the
    ## user pick a target and a predictor column, optionally log-transforms /
    ## trims outliers, and renders summaries, plots and hypothesis tests.

    #### REACTIVES ####

    ## df_raw: the uploaded file, re-read on each click of the upload button.
    ## Side effect: repopulates both variable drop-downs with the new columns.
    df_raw <- eventReactive(input$upload, {
        req(input$file_input, input$separator)
        df_0 <- data.table::fread(file = input$file_input$datapath, 
                                  stringsAsFactors = F,
                                  sep = input$separator)
        vars <- names(df_0)
        updateSelectInput(session, 
                          inputId = "target",
                          label = "Select target variable",
                          choices = vars)
        updateSelectInput(session, 
                          inputId = "var_name",
                          label = "Select predictor variable",
                          choices = vars)
        df_0
    })

    ## df_base: two-column slice (predictor, target) of the raw upload.
    df_base <- reactive({
        req(input$var_name, input$target)
        df_raw() %>% 
            select(rlang::UQ(as.name(input$var_name)), 
                   rlang::UQ(as.name(input$target)))
    })

    ## df: df_base with the requested preprocessing applied to the PREDICTOR
    ## column only (optional log transform, optional z-score outlier removal).
    df <- reactive({
        # req(input$log_dummy, input$outlier_dummy, input$outlier_def)
        ## calculate min to enable applying logarithm
        df_base() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            min(na.rm = T) -> min_x
        df_temp <- df_base()
        ## change names to simplify code (restored at the end)
        names(df_temp) <- c("x", "y")
        df_temp$x <- as.numeric(df_temp$x)
        ## if log_dummy == T then apply logarithm
        ## NOTE(review): for min_x < 0 the shift x + min_x + 1 can still be
        ## non-positive (e.g. x = min_x < -1), producing NaN; the intended
        ## shift is probably x - min_x + 1 -- TODO confirm.
        if(input$log_dummy){
            if(min_x <= 0){
                df_temp %>% 
                    mutate(x = log(x + min_x + 1)) %>% 
                    select(x, y) -> df_temp
            } else {
                df_temp %>% 
                    mutate(x = log(x)) %>% 
                    select(x, y) -> df_temp
            }
        }
        ## if outlier_dummy == T then remove outliers according to definition
        ## (rows whose predictor z-score exceeds input$outlier_def are dropped)
        if(input$outlier_dummy){
            df_temp %>% 
                mutate(x_scaled = as.numeric(scale(x))) %>% 
                filter(abs(x_scaled) <= input$outlier_def) %>% 
                select(- x_scaled) -> df_temp
        }
        ## NOTE(review): a commented-out draft of the analogous target-side
        ## preprocessing used to live here; that logic now lives in df_y().
        ## fix the names
        names(df_temp) <- c(input$var_name, input$target)
        df_temp
    })

    ## df_y: df with TARGET-side preprocessing.  Numeric targets get the same
    ## log / outlier options (outlier cut-off fixed at |z| <= 3); non-numeric,
    ## non-factor targets are coerced to factor.
    ## NOTE(review): when input$target_type != "numeric" and the target column
    ## is already a factor, no branch returns a value, so df_y() is NULL --
    ## confirm this is intended.
    df_y <- reactive({
        df() %>% 
            select(rlang::UQ(as.name(input$target))) %>% 
            .[[1]] %>% 
            class() -> target_class
        if(input$target_type == "numeric") {
            ## calculate min to enable applying logarithm
            df() %>% 
                select(rlang::UQ(as.name(input$target))) %>% 
                min(na.rm = T) -> min_y
            df_temp <- df()
            ## change names to simplify code (restored at the end)
            names(df_temp) <- c("x", "y")
            df_temp$x <- as.numeric(df_temp$x)
            df_temp$y <- as.numeric(df_temp$y)
            ## if log_dummy_tar == T then apply logarithm
            ## NOTE(review): same shift concern as in df(): y + min_y + 1 can
            ## still be non-positive when min_y < 0 -- TODO confirm.
            if(input$log_dummy_tar){
                if(min_y <= 0){
                    df_temp %>% 
                        mutate(y = log(y + min_y + 1)) %>% 
                        select(x, y) -> df_temp
                } else {
                    df_temp %>% 
                        mutate(y = log(y)) %>% 
                        select(x, y) -> df_temp
                }
            }
            ## if outlier_dummy_tar == T then remove outliers according to definition
            if(input$outlier_dummy_tar){
                df_temp %>% 
                    mutate(y_scaled = as.numeric(scale(y))) %>% 
                    filter(abs(y_scaled) <= 3) %>% 
                    select(- y_scaled) -> df_temp
            }
            ## fix the names
            names(df_temp) <- c(input$var_name, input$target)
            df_temp
        } else {
            if(target_class != "factor") {
                df_temp <- df()
                names(df_temp)[2] <- "y"
                df_temp %>% 
                    mutate(y = as.factor(y)) -> df_temp
                names(df_temp)[2] <- input$target
                df_temp
            }
        }
    })

    #### OUTPUTS ####

    ## Preview of the first 100 rows of the upload.
    output$dane <- renderDataTable({
        df_raw() %>% head(100)
    })

    ## DescTools summary of the (preprocessed) predictor column.
    output$summary <- renderPrint({
        df() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            DescTools::Desc(plotit = F)
    })

    ## Histogram of the predictor with mean/sd passed to the plot helper
    ## (plot_histogram() is sourced from plot_histogram.R).
    output$hist_plot <- renderPlotly({
        df() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            summarise_all(mean) %>% 
            .[[1]] -> mean_x
        df() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            summarise_all(sd) %>% 
            .[[1]] -> sd_x
        plot_histogram(df = df(), 
                       var_name = input$var_name,
                       bins_num = input$bins,
                       mean_x = mean_x,
                       sd_x = sd_x,
                       title = paste0("Histogram of ", input$var_name)
                       ) -> plot_1
        plotly::ggplotly(plot_1)
    })

    ## Single boxplot of the predictor (outliers highlighted in red).
    output$box_plot <- renderPlotly({
        df() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            ggplot(aes_string(x = factor(0), y = input$var_name)) +
            geom_boxplot(fill = RColorBrewer::brewer.pal(n = 3, name = "Set1")[2],
                         na.rm = TRUE,
                         outlier.color = "red",
                         outlier.fill = "red") +
            scale_x_discrete(breaks = NULL) +
            labs(title = paste0("Boxplot of ", input$var_name)) +
            xlab(NULL) +
            coord_fixed(ratio = 0.05) +
            theme_bw() -> plot_2
        plotly::ggplotly(plot_2)
    })

    ## Lilliefors (Kolmogorov-Smirnov) normality test on the predictor.
    output$norm_test <- renderPrint({
        df() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            .[[1]] %>% 
            nortest::lillie.test()
    })

    ## Scatter plot of target vs. predictor with a smoother (numeric target).
    output$scatter_plot <- renderPlotly({
        df_y() %>% 
            ## define plot
            ggplot(aes_string(x = input$var_name, y = input$target)) +
            geom_point(fill = "black", alpha = 0.5) +
            geom_smooth(color = "red", fill = "darkblue") +
            ## layout
            labs(x = paste(input$var_name),
                 y = paste(input$target),
                 title = paste0(input$target, " vs. ", input$var_name)) +
            theme_bw() -> plot_2
        plotly::ggplotly(plot_2)
    })

    ## Pearson correlation between predictor and (numeric) target.
    output$correlation <- renderText({
        df_y() %>% 
            cor(use = "complete.obs") %>% 
            .[1,2] -> cor_coef
        paste0("Correlation coefficient = ",
               round(cor_coef, digits = 3))
    })

    ## Density of the predictor split by target level (categorical target).
    ## NOTE(review): n_val counts unique values of the PREDICTOR, yet the
    ## palette is consumed by levels of input$target; brewer.pal(3, ...)[1:n_val]
    ## also yields NAs for n_val > 3 -- confirm whether this should count
    ## target levels instead.
    output$dens_plot <- renderPlotly({
        df_y() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            unique() %>% 
            dim () %>% 
            .[1] -> n_val
        if(n_val == 2) {
            color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[2:1]
        } else {
            color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[1:n_val]
        }
        df_y() %>% 
            ggplot(aes_string(x = input$var_name, fill = input$target)) +
            # geom_density(alpha = 0.5, size = 1) +
            geom_density(alpha = 0.5) +
            ## layout
            scale_fill_manual(
                name = paste0("Levels of ", input$target),
                values = color_palette
            ) +
            labs(x = paste(input$var_name),
                 y = "Density",
                 title = "Conditional densities comparison"
            ) +
            scale_y_continuous(labels = scales::comma) +
            theme_bw() +
            theme(legend.position = "bottom") -> plot_3
        plotly::ggplotly(plot_3)
    })

    ## Boxplots of the predictor per target level (categorical target).
    ## NOTE(review): same n_val / palette concern as in dens_plot above.
    output$box_plot_2 <- renderPlotly({
        df_y() %>% 
            select(rlang::UQ(as.name(input$var_name))) %>% 
            unique() %>% 
            dim () %>% 
            .[1] -> n_val
        if(n_val == 2) {
            color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[2:1]
        } else {
            color_palette <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[1:n_val]
        }
        df_y() %>% 
            ggplot(aes_string(y = input$var_name, x = input$target,
                              fill = input$target)) +
            geom_boxplot() +
            ## layout
            scale_fill_manual(
                name = paste0("Levels of ", input$target),
                values = color_palette
            ) +
            labs(x = paste(input$target),
                 y = paste(input$var_name),
                 title = paste0(input$target, " vs. ", input$var_name)
            ) +
            scale_y_continuous(labels = scales::comma) +
            theme_bw() +
            theme(legend.position = "bottom") -> plot_4
        plotly::ggplotly(plot_4)
    })

    ## Welch two-sample t-test of predictor ~ target (two-level target).
    output$t_test <- renderPrint({
        t.test(
            as.formula(paste(input$var_name, " ~ ", input$target)),
            data = df_y())
    })

    ## Wilcoxon rank-sum test, the nonparametric companion of the t-test.
    output$wilcoxon_test <- renderPrint({
        wilcox.test(
            as.formula(paste(input$var_name, " ~ ", input$target)),
            data = df_y())
    })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/radio_get.R
\name{radio_get}
\alias{radio_get}
\title{Downloads radiosondes from the University of Wyoming website for a radar in .html format}
\usage{
radio_get(dates, output, radar, nexrad_n, years = unique(nights2$year) + 1900)
}
\arguments{
\item{dates}{The dates to be downloaded, in the format mm/dd/yyyy. These are converted to the date at midnight of the following day.}
\item{output}{Location to save files}
\item{radar}{The 3 letter radar code}
\item{nexrad_n}{Location of the nexrad site table. Default is the location on the AP's server}
\item{years}{The years you want to download. Default is all years in the dates list.}
}
\description{
This program uses site IDs taken from the University of Wyoming that are stored in the nexradsite table. You may need to update that file to download radiosondes. Output is in .html format.
}
| /man/radio_get.Rd | no_license | birderboone/Radar | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/radio_get.R
\name{radio_get}
\alias{radio_get}
\title{Downloads radiosondes from the university of Wyoming website for a radar in .html format}
\usage{
radio_get(dates, output, radar, nexrad_n, years = unique(nights2$year) + 1900)
}
\arguments{
\item{dates}{The Dates to be downloaded. In the format mm/dd/yyyy. These are converted to the date at midnight of the next day}
\item{output}{Location to save files}
\item{radar}{The 3 letter radar code}
\item{nexrad_n}{Location of the nexrad site table. Default is the location on the AP's server}
\item{years}{The years you want to download. Default is all years in the dates list.}
}
\description{
This program uses site ID's taken from the University of Wyoming that are stored in the nexradsite table. You may need to update that file to download radiosondes. Output is in .html.
}
|
library(shiny)
library(shinydashboard)
library(tidyverse)
library(tidycensus)
library(data.table)
library(tigris)
library(readxl)
library(DT)
library(ggplot2)
library(psych)
library(leaflet)
library(plotly)
function(input, output, session){
    ## Shiny server: choropleth maps (national + per-state) of county-level
    ## variables, with summary tables and density/box plots.  Relies on the
    ## globals fullmap, vars, counties, cors and desc prepared at load time.

    ## National choropleth base map; polygons/legend are added by the
    ## observeEvent(input$selected) proxy below.
    output$map <- renderLeaflet({
        leaflet(fullmap) %>% addProviderTiles('CartoDB.Positron') %>%
            setView(lng=-97.5833, lat=38.8333, zoom = 4,options=(animate=FALSE)) #%>%
        # addLegend("bottomright", pal = colorBin("YlOrRd", col1, n=8), values = col1, opacity = 1,
        #           title = "Quantile Subset")
    })
    # pal <- reactive({
    #   colorBin("YlOrRd", vars[as.character(input$selected)][[1]], 6)
    # })

    ## When the displayed variable changes, recolor BOTH maps via proxies
    ## (avoids a full leaflet re-render).
    ## NOTE(review): fill uses colorQuantile but the legend uses colorBin over
    ## the same values, so legend bins need not match the fill -- confirm.
    observeEvent(input$selected, {
        pal <- colorQuantile("YlOrRd", domain = vars[as.character(input$selected)][[1]])
        col <- subset(fullmap, select = input$selected)[[1]]
        leafletProxy("map", data=fullmap) %>% 
            clearShapes() %>% clearControls() %>% 
            addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
                        opacity = 1.0, fillOpacity = 0.5,
                        fillColor = ~pal(col),
                        highlightOptions = highlightOptions(color = "white", weight = 2,
                                                            bringToFront = TRUE),
                        popup = paste0(fullmap$County,"<br>","State: ", fullmap$State,"<br>",
                                       "Cancer Incidence Rate (per 100,000): ", fullmap$Cases)) %>% 
            addLegend("bottomright", pal = colorBin("YlOrRd", col, n=8), values = col, opacity = 1,
                      title = "Quantile Subset")
        ## Rebuild the state-level layer for the currently selected state.
        smp <- geo_join(counties, filter(vars, State == input$statels),
                        by="GEOID", how='inner')
        cols <- subset(smp, select = input$selected)[[1]]
        leafletProxy("mapstate", data=smp) %>% 
            clearShapes() %>% 
            addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
                        opacity = 1.0, fillOpacity = 0.5,
                        fillColor = ~colorQuantile("YlOrRd", cols)(cols),
                        highlightOptions = highlightOptions(color = "white", weight = 2,
                                                            bringToFront = TRUE),
                        popup = paste0(smp$County,"<br>",
                                       "Cancer Incidence Rate (per 100,000): ", smp$Cases)) %>% 
            fitBounds(~min(fortify(smp)$long,na.rm=T), ~min(fortify(smp)$lat,na.rm=T),
                      ~max(fortify(smp)$long,na.rm=T), ~max(fortify(smp)$lat,na.rm=T))
    })

    ## Initial state map seeded with Alabama (first entry of the selector).
    stateinit <- geo_join(counties, filter(vars, State == 'Alabama'),
                          by="GEOID", how='inner')
    output$mapstate <- renderLeaflet({
        leaflet(stateinit) %>% #addProviderTiles('CartoDB.Positron') %>%
            addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
                        opacity = 1.0, fillOpacity = 0.5,
                        highlightOptions = highlightOptions(color = "white", weight = 2,
                                                            bringToFront = TRUE))
    })

    ## Recolor / re-zoom the state map when a new state is chosen.
    ## NOTE(review): pal's domain is the NATIONAL distribution of the selected
    ## variable while cols come from the state subset -- confirm intent.
    observeEvent(input$statels, {
        pal <- colorQuantile("YlOrRd", domain = vars[as.character(input$selected)][[1]])
        smp <- geo_join(counties, statev(),
                        by="GEOID", how='inner')
        cols <- subset(smp, select = input$selected)[[1]]
        leafletProxy("mapstate", data=smp) %>% 
            clearShapes() %>% 
            addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
                        opacity = 1.0, fillOpacity = 0.5,
                        fillColor = ~pal(cols),
                        highlightOptions = highlightOptions(color = "white", weight = 2,
                                                            bringToFront = TRUE)) %>% 
            fitBounds(~min(fortify(smp)$long,na.rm=T), ~min(fortify(smp)$lat,na.rm=T),
                      ~max(fortify(smp)$long,na.rm=T), ~max(fortify(smp)$lat,na.rm=T))
    })

    ## National descriptive statistics and correlation table.
    output$alltab <- renderTable(desc)
    output$cortest <- renderTable(cors)
    ## desc: psych::describe of columns 4:8 of vars, with the variable names
    ## moved into a leading "Variables" column.
    desc <- describe(vars[4:8])[c(2,4,5,8,9,10)]
    desc <- mutate(desc, Variables = rownames(desc))
    desc <- desc[,c(7, 1:6)]

    ## Per-state descriptive statistics for the same columns.
    statetable <- reactive({
        describe((vars %>% filter(State == input$statels))[4:8])[c(2,4,5,8,9,10)]
    })
    output$statetab <- renderTable(statetable())

    ## pick: one-column data frame of the selected variable (national).
    pick <- reactive({
        vars[as.character(input$selected)]
    })
    output$dens <- renderPlotly(
        ggplotly(ggplot(pick(), aes(x=pick()[1])) + 
                     geom_density(fill='red') + xlab(input$selected) + 
                     ggtitle(paste0("Density of ",as.character(input$selected))))
    )
    output$boxp <- renderPlotly(
        ggplotly(ggplot(pick(), aes(x='', y=pick()[1])) + 
                     geom_boxplot(outlier.shape = NA) + ylab(input$selected) + xlab('U.S.') +
                     ggtitle(paste0('Distribution of ',as.character(input$selected)))))

    ## statev: all rows of vars for the chosen state;
    ## stateAllV: just the selected column of that subset.
    statev <- reactive({
        vars %>% filter(State == input$statels)
    })
    stateAllV <- reactive({
        vars %>% filter(State == input$statels) %>% select(input$selected)
    })
    # stv <- vars %>% filter(State == 'New York') %>% select('Cases')
    # ggplotly(ggplot(stv, aes(Cases)) + 
    #            geom_density())
    output$sdens <- renderPlotly(
        ggplotly(ggplot(statev(), aes_string(input$selected)) + 
                     geom_density(fill='red') + xlab(input$selected) + 
                     ggtitle(paste0("Density of ",as.character(input$selected), ' in ', 
                                    as.character(input$statels)))))
    output$sboxp <- renderPlotly(
        ggplotly(ggplot(stateAllV(), aes(x = '', y = stateAllV()[1])) + 
                     geom_boxplot()+ ylab(input$selected) + xlab(input$statels) + 
                     ggtitle(paste0('Distribution of ', as.character(input$selected),' in ', 
                                    as.character(input$statels)))))
}
#vars %>% filter(State == "New York") %>% ggplot(aes(x='', y=Cases)) + geom_boxplot()
| /server.R | no_license | samtrost/shinyvis | R | false | false | 5,778 | r | library(shiny)
library(shinydashboard)
library(tidyverse)
library(tidycensus)
library(data.table)
library(tigris)
library(readxl)
library(DT)
library(ggplot2)
library(psych)
library(leaflet)
library(plotly)
function(input, output, session){
output$map <- renderLeaflet({
leaflet(fullmap) %>% addProviderTiles('CartoDB.Positron') %>%
setView(lng=-97.5833, lat=38.8333, zoom = 4,options=(animate=FALSE)) #%>%
# addLegend("bottomright", pal = colorBin("YlOrRd", col1, n=8), values = col1, opacity = 1,
# title = "Quantile Subset")
})
# pal <- reactive({
# colorBin("YlOrRd", vars[as.character(input$selected)][[1]], 6)
# })
observeEvent(input$selected, {
pal <- colorQuantile("YlOrRd", domain = vars[as.character(input$selected)][[1]])
col <- subset(fullmap, select = input$selected)[[1]]
leafletProxy("map", data=fullmap) %>%
clearShapes() %>% clearControls() %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.5,
fillColor = ~pal(col),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE),
popup = paste0(fullmap$County,"<br>","State: ", fullmap$State,"<br>",
"Cancer Incidence Rate (per 100,000): ", fullmap$Cases)) %>%
addLegend("bottomright", pal = colorBin("YlOrRd", col, n=8), values = col, opacity = 1,
title = "Quantile Subset")
smp <- geo_join(counties, filter(vars, State == input$statels),
by="GEOID", how='inner')
cols <- subset(smp, select = input$selected)[[1]]
leafletProxy("mapstate", data=smp) %>%
clearShapes() %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.5,
fillColor = ~colorQuantile("YlOrRd", cols)(cols),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE),
popup = paste0(smp$County,"<br>",
"Cancer Incidence Rate (per 100,000): ", smp$Cases)) %>%
fitBounds(~min(fortify(smp)$long,na.rm=T), ~min(fortify(smp)$lat,na.rm=T),
~max(fortify(smp)$long,na.rm=T), ~max(fortify(smp)$lat,na.rm=T))
})
stateinit <- geo_join(counties, filter(vars, State == 'Alabama'),
by="GEOID", how='inner')
output$mapstate <- renderLeaflet({
leaflet(stateinit) %>% #addProviderTiles('CartoDB.Positron') %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.5,
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE))
})
observeEvent(input$statels, {
pal <- colorQuantile("YlOrRd", domain = vars[as.character(input$selected)][[1]])
smp <- geo_join(counties, statev(),
by="GEOID", how='inner')
cols <- subset(smp, select = input$selected)[[1]]
leafletProxy("mapstate", data=smp) %>%
clearShapes() %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.5,
fillColor = ~pal(cols),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE)) %>%
fitBounds(~min(fortify(smp)$long,na.rm=T), ~min(fortify(smp)$lat,na.rm=T),
~max(fortify(smp)$long,na.rm=T), ~max(fortify(smp)$lat,na.rm=T))
})
output$alltab <- renderTable(desc)
output$cortest <- renderTable(cors)
desc <- describe(vars[4:8])[c(2,4,5,8,9,10)]
desc <- mutate(desc, Variables = rownames(desc))
desc <- desc[,c(7, 1:6)]
statetable <- reactive({
describe((vars %>% filter(State == input$statels))[4:8])[c(2,4,5,8,9,10)]
})
output$statetab <- renderTable(statetable())
pick <- reactive({
vars[as.character(input$selected)]
})
output$dens <- renderPlotly(
ggplotly(ggplot(pick(), aes(x=pick()[1])) +
geom_density(fill='red') + xlab(input$selected) +
ggtitle(paste0("Density of ",as.character(input$selected))))
)
output$boxp <- renderPlotly(
ggplotly(ggplot(pick(), aes(x='', y=pick()[1])) +
geom_boxplot(outlier.shape = NA) + ylab(input$selected) + xlab('U.S.') +
ggtitle(paste0('Distribution of ',as.character(input$selected)))))
statev <- reactive({
vars %>% filter(State == input$statels)
})
stateAllV <- reactive({
vars %>% filter(State == input$statels) %>% select(input$selected)
})
# stv <- vars %>% filter(State == 'New York') %>% select('Cases')
# ggplotly(ggplot(stv, aes(Cases)) +
# geom_density())
output$sdens <- renderPlotly(
ggplotly(ggplot(statev(), aes_string(input$selected)) +
geom_density(fill='red') + xlab(input$selected) +
ggtitle(paste0("Density of ",as.character(input$selected), ' in ',
as.character(input$statels)))))
output$sboxp <- renderPlotly(
ggplotly(ggplot(stateAllV(), aes(x = '', y = stateAllV()[1])) +
geom_boxplot()+ ylab(input$selected) + xlab(input$statels) +
ggtitle(paste0('Distribution of ', as.character(input$selected),' in ',
as.character(input$statels)))))
}
#vars %>% filter(State == "New York") %>% ggplot(aes(x='', y=Cases)) + geom_boxplot()
|
#!!!!!!!!!!!!!!!!!!!!! TO CONVERT HOURLY WAGE INTO YEARLY WAGE !!!!!!!!!!!!!!!!!!!!!!!!!#
################################### USE kk1 (NAs removed) ###############################
## Scratch preprocessing script for the LCA (H-1B) data set.  It mutates the
## globals kk1 / kk2 / kk3 created earlier in the pipeline (kk1 = NA-free rows);
## the bare expressions between assignments are interactive sanity checks.

summary(kk1)

## Annualise hourly wages: 52 weeks * 40 hours = 2080 hours/year.
unit <- kk1 %>% filter(PW_UNIT_OF_PAY == "Hour")
NROW(unit)

unit$PREVAILING_WAGE <- unit$PREVAILING_WAGE * 2080 #(52*40 = 2080)
max(unit$PREVAILING_WAGE)
min(unit$PREVAILING_WAGE)
NROW(kk1$PREVAILING_WAGE)

## Recombine annualised hourly rows with the non-hourly rows.
## NOTE(review): rbind() puts the hourly rows first, so kk1's row order
## changes here -- confirm nothing downstream depends on the original order.
kk2 <- kk1 %>% filter(!PW_UNIT_OF_PAY == "Hour")
NROW(kk2)
kk1 <- rbind(unit, kk2)
NROW(kk1$PREVAILING_WAGE)
max(kk1$PREVAILING_WAGE)
min(kk1$PREVAILING_WAGE)

#################### CREATING DUMMY VARIABLES ######################################
library(dummies)

#1) CASE_STATUS -> Certified/Denied (binary STATUS: DENIED = 0, else 1)
as.data.frame(model.matrix(~0 + CASE_STATUS, data = kk1))  # printed for inspection only; not assigned
data.frame(colnames(kk1))
unique(kk1$CASE_STATUS)
kk1$STATUS <- as.numeric(ifelse(kk1$CASE_STATUS == "DENIED", 0, 1))
unique(kk1$STATUS)

#2) PROCESSING_TIME = DECISION_DATE - CASE_SUBMITTED (a difftime in days)
kk1 %>% 
    group_by(CASE_SUBMITTED) %>% 
    summarise(count = n()) %>% 
    arrange(desc(count))
str(kk1)
kk1$process_time <- as.Date(as.character(kk1$DECISION_DATE), format="%Y-%m-%d") - 
    as.Date(as.character(kk1$CASE_SUBMITTED), format = "%Y-%m-%d")
summary(kk1)
unique(kk1$process_time)

#3) Employment_time = EMPLOYMENT_END_DATE - EMPLOYMENT_START_DATE (days)
str(kk1)
kk1$Employment_Time <- as.Date(as.character(kk1$EMPLOYMENT_END_DATE), format="%Y-%m-%d") - 
    as.Date(as.character(kk1$EMPLOYMENT_START_DATE), format = "%Y-%m-%d")
summary(kk1)
max(kk1$Employment_Time)

#4) FULL_TIME_POSITION -> binary POSITION (N = 0, else 1)
unique(kk1$FULL_TIME_POSITION)
kk1$POSITION <- as.numeric(ifelse(kk1$FULL_TIME_POSITION == "N", 0, 1))
unique(kk1$POSITION)
data.frame(colnames(kk1))

#5) H1B_DEPENDENT -> binary (N = 0, else 1)
unique(kk1$H1B_DEPENDENT)
kk1$H1B_Dependent <- as.numeric(ifelse(kk1$H1B_DEPENDENT == "N", 0, 1))
unique(kk1$H1B_Dependent)

#6) WILLFUL_VIOLATOR -> binary (N = 0, else 1)
unique(kk1$WILLFUL_VIOLATOR)
kk1$Willful_Violator <- as.numeric(ifelse(kk1$WILLFUL_VIOLATOR == "N", 0, 1))
unique(kk1$Willful_Violator)

## k: modelling frame -- kk1 with identifier / raw-text columns dropped
## (indices are positional; fragile if kk1's columns change upstream).
data.frame(colnames(kk1))
k <- kk1[-c(1,2,3,4,5,9,10,12,14,16,20,21,22,23,24,25,26)]
data.frame(colnames(k))

#################### CREATING DUMMY VARIABLES ######################################
## Second pass using caret::dummyVars on kk2.
## NOTE(review): this section operates on kk2 (the non-hourly subset from
## above), not the recombined kk1 -- confirm that is intentional.
data.frame(colnames(kk2))
library(caret)
l <- kk2[c(1,16,19,25,26)]   # categorical columns to one-hot encode
m <- kk2[-c(1,16,19,25,26)]  # everything else, re-attached after encoding
#
dmy <- dummyVars("~.", data = l)
trsf <- data.frame(predict(dmy, newdata = l))
trsf
#
data.frame(colnames(m))
kk3 <- cbind(trsf, m)
data.frame(colnames(kk3))

#2) PROCESSING_TIME = DECISION_DATE - CASE_SUBMITTED
kk3 %>% 
    group_by(CASE_SUBMITTED) %>% 
    summarise(count = n()) %>% 
    arrange(desc(count))
str(kk3)
kk3$process_time <- as.Date(as.character(kk3$DECISION_DATE), format="%Y-%m-%d") - 
    as.Date(as.character(kk3$CASE_SUBMITTED), format = "%Y-%m-%d")
unique(kk3$process_time)

#3) Employment_time = EMPLOYMENT_END_DATE - EMPLOYMENT_START_DATE
kk3$Employment_Time <- as.Date(as.character(kk3$EMPLOYMENT_END_DATE), format="%Y-%m-%d") - 
    as.Date(as.character(kk3$EMPLOYMENT_START_DATE), format = "%Y-%m-%d")
max(kk3$Employment_Time)
data.frame(colnames(kk3))
## NOTE(review): `-c()` is a zero-length index, so kk3[-c()] appears to select
## ZERO columns and wipe kk3.  This looks like a leftover placeholder -- fill
## in the column indices to drop, or delete this line.  TODO confirm.
kk3 <- kk3[-c()]

#SCREE PLOT
## Parallel analysis (PCA) on a hand-picked subset of numeric columns of k.
library(psych)
str(k)
k_new <- k[,c(6, 7, 16, 17, 18)]
new_k <- fa.parallel(k_new, fa = 'pc', n.iter=100, show.legend = FALSE, main = "Scree plot")
unique(kk1$PW_SOURCE)
| /dummy.R | no_license | Pragya-ps14/LCA_Classification | R | false | false | 3,492 | r |
#!!!!!!!!!!!!!!!!!!!!! TO CONVERT HOURLY WAGE INTO YEARLY WAGE !!!!!!!!!!!!!!!!!!!!!!!!!#
################################### USE kk1 (NAs removed) ###############################
summary(kk1)
unit <- kk1 %>% filter(PW_UNIT_OF_PAY == "Hour")
NROW(unit)
unit$PREVAILING_WAGE <- unit$PREVAILING_WAGE * 2080 #(52*40 = 2080)
max(unit$PREVAILING_WAGE)
min(unit$PREVAILING_WAGE)
NROW(kk1$PREVAILING_WAGE)
kk2 <- kk1 %>% filter(!PW_UNIT_OF_PAY == "Hour")
NROW(kk2)
kk1 <- rbind(unit, kk2)
NROW(kk1$PREVAILING_WAGE)
max(kk1$PREVAILING_WAGE)
min(kk1$PREVAILING_WAGE)
#################### CREATING DUMMY VARIABLES ######################################
library(dummies)
#1) CASE_STATUS -> Certified/Denied
as.data.frame(model.matrix(~0 + CASE_STATUS, data = kk1))
data.frame(colnames(kk1))
unique(kk1$CASE_STATUS)
kk1$STATUS <- as.numeric(ifelse(kk1$CASE_STATUS == "DENIED", 0, 1))
unique(kk1$STATUS)
#2) PROCESSING_TIME = DECISION_DATE - CASE_SUBMITTED
kk1 %>%
group_by(CASE_SUBMITTED) %>%
summarise(count = n()) %>%
arrange(desc(count))
str(kk1)
kk1$process_time <- as.Date(as.character(kk1$DECISION_DATE), format="%Y-%m-%d") -
as.Date(as.character(kk1$CASE_SUBMITTED), format = "%Y-%m-%d")
summary(kk1)
unique(kk1$process_time)
#3) Employment_time = EMPLOYMENT_END_DATE - EMPLOYMENT_START_DATE
str(kk1)
kk1$Employment_Time <- as.Date(as.character(kk1$EMPLOYMENT_END_DATE), format="%Y-%m-%d") -
as.Date(as.character(kk1$EMPLOYMENT_START_DATE), format = "%Y-%m-%d")
summary(kk1)
max(kk1$Employment_Time)
#4) FULL_TIME_POSITION
unique(kk1$FULL_TIME_POSITION)
kk1$POSITION <- as.numeric(ifelse(kk1$FULL_TIME_POSITION == "N", 0, 1))
unique(kk1$POSITION)
data.frame(colnames(kk1))
#5) H1B_DEPENDENT
unique(kk1$H1B_DEPENDENT)
kk1$H1B_Dependent <- as.numeric(ifelse(kk1$H1B_DEPENDENT == "N", 0, 1))
unique(kk1$H1B_Dependent)
#6) WILLFUL_VIOLATOR
unique(kk1$WILLFUL_VIOLATOR)
kk1$Willful_Violator <- as.numeric(ifelse(kk1$WILLFUL_VIOLATOR == "N", 0, 1))
unique(kk1$Willful_Violator)
data.frame(colnames(kk1))
k <- kk1[-c(1,2,3,4,5,9,10,12,14,16,20,21,22,23,24,25,26)]
data.frame(colnames(k))
#################### CREATING DUMMY VARIABLES ######################################
data.frame(colnames(kk2))
library(caret)
l <- kk2[c(1,16,19,25,26)]
m <- kk2[-c(1,16,19,25,26)]
#
dmy <- dummyVars("~.", data = l)
trsf <- data.frame(predict(dmy, newdata = l))
trsf
#
data.frame(colnames(m))
kk3 <- cbind(trsf, m)
data.frame(colnames(kk3))
#2) PROCESSING_TIME = DECISION_DATE - CASE_SUBMITTED
kk3 %>%
group_by(CASE_SUBMITTED) %>%
summarise(count = n()) %>%
arrange(desc(count))
str(kk3)
kk3$process_time <- as.Date(as.character(kk3$DECISION_DATE), format="%Y-%m-%d") -
as.Date(as.character(kk3$CASE_SUBMITTED), format = "%Y-%m-%d")
unique(kk3$process_time)
#3) Employment_time = EMPLOYMENT_END_DATE - EMPLOYMENT_START_DATE
kk3$Employment_Time <- as.Date(as.character(kk3$EMPLOYMENT_END_DATE), format="%Y-%m-%d") -
as.Date(as.character(kk3$EMPLOYMENT_START_DATE), format = "%Y-%m-%d")
max(kk3$Employment_Time)
data.frame(colnames(kk3))
kk3 <- kk3[-c()]
#SCREE PLOT
library(psych)
str(k)
k_new <- k[,c(6, 7, 16, 17, 18)]
new_k <- fa.parallel(k_new, fa = 'pc', n.iter=100, show.legend = FALSE, main = "Scree plot")
unique(kk1$PW_SOURCE)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2592
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2592
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_4_2_5.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 906
c no.of clauses 2592
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2592
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_4_2_5.sat.qdimacs 906 2592 E1 [] 0 19 887 2592 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/mult-matrix/mult_bool_matrix_4_2_5.sat/mult_bool_matrix_4_2_5.sat.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 650 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2592
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2592
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_4_2_5.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 906
c no.of clauses 2592
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2592
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_4_2_5.sat.qdimacs 906 2592 E1 [] 0 19 887 2592 NONE
|
#' Update all files that are out of date
#'
#' \code{update_site} rebuilds all source files that are new or have changed since
#' the last time the site was built.
#'
#' Given a source directory (by default the "content" directory in the
#' root directory of the project), find all source files (\code{.Rmd} and
#' \code{.rmarkdown}) in the directory tree under the source directory,
#' calculate hashed digests of the files, and compare them to a
#' stored list of digests from the last time the site was built.
#'
#' If the digests of either the source or output files don't match,
#' if a source file is new since the last time the site was built,
#' or if the output file does not exist,
#' then render the source file.
#'
#' After rendering any out-of-date files, regenerate the digest list
#' and saves it to a file.
#'
#' @param dir A string containing the root directory for checking.
#'   By default, the "content" directory of the project.
#' @param quiet Suppress output. By default this is \code{FALSE} and the
#'   function emits an informational message about how many files will
#'   be rebuilt.
#' @param force Force rebuilding source files that are not out of date.
#'
#' @inheritParams blogdown::build_site
#'
#' @return This function does not return anything
#'
#' @seealso \code{\link[blogdown]{build_site}()}, \code{\link[blogdown]{build_dir}()},
#'   \code{\link{digests}}.
#'
#' @export
update_site <- function(dir = NULL, quiet = FALSE, force = FALSE,
                        local = FALSE, run_hugo = TRUE) {
  # Work from the site root so blogdown's relative paths resolve; restore
  # the caller's working directory on exit.
  old_wd <- getwd()
  setwd(blogdown:::site_root())
  on.exit(setwd(old_wd))
  if (is.null(dir)) {
    dir <- find_blog_content()
  }
  # Express `dir` relative to the site root.
  cd <- paste0(normalizePath(getwd(), winslash = "/"), "/")
  dir <- normalizePath(dir, winslash = "/")
  dir <- str_replace(dir, fixed(cd), "")
  # message("Dir = ", dir, ", cd = ", cd, ", d = ", d)
  # Default the build method to "html".  Supplying the default to getOption()
  # also covers the case where the option is entirely unset (NULL), which
  # would otherwise make `if (is.na(method))` fail on a zero-length condition.
  method <- getOption("blogdown.method", "html")
  if (is.na(method)) {
    method <- "html"
  }
  # R/build.R (if present) must run for every method, including "custom",
  # so it is registered before the early return below.
  on.exit(blogdown:::run_script("R/build.R", as.character(local)), add = TRUE,
          after = FALSE)
  if (method == "custom")
    return()
  files <- blogdown:::list_rmds(dir, TRUE)
  if (force) {
    to_build <- files
  } else {
    to_build <- filter_needs_rebuild(files)
  }
  to_build <- str_replace(normalizePath(to_build, winslash = "/"), fixed(cd), "")
  # message("To build: ", str_c(to_build, collapse = ", "))
  if (! quiet) {
    message("Building ", length(to_build), " out of date ",
            ifelse(length(to_build) == 1, "file", "files"),
            "; site has ", length(files), " ",
            ifelse(length(files) == 1, "file", "files"),
            " in total.")
  }
  blogdown:::build_rmds(to_build)
  # BUG FIX: hugo_build() used to be registered on exit unconditionally, so
  # run_hugo = FALSE did not actually skip the Hugo build.  The working-
  # directory switch and the Hugo build are now guarded together.
  if (run_hugo) {
    on.exit(setwd(cd), add = TRUE, after = TRUE)
    on.exit(hugo_build(local), add = TRUE, after = TRUE)
  }
  on.exit(setwd(old_wd), add = TRUE, after = TRUE)
  # message("On exit stack: ", deparse(sys.on.exit()))
  update_rmd_digests(files)
}
#' Rebuild changed files in a subdirectory of "content"
#'
#' \code{update_dir} rebuilds the new or changed source files found under a
#' single subdirectory of "content", then refreshes the stored digests for
#' the files it examined.
#'
#' @rdname update_site
#' @inheritParams update_site
#' @param ignore A regular expression pattern for files to ignore.
update_dir <- function(dir = '.', quiet = FALSE, force = FALSE,
                       ignore = NA_character_) {
  # If `dir` is not a real path, try interpreting it relative to the
  # project's content directory before giving up.
  if (! dir.exists(dir)) {
    candidate <- file.path(find_blog_content(), dir)
    if (! dir.exists(candidate)) {
      stop("Directory does not exist: ", dir)
    }
    dir <- candidate
  }
  sources <- blogdown:::list_rmds(dir, TRUE)
  # Drop any sources matching the ignore pattern (when one was given).
  if (! is.na(ignore)) {
    sources <- sources[! str_detect(sources, ignore)]
  }
  # With force = TRUE everything is rebuilt; otherwise only stale files.
  targets <- if (force) sources else filter_needs_rebuild(sources)
  if (! quiet) {
    message("Building ", length(targets), " out of date ",
            ifelse(length(targets) == 1, "file", "files"),
            "; site has ", length(sources), " ",
            ifelse(length(sources) == 1, "file", "files"),
            " in total.")
  }
  blogdown:::build_rmds(targets)
  # Partial update: only refresh digests for the files seen in this subtree.
  update_rmd_digests(sources, partial = TRUE)
  invisible(sources)
}
| /R/update.R | permissive | jonathan-g/blogdownDigest | R | false | false | 4,102 | r | #' Update all files that are out of date
#'
#' \code{update_site} rebuilds all source files that are new or have changed since
#' the last time the site was built.
#'
#' Given a source directory (by default the "content" directory in the
#' root directory of the project), find all source files (\code{.Rmd} and
#' \code{.rmarkdown}) in the directory tree under the source directory,
#' calculate hashed digests of the files, and compare them to a
#' stored list of digests from the last time the site was built.
#'
#' If the digests of either the source or output files don't match,
#' if a source file is new since the last time the site was built,
#' or if the output file does not exist,
#' then render the source file.
#'
#' After rendering any out-of-date files, regenerate the digest list
#' and saves it to a file.
#'
#' @param dir A string containing the root directory for checking.
#' By default, the "content" directory of the project.
#' @param quiet Suppress output. By default this is \code{FALSE} and the
#' function emits an informational message about how many files will
#' be rebuilt.
#' @param force Force rebuilding source files that are not out of date.
#'
#' @inheritParams blogdown::build_site
#'
#' @return This function does not return anything
#'
#' @seealso \code{\link[blogdown]{build_site}()}, \code{\link[blogdown]{build_dir}()},
#' \code{\link{digests}}.
#'
#' @export
update_site <- function(dir = NULL, quiet = FALSE, force = FALSE,
                        local = FALSE, run_hugo = TRUE) {
  # Work from the site root; restore the caller's working directory on exit.
  old_wd <- getwd()
  setwd(blogdown:::site_root())
  on.exit(setwd(old_wd))
  # Default to the site's "content" directory.
  if (is.null(dir)) {
    dir <- find_blog_content()
  }
  # Rewrite `dir` as a path relative to the site root (forward slashes).
  cd <- paste0(normalizePath(getwd(), winslash = "/"), "/")
  dir <- normalizePath(dir, winslash = "/")
  dir <- str_replace(dir, fixed(cd), "")
  # message("Dir = ", dir, ", cd = ", cd, ", d = ", d)
  # NOTE(review): if the "blogdown.method" option is unset, getOption()
  # returns NULL and is.na(NULL) is logical(0), which would make this if()
  # error -- presumably the option is always set; confirm.
  method <- getOption("blogdown.method")
  if (is.na(method)) {
    method <- "html"
  }
  # Queue the site's custom build script to run on exit; after = FALSE pushes
  # it ahead of the previously registered handlers.
  on.exit(blogdown:::run_script("R/build.R", as.character(local)), add = TRUE,
          after = FALSE)
  if (method == "custom")
    return()
  # Collect all R Markdown sources and decide which need rebuilding.
  files <- blogdown:::list_rmds(dir, TRUE)
  if (force) {
    to_build <- files
  } else {
    to_build <- filter_needs_rebuild(files)
  }
  to_build <- str_replace(normalizePath(to_build, winslash = "/"), fixed(cd), "")
  # message("To build: ", str_c(to_build, collapse = ", "))
  if (! quiet) {
    message("Building ", length(to_build), " out of date ",
            ifelse(length(to_build) == 1, "file", "files"),
            "; site has ", length(files), " ",
            ifelse(length(files) == 1, "file", "files"),
            " in total.")
  }
  blogdown:::build_rmds(to_build)
  # Append exit handlers (after = TRUE): cd back to the root, run Hugo, then
  # restore the original working directory last.
  # NOTE(review): only the first on.exit() below is guarded by run_hugo (the
  # if() has no braces), so hugo_build() is registered even when
  # run_hugo = FALSE -- looks unintended; confirm.
  if (run_hugo)
    on.exit(setwd(cd), add = TRUE, after = TRUE)
  on.exit(hugo_build(local), add = TRUE, after = TRUE)
  on.exit(setwd(old_wd), add = TRUE, after = TRUE)
  # message("On exit stack: ", deparse(sys.on.exit()))
  # Record fresh digests so the next call can detect what changed.
  update_rmd_digests(files)
}
#' Rebuild changed files in a subdirectory of "content"
#'
#' \code{update_dir} updates changed files in a subdirectory of "content"
#'
#' @rdname update_site
#' @inheritParams update_site
#' @param ignore A regular expression pattern for files to ignore.
update_dir <- function(dir = '.', quiet = FALSE, force = FALSE,
                       ignore = NA_character_) {
  # Resolve `dir` relative to the site's "content" directory if it does not
  # exist as given.
  if (! dir.exists(dir)) {
    new_dir <- file.path(find_blog_content(), dir)
    if (! dir.exists(new_dir)) {
      stop("Directory does not exist: ", dir)
    } else {
      dir <- new_dir
    }
  }
  # All R Markdown sources under `dir`, minus any whose path matches `ignore`.
  files <- blogdown:::list_rmds(dir, TRUE)
  if (! is.na(ignore))
    files <- files %>% discard(~str_detect(.x, ignore))
  # Rebuild everything when forced; otherwise only files flagged as stale.
  if (force) {
    to_build <- files
  } else {
    to_build <- filter_needs_rebuild(files)
  }
  if (! quiet) {
    message("Building ", length(to_build), " out of date ",
            ifelse(length(to_build) == 1, "file", "files"),
            "; site has ", length(files), " ",
            ifelse(length(files) == 1, "file", "files"),
            " in total.")
  }
  blogdown:::build_rmds(to_build)
  # partial = TRUE: presumably refreshes stored digests only for these files
  # -- confirm against update_rmd_digests().
  update_rmd_digests(files, partial = TRUE)
  invisible(files)
}
|
## Get dataset
## Read the full household power consumption file (~2.08M rows, ';' separated);
## "?" marks missing values.
data_full <- read.csv("../Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subset data
## Keep only 1-2 Feb 2007, then drop the full table to free memory.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Convert dates
## Combine date and time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Save to file
## NOTE(review): dev.copy() snapshots the screen device; opening png() before
## plotting is the more reliable pattern for non-interactive runs.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() | /Plot1.r | no_license | abdoukm/ExData_Plotting1 | R | false | false | 719 | r | ## Get dataset
## Read the full household power consumption file (~2.08M rows, ';' separated);
## "?" marks missing values.
data_full <- read.csv("../Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subset data
## Keep only 1-2 Feb 2007, then drop the full table to free memory.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Convert dates
## Combine date and time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Save to file
## NOTE(review): dev.copy() snapshots the screen device; opening png() before
## plotting is the more reliable pattern for non-interactive runs.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() |
#' Run ascr tests
#'
#' Runs tests for ascr.
#'
#' This function allows users to test their ascr installation.
#'
#' @param quick Logical, if \code{TRUE}, only a quick check is carried
#' out that tests whether or not the AD Model Builder executable
#' is running correctly.
#'
#' @export
test.ascr <- function(quick = FALSE){
    ## NOTE: removed an unused `dir <- ifelse(quick, "quick", "full")`
    ## assignment whose value was never read.
    if (quick){
        ## Quick check: fit a minimal model to the bundled example data and
        ## compare the estimated density against a stored reference value.
        example.data <- ascr::example.data
        simple.capt <- example.data$capt["bincapt"]
        fit <- try(fit.ascr(capt = simple.capt, traps = example.data$traps,
                            mask = example.data$mask, fix = list(g0 = 1)),
                   silent = TRUE)
        if (inherits(fit, "try-error")){
            ## inherits() is the idiomatic class test
            ## (was class(fit)[1] == "try-error").
            message("ADMB executable test: FAIL\n")
        } else {
            ## 2267.7395 is the reference density estimate for the example
            ## data set; accept only a tiny relative deviation.
            relative.error <- coef(fit, "D")/2267.7395 - 1
            if (abs(relative.error) < 1e-4){
                message("ADMB executable check: PASS\n")
            } else {
                message("ADMB executable check: INCONCLUSIVE\n Executable has run successfully but results may not be correct.\n")
            }
        }
    } else {
        ## Full check: run the installed test suite. Use the pre-3.6.0 RNG
        ## sampling algorithm so stored reference results stay reproducible,
        ## and restore the default even if test_dir() errors.
        suppressWarnings(RNGkind(sample.kind = "Rounding"))
        on.exit(suppressWarnings(RNGkind(sample.kind = "default")), add = TRUE)
        dir <- file.path(system.file(package = "ascr"), "tests")
        test_dir(dir)
    }
}
## Aliasing old test.admbsecr() function name.
#' @rdname test.ascr
#' @export
test.admbsecr <- test.ascr
| /R/test.r | no_license | b-steve/ascr | R | false | false | 1,463 | r | #' Run ascr tests
#'
#' Runs tests for ascr.
#'
#' This function allows users to test their ascr installation.
#'
#' @param quick Logical, if \code{TRUE}, only a quick check is carried
#' out that tests whether or not the AD Model Builder executable
#' is running correctly.
#'
#' @export
test.ascr <- function(quick = FALSE){
    ## NOTE: removed an unused `dir <- ifelse(quick, "quick", "full")`
    ## assignment whose value was never read.
    if (quick){
        ## Quick check: fit a minimal model to the bundled example data and
        ## compare the estimated density against a stored reference value.
        example.data <- ascr::example.data
        simple.capt <- example.data$capt["bincapt"]
        fit <- try(fit.ascr(capt = simple.capt, traps = example.data$traps,
                            mask = example.data$mask, fix = list(g0 = 1)),
                   silent = TRUE)
        if (inherits(fit, "try-error")){
            ## inherits() is the idiomatic class test
            ## (was class(fit)[1] == "try-error").
            message("ADMB executable test: FAIL\n")
        } else {
            ## 2267.7395 is the reference density estimate for the example
            ## data set; accept only a tiny relative deviation.
            relative.error <- coef(fit, "D")/2267.7395 - 1
            if (abs(relative.error) < 1e-4){
                message("ADMB executable check: PASS\n")
            } else {
                message("ADMB executable check: INCONCLUSIVE\n Executable has run successfully but results may not be correct.\n")
            }
        }
    } else {
        ## Full check: run the installed test suite. Use the pre-3.6.0 RNG
        ## sampling algorithm so stored reference results stay reproducible,
        ## and restore the default even if test_dir() errors.
        suppressWarnings(RNGkind(sample.kind = "Rounding"))
        on.exit(suppressWarnings(RNGkind(sample.kind = "default")), add = TRUE)
        dir <- file.path(system.file(package = "ascr"), "tests")
        test_dir(dir)
    }
}
## Aliasing old test.admbsecr() function name.
#' @rdname test.ascr
#' @export
test.admbsecr <- test.ascr
|
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#4. Across the United States, how have emissions from coal combustion-related
## sources changed from 1999–2008?
library(dplyr)
library(ggplot2)  # FIX: ggplot()/aes()/geom_boxplot() below need ggplot2, which was never loaded
# Join emissions records to their source classifications, then keep only the
# coal-related source sectors.
mrg <- merge(SCC, NEI, by.x = "SCC", by.y = "SCC")
x <- grep("Coal", mrg$EI.Sector)
Coal <- mrg[x, ]
Coal1 <- select(Coal, fips:year)
# Open "png" graphics device
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# Boxplots of log10 emissions per year with a linear trend line.
g <- ggplot(Coal1, aes(factor(year), log10(Emissions))) + geom_boxplot() +
xlab("Year") + ylab("log10(PM2.5 Emissions, Tons)") +
ggtitle("US: PM2.5 Combustible Coal Emissions Trend") +  # FIX: data are nationwide, not Baltimore-only
geom_smooth(method = "lm", se=FALSE, color="green", aes(group=1))
print(g)
dev.off() | /plot4.R | no_license | StanfordB3/Exploratory-Data-Analysis | R | false | false | 813 | r | ## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#4. Across the United States, how have emissions from coal combustion-related
## sources changed from 1999–2008?
library(dplyr)
library(ggplot2)  # FIX: ggplot()/aes()/geom_boxplot() below need ggplot2, which was never loaded
# Join emissions records to their source classifications, then keep only the
# coal-related source sectors.
mrg <- merge(SCC, NEI, by.x = "SCC", by.y = "SCC")
x <- grep("Coal", mrg$EI.Sector)
Coal <- mrg[x, ]
Coal1 <- select(Coal, fips:year)
# Open "png" graphics device
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# Boxplots of log10 emissions per year with a linear trend line.
g <- ggplot(Coal1, aes(factor(year), log10(Emissions))) + geom_boxplot() +
xlab("Year") + ylab("log10(PM2.5 Emissions, Tons)") +
ggtitle("US: PM2.5 Combustible Coal Emissions Trend") +  # FIX: data are nationwide, not Baltimore-only
geom_smooth(method = "lm", se=FALSE, color="green", aes(group=1))
print(g)
dev.off() |
# Package-level namespace directives (roxygen2): import all of ggplot2 and the
# `:=` operator from data.table. The trailing NULL is a dummy object for
# roxygen to attach these tags to.
#' @import ggplot2
#' @importFrom data.table :=
NULL
| /R/ggcharts-downloads.R | no_license | thomas-neitmann/ggcharts-downloads | R | false | false | 53 | r | #' @import ggplot2
#' @importFrom data.table :=
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/state.R
\name{stateSpec}
\alias{stateSpec}
\title{Set State Parameters}
\usage{
stateSpec(name = NULL, group = "common", labels = NULL, layout = NULL,
sort = NULL, filter = NULL)
}
\arguments{
\item{name}{the name of the display}
\item{group}{the group of the display}
\item{labels}{a vector of names of cognostics to be shown as labels underneath each panel. If not specified, the default is to show labels for any of the splitting variables that created the partition of the data being plotted.}
\item{layout}{a list with optional elements \code{nrow}, \code{ncol}, and \code{arrange}. \code{nrow} and \code{ncol} specify the arrangement of the panels into rows and columns (\code{nrow = 1} and \code{ncol = 1} are defaults), and \code{arrange} can be either "row" or "col" and specifies whether to sort the panels by row or by column ("row" is the default)}
\item{sort}{a named list where each name corresponds to a cognostic name and the value is either "asc" or "desc" for sorting in ascending or descending order. The order in which sorting is applied to each variable is according to the order of the variables specified.}
\item{filter}{a named list where each name corresponds to a cognostic name and the value is a specification of either "regex" or "select" for categorical variables, or a range, "from" and "to", for quantitative variables. For a "regex", a simple regular expression string is specified, and the filter finds all matches for the regular expression against the variable. For "select" a vector of strings is specified, and all exact matches are returned. For the range filter, all values of the specified variable within the range "from" and "to" are returned. If either "from" or "to" are omitted, they are treated as \code{-Inf} and \code{Inf} respectively.}
}
\description{
Set State Parameters
}
\details{
Trelliscope allows you to specify either a default state in \code{\link{makeDisplay}} or specify the state of the display when you call \code{\link{view}}.
}
\examples{
state <- stateSpec(
name = "my_display",
sort = list(state = "desc", county = "asc"),
filter = list(
county = list(regex = "Ben"),
state = list(select = c("OR", "WA")),
meanList = list(from = 50, to = 150)
),
layout = list(nrow = 2, ncol = 4),
labels = c("county", "state")
)
state <- validateState(state, checkDisplay = FALSE)
makeStateHash(state)
}
| /man/stateSpec.Rd | permissive | hafen/trelliscope | R | false | true | 2,470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/state.R
\name{stateSpec}
\alias{stateSpec}
\title{Set State Parameters}
\usage{
stateSpec(name = NULL, group = "common", labels = NULL, layout = NULL,
sort = NULL, filter = NULL)
}
\arguments{
\item{name}{the name of the display}
\item{group}{the group of the display}
\item{labels}{a vector of names of cognostics to be shown as labels underneath each panel. If not specified, the default is to show labels for any of the splitting variables that created the partition of the data being plotted.}
\item{layout}{a list with optional elements \code{nrow}, \code{ncol}, and \code{arrange}. \code{nrow} and \code{ncol} specify the arrangement of the panels into rows and columns (\code{nrow = 1} and \code{ncol = 1} are defaults), and \code{arrange} can be either "row" or "col" and specifies whether to sort the panels by row or by column ("row" is the default)}
\item{sort}{a named list where each name corresponds to a cognostic name and the value is either "asc" or "desc" for sorting in ascending or descending order. The order in which sorting is applied to each variable is according to the order of the variables specified.}
\item{filter}{a named list where each name corresponds to a cognostic name and the value is a specification of either "regex" or "select" for categorical variables, or a range, "from" and "to", for quantitative variables. For a "regex", a simple regular expression string is specified, and the filter finds all matches for the regular expression against the variable. For "select" a vector of strings is specified, and all exact matches are returned. For the range filter, all values of the specified variable within the range "from" and "to" are returned. If either "from" or "to" are omitted, they are treated as \code{-Inf} and \code{Inf} respectively.}
}
\description{
Set State Parameters
}
\details{
Trelliscope allows you to specify either a default state in \code{\link{makeDisplay}} or specify the state of the display when you call \code{\link{view}}.
}
\examples{
state <- stateSpec(
name = "my_display",
sort = list(state = "desc", county = "asc"),
filter = list(
county = list(regex = "Ben"),
state = list(select = c("OR", "WA")),
meanList = list(from = 50, to = 150)
),
layout = list(nrow = 2, ncol = 4),
labels = c("county", "state")
)
state <- validateState(state, checkDisplay = FALSE)
makeStateHash(state)
}
|
# Build an integer vector running from 0 through 10 with the colon operator.
x1 <- 0:10

# seq() is R's general sequence generator -- roughly Python's range().
# Open its help page:
?seq

# Count down from 30 to 0 in steps of 3; the top-level expression also
# prints the result, as the original's parenthesized assignment did.
x3 <- seq(from = 30, to = 0, by = -3)
(x3)
| /10-entering-data.R | no_license | ShawonAshraf/Learning-R | R | false | false | 97 | r | # assign 0 to 10
x1 <- 0:10  # integer vector 0 through 10 (colon operator)
# seq
# more like range in python
?seq  # open the help page for seq()
(x3 <- seq(30, 0, by = -3))  # count down 30..0 by 3; parentheses force printing
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Beta.R
\name{pBETA}
\alias{pBETA}
\title{Beta Distribution}
\usage{
pBETA(p,a,b)
}
\arguments{
\item{p}{vector of probabilities.}
\item{a}{single value for shape parameter alpha representing as a.}
\item{b}{single value for shape parameter beta representing as b.}
}
\value{
The output of \code{pBETA} gives the cumulative density values in vector form.
}
\description{
These functions provide the ability for generating probability density values,
cumulative probability density values and moment about zero values for the
Beta Distribution bounded between [0,1].
}
\details{
The probability density function and cumulative density function of a unit
bounded beta distribution with random variable P are given by
\deqn{g_{P}(p)= \frac{p^{a-1}(1-p)^{b-1}}{B(a,b)} } ; \eqn{0 \le p \le 1}
\deqn{G_{P}(p)= \frac{B_p(a,b)}{B(a,b)} } ; \eqn{0 \le p \le 1}
\deqn{a,b > 0}
The mean and the variance are denoted by
\deqn{E[P]= \frac{a}{a+b} }
\deqn{var[P]= \frac{ab}{(a+b)^2(a+b+1)} }
The moments about zero is denoted as
\deqn{E[P^r]= \prod_{i=0}^{r-1} (\frac{a+i}{a+b+i}) }
\eqn{r = 1,2,3,...}
Here \eqn{B_p(a,b)=\int^p_0 t^{a-1} (1-t)^{b-1}\,dt} is the
incomplete beta integral and \eqn{B(a,b)} is the beta function.
\strong{NOTE} : If input parameters are not in given domain conditions necessary error
messages will be provided to go further.
}
\examples{
#plotting the random variables and probability values
col <- rainbow(4)
a <- c(1,2,5,10)
plot(0,0,main="Probability density graph",xlab="Random variable",ylab="Probability density values",
xlim = c(0,1),ylim = c(0,4))
for (i in 1:4)
{
lines(seq(0,1,by=0.01),dBETA(seq(0,1,by=0.01),a[i],a[i])$pdf,col = col[i])
}
dBETA(seq(0,1,by=0.01),2,3)$pdf #extracting the pdf values
dBETA(seq(0,1,by=0.01),2,3)$mean #extracting the mean
dBETA(seq(0,1,by=0.01),2,3)$var #extracting the variance
#plotting the random variables and cumulative probability values
col <- rainbow(4)
a <- c(1,2,5,10)
plot(0,0,main="Cumulative density graph",xlab="Random variable",ylab="Cumulative density values",
xlim = c(0,1),ylim = c(0,1))
for (i in 1:4)
{
lines(seq(0,1,by=0.01),pBETA(seq(0,1,by=0.01),a[i],a[i]),col = col[i])
}
pBETA(seq(0,1,by=0.01),2,3) #acquiring the cumulative probability values
mazBETA(1.4,3,2) #acquiring the moment about zero values
mazBETA(2,3,2)-mazBETA(1,3,2)^2 #acquiring the variance for a=3,b=2
#only the integer value of moments is taken here because moments cannot be decimal
mazBETA(1.9,5.5,6)
}
\references{
Johnson, N. L., Kotz, S. and Balakrishnan, N. (1994) Continuous Univariate Distributions, Vol. 2,
Wiley Series in Probability and Mathematical Statistics, Wiley.
Trenkler, G., 1996. Continuous univariate distributions. Computational Statistics & Data Analysis,
21(1), p.119.
Available at: \doi{10.1016/0167-9473(96)90015-8}.
}
\seealso{
\code{\link[stats]{Beta}}
or
\url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Beta.html}
}
| /man/pBETA.Rd | no_license | cran/fitODBOD | R | false | true | 3,056 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Beta.R
\name{pBETA}
\alias{pBETA}
\title{Beta Distribution}
\usage{
pBETA(p,a,b)
}
\arguments{
\item{p}{vector of probabilities.}
\item{a}{single value for shape parameter alpha representing as a.}
\item{b}{single value for shape parameter beta representing as b.}
}
\value{
The output of \code{pBETA} gives the cumulative density values in vector form.
}
\description{
These functions provide the ability for generating probability density values,
cumulative probability density values and moment about zero values for the
Beta Distribution bounded between [0,1].
}
\details{
The probability density function and cumulative density function of a unit
bounded beta distribution with random variable P are given by
\deqn{g_{P}(p)= \frac{p^{a-1}(1-p)^{b-1}}{B(a,b)} } ; \eqn{0 \le p \le 1}
\deqn{G_{P}(p)= \frac{B_p(a,b)}{B(a,b)} } ; \eqn{0 \le p \le 1}
\deqn{a,b > 0}
The mean and the variance are denoted by
\deqn{E[P]= \frac{a}{a+b} }
\deqn{var[P]= \frac{ab}{(a+b)^2(a+b+1)} }
The moments about zero is denoted as
\deqn{E[P^r]= \prod_{i=0}^{r-1} (\frac{a+i}{a+b+i}) }
\eqn{r = 1,2,3,...}
Here \eqn{B_p(a,b)=\int^p_0 t^{a-1} (1-t)^{b-1}\,dt} is the
incomplete beta integral and \eqn{B(a,b)} is the beta function.
\strong{NOTE} : If input parameters are not in given domain conditions necessary error
messages will be provided to go further.
}
\examples{
#plotting the random variables and probability values
col <- rainbow(4)
a <- c(1,2,5,10)
plot(0,0,main="Probability density graph",xlab="Random variable",ylab="Probability density values",
xlim = c(0,1),ylim = c(0,4))
for (i in 1:4)
{
lines(seq(0,1,by=0.01),dBETA(seq(0,1,by=0.01),a[i],a[i])$pdf,col = col[i])
}
dBETA(seq(0,1,by=0.01),2,3)$pdf #extracting the pdf values
dBETA(seq(0,1,by=0.01),2,3)$mean #extracting the mean
dBETA(seq(0,1,by=0.01),2,3)$var #extracting the variance
#plotting the random variables and cumulative probability values
col <- rainbow(4)
a <- c(1,2,5,10)
plot(0,0,main="Cumulative density graph",xlab="Random variable",ylab="Cumulative density values",
xlim = c(0,1),ylim = c(0,1))
for (i in 1:4)
{
lines(seq(0,1,by=0.01),pBETA(seq(0,1,by=0.01),a[i],a[i]),col = col[i])
}
pBETA(seq(0,1,by=0.01),2,3) #acquiring the cumulative probability values
mazBETA(1.4,3,2) #acquiring the moment about zero values
mazBETA(2,3,2)-mazBETA(1,3,2)^2 #acquiring the variance for a=3,b=2
#only the integer value of moments is taken here because moments cannot be decimal
mazBETA(1.9,5.5,6)
}
\references{
Johnson, N. L., Kotz, S. and Balakrishnan, N. (1994) Continuous Univariate Distributions, Vol. 2,
Wiley Series in Probability and Mathematical Statistics, Wiley.
Trenkler, G., 1996. Continuous univariate distributions. Computational Statistics & Data Analysis,
21(1), p.119.
Available at: \doi{10.1016/0167-9473(96)90015-8}.
}
\seealso{
\code{\link[stats]{Beta}}
or
\url{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Beta.html}
}
|
# Plot a 4-day deployment outlook for an autonomous recording unit (ARU) at a
# given location: for each day and survey window (Midnight / Sunrise / Sunset),
# a tile shows the fraction of forecast hours with acceptable wind and rain.
#
# lat, lon      -- site coordinates passed to the forecast and sunrise lookups.
# max_wsp_kmh   -- wind-speed threshold (km/h) counted as acceptable.
# tzone         -- Olson time zone for local dates and sun times.
#
# NOTE(review): installing packages at call time is unusual for library code;
# consider declaring lubridate/ggplot2 as package dependencies instead.
# NOTE(review): the dplyr verbs below (filter, mutate, %>%...) assume dplyr is
# attached by the caller -- confirm.
aru_forecast <- function(lat, lon,
                         max_wsp_kmh = 20,
                         tzone = "America/New_York") {
  if (!requireNamespace("lubridate", quietly = TRUE))
    install.packages("lubridate", quiet = TRUE)
  if (!requireNamespace("ggplot2", quietly = TRUE))
    install.packages("ggplot2", quiet = TRUE)
  wx <- get_hourly(lat, lon, tz = tzone)
  dts <- unique(wx$date_str)[1:5] # Only want forecast 4 days out
  # Sunrise/sunset times for the forecast dates (nrsmisc::get_sun takes lon
  # before lat).
  sun <- nrsmisc::get_sun(lon, lat, start = min(dts), end = max(dts),
                          direction = c("sunrise", "sunset"), out_tz = tzone)
  wx <- wx %>%
    filter(date_str %in% dts) %>%
    left_join(sun, by = "date_str") %>%
    rowwise() %>%
    # Assign each forecast hour to a 5-hour survey window centered 30 min
    # after sunrise / before sunset, or the 22:00-02:00 midnight window.
    mutate(AM_survey = sunrise + as.difftime(30, units = "mins"),
           PM_survey = sunset - as.difftime(30, units = "mins"),
           window = factor(
             case_when(
               hour %in% (lubridate::hour(AM_survey) + -2:2) ~ "Sunrise",
               hour %in% (lubridate::hour(PM_survey) + -2:2) ~ "Sunset",
               hour %in% c(22:23, 0:2) ~ "Midnight",
               TRUE ~ NA_character_),
             levels = c("Midnight", "Sunrise", "Sunset")),
           # Rain score per hour from the short-forecast wording (1 = dry).
           rain = case_when(
             grepl("Slight Chance Rain Showers", forecast) ~ 0.8,
             grepl("Chance Rain Showers", forecast) ~ 0.5,
             grepl("Rain Showers Likely", forecast) ~ 0.3,
             grepl("Showers|storms", forecast) ~ 0,
             TRUE ~ 1),
           # NOTE(review): `wind` is computed here but never used -- wind_OK
           # below recomputes directly from wspd_kmh. Dead code?
           wind= case_when(
             wspd_kmh <= 0.75 * max_wsp_kmh ~ 1,
             wspd_kmh <= max_wsp_kmh ~ 0.75,
             wspd_kmh <= 1.25 * max_wsp_kmh ~ 0.5,
             TRUE ~ 0),
           # Hours after 21:00 belong to the NEXT day's midnight window.
           date = as.Date(ifelse(hour > 21,
                                 as.character(as.Date(date_str) + as.difftime(1, units = "days")),
                                 date_str))) %>%
    filter(!is.na(window)) %>% ungroup() %>%
    group_by(date, window) %>%
    # Per window: fraction of hours with acceptable wind and mean rain score;
    # keep only complete 5-hour windows.
    summarise(n = n(),
              wind_OK = sum(wspd_kmh <= max_wsp_kmh) / n,
              rain_OK = sum(rain) / n) %>%
    filter(n == 5)
  # Tile chart: one cell per date x window, shaded and labelled by the
  # combined wind x rain suitability score (0 = bad, 1 = good).
  p <- ggplot2::ggplot(wx, ggplot2::aes(date, window)) +
    ggplot2::geom_tile(ggplot2::aes(fill = wind_OK * rain_OK)) +
    ggplot2::scale_fill_distiller(palette = 8, type = "div", direction = 1, guide = "none",
                                  limits = c(0, 1)) +
    ggplot2::geom_label(ggplot2::aes(label = sprintf("%.2f", round(wind_OK * rain_OK, 2))), size = 3) +
    ggplot2::scale_y_discrete("Survey window", expand=c(0,0), limits = rev(levels(wx$window))) +
    ggplot2::scale_x_date("", position = "top", expand=c(0,0),
                          date_breaks = "1 day", date_labels = "%a%n%d %b") +
    ggplot2::geom_vline(xintercept = as.numeric(unique(wx$date)) + 0.5, color = "black") +
    ggplot2::geom_hline(yintercept = seq(from = 0.5, by = 1, length = 4), color = "black") +
    ggplot2::theme_bw() +
    ggplot2::theme(panel.background = ggplot2::element_blank(),
                   panel.grid.major = ggplot2::element_blank(),
                   panel.grid.minor = ggplot2::element_blank(),
                   axis.ticks = ggplot2::element_blank()) +
    ggplot2::ggtitle(paste0("ARU Deployment Outlook", ": ", lat, ", ", lon))
  p
}
# Fetch the National Weather Service hourly forecast for a lat/lon and return
# a data frame with one row per forecast hour: local date, date string, hour,
# wind speed converted from mph to km/h, and the short forecast text.
# NOTE(review): installing packages at call time is unusual for library code.
get_hourly <- function(lat, lon, tz) {
  if (!requireNamespace("httr", quietly = TRUE))
    install.packages("httr", quiet = TRUE)
  if (!requireNamespace("jsonlite", quietly = TRUE))
    install.packages("jsonlite", quiet = TRUE)
  # Resolve the NWS gridpoint metadata for this location, then follow its
  # forecastHourly URL.
  res <- httr::GET(paste0("https://api.weather.gov/points/", lat, ",", lon))
  httr::stop_for_status(res)
  con <- httr::content(res, "text", encoding = "UTF-8")
  fch <- jsonlite::fromJSON(con)$properties$forecastHourly
  res <- httr::GET(fch)
  httr::stop_for_status(res)
  con <- httr::content(res, "text", encoding = "UTF-8")
  wx <- jsonlite::fromJSON(con)$properties$periods
  # Normalize fields; 1.60934 converts mph to km/h.
  # NOTE(review): mutate/select/%>% assume dplyr is attached by the caller.
  out_wx <- wx %>%
    mutate(dt = lubridate::ymd_hms(startTime, tz = tz),
           date = as.Date(dt, tz = tz),
           date_str = as.character(date),
           hour = lubridate::hour(dt),
           wspd_kmh = round(as.integer(sub(" mph", "", windSpeed)) * 1.60934)) %>%
    select(date, date_str, hour, wspd_kmh, forecast = shortForecast)
  out_wx
}
| /R/aru_forecast.R | no_license | adamdsmith/BLRA_ARUs | R | false | false | 4,435 | r | aru_forecast <- function(lat, lon,
max_wsp_kmh = 20,
tzone = "America/New_York") {
if (!requireNamespace("lubridate", quietly = TRUE))
install.packages("lubridate", quiet = TRUE)
if (!requireNamespace("ggplot2", quietly = TRUE))
install.packages("ggplot2", quiet = TRUE)
wx <- get_hourly(lat, lon, tz = tzone)
dts <- unique(wx$date_str)[1:5] # Only want forecast 4 days out
sun <- nrsmisc::get_sun(lon, lat, start = min(dts), end = max(dts),
direction = c("sunrise", "sunset"), out_tz = tzone)
wx <- wx %>%
filter(date_str %in% dts) %>%
left_join(sun, by = "date_str") %>%
rowwise() %>%
mutate(AM_survey = sunrise + as.difftime(30, units = "mins"),
PM_survey = sunset - as.difftime(30, units = "mins"),
window = factor(
case_when(
hour %in% (lubridate::hour(AM_survey) + -2:2) ~ "Sunrise",
hour %in% (lubridate::hour(PM_survey) + -2:2) ~ "Sunset",
hour %in% c(22:23, 0:2) ~ "Midnight",
TRUE ~ NA_character_),
levels = c("Midnight", "Sunrise", "Sunset")),
rain = case_when(
grepl("Slight Chance Rain Showers", forecast) ~ 0.8,
grepl("Chance Rain Showers", forecast) ~ 0.5,
grepl("Rain Showers Likely", forecast) ~ 0.3,
grepl("Showers|storms", forecast) ~ 0,
TRUE ~ 1),
wind= case_when(
wspd_kmh <= 0.75 * max_wsp_kmh ~ 1,
wspd_kmh <= max_wsp_kmh ~ 0.75,
wspd_kmh <= 1.25 * max_wsp_kmh ~ 0.5,
TRUE ~ 0),
date = as.Date(ifelse(hour > 21,
as.character(as.Date(date_str) + as.difftime(1, units = "days")),
date_str))) %>%
filter(!is.na(window)) %>% ungroup() %>%
group_by(date, window) %>%
summarise(n = n(),
wind_OK = sum(wspd_kmh <= max_wsp_kmh) / n,
rain_OK = sum(rain) / n) %>%
filter(n == 5)
p <- ggplot2::ggplot(wx, ggplot2::aes(date, window)) +
ggplot2::geom_tile(ggplot2::aes(fill = wind_OK * rain_OK)) +
ggplot2::scale_fill_distiller(palette = 8, type = "div", direction = 1, guide = "none",
limits = c(0, 1)) +
ggplot2::geom_label(ggplot2::aes(label = sprintf("%.2f", round(wind_OK * rain_OK, 2))), size = 3) +
ggplot2::scale_y_discrete("Survey window", expand=c(0,0), limits = rev(levels(wx$window))) +
ggplot2::scale_x_date("", position = "top", expand=c(0,0),
date_breaks = "1 day", date_labels = "%a%n%d %b") +
ggplot2::geom_vline(xintercept = as.numeric(unique(wx$date)) + 0.5, color = "black") +
ggplot2::geom_hline(yintercept = seq(from = 0.5, by = 1, length = 4), color = "black") +
ggplot2::theme_bw() +
ggplot2::theme(panel.background = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank()) +
ggplot2::ggtitle(paste0("ARU Deployment Outlook", ": ", lat, ", ", lon))
p
}
# Fetch the National Weather Service hourly forecast for a lat/lon and return
# a data frame with one row per forecast hour: local date, date string, hour,
# wind speed converted from mph to km/h, and the short forecast text.
# NOTE(review): installing packages at call time is unusual for library code.
get_hourly <- function(lat, lon, tz) {
  if (!requireNamespace("httr", quietly = TRUE))
    install.packages("httr", quiet = TRUE)
  if (!requireNamespace("jsonlite", quietly = TRUE))
    install.packages("jsonlite", quiet = TRUE)
  # Resolve the NWS gridpoint metadata for this location, then follow its
  # forecastHourly URL.
  res <- httr::GET(paste0("https://api.weather.gov/points/", lat, ",", lon))
  httr::stop_for_status(res)
  con <- httr::content(res, "text", encoding = "UTF-8")
  fch <- jsonlite::fromJSON(con)$properties$forecastHourly
  res <- httr::GET(fch)
  httr::stop_for_status(res)
  con <- httr::content(res, "text", encoding = "UTF-8")
  wx <- jsonlite::fromJSON(con)$properties$periods
  # Normalize fields; 1.60934 converts mph to km/h.
  # NOTE(review): mutate/select/%>% assume dplyr is attached by the caller.
  out_wx <- wx %>%
    mutate(dt = lubridate::ymd_hms(startTime, tz = tz),
           date = as.Date(dt, tz = tz),
           date_str = as.character(date),
           hour = lubridate::hour(dt),
           wspd_kmh = round(as.integer(sub(" mph", "", windSpeed)) * 1.60934)) %>%
    select(date, date_str, hour, wspd_kmh, forecast = shortForecast)
  out_wx
}
|
# Install needed packages
library(ggplot2)
library(vcd)
library(stats)
library(car)
# Load data that will be used for this analysis
# Rename Football Stats dataset (with drank rankings)
# NOTE(review): "drank" looks like a typo for "draft" rankings.
footballstats.data<-read.csv("Dataset1FootballStats.csv")
# Rename Team Football Stats dataset
teamfootballstats.data<-read.csv("Dataset2TeamFootballStats.csv")
# Rename Deflategate dataset
football.data<-read.csv("Dataset3FootballDeflatagate.csv")
#Data Preparation
# Coerce Rank (read as character/factor) to numeric for plotting/correlation.
footballstats.data$Rank <- as.numeric(as.character(footballstats.data$Rank))
head(footballstats.data$Rank, n=10)
#histogram
hist(footballstats.data$Rank,breaks=10, col="green", border = "blue", main = "Histogram of Rank")
hist(footballstats.data$Wins,breaks=10, col="green", border = "blue", main = "Histogram of Wins")
#scatter plot
# NOTE(review): base graphics does not chain with `+` (that is ggplot2 idiom);
# both calls still execute and the `+` on their NULL returns happens to be
# harmless, but two separate statements would be clearer.
plot(footballstats.data$Rank, footballstats.data$Losses) + title(main="Scatter Plot of Draft Rank to Team Losses")
#Correlations
cor(footballstats.data$Rank, footballstats.data$Wins, use="complete", method="pearson")
# 0.4007555
cor(footballstats.data$Grade, footballstats.data$Rank, use="complete", method="pearson")
# -0.5723498
#Data Preparation
# FIX: the original line assigned `footballstats.data$Rank` here -- a
# copy/paste slip from the draft-rank block above that clobbered the
# team-level Wins column with another dataset's Rank values. Coerce this
# dataset's own Wins column to numeric instead.
# NOTE(review): the "# <value>" result comments below were recorded against
# the pre-fix data and may change -- re-run to refresh them.
teamfootballstats.data$Wins <- as.numeric(as.character(teamfootballstats.data$Wins))
NFC <- subset(teamfootballstats.data,NFC==1)
AFC <- subset(teamfootballstats.data,NFC==0)
Redskins <- subset(teamfootballstats.data,Team.Name=="Washington.Redskins")
Tampa <- subset(teamfootballstats.data,Team.Name=="Tampa.Bay.Bucaneers")
Jaguars <- subset(teamfootballstats.data,Team.Name=="Jacksonville.Jaguars")
#histogram
hist(teamfootballstats.data$Wins,breaks=10, col="blue", border = "green", main = "Histogram of Wins - ALL TEAMS")
hist(NFC$Wins,breaks=10, col="blue", border = "green", main = "Histogram of Wins - NFC")
hist(AFC$Wins,breaks=10, col="blue", border = "green", main = "Histogram of Wins - AFC")
#Correlations
cor(teamfootballstats.data$Wins, teamfootballstats.data$Playoff.Berth, use="complete", method="pearson")
# 0.7324997
cor(teamfootballstats.data$Wins, teamfootballstats.data$Clinched.Division, use="complete", method="pearson")
# 0.608269
cor(teamfootballstats.data$Wins, teamfootballstats.data$PCT, use="complete", method="pearson")
# 0.9992527
cor(NFC$Wins, NFC$Playoff.Berth, use="complete", method="pearson")
# 0.7592389
cor(NFC$Wins, NFC$Clinched.Division, use="complete", method="pearson")
# 0.5499623
cor(NFC$Wins, NFC$PCT, use="complete", method="pearson")
# 0.9991874
cor(AFC$Wins, AFC$Playoff.Berth, use="complete", method="pearson")
# 0.7086859
cor(AFC$Wins, AFC$Clinched.Division, use="complete", method="pearson")
# 0.658829
cor(AFC$Wins, AFC$PCT, use="complete", method="pearson")
# 0.993231
#T-Test
# Do NFC and AFC teams differ in mean wins?
t.test(NFC$Wins, AFC$Wins)
#ANOVA
anova.Wins <- aov(Playoff.Berth ~ Wins + Loss, data=teamfootballstats.data)
summary(anova.Wins)
#Regression - Number of wins
fit.Wins <- lm(Wins ~ Playoff.Berth + Clinched.Division + Clinched.1st.Round.Bye, data=teamfootballstats.data)
coefficients(fit.Wins)
fitted(fit.Wins) # predicted values
residuals(fit.Wins) # residuals
anova(fit.Wins) # anova table
plot(fit.Wins)
#Regression - Playoffs
fit.Playoffs <- lm(Playoff.Berth ~ Wins, data=teamfootballstats.data)
coefficients(fit.Playoffs)
fitted(fit.Playoffs) # predicted values
residuals(fit.Playoffs) # residuals
anova(fit.Playoffs) # anova table
plot(fit.Playoffs)
#create Colts & Patriots data subsets
Colts <- subset(football.data,Team=="Colts")
Patriots <- subset(football.data, Team=="Patriots")
#correlation between Blakeman and Prioleau
# NOTE(review): Blakeman and Prioleau appear to be two officials' pressure
# measurements of the same balls -- confirm against the dataset's codebook.
cor(football.data$Blakeman, football.data$Prioleau, use="complete", method="pearson")
# 0.9218864
# Compare the two teams' ball pressures under each official's measurements.
t.test(Colts$Blakeman, Patriots$Blakeman)
t.test(Colts$Prioleau, Patriots$Prioleau)
# Deflategate - Dataset V2 (Combined Officials Measurements)
# rename data set to data
football.data2<-read.csv("Dataset3FootballDeflatagatev2.csv")
# Create Colts & Patriots data subsets
Colts2 <- subset(football.data2,Team=="Colts")
Patriots2 <- subset(football.data2, Team=="Patriots")
# Were the balls inflated similarly?
t.test(Colts2$Pressure, Patriots2$Pressure)
| /Football_R_KAD.R | no_license | kathryn2/R_Projects | R | false | false | 4,101 | r | # Install needed packages
# Football analysis script: draft rankings, team win/playoff relationships,
# and the Deflategate ball-pressure data.
# Requires the three CSV files below in the working directory.
library(ggplot2)
library(vcd)
library(stats)
library(car)

# Load data used for this analysis ----
# Draft-rank football stats
footballstats.data <- read.csv("Dataset1FootballStats.csv")
# Team-level season stats
teamfootballstats.data <- read.csv("Dataset2TeamFootballStats.csv")
# Deflategate ball-pressure measurements
football.data <- read.csv("Dataset3FootballDeflatagate.csv")

# Data preparation: Rank is read as a factor/character; coerce to numeric
# (non-numeric entries become NA with a warning).
footballstats.data$Rank <- as.numeric(as.character(footballstats.data$Rank))
head(footballstats.data$Rank, n=10)

# Histograms of draft rank and wins
hist(footballstats.data$Rank, breaks=10, col="green", border="blue", main="Histogram of Rank")
hist(footballstats.data$Wins, breaks=10, col="green", border="blue", main="Histogram of Wins")

# Scatter plot of draft rank vs. team losses.
# (Fix: base plot() returns NULL, so chaining with `+` is meaningless;
# title() is a separate call that annotates the current plot.)
plot(footballstats.data$Rank, footballstats.data$Losses)
title(main="Scatter Plot of Draft Rank to Team Losses")

# Correlations (use="complete.obs" spelled out; "complete" only worked via partial matching)
cor(footballstats.data$Rank, footballstats.data$Wins, use="complete.obs", method="pearson")
# 0.4007555  (result recorded from a prior run)
cor(footballstats.data$Grade, footballstats.data$Rank, use="complete.obs", method="pearson")
# -0.5723498

# Data preparation for the team dataset: coerce Wins to numeric.
# (Bug fix: this line previously assigned footballstats.data$Rank — draft ranks
# from a different dataframe — into teamfootballstats.data$Wins, corrupting
# every downstream Wins analysis.)
teamfootballstats.data$Wins <- as.numeric(as.character(teamfootballstats.data$Wins))

# Conference and single-team subsets (NFC column: 1 = NFC, 0 = AFC)
NFC <- subset(teamfootballstats.data, NFC==1)
AFC <- subset(teamfootballstats.data, NFC==0)
Redskins <- subset(teamfootballstats.data, Team.Name=="Washington.Redskins")
Tampa <- subset(teamfootballstats.data, Team.Name=="Tampa.Bay.Bucaneers")
Jaguars <- subset(teamfootballstats.data, Team.Name=="Jacksonville.Jaguars")

# Histograms of wins: league-wide and by conference
hist(teamfootballstats.data$Wins, breaks=10, col="blue", border="green", main="Histogram of Wins - ALL TEAMS")
hist(NFC$Wins, breaks=10, col="blue", border="green", main="Histogram of Wins - NFC")
hist(AFC$Wins, breaks=10, col="blue", border="green", main="Histogram of Wins - AFC")

# Correlations of wins with postseason outcomes and winning percentage
# (numeric comments below were recorded from a prior run, before the Wins fix)
cor(teamfootballstats.data$Wins, teamfootballstats.data$Playoff.Berth, use="complete.obs", method="pearson")
# 0.7324997
cor(teamfootballstats.data$Wins, teamfootballstats.data$Clinched.Division, use="complete.obs", method="pearson")
# 0.608269
cor(teamfootballstats.data$Wins, teamfootballstats.data$PCT, use="complete.obs", method="pearson")
# 0.9992527
cor(NFC$Wins, NFC$Playoff.Berth, use="complete.obs", method="pearson")
# 0.7592389
cor(NFC$Wins, NFC$Clinched.Division, use="complete.obs", method="pearson")
# 0.5499623
cor(NFC$Wins, NFC$PCT, use="complete.obs", method="pearson")
# 0.9991874
cor(AFC$Wins, AFC$Playoff.Berth, use="complete.obs", method="pearson")
# 0.7086859
cor(AFC$Wins, AFC$Clinched.Division, use="complete.obs", method="pearson")
# 0.658829
cor(AFC$Wins, AFC$PCT, use="complete.obs", method="pearson")
# 0.993231

# T-test: do NFC and AFC differ in mean wins?
t.test(NFC$Wins, AFC$Wins)

# ANOVA: playoff berth as a function of wins and losses
anova.Wins <- aov(Playoff.Berth ~ Wins + Loss, data=teamfootballstats.data)
summary(anova.Wins)

# Regression - number of wins predicted by postseason outcome indicators
fit.Wins <- lm(Wins ~ Playoff.Berth + Clinched.Division + Clinched.1st.Round.Bye, data=teamfootballstats.data)
coefficients(fit.Wins)
fitted(fit.Wins)    # predicted values
residuals(fit.Wins) # residuals
anova(fit.Wins)     # anova table
plot(fit.Wins)      # regression diagnostic plots

# Regression - playoff berth predicted by wins
fit.Playoffs <- lm(Playoff.Berth ~ Wins, data=teamfootballstats.data)
coefficients(fit.Playoffs)
fitted(fit.Playoffs)    # predicted values
residuals(fit.Playoffs) # residuals
anova(fit.Playoffs)     # anova table
plot(fit.Playoffs)      # regression diagnostic plots

# Create Colts & Patriots data subsets (Deflategate dataset)
Colts <- subset(football.data, Team=="Colts")
Patriots <- subset(football.data, Team=="Patriots")

# Correlation between the two officials' measurements (Blakeman vs. Prioleau)
cor(football.data$Blakeman, football.data$Prioleau, use="complete.obs", method="pearson")
# 0.9218864
# Per-official t-tests: Colts vs. Patriots ball pressures
t.test(Colts$Blakeman, Patriots$Blakeman)
t.test(Colts$Prioleau, Patriots$Prioleau)

# Deflategate - Dataset V2 (combined officials' measurements)
football.data2 <- read.csv("Dataset3FootballDeflatagatev2.csv")
# Create Colts & Patriots data subsets for the combined measurements
Colts2 <- subset(football.data2, Team=="Colts")
Patriots2 <- subset(football.data2, Team=="Patriots")
# Were the balls inflated similarly? (Welch two-sample t-test on pressure)
t.test(Colts2$Pressure, Patriots2$Pressure)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swne_plotting.R
\name{FeaturePlotDims}
\alias{FeaturePlotDims}
\title{Plots 2d embedding with feature overlayed}
\usage{
FeaturePlotDims(
dim.scores,
feature.scores,
feature.name = NULL,
x.lab = "tsne1",
y.lab = "tsne2",
alpha.plot = 0.5,
quantiles = c(0.01, 0.99),
show.axes = T,
pt.size = 1,
font.size = 12,
color.palette = "YlOrRd"
)
}
\arguments{
\item{dim.scores}{2D embedding coordinates. Must be N x 2 samples}
\item{feature.name}{Name of feature}
\item{x.lab}{X axis label}
\item{y.lab}{Y axis label}
\item{alpha.plot}{Data point transparency}
\item{quantiles}{Quantiles to trim outliers from}
\item{show.axes}{Plot x and y axes}
\item{pt.size}{Sample point size}
\item{font.size}{Font size for axis labels}
\item{color.palette}{RColorBrewer palette to use}
\item{feature.scores}{Feature vector to overlay}
}
\value{
ggplot2 object with dim plot with feature overlayed
}
\description{
Plots 2d embedding with feature overlayed
}
| /man/FeaturePlotDims.Rd | permissive | yanwu2014/swne | R | false | true | 1,046 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swne_plotting.R
\name{FeaturePlotDims}
\alias{FeaturePlotDims}
\title{Plots 2d embedding with feature overlayed}
\usage{
FeaturePlotDims(
dim.scores,
feature.scores,
feature.name = NULL,
x.lab = "tsne1",
y.lab = "tsne2",
alpha.plot = 0.5,
quantiles = c(0.01, 0.99),
show.axes = T,
pt.size = 1,
font.size = 12,
color.palette = "YlOrRd"
)
}
\arguments{
\item{dim.scores}{2D embedding coordinates. Must be N x 2 samples}
\item{feature.name}{Name of feature}
\item{x.lab}{X axis label}
\item{y.lab}{Y axis label}
\item{alpha.plot}{Data point transparency}
\item{quantiles}{Quantiles to trim outliers from}
\item{show.axes}{Plot x and y axes}
\item{pt.size}{Sample point size}
\item{font.size}{Font size for axis labels}
\item{color.palette}{RColorbrewer palette to use}
\item{feature.score}{Feature vector to overlay}
}
\value{
ggplot2 object with dim plot with feature overlayed
}
\description{
Plots 2d embedding with feature overlayed
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.