content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Empirical Orthogonal Function
#'
#' Computes Singular Value Decomposition (also known as Principal Components
#' Analysis or Empirical Orthogonal Functions).
#'
#' @param data a data.frame
#' @param formula a formula passed to \code{\link[data.table]{dcast}} to build
#' the matrix that will be used in the SVD decomposition (see Details)
#' @param value.var optional name of the data column (see Details)
#' @param n which singular values to return (if \code{NULL}, returns all)
#'
#' @return
#' A list of 3 named elements containing tidy data.tables of the right and left
#' singular vectors, and of their explained variance.
#'
#' @details
#' Singular values can be computed over matrices so \code{formula} denotes how
#' to build a matrix from the data. It is a formula of the form VAR ~ LEFT | RIGHT
#' (see [Formula::Formula]) in which VAR is the variable whose values will
#' populate the matrix, and LEFT represent the variables used to make the rows
#' and RIGHT, the columns of the matrix.
#' Think it like "VAR *as a function* of LEFT *and* RIGHT".
#'
#' Alternatively, if `value.var` is not `NULL`, it's possible to use the
#' (probably) more familiar [data.table::dcast] formula interface. In that case,
#' `data` must be provided.
#'
#' The result of VAR ~ LHS | RHS and VAR ~ RHS | LHS (ie, terms reversed)
#' is the same, with the exception that the order of the singular vector is
#' reversed (right is left and left is right).
#'
#' The variable combination used in this formula *must* identify
#' a unique value in a cell. For the time being, no error will be raised, but
#' there will be a message from \code{\link[data.table]{dcast}}.
#'
#' In the result, the \code{right} and \code{left} singular vectors have a
#' value for each singular value and each combination of the variables
#' used in RIGHT and LEFT of \code{formula}, respectively.
#'
#' It is much faster to compute only some singular vectors, so it is advisable
#' not to set n to \code{NULL}. If the irlba package is installed, EOF uses
#' [irlba::irlba] instead of [base::svd] since it's much faster.
#'
#' @examples
#' # The Antarctic Oscillation is computed from the
#' # monthly geopotential height anomalies weighted by latitude.
#' library(data.table)
#' data(geopotential)
#' geopotential <- copy(geopotential)
#' geopotential[, gh.t.w := Anomaly(gh)*sqrt(cos(lat*pi/180)),
#' by = .(lon, lat, month(date))]
#'
#' aao <- EOF(gh.t.w ~ lat + lon | date, data = geopotential, n = 1)
#'
#' # AAO field
#' library(ggplot2)
#' ggplot(aao$left, aes(lon, lat, z = gh.t.w)) +
#' geom_contour(aes(color = ..level..)) +
#' coord_polar()
#'
#' # AAO signal
#' ggplot(aao$right, aes(date, gh.t.w)) +
#' geom_line()
#'
#' # % of explained variance
#' aao$sdev
#'
#' # 1st eof for each month.
#' aao2 <- geopotential[, EOF(gh.t.w ~ lat + lon | date, n = 1)$left, by = month(date)]
#'
#' ggplot(aao2, aes(lon, lat)) +
#' geom_contour(aes(z = gh.t.w, color = ..level..)) +
#' facet_wrap(~ month)
#'
#' # Alternative interface
#'
#' aao2 <- EOF(lon + lat ~ date, value.var = "gh.t.w", data = geopotential)
#'
#' @family meteorology functions
#' @export
#' @import data.table
#' @import Formula
#' @importFrom stats as.formula
EOF <- function(formula, value.var = NULL, data = NULL, n = 1) {
  # Alternative dcast-style interface: `formula` is LEFT ~ RIGHT and the value
  # column is named in `value.var`; rebuild the canonical VAR ~ LEFT | RIGHT.
  if (!is.null(value.var)) {
    # FIX: the argument is spelled `call.` (the old `.call = FALSE` was simply
    # pasted into the message) and the message described the condition backwards.
    if (is.null(data)) stop("data must not be NULL if value.var is not NULL",
                            call. = FALSE)
    data <- copy(data)
    f <- as.character(formula)
    f <- stringr::str_replace(f, "~", "\\|")
    formula <- Formula::as.Formula(paste0(value.var, " ~ ", f))
  }
  # With no data supplied, evaluate the formula's variables in its environment.
  if (is.null(data)) {
    formula <- Formula::as.Formula(formula)
    data <- as.data.table(eval(quote(model.frame(formula, data = data))))
  }
  # Split VAR ~ LEFT | RIGHT into the value column (VAR, the part without "|")
  # and the LEFT ~ RIGHT formula that dcast() uses to build the matrix.
  f <- as.character(formula)
  f <- stringr::str_split(f, "~", n = 2)[[1]]
  dcast.formula <- stringr::str_squish(f[stringr::str_detect(f, "\\|")])
  dcast.formula <- as.formula(stringr::str_replace(dcast.formula, "\\|", "~"))
  value.var <- stringr::str_squish(f[!stringr::str_detect(f, "\\|")])
  g <- .tidy2matrix(data, dcast.formula, value.var)
  # FIX: for n = NULL the docs promise *all* singular vectors; the old
  # `n <- min(...)` kept a scalar, so only the LAST vector was returned.
  if (is.null(n)) n <- seq_len(min(ncol(g$matrix), nrow(g$matrix)))
  if (requireNamespace("irlba", quietly = TRUE)) {
    # NOTE(review): fixing the seed makes irlba deterministic but clobbers the
    # caller's RNG state as a side effect -- confirm this is intended.
    set.seed(42)
    eof <- irlba::irlba(g$matrix, nv = max(n), nu = max(n))
  } else {
    eof <- svd(g$matrix, nu = max(n), nv = max(n))
    eof$d <- eof$d[seq_len(max(n))]
  }
  pcomps <- n
  # Tidy the right singular vectors: one value per PC per RIGHT combination.
  # drop = FALSE keeps a matrix even when a single PC is requested.
  right <- as.data.table(eof$v[, n, drop = FALSE])
  colnames(right) <- as.character(pcomps)
  right <- cbind(right, as.data.table(g$coldims))
  right <- data.table::melt(right, id.vars = names(g$coldims),
                            variable.name = "PC", value.name = value.var)
  # Tidy the left singular vectors: one value per PC per LEFT combination.
  left <- as.data.table(eof$u[, n, drop = FALSE])
  colnames(left) <- as.character(pcomps)
  left <- cbind(left, as.data.table(g$rowdims),)
  left <- data.table::melt(left, id.vars = names(g$rowdims),
                           variable.name = "PC", value.name = value.var)
  # Explained variance: d^2 over the squared Frobenius norm (total variance).
  v.g <- norm(g$matrix, type = "F")
  sdev <- data.table(PC = pcomps, sd = eof$d[n])
  sdev[, r2 := sd^2/v.g^2]
  return(list(right = right, left = left, sdev = sdev))
}
| /R/EOF.R | no_license | R-forks-to-learn/metR | R | false | false | 5,165 | r | #' Empirical Orthogonal Function
#'
#' Computes Singular Value Decomposition (also known as Principal Components
#' Analysis or Empirical Orthogonal Functions).
#'
#' @param data a data.frame
#' @param formula a formula passed to \code{\link[data.table]{dcast}} to build
#' the matrix that will be used in the SVD decomposition (see Details)
#' @param value.var optional name of the data column (see Details)
#' @param n which singular values to return (if \code{NULL}, returns all)
#'
#' @return
#' A list of 3 named elements containing tidy data.tables of the right and left
#' singular vectors, and of their explained variance.
#'
#' @details
#' Singular values can be computed over matrices so \code{formula} denotes how
#' to build a matrix from the data. It is a formula of the form VAR ~ LEFT | RIGHT
#' (see [Formula::Formula]) in which VAR is the variable whose values will
#' populate the matrix, and LEFT represent the variables used to make the rows
#' and RIGHT, the columns of the matrix.
#' Think it like "VAR *as a function* of LEFT *and* RIGHT".
#'
#' Alternatively, if `value.var` is not `NULL`, it's possible to use the
#' (probably) more familiar [data.table::dcast] formula interface. In that case,
#' `data` must be provided.
#'
#' The result of VAR ~ LHS | RHS and VAR ~ RHS | LHS (ie, terms reversed)
#' is the same, with the exception that the order of the singular vector is
#' reversed (right is left and left is right).
#'
#' The variable combination used in this formula *must* identify
#' a unique value in a cell. For the time being, no error will be raised, but
#' there will be a message from \code{\link[data.table]{dcast}}.
#'
#' In the result, the \code{right} and \code{left} singular vectors have a
#' value for each singular value and each combination of the variables
#' used in RIGHT and LEFT of \code{formula}, respectively.
#'
#' It is much faster to compute only some singular vectors, so it is advisable
#' not to set n to \code{NULL}. If the irlba package is installed, EOF uses
#' [irlba::irlba] instead of [base::svd] since it's much faster.
#'
#' @examples
#' # The Antarctic Oscillation is computed from the
#' # monthly geopotential height anomalies weighted by latitude.
#' library(data.table)
#' data(geopotential)
#' geopotential <- copy(geopotential)
#' geopotential[, gh.t.w := Anomaly(gh)*sqrt(cos(lat*pi/180)),
#' by = .(lon, lat, month(date))]
#'
#' aao <- EOF(gh.t.w ~ lat + lon | date, data = geopotential, n = 1)
#'
#' # AAO field
#' library(ggplot2)
#' ggplot(aao$left, aes(lon, lat, z = gh.t.w)) +
#' geom_contour(aes(color = ..level..)) +
#' coord_polar()
#'
#' # AAO signal
#' ggplot(aao$right, aes(date, gh.t.w)) +
#' geom_line()
#'
#' # % of explained variance
#' aao$sdev
#'
#' # 1st eof for each month.
#' aao2 <- geopotential[, EOF(gh.t.w ~ lat + lon | date, n = 1)$left, by = month(date)]
#'
#' ggplot(aao2, aes(lon, lat)) +
#' geom_contour(aes(z = gh.t.w, color = ..level..)) +
#' facet_wrap(~ month)
#'
#' # Alternative interface
#'
#' aao2 <- EOF(lon + lat ~ date, value.var = "gh.t.w", data = geopotential)
#'
#' @family meteorology functions
#' @export
#' @import data.table
#' @import Formula
#' @importFrom stats as.formula
EOF <- function(formula, value.var = NULL, data = NULL, n = 1) {
  # Alternative dcast-style interface: `formula` is LEFT ~ RIGHT and the value
  # column is named in `value.var`; rebuild the canonical VAR ~ LEFT | RIGHT.
  if (!is.null(value.var)) {
    # FIX: the argument is spelled `call.` (the old `.call = FALSE` was simply
    # pasted into the message) and the message described the condition backwards.
    if (is.null(data)) stop("data must not be NULL if value.var is not NULL",
                            call. = FALSE)
    data <- copy(data)
    f <- as.character(formula)
    f <- stringr::str_replace(f, "~", "\\|")
    formula <- Formula::as.Formula(paste0(value.var, " ~ ", f))
  }
  # With no data supplied, evaluate the formula's variables in its environment.
  if (is.null(data)) {
    formula <- Formula::as.Formula(formula)
    data <- as.data.table(eval(quote(model.frame(formula, data = data))))
  }
  # Split VAR ~ LEFT | RIGHT into the value column (VAR, the part without "|")
  # and the LEFT ~ RIGHT formula that dcast() uses to build the matrix.
  f <- as.character(formula)
  f <- stringr::str_split(f, "~", n = 2)[[1]]
  dcast.formula <- stringr::str_squish(f[stringr::str_detect(f, "\\|")])
  dcast.formula <- as.formula(stringr::str_replace(dcast.formula, "\\|", "~"))
  value.var <- stringr::str_squish(f[!stringr::str_detect(f, "\\|")])
  g <- .tidy2matrix(data, dcast.formula, value.var)
  # FIX: for n = NULL the docs promise *all* singular vectors; the old
  # `n <- min(...)` kept a scalar, so only the LAST vector was returned.
  if (is.null(n)) n <- seq_len(min(ncol(g$matrix), nrow(g$matrix)))
  if (requireNamespace("irlba", quietly = TRUE)) {
    # NOTE(review): fixing the seed makes irlba deterministic but clobbers the
    # caller's RNG state as a side effect -- confirm this is intended.
    set.seed(42)
    eof <- irlba::irlba(g$matrix, nv = max(n), nu = max(n))
  } else {
    eof <- svd(g$matrix, nu = max(n), nv = max(n))
    eof$d <- eof$d[seq_len(max(n))]
  }
  pcomps <- n
  # Tidy the right singular vectors: one value per PC per RIGHT combination.
  # drop = FALSE keeps a matrix even when a single PC is requested.
  right <- as.data.table(eof$v[, n, drop = FALSE])
  colnames(right) <- as.character(pcomps)
  right <- cbind(right, as.data.table(g$coldims))
  right <- data.table::melt(right, id.vars = names(g$coldims),
                            variable.name = "PC", value.name = value.var)
  # Tidy the left singular vectors: one value per PC per LEFT combination.
  left <- as.data.table(eof$u[, n, drop = FALSE])
  colnames(left) <- as.character(pcomps)
  left <- cbind(left, as.data.table(g$rowdims))
  left <- data.table::melt(left, id.vars = names(g$rowdims),
                           variable.name = "PC", value.name = value.var)
  # Explained variance: d^2 over the squared Frobenius norm (total variance).
  v.g <- norm(g$matrix, type = "F")
  sdev <- data.table(PC = pcomps, sd = eof$d[n])
  sdev[, r2 := sd^2/v.g^2]
  return(list(right = right, left = left, sdev = sdev))
}
|
# Smoke test for StackLimit: query the current limits, set the limit to twice
# the first component of the query, then query again to confirm.
library(StackLimit)
# FIX: use `<-` for assignment and avoid shadowing base::c with a variable
# named `c` (the old code was `c = stackLimit()`).
lim <- stackLimit()
stackLimit(lim[1] * 2)  # presumably lim[1] is the soft limit -- confirm in StackLimit docs
stackLimit()
| /tests/stack.R | no_license | duncantl/StackLimit | R | false | false | 70 | r | library(StackLimit)
# Duplicate copy of the StackLimit smoke test (dataset artifact).
c = stackLimit()  # NOTE(review): `=` assignment and `c` shadows base::c -- prefer `lim <- stackLimit()`
stackLimit(c[1]*2)  # set the limit to twice the first element of the query above
stackLimit()  # query again to confirm the change
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods-summarizeMolecularProfiles.R
\name{summarizeMolecularProfiles,PharmacoSet-method}
\alias{summarizeMolecularProfiles,PharmacoSet-method}
\title{Takes molecular data from a PharmacoSet, and summarises them
into one entry per drug}
\usage{
\S4method{summarizeMolecularProfiles}{PharmacoSet}(
object,
mDataType,
cell.lines,
features,
summary.stat = c("mean", "median", "first", "last", "and", "or"),
fill.missing = TRUE,
summarize = TRUE,
verbose = TRUE,
binarize.threshold = NA,
binarize.direction = c("less", "greater"),
removeTreated = TRUE
)
}
\arguments{
\item{object}{\code{PharmacoSet} The PharmacoSet to summarize}
\item{mDataType}{\code{character} which one of the molecular data types
to use in the analysis, out of all the molecular data types available for the pset
for example: rna, rnaseq, snp}
\item{cell.lines}{\code{character} The cell lines to be summarized.
If any cell.line has no data, missing values will be created}
\item{features}{\code{character} A vector of the feature names to include in the summary}
\item{summary.stat}{\code{character} which summary method to use if there are repeated
cell.lines? Choices are "mean", "median", "first", or "last"
In case molecular data type is mutation or fusion "and" and "or" choices are available}
\item{fill.missing}{\code{boolean} should the missing cell lines not in the
molecular data object be filled in with missing values?}
\item{summarize}{A flag which when set to FALSE (defaults to TRUE) disables summarizing and
returns the data unchanged as a ExpressionSet}
\item{verbose}{\code{boolean} should messages be printed}
\item{binarize.threshold}{\code{numeric} A value on which the molecular data is binarized.
If NA, no binarization is done.}
\item{binarize.direction}{\code{character} One of "less" or "greater", the direction of binarization on
binarize.threshold, if it is not NA.}
\item{removeTreated}{\code{logical} If treated/perturbation experiments are present, should they
be removed? Defaults to yes.}
}
\value{
\code{PharmacoSet} An updated PharmacoSet with the molecular data summarized
per cell line.
}
\description{
Given a PharmacoSet with molecular data, this function will summarize
the data into one profile per cell line, using the chosen summary.stat. Note
that this does not really make sense with perturbation type data, and will
combine experiments and controls when doing the summary if run on a
perturbation dataset.
}
\examples{
data(GDSCsmall)
GDSCsmall <- summarizeMolecularProfiles(GDSCsmall,
mDataType = "rna", cell.lines=cellNames(GDSCsmall),
summary.stat = 'median', fill.missing = TRUE, verbose=TRUE)
GDSCsmall
}
| /man/summarizeMolecularProfiles-PharmacoSet-method.Rd | no_license | FuChunjin/PharmacoGx | R | false | true | 2,775 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods-summarizeMolecularProfiles.R
\name{summarizeMolecularProfiles,PharmacoSet-method}
\alias{summarizeMolecularProfiles,PharmacoSet-method}
\title{Takes molecular data from a PharmacoSet, and summarises them
into one entry per drug}
\usage{
\S4method{summarizeMolecularProfiles}{PharmacoSet}(
object,
mDataType,
cell.lines,
features,
summary.stat = c("mean", "median", "first", "last", "and", "or"),
fill.missing = TRUE,
summarize = TRUE,
verbose = TRUE,
binarize.threshold = NA,
binarize.direction = c("less", "greater"),
removeTreated = TRUE
)
}
\arguments{
\item{object}{\code{PharmacoSet} The PharmacoSet to summarize}
\item{mDataType}{\code{character} which one of the molecular data types
to use in the analysis, out of all the molecular data types available for the pset
for example: rna, rnaseq, snp}
\item{cell.lines}{\code{character} The cell lines to be summarized.
If any cell.line has no data, missing values will be created}
\item{features}{\code{character} A vector of the feature names to include in the summary}
\item{summary.stat}{\code{character} which summary method to use if there are repeated
cell.lines? Choices are "mean", "median", "first", or "last"
In case molecular data type is mutation or fusion "and" and "or" choices are available}
\item{fill.missing}{\code{boolean} should the missing cell lines not in the
molecular data object be filled in with missing values?}
\item{summarize}{A flag which when set to FALSE (defaults to TRUE) disables summarizing and
returns the data unchanged as a ExpressionSet}
\item{verbose}{\code{boolean} should messages be printed}
\item{binarize.threshold}{\code{numeric} A value on which the molecular data is binarized.
If NA, no binarization is done.}
\item{binarize.direction}{\code{character} One of "less" or "greater", the direction of binarization on
binarize.threshold, if it is not NA.}
\item{removeTreated}{\code{logical} If treated/perturbation experiments are present, should they
be removed? Defaults to yes.}
}
\value{
\code{PharmacoSet} An updated PharmacoSet with the molecular data summarized
per cell line.
}
\description{
Given a PharmacoSet with molecular data, this function will summarize
the data into one profile per cell line, using the chosen summary.stat. Note
that this does not really make sense with perturbation type data, and will
combine experiments and controls when doing the summary if run on a
perturbation dataset.
}
\examples{
data(GDSCsmall)
GDSCsmall <- summarizeMolecularProfiles(GDSCsmall,
mDataType = "rna", cell.lines=cellNames(GDSCsmall),
summary.stat = 'median', fill.missing = TRUE, verbose=TRUE)
GDSCsmall
}
|
# Some Phylogenetic Comparative Methods: PCMs
## packages: nlme, ape, geiger, phytools, geomorph
library(nlme)     # contains gls()
library(ape)      # many phylogenetics-related analyses (see Paradis, Analysis of Phylogenetics and Evolution with R)
library(geiger)   # many evolutionary analyses
library(phytools)
library(geomorph) # contains multivariate PCMs

## Read data, phylogeny, and match the two
# NOTE: a critical issue in R is that the species names in the data matrix
# [names(Y) or rownames(Y) depending on input type] match those on the
# phylogeny [phy$tip.label]:
anolis <- read.table("Data/Lab-12-anolis.SVL.txt", sep = ",", header = TRUE)  # FIX: header=T -> TRUE
anolis <- na.exclude(anolis)
anolis[, 5:6] <- log10(anolis[, 5:6])
head(anolis)
tree <- read.tree("Data/Lab-12-anolis.tree.tre")  # read tree
plot(tree, cex = 0.5)

# Prune tree to match taxa
# FIX: `tree$tip` relied on `$` partial matching; spell out tip.label.
droplist <- setdiff(tree$tip.label, rownames(anolis))
tree <- drop.tip(tree, droplist)
plot(tree, cex = 0.7)  # dev.off() # for fixing plot window if needed
# NOTE: the function 'treedata' in geiger prunes both the data matrix and the tree to match
anolis <- anolis[tree$tip.label, ]  # re-order data to match tree order (necessary for some functions)
ml <- anolis$Male_SVL; names(ml) <- rownames(anolis)
fml <- anolis$Female_SVL; names(fml) <- rownames(anolis)
ecomorph <- as.factor(anolis$geo_ecomorph); names(ecomorph) <- rownames(anolis)
Y <- cbind(anolis$Male_SVL, anolis$Female_SVL); rownames(Y) <- rownames(anolis)
gdf <- geomorph.data.frame(ml = ml, fml = fml, ecomorph = ecomorph, Y = Y, tree = tree)

# another dataset (multivariate)
data(plethspecies)
Y.gpa <- gpagen(plethspecies$land, print.progress = FALSE)  # GPA-alignment
land.gps <- c("A", "A", "A", "A", "A", "B", "B", "B", "B", "B", "B")
gp.end <- factor(c(0, 0, 1, 0, 0, 1, 1, 0, 0))  # endangered species vs. rest
names(gp.end) <- plethspecies$phy$tip.label

###################
### Analyses
# Regression: non-phylogenetic
anova(lm(anolis$Female_SVL ~ anolis$Male_SVL))
summary(lm(anolis$Female_SVL ~ anolis$Male_SVL))
plot(anolis$Female_SVL ~ anolis$Male_SVL)
abline(lm(anolis$Female_SVL ~ anolis$Male_SVL))
summary(gls(Female_SVL ~ Male_SVL, data = anolis))  # NOTE: non-phylogenetic done another way

# 1: Phylogenetic Regression (PGLS)
# using GLS
bm.gls <- gls(Female_SVL ~ Male_SVL, correlation = corBrownian(phy = tree), data = anolis)
summary(bm.gls)  # here the correlation structure of the phylogeny is used
anova(bm.gls)
# using D-PGLS: same
pgls.res <- procD.pgls(fml ~ ml, phy = tree, data = gdf, print.progress = FALSE)
summary(pgls.res)
# using Independent Contrasts: same
picM <- pic(anolis$Male_SVL, tree)
picF <- pic(anolis$Female_SVL, tree)
cor.test(picM, picF)
summary(lm(picF ~ picM - 1))  # contrasts run through origin: see Garland et al. 1992
plot(picF ~ picM)
abline(lm(picF ~ picM - 1))
# Phylogenetic anova
procD.pgls(fml ~ ecomorph, phy = tree, data = gdf, print.progress = FALSE)
anova(gls(Female_SVL ~ geo_ecomorph, correlation = corBrownian(phy = tree), data = anolis))  # same
# multivariate phy-anova/regression (even when p>N)
procD.pgls(Y ~ ecomorph, phy = tree, data = gdf, print.progress = FALSE)  # here Y is multivariate Y~X|phy.

# 2: Phylogenetic PLS: multivariate
IT <- phylo.integration(Y.gpa$coords, partition.gp = land.gps, phy = plethspecies$phy, print.progress = FALSE)
summary(IT); plot(IT)

# 3: Phylogenetic ordination
# NOTE(review): plotGMPhyloMorphoSpace() is deprecated in recent geomorph
# releases (see gm.prcomp()); kept as-is for compatibility with this lab.
plotGMPhyloMorphoSpace(plethspecies$phy, Y.gpa$coords, ancStates = FALSE)

# 4: Phylogenetic signal
phylosig(tree, anolis$Female_SVL, method = "K", test = TRUE, nsim = 1000)  # phytools; FIX: test=T -> TRUE
physignal(gdf$fml, gdf$tree, print.progress = FALSE)  # geomorph
res <- physignal(gdf$Y, gdf$tree)  # multivariate example
summary(res); plot(res)

# 5: Compare evolutionary rates
# univariate [geomorph]: provide data, tree and groups
compare.evol.rates(A = fml, gp = ecomorph, phy = tree, print.progress = FALSE)
# multivariate net evolutionary rates [geomorph]
ER <- compare.evol.rates(A = Y.gpa$coords, phy = plethspecies$phy, gp = gp.end, print.progress = FALSE)
summary(ER); plot(ER)
# NOTE: [phytools] can compare univariate rates, and rate matrices

# 6: Comparing evolutionary models
## Compare BM vs. OU models using GEIGER
geo <- get(data(geospiza))  # a smaller dataset (for time reasons); FIX: `=` -> `<-`
tmp <- treedata(geo$phy, geo$dat)
phy <- tmp$phy; dat <- tmp$data[, "tarsusL"]
plot(phy)
bm.M <- fitContinuous(phy, dat, model = "BM")
ou.M <- fitContinuous(phy, dat, bounds = list(alpha = c(min = exp(-500), max = exp(2))), model = "OU")
bm.M
ou.M
# Compare models: LRT & AIC (one extra parameter, alpha, in the OU model)
LRT.M <- (2 * (ou.M$opt$lnL - bm.M$opt$lnL))
prob.M <- pchisq(LRT.M, 1, lower.tail = FALSE)
LRT.M
prob.M
bm.M$opt$aic
ou.M$opt$aic  # OU does not provide a better fit: use simpler model (BM)
# NOTE: The package OUCH, MVSLOUCH, OUwie, and otherscan compare more complicated models: BM1, BMM, OU1, OUM, etc.). | /Lab/Scripts/12-PhylogeneticComparativeMethods.r | no_license | morganmackert/EEOB590C-DA | R | false | false | 4,647 | r | # Some Phylogenetic Comparative Methods: PCMs
## packages: nlme, ape, geiger, phytools, geomorph
library(nlme)     # contains gls()
library(ape)      # many phylogenetics-related analyses (see Paradis, Analysis of Phylogenetics and Evolution with R)
library(geiger)   # many evolutionary analyses
library(phytools)
library(geomorph) # contains multivariate PCMs

## Read data, phylogeny, and match the two
# NOTE: a critical issue in R is that the species names in the data matrix
# [names(Y) or rownames(Y) depending on input type] match those on the
# phylogeny [phy$tip.label]:
anolis <- read.table("Data/Lab-12-anolis.SVL.txt", sep = ",", header = TRUE)  # FIX: header=T -> TRUE
anolis <- na.exclude(anolis)
anolis[, 5:6] <- log10(anolis[, 5:6])
head(anolis)
tree <- read.tree("Data/Lab-12-anolis.tree.tre")  # read tree
plot(tree, cex = 0.5)

# Prune tree to match taxa
# FIX: `tree$tip` relied on `$` partial matching; spell out tip.label.
droplist <- setdiff(tree$tip.label, rownames(anolis))
tree <- drop.tip(tree, droplist)
plot(tree, cex = 0.7)  # dev.off() # for fixing plot window if needed
# NOTE: the function 'treedata' in geiger prunes both the data matrix and the tree to match
anolis <- anolis[tree$tip.label, ]  # re-order data to match tree order (necessary for some functions)
ml <- anolis$Male_SVL; names(ml) <- rownames(anolis)
fml <- anolis$Female_SVL; names(fml) <- rownames(anolis)
ecomorph <- as.factor(anolis$geo_ecomorph); names(ecomorph) <- rownames(anolis)
Y <- cbind(anolis$Male_SVL, anolis$Female_SVL); rownames(Y) <- rownames(anolis)
gdf <- geomorph.data.frame(ml = ml, fml = fml, ecomorph = ecomorph, Y = Y, tree = tree)

# another dataset (multivariate)
data(plethspecies)
Y.gpa <- gpagen(plethspecies$land, print.progress = FALSE)  # GPA-alignment
land.gps <- c("A", "A", "A", "A", "A", "B", "B", "B", "B", "B", "B")
gp.end <- factor(c(0, 0, 1, 0, 0, 1, 1, 0, 0))  # endangered species vs. rest
names(gp.end) <- plethspecies$phy$tip.label

###################
### Analyses
# Regression: non-phylogenetic
anova(lm(anolis$Female_SVL ~ anolis$Male_SVL))
summary(lm(anolis$Female_SVL ~ anolis$Male_SVL))
plot(anolis$Female_SVL ~ anolis$Male_SVL)
abline(lm(anolis$Female_SVL ~ anolis$Male_SVL))
summary(gls(Female_SVL ~ Male_SVL, data = anolis))  # NOTE: non-phylogenetic done another way

# 1: Phylogenetic Regression (PGLS)
# using GLS
bm.gls <- gls(Female_SVL ~ Male_SVL, correlation = corBrownian(phy = tree), data = anolis)
summary(bm.gls)  # here the correlation structure of the phylogeny is used
anova(bm.gls)
# using D-PGLS: same
pgls.res <- procD.pgls(fml ~ ml, phy = tree, data = gdf, print.progress = FALSE)
summary(pgls.res)
# using Independent Contrasts: same
picM <- pic(anolis$Male_SVL, tree)
picF <- pic(anolis$Female_SVL, tree)
cor.test(picM, picF)
summary(lm(picF ~ picM - 1))  # contrasts run through origin: see Garland et al. 1992
plot(picF ~ picM)
abline(lm(picF ~ picM - 1))
# Phylogenetic anova
procD.pgls(fml ~ ecomorph, phy = tree, data = gdf, print.progress = FALSE)
anova(gls(Female_SVL ~ geo_ecomorph, correlation = corBrownian(phy = tree), data = anolis))  # same
# multivariate phy-anova/regression (even when p>N)
procD.pgls(Y ~ ecomorph, phy = tree, data = gdf, print.progress = FALSE)  # here Y is multivariate Y~X|phy.

# 2: Phylogenetic PLS: multivariate
IT <- phylo.integration(Y.gpa$coords, partition.gp = land.gps, phy = plethspecies$phy, print.progress = FALSE)
summary(IT); plot(IT)

# 3: Phylogenetic ordination
# NOTE(review): plotGMPhyloMorphoSpace() is deprecated in recent geomorph
# releases (see gm.prcomp()); kept as-is for compatibility with this lab.
plotGMPhyloMorphoSpace(plethspecies$phy, Y.gpa$coords, ancStates = FALSE)

# 4: Phylogenetic signal
phylosig(tree, anolis$Female_SVL, method = "K", test = TRUE, nsim = 1000)  # phytools; FIX: test=T -> TRUE
physignal(gdf$fml, gdf$tree, print.progress = FALSE)  # geomorph
res <- physignal(gdf$Y, gdf$tree)  # multivariate example
summary(res); plot(res)

# 5: Compare evolutionary rates
# univariate [geomorph]: provide data, tree and groups
compare.evol.rates(A = fml, gp = ecomorph, phy = tree, print.progress = FALSE)
# multivariate net evolutionary rates [geomorph]
ER <- compare.evol.rates(A = Y.gpa$coords, phy = plethspecies$phy, gp = gp.end, print.progress = FALSE)
summary(ER); plot(ER)
# NOTE: [phytools] can compare univariate rates, and rate matrices

# 6: Comparing evolutionary models
## Compare BM vs. OU models using GEIGER
geo <- get(data(geospiza))  # a smaller dataset (for time reasons); FIX: `=` -> `<-`
tmp <- treedata(geo$phy, geo$dat)
phy <- tmp$phy; dat <- tmp$data[, "tarsusL"]
plot(phy)
bm.M <- fitContinuous(phy, dat, model = "BM")
ou.M <- fitContinuous(phy, dat, bounds = list(alpha = c(min = exp(-500), max = exp(2))), model = "OU")
bm.M
ou.M
# Compare models: LRT & AIC (one extra parameter, alpha, in the OU model)
LRT.M <- (2 * (ou.M$opt$lnL - bm.M$opt$lnL))
prob.M <- pchisq(LRT.M, 1, lower.tail = FALSE)
LRT.M
prob.M
bm.M$opt$aic
ou.M$opt$aic  # OU does not provide a better fit: use simpler model (BM)
# NOTE: The package OUCH, MVSLOUCH, OUwie, and otherscan compare more complicated models: BM1, BMM, OU1, OUM, etc.). |
library(pkr)
### Name: sNCA
### Title: Simplest NCA
### Aliases: sNCA
### Keywords: Output Form
### ** Examples

# For one subject (Theoph is the theophylline dataset shipped with R)
# FIX: use `<-` for assignment instead of `=` at top level.
x <- Theoph[Theoph$Subject == "1", "Time"]
y <- Theoph[Theoph$Subject == "1", "conc"]
sNCA(x, y, dose = 320, doseUnit = "mg", concUnit = "mg/L", timeUnit = "h")
sNCA(x, y, dose = 320, concUnit = "mg/L", returnNA = FALSE)
# Interval (partial) AUCs
iAUC <- data.frame(Name = c("AUC[0-12h]", "AUC[0-24h]"), Start = c(0, 0), End = c(12, 24))
sNCA(x, y, dose = 320, doseUnit = "mg", concUnit = "mg/L", timeUnit = "h", iAUC = iAUC)
# Molar-unit variants
MW <- 180.164  # molecular weight of theophylline
sNCA(x, y/MW, dose = 320, doseUnit = "mg", concUnit = "mmol/L", timeUnit = "h")
sNCA(x, y/MW, dose = 320, doseUnit = "mg", concUnit = "mmol/L", timeUnit = "h", MW = MW)
sNCA(x, y, dose = 320/MW, doseUnit = "mmol", concUnit = "mg/L", timeUnit = "h", MW = MW)
sNCA(x, y/MW, dose = 320/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW)
sNCA(x, y/MW, dose = 320/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW,
     returnNA = FALSE)
sNCA(x, y/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW, returnNA = FALSE)
sNCA(x, y/MW, dose = as.numeric(NA), doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h",
     MW = MW, returnNA = FALSE)
# Alternative time units
sNCA(x, y, dose = 320, concUnit = "mg/L", timeUnit = "hr")
sNCA(x*60, y, dose = 320, concUnit = "mg/L", timeUnit = "min")
| /data/genthat_extracted_code/pkr/examples/sNCA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,280 | r | library(pkr)
### Name: sNCA
### Title: Simplest NCA
### Aliases: sNCA
### Keywords: Output Form
### ** Examples

# For one subject (Theoph is the theophylline dataset shipped with R)
# FIX: use `<-` for assignment instead of `=` at top level.
x <- Theoph[Theoph$Subject == "1", "Time"]
y <- Theoph[Theoph$Subject == "1", "conc"]
sNCA(x, y, dose = 320, doseUnit = "mg", concUnit = "mg/L", timeUnit = "h")
sNCA(x, y, dose = 320, concUnit = "mg/L", returnNA = FALSE)
# Interval (partial) AUCs
iAUC <- data.frame(Name = c("AUC[0-12h]", "AUC[0-24h]"), Start = c(0, 0), End = c(12, 24))
sNCA(x, y, dose = 320, doseUnit = "mg", concUnit = "mg/L", timeUnit = "h", iAUC = iAUC)
# Molar-unit variants
MW <- 180.164  # molecular weight of theophylline
sNCA(x, y/MW, dose = 320, doseUnit = "mg", concUnit = "mmol/L", timeUnit = "h")
sNCA(x, y/MW, dose = 320, doseUnit = "mg", concUnit = "mmol/L", timeUnit = "h", MW = MW)
sNCA(x, y, dose = 320/MW, doseUnit = "mmol", concUnit = "mg/L", timeUnit = "h", MW = MW)
sNCA(x, y/MW, dose = 320/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW)
sNCA(x, y/MW, dose = 320/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW,
     returnNA = FALSE)
sNCA(x, y/MW, doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h", MW = MW, returnNA = FALSE)
sNCA(x, y/MW, dose = as.numeric(NA), doseUnit = "mmol", concUnit = "mmol/L", timeUnit = "h",
     MW = MW, returnNA = FALSE)
# Alternative time units
sNCA(x, y, dose = 320, concUnit = "mg/L", timeUnit = "hr")
sNCA(x*60, y, dose = 320, concUnit = "mg/L", timeUnit = "min")
|
#source("C:/Users/lisanjie2/Documents/1_R/2_Fx_FUNCTIONS/Fx_over_disp.R")
########################################
### Bolker's overdispersion function #c
# http://glmm.wikidot.com/faq
# I think this is designed for POISSOn data, not sure...
overdisp_fun <- function(model) {
# Ben Bolker's overdispersion check for mixed models (glmm.wikidot.com/faq):
# compares the sum of squared Pearson residuals against the residual df.
## number of variance parameters in
## an n-by-n variance-covariance matrix
vpars <- function(m) {
nrow(m)*(nrow(m)+1)/2
}
# residual df = observations - (variance parameters + fixed-effect coefficients)
model.df <- sum(sapply(VarCorr(model),vpars))+length(fixef(model))
rdf <- nrow(model.frame(model))-model.df
rp <- residuals(model,type="pearson")
Pearson.chisq <- sum(rp^2)
# dispersion ratio; per the FAQ this should be near 1 without overdispersion
prat <- Pearson.chisq/rdf
pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE)
# named result: chi-square statistic, dispersion ratio, residual df, p-value
c(chisq=Pearson.chisq,ratio=prat,rdf=rdf,p=pval)
}
print("overdisp_fun(model)")
### Bolker's overdispersion function ####
########################################
### ACHTUNG: there is a package with an overdispersion function also!
#################################
#### overdispersion with aod3 ###
# Check for overdispersion: you can do this by hand by computing
# `sum(residuals(gmod_lme4_L,"pearson")^2))`,
# but the `gof()` function
# from the `aods3` package is a handy shortcut (it computes overdispersion
# based on the deviance (`D` below) and Pearson residuals (`X2`): when
# they disagree, use the latter:
# library(aods3)
#
# gof(gmod_lme4_L) | /over_disp.R | no_license | brouwern/FUNCTIONS | R | false | false | 1,454 | r |
#source("C:/Users/lisanjie2/Documents/1_R/2_Fx_FUNCTIONS/Fx_over_disp.R")
########################################
### Bolker's overdispersion function #c
# http://glmm.wikidot.com/faq
# I think this is designed for POISSOn data, not sure...
# Ben Bolker's overdispersion check for (G)LMMs (http://glmm.wikidot.com/faq).
# Compares the sum of squared Pearson residuals to a chi-squared distribution
# on the residual degrees of freedom and returns c(chisq, ratio, rdf, p);
# ratio >> 1 suggests overdispersion.
# NOTE(review): uses VarCorr() and fixef(), i.e. an lme4-style fitted model --
# confirm lme4 is loaded before calling.
overdisp_fun <- function(model) {
## number of variance parameters in
## an n-by-n variance-covariance matrix
vpars <- function(m) {
nrow(m)*(nrow(m)+1)/2
}
# model df = random-effect (co)variance parameters + fixed effects
model.df <- sum(sapply(VarCorr(model),vpars))+length(fixef(model))
# residual degrees of freedom
rdf <- nrow(model.frame(model))-model.df
rp <- residuals(model,type="pearson")
Pearson.chisq <- sum(rp^2)
# dispersion ratio: chi-squared statistic per residual df
prat <- Pearson.chisq/rdf
# upper-tail p-value for H0: no overdispersion
pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE)
c(chisq=Pearson.chisq,ratio=prat,rdf=rdf,p=pval)
}
print("overdisp_fun(model)")
### Bolker's overdispersion function ####
########################################
### ACHTUNG: there is a package with an overdispersion function also!
#################################
#### overdispersion with aod3 ###
# Check for overdispersion: you can do this by hand by computing
# `sum(residuals(gmod_lme4_L,"pearson")^2))`,
# but the `gof()` function
# from the `aods3` package is a handy shortcut (it computes overdispersion
# based on the deviance (`D` below) and Pearson residuals (`X2`): when
# they disagree, use the latter:
# library(aod3)
#
# gof(gmod_lme4_L) |
# Load the state-park scores table. Successive read.csv() calls overwrite
# sp_score; only the last executed line "wins" -- these look like alternative
# input files kept from interactive sessions.
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\TiffsforR\\State_Park_Scores_0510411.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table.csv",sep=",")
# Keep only rows flagged for inclusion.
sp_score<-sp_score[ which(sp_score$Include=='Y'),]
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table_Yonly.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table_W_Prop_in_Top_n_226.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\State_Parks_Tables\\BIG_State_Parks_Table_345.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\Images_for_Report\\State_Parks_Corrected_Prop_High_Quality.csv",sep=",")
# Exploratory scatterplots of the score columns (log-log where fitted below).
plot(sp_score$Comp_Score_AREA,sp_score$Comp_Score_SUM)
plot(log(sp_score$LCA_Score_MEAN),log(sp_score$Comp_Score_MEAN))
abline(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$LCA_Score_MEAN)))
plot(sp_score$Comp_Score_AREA,sp_score$Comp_Score_MEAN)
plot(log(sp_score$Comp_Score_AREA),log(sp_score$Comp_Score_MEAN))
abline(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA)))
summary(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA)))
model<-lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA))
# Shorthand variables reused by the plotting/prediction code that follows.
y<-log(sp_score$Comp_Score_MEAN)
x<-log(sp_score$Comp_Score_AREA)
# Refit and plot the log-log mean-vs-area relationship.
plot(x,y)
model<-lm(y~x)
# FIX: the original line fused "abline(model)" with a truncated
# "ggplot(sp_score,a" fragment, which is a syntax error; the stray
# fragment has been removed.
abline(model)
summary(model)
newx<-seq(min(log(sp_score$Comp_Score_AREA)),max(log(sp_score$Comp_Score_AREA)),length.out=1000)
preds<-predict(model,newdata=data.frame(x=newx),interval=c('confidence'),level=0.90,type="response")
preds_preds<-predict(model,newdata=data.frame(x=newx),interval=c('prediction'),level=0.90,type="response")
lines(newx,preds[ ,3],lty='dashed',col='red')
lines(newx,preds[ ,2],lty='dashed',col='red')
lines(newx,preds_preds[ ,3],lty='dashed',col='blue')
lines(newx,preds_preds[ ,2],lty='dashed',col='blue')
# Label points with park names at the log-scaled coordinates.
# FIX: the original call had unbalanced parentheses and wrapped the character
# column `Name` in log(); labels are plotted as-is.
# NOTE(review): textxy() comes from the 'calibrate' package -- confirm it is loaded.
textxy(log(sp_score$AREA),log(sp_score$MEAN),sp_score$Name,cex=0.5)
################
# FIX: "recreate this with ggplot" was a bare prose line (a syntax error);
# it is now a comment.
## recreate this with ggplot
library(ggplot2)
library(ggrepel)
ggplot(sp_score,aes(x=log(sp_score$LCA_Score_MEAN),y=log(sp_score$Comp_Score_MEAN))) +
geom_point(aes(color=factor(sp_score$Region_Name),size=sp_score$SUM_Shape_Area,shape=factor(sp_score$Category)))+
scale_shape_manual(values=c(8,8,0,12,1,10))+
scale_size_continuous(range=c(2,20),guide=FALSE)+
scale_color_brewer(palette="Paired")+
geom_smooth(method=lm)+
geom_text_repel(
aes(
log(sp_score$LCA_Score_MEAN),log(sp_score$Comp_Score_MEAN),
label=sp_score$Name,color=factor(sp_score$Region_Name)
),
size=2,segment.color="black")+
theme_dark()
summary(lm(log(sp_score$Comp_Score_MEAN)~sp_score$LCA_Score_MEAN))
summary(lm(log(sp_score$Comp_Score_MEAN)~sp_score$Comp_Score_AREA))
summary(lm(log(sp_score$Comp_Score_SUM)~sp_score$Comp_Score_AREA))
###
##Parks Plots
library(plyr)
sum_sp<-ddply(sp_score,~sp_score$Region_Name,summarize,mean=mean(Comp_Score_MEAN),lca=mean(LCA_Score_MEAN))
plot(log(sum_sp$lca),log(sum_sp$mean))
plot(sp_score$Region_Name,sp_score$Comp_Score_MEAN)
sp_score$Region<-as.factor(reorder(sp_score$Region,sp_score$Comp_Score_MEAN,median))
boxy<-ggplot(sp_score,aes(x=Region,y=Comp_Score_MEAN,fill=Region,group=Region))
boxy +
geom_hline(aes(yintercept=1.54),linetype="dotted")+
geom_text(aes(x=12,y=1.54,label="NYS Mean: 1.54"),vjust=-1)+
geom_hline(aes(yintercept=2.73),linetype="longdash")+
geom_text(aes(x=11.75,y=2.73,label="State Park Mean: 2.73"),vjust=-1)+
geom_boxplot(outlier.shape=NA,width=0.5,coef=0,alpha=0.4)+
geom_point(shape=21,size=5)+
scale_x_discrete(expand=c(0.05,0),limits=rev(levels(sp_score$Region)),breaks=c("1","2","3","4","5","7","8","9","10","11","12","13"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City",""))+
# geom_text_repel(
# aes(
# x=(as.factor(reorder(Region,Comp_Score_MEAN,median))),y=Comp_Score_MEAN,
# label=sp_score$Name,color=factor(sp_score$Region_Name)
# ),
# size=5)+
labs(x="Parks Region",y="Mean Biodiversity Index")+
scale_fill_hue(l=45)+
theme(plot.margin=unit(c(1,1,1,1),"cm"),panel.background = element_blank(),axis.line = element_line(colour = "black"),axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(Region,LCA_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,log(EO_Score_SUM),group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,log(Richness_Score_MEAN),group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,Resilience_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,Richness_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,MFB_Linkage_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
# geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(
aes(
Region,MFB_Linkage_Score_MEAN,
label=sp_score$Name,color=factor(sp_score$Region_Name)
),
size=3,segment.color="black")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_10_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_10_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_10_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 90th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_10,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_10>0),
aes(
x=as.factor(Region),y=Prop_Top_10,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 90th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_5,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_5>0),
aes(
x=as.factor(Region),y=Prop_Top_5,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 95th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_1,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_1>0),
aes(
x=as.factor(Region),y=Prop_Top_1,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_5_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_5_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_5_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 95th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_1_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_1_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_1_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Area_Top_1_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Area_Top_1_percent>0),
aes(
x=as.factor(Region),y=Area_Top_1_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
# Multiple regressions of the composite score on its component scores
# (two predictor orderings), followed by the sequential ANOVA table.
summary(lm(sp_score$Comp_Score_MEAN~sp_score$LCA_Score_MEAN+sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
summary(lm(sp_score$Comp_Score_MEAN~sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$LCA_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
# FIX: removed the extra trailing ')' that made this line a syntax error.
anova(lm(sp_score$Comp_Score_MEAN~sp_score$LCA_Score_MEAN+sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
hist(log(sp_score$AREA))
hist((sp_score$AREA))
model<-(lm(log(sp_score$MEAN)~log(sp_score$AREA)))
sp_score$resid<-model$resid
write.table(sp_score,file="D:\\GIS Projects\\StateParks\\TiffsforR\\State_Park_Scores_0510411_w_resid.csv")
plot(sp_score$SUM,sp_score$MEAN)
plot(log(sp_score$SUM),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$SUM)))
sp_score$resid<-model$resid
plot(sp_score$MAX,sp_score$MEAN)
plot(log(sp_score$MAX),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$MAX)))
model_2<-lm(log(sp_score$MEAN)~log(sp_score$MAX))
sp_score$resid_2<-model_2$resid
plot(sp_score$MIN,sp_score$MEAN)
plot(log(sp_score$MIN),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$MIN)))
model_3<-lm(log(sp_score$MEAN)~log(sp_score$MIN))
sp_score$resid_3<-model_3$resid
plot(sp_score$MIN,sp_score$MAX)
plot(log(sp_score$MIN),log(sp_score$MAX))
abline(lm(log(sp_score$MAX)~log(sp_score$MIN)))
model_3<-lm(log(sp_score$MEAN)~log(sp_score$MIN))
sp_score$resid_3<-model_3$resid
##################
##Which individual component is max
sub_parks<-sp_score[,c("Name","Region","Comp_Score_MEAN","LCA_Score_MEAN","Resilience_Score_MEAN","MFB_Linkage_Score_MEAN","EO_Score_MEAN","Richness_Score_MEAN")]
sub_sub_parks<-sp_score[,c("LCA_Score_MEAN","Resilience_Score_MEAN","MFB_Linkage_Score_MEAN","EO_Score_MEAN","Richness_Score_MEAN")]
test<-princomp(sub_sub_parks)
#library("stats")
biplot(test)
test2<-prcomp(sub_sub_parks)
biplot(test2)
test2$rotation
max_factor<-apply(sub_parks[,2:6],1,which.max)
##sum(sp_score$Area_Top_1_percent)/1272739500
##sum(sp_score$Area_Top_5_percent)/6337584000
##sum(sp_score$Area_Top_10_percent)/12699473400
# ---- file boundary: /r_script_State_Parks_Plots.R (dataset metadata removed) ----
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\TiffsforR\\State_Park_Scores_0510411.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table.csv",sep=",")
sp_score<-sp_score[ which(sp_score$Include=='Y'),]
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table_Yonly.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\BIG_State_Parks_Table_W_Prop_in_Top_n_226.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\State_Parks_Tables\\BIG_State_Parks_Table_345.csv",sep=",")
sp_score<-read.csv("D:\\GIS Projects\\StateParks\\Images_for_Report\\State_Parks_Corrected_Prop_High_Quality.csv",sep=",")
plot(sp_score$Comp_Score_AREA,sp_score$Comp_Score_SUM)
plot(log(sp_score$LCA_Score_MEAN),log(sp_score$Comp_Score_MEAN))
abline(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$LCA_Score_MEAN)))
plot(sp_score$Comp_Score_AREA,sp_score$Comp_Score_MEAN)
plot(log(sp_score$Comp_Score_AREA),log(sp_score$Comp_Score_MEAN))
abline(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA)))
summary(lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA)))
model<-lm(log(sp_score$Comp_Score_MEAN)~log(sp_score$Comp_Score_AREA))
y<-log(sp_score$Comp_Score_MEAN)
x<-log(sp_score$Comp_Score_AREA)
# Refit and plot the log-log mean-vs-area relationship.
plot(x,y)
model<-lm(y~x)
# FIX: the original line fused "abline(model)" with a truncated
# "ggplot(sp_score,a" fragment, which is a syntax error; the stray
# fragment has been removed.
abline(model)
summary(model)
newx<-seq(min(log(sp_score$Comp_Score_AREA)),max(log(sp_score$Comp_Score_AREA)),length.out=1000)
preds<-predict(model,newdata=data.frame(x=newx),interval=c('confidence'),level=0.90,type="response")
preds_preds<-predict(model,newdata=data.frame(x=newx),interval=c('prediction'),level=0.90,type="response")
lines(newx,preds[ ,3],lty='dashed',col='red')
lines(newx,preds[ ,2],lty='dashed',col='red')
lines(newx,preds_preds[ ,3],lty='dashed',col='blue')
lines(newx,preds_preds[ ,2],lty='dashed',col='blue')
# Label points with park names at the log-scaled coordinates.
# FIX: the original call had unbalanced parentheses and wrapped the character
# column `Name` in log(); labels are plotted as-is.
# NOTE(review): textxy() comes from the 'calibrate' package -- confirm it is loaded.
textxy(log(sp_score$AREA),log(sp_score$MEAN),sp_score$Name,cex=0.5)
################
# FIX: "recreate this with ggplot" was a bare prose line (a syntax error);
# it is now a comment.
## recreate this with ggplot
library(ggplot2)
library(ggrepel)
ggplot(sp_score,aes(x=log(sp_score$LCA_Score_MEAN),y=log(sp_score$Comp_Score_MEAN))) +
geom_point(aes(color=factor(sp_score$Region_Name),size=sp_score$SUM_Shape_Area,shape=factor(sp_score$Category)))+
scale_shape_manual(values=c(8,8,0,12,1,10))+
scale_size_continuous(range=c(2,20),guide=FALSE)+
scale_color_brewer(palette="Paired")+
geom_smooth(method=lm)+
geom_text_repel(
aes(
log(sp_score$LCA_Score_MEAN),log(sp_score$Comp_Score_MEAN),
label=sp_score$Name,color=factor(sp_score$Region_Name)
),
size=2,segment.color="black")+
theme_dark()
summary(lm(log(sp_score$Comp_Score_MEAN)~sp_score$LCA_Score_MEAN))
summary(lm(log(sp_score$Comp_Score_MEAN)~sp_score$Comp_Score_AREA))
summary(lm(log(sp_score$Comp_Score_SUM)~sp_score$Comp_Score_AREA))
###
##Parks Plots
library(plyr)
sum_sp<-ddply(sp_score,~sp_score$Region_Name,summarize,mean=mean(Comp_Score_MEAN),lca=mean(LCA_Score_MEAN))
plot(log(sum_sp$lca),log(sum_sp$mean))
plot(sp_score$Region_Name,sp_score$Comp_Score_MEAN)
sp_score$Region<-as.factor(reorder(sp_score$Region,sp_score$Comp_Score_MEAN,median))
boxy<-ggplot(sp_score,aes(x=Region,y=Comp_Score_MEAN,fill=Region,group=Region))
boxy +
geom_hline(aes(yintercept=1.54),linetype="dotted")+
geom_text(aes(x=12,y=1.54,label="NYS Mean: 1.54"),vjust=-1)+
geom_hline(aes(yintercept=2.73),linetype="longdash")+
geom_text(aes(x=11.75,y=2.73,label="State Park Mean: 2.73"),vjust=-1)+
geom_boxplot(outlier.shape=NA,width=0.5,coef=0,alpha=0.4)+
geom_point(shape=21,size=5)+
scale_x_discrete(expand=c(0.05,0),limits=rev(levels(sp_score$Region)),breaks=c("1","2","3","4","5","7","8","9","10","11","12","13"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City",""))+
# geom_text_repel(
# aes(
# x=(as.factor(reorder(Region,Comp_Score_MEAN,median))),y=Comp_Score_MEAN,
# label=sp_score$Name,color=factor(sp_score$Region_Name)
# ),
# size=5)+
labs(x="Parks Region",y="Mean Biodiversity Index")+
scale_fill_hue(l=45)+
theme(plot.margin=unit(c(1,1,1,1),"cm"),panel.background = element_blank(),axis.line = element_line(colour = "black"),axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(Region,LCA_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,log(EO_Score_SUM),group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,log(Richness_Score_MEAN),group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,Resilience_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,Richness_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14))
boxy<-ggplot(sp_score,aes(Region,MFB_Linkage_Score_MEAN,group=Region))
boxy +
geom_boxplot()+
# geom_jitter(width=0.2) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(
aes(
Region,MFB_Linkage_Score_MEAN,
label=sp_score$Name,color=factor(sp_score$Region_Name)
),
size=3,segment.color="black")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_10_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_10_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_10_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 90th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_10,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_10>0),
aes(
x=as.factor(Region),y=Prop_Top_10,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 90th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_5,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_5>0),
aes(
x=as.factor(Region),y=Prop_Top_5,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 95th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_1,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4)+
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_1>0),
aes(
x=as.factor(Region),y=Prop_Top_1,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_5_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_5_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_5_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 95th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Prop_Top_1_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Prop_Top_1_percent>0),
aes(
x=as.factor(Region),y=Prop_Top_1_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Proportion of Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
boxy<-ggplot(sp_score,aes(x=as.factor(Region),y=Area_Top_1_percent,group=Region))
boxy +
geom_boxplot(outlier.shape=NA)+
geom_point(shape=1,size=4) +
scale_x_discrete(breaks=c("1","2","3","4","5","7","8","9","10","11","12"),labels=c("Niagra","Allegany","Genesee","Finger Lakes","Central","Taconic","Palisades","Long Island","Thousand Islands","Saratoga/Capital District","New York City"))+
geom_text_repel(data=subset(sp_score,Area_Top_1_percent>0),
aes(
x=as.factor(Region),y=Area_Top_1_percent,
label=Name,color=factor(Region_Name)
),
size=3,segment.color="black")+
labs(x="Parks Region",y="Park Area >= 99th Percentile")+
theme(axis.text.x = element_text(angle = 30, vjust = 1, hjust=1,size=14),legend.position="none")
# Multiple regressions of the composite score on its component scores
# (two predictor orderings), followed by the sequential ANOVA table.
summary(lm(sp_score$Comp_Score_MEAN~sp_score$LCA_Score_MEAN+sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
summary(lm(sp_score$Comp_Score_MEAN~sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$LCA_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
# FIX: removed the extra trailing ')' that made this line a syntax error.
anova(lm(sp_score$Comp_Score_MEAN~sp_score$LCA_Score_MEAN+sp_score$EO_Score_MEAN+sp_score$Richness_Score_MEAN+sp_score$Resilience_Score_MEAN+sp_score$MFB_Linkage_Score_MEAN))
hist(log(sp_score$AREA))
hist((sp_score$AREA))
model<-(lm(log(sp_score$MEAN)~log(sp_score$AREA)))
sp_score$resid<-model$resid
write.table(sp_score,file="D:\\GIS Projects\\StateParks\\TiffsforR\\State_Park_Scores_0510411_w_resid.csv")
plot(sp_score$SUM,sp_score$MEAN)
plot(log(sp_score$SUM),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$SUM)))
sp_score$resid<-model$resid
plot(sp_score$MAX,sp_score$MEAN)
plot(log(sp_score$MAX),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$MAX)))
model_2<-lm(log(sp_score$MEAN)~log(sp_score$MAX))
sp_score$resid_2<-model_2$resid
plot(sp_score$MIN,sp_score$MEAN)
plot(log(sp_score$MIN),log(sp_score$MEAN))
abline(lm(log(sp_score$MEAN)~log(sp_score$MIN)))
model_3<-lm(log(sp_score$MEAN)~log(sp_score$MIN))
sp_score$resid_3<-model_3$resid
plot(sp_score$MIN,sp_score$MAX)
plot(log(sp_score$MIN),log(sp_score$MAX))
abline(lm(log(sp_score$MAX)~log(sp_score$MIN)))
model_3<-lm(log(sp_score$MEAN)~log(sp_score$MIN))
sp_score$resid_3<-model_3$resid
##################
##Which individual component is max
sub_parks<-sp_score[,c("Name","Region","Comp_Score_MEAN","LCA_Score_MEAN","Resilience_Score_MEAN","MFB_Linkage_Score_MEAN","EO_Score_MEAN","Richness_Score_MEAN")]
sub_sub_parks<-sp_score[,c("LCA_Score_MEAN","Resilience_Score_MEAN","MFB_Linkage_Score_MEAN","EO_Score_MEAN","Richness_Score_MEAN")]
test<-princomp(sub_sub_parks)
#library("stats")
biplot(test)
test2<-prcomp(sub_sub_parks)
biplot(test2)
test2$rotation
max_factor<-apply(sub_parks[,2:6],1,which.max)
##sum(sp_score$Area_Top_1_percent)/1272739500
##sum(sp_score$Area_Top_5_percent)/6337584000
##sum(sp_score$Area_Top_10_percent)/12699473400
# ---- (dataset row separator removed) ----
# 2017.8.9 BY Juan Zhang
rm(list=ls())
options(stringsAsFactors = F)
## 加载差异分析结果
lnames <- load("../Analysis/deg_analysis/Step03-limma_voom_nrDEG.Rdata")
lnames
# 提取差异表达的gene
DEG <- DEG_limma_voom$Geneid[which(DEG_limma_voom$regulated!="normal")]
# 将symbol转换成gene id
library(org.Hs.eg.db)
keytypes(org.Hs.eg.db)
library(clusterProfiler)
id2ENTREZ <- bitr(DEG, fromType = "ENSEMBL", toType = "ENTREZID", OrgDb = org.Hs.eg.db )
DEGs <- unique(id2ENTREZ$ENTREZID)
# 分析时所用表达谱所有基因
id2ENTREZ <- bitr(DEG_limma_voom$Geneid, fromType = "ENSEMBL", toType = "ENTREZID", OrgDb = org.Hs.eg.db )
ref_gene <- unique(id2ENTREZ$ENTREZID)
## 加载通路数据,
# kegg_gid为KEGG数据库里面所有基因
# non_dis_pathway,通路数据,即gene set,每一个通路由一组功能相关的基因组成一个基因集合,行使特定生物学功能。
lnames <- load("../Analysis/data/kegg_pathway240_2017.6.Rdata")
lnames
class(kegg_gid)
kegg_gid
## Hypergeometric (over-representation) test of DEGs against KEGG pathways.
## Background = expression-profile genes present in KEGG; for each pathway,
## test whether the DEGs overlap it more than expected by chance.
diff_gene <- intersect(kegg_gid, DEGs)      # DEGs also present in KEGG
backg_gene <- intersect(kegg_gid, ref_gene) # background: profiled genes in KEGG
K <- length(diff_gene)  # number of DEGs (draws)
N <- length(backg_gene) # background gene count
## Preallocate result vectors: growing them inside the loop copies the whole
## vector on every iteration. seq_along() is also safe for an empty list.
n_path <- length(non_dis_pathway)
p_value <- numeric(n_path)
path_hsa <- character(n_path)
path_name <- character(n_path)
for (i in seq_along(non_dis_pathway)) {
  path <- non_dis_pathway[[i]]
  path_hsa[i] <- path$pathwayId    # KEGG ID of this pathway
  path_name[i] <- path$pathwayName # pathway name
  M <- length(intersect(path$genesInPathway, backg_gene)) # pathway genes in the background
  X <- length(intersect(diff_gene, path$genesInPathway))  # DEGs falling in this pathway
  if (X == 0) {
    p_value[i] <- 1
  } else {
    # upper-tail hypergeometric probability P(overlap >= X)
    p_value[i] <- 1 - phyper(X - 1, M, N - M, K)
  }
}
# Benjamini-Hochberg FDR correction across all pathways.
fdr <- p.adjust(p_value, method = "BH", n = length(p_value))
res <- data.frame(path_hsa, path_name, p_value, fdr)
res <- res[order(res$p_value), ]
# FIX: spell out row.names (the original "row.name=F" only worked through
# partial argument matching) and use FALSE rather than the reassignable F.
write.table(res, file = "../Analysis/hyperDistribution_DEG_KEGG.xls",
            sep = "\t", row.names = FALSE, quote = FALSE)
# ---- file boundary: /step06-hyperDistrubition.R (dataset metadata removed) ----
# 2017.8.9 BY Juan Zhang
rm(list=ls())
options(stringsAsFactors = F)
## 加载差异分析结果
lnames <- load("../Analysis/deg_analysis/Step03-limma_voom_nrDEG.Rdata")
lnames
# 提取差异表达的gene
DEG <- DEG_limma_voom$Geneid[which(DEG_limma_voom$regulated!="normal")]
# 将symbol转换成gene id
library(org.Hs.eg.db)
keytypes(org.Hs.eg.db)
library(clusterProfiler)
id2ENTREZ <- bitr(DEG, fromType = "ENSEMBL", toType = "ENTREZID", OrgDb = org.Hs.eg.db )
DEGs <- unique(id2ENTREZ$ENTREZID)
# 分析时所用表达谱所有基因
id2ENTREZ <- bitr(DEG_limma_voom$Geneid, fromType = "ENSEMBL", toType = "ENTREZID", OrgDb = org.Hs.eg.db )
ref_gene <- unique(id2ENTREZ$ENTREZID)
## 加载通路数据,
# kegg_gid为KEGG数据库里面所有基因
# non_dis_pathway,通路数据,即gene set,每一个通路由一组功能相关的基因组成一个基因集合,行使特定生物学功能。
lnames <- load("../Analysis/data/kegg_pathway240_2017.6.Rdata")
lnames
class(kegg_gid)
kegg_gid
## Hypergeometric (over-representation) test of DEGs against KEGG pathways.
diff_gene <- intersect(kegg_gid,DEGs) # DEGs that are also present in the KEGG database
backg_gene <- intersect(kegg_gid,ref_gene) # background: profiled genes present in KEGG
K <- length(diff_gene) # number of differentially expressed genes (draws)
N <- length(backg_gene) # background gene count
# NOTE(review): these vectors are grown inside the loop; preallocating with
# numeric(length(non_dis_pathway)) would avoid repeated copying.
p_value <- NULL
path_hsa <- NULL
path_name <- NULL
for(i in 1:length(non_dis_pathway)){
path <- non_dis_pathway[[i]]
path_hsa[i] <- path$pathwayId # KEGG ID of this pathway
path_name[i] <- path$pathwayName # pathway name
M <- length(intersect(path$genesInPathway,backg_gene)) # pathway genes in the background
X <- length(intersect(diff_gene,path$genesInPathway)) # DEGs falling in this pathway
if(X==0){
p_value[i] <- 1
}else{
# upper-tail hypergeometric probability P(overlap >= X)
p_value[i] <- (1-phyper(X-1,M,N-M,K))
}
}
# Benjamini-Hochberg FDR correction across all pathways.
fdr <- p.adjust(p_value,method="BH",length(p_value))
res <- data.frame(path_hsa,path_name,p_value,fdr)
res <- res[order(res$p_value),]
# NOTE(review): "row.name=F" only works via partial matching of "row.names".
write.table(res,file="../Analysis/hyperDistribution_DEG_KEGG.xls",sep="\t",row.name=F,quote=F)
# ---- (dataset row separator removed) ----
# Lab 05: conditional-statement exercises (switch / if-else) on random inputs.

# Problem 1: draw a school year 1-6 and label it lower- or upper-grade.
grade <- sample(1:6,1)
grade <- paste(grade,'학년' ,sep='')  # append the Korean suffix for "grade/year"
grade
# Years 1-3 fall through to "lower grade"; any other value hits the default
# ("upper grade").
result<- switch(EXPR = grade ,"1학년"=,"2학년"=,'3학년'='저학년입니다.',
                '고학년입니다.')
cat( grade,'은 ', result ,sep='')
# Problem 2: pick an operation code 1-5 and apply it to 300 and 50.
# Mapping as written: 1 -> +, 2 -> -, 4 -> /, 3 -> *, otherwise %% (modulo).
choice <- sample(1:5,1)
result<-if(choice ==1){ 300+50
}else if(choice ==2){ 300-50
}else if(choice ==4) { 300/50
}else if(choice ==3) { 300*50
}else {300%%50}
cat('결과값',':',result)
# Problem 3: repeat a decoration character a random number of times
# (1 -> '*', 2 -> '$', otherwise '#').
count <- sample(3:10,1)
deco <- sample(1:3,1)
if(deco==1){
  rep('*',count)
}else if(deco ==2){
  rep('$',count)
}else {
  rep('#',count)}
# a*4 -> error (a character cannot be multiplied by a number)
# Problem 4: map a score of 1-100 to a letter grade via its tens digit:
# '9' -> A, '8' -> B, '7' -> C, '6' -> D, anything else -> F
# (note: '0' also falls through to A as written).
score <- sample(1:100,1)
score2<-as.character(score %/% 10)
score2
result <- switch(EXPR = score2 , '0'=,'9'= 'A 등급',
                 '8'= 'B 등급',
                 '7'= 'C 등급',
                 '6'='D 등급',
                 'F 등급')
result
cat( score,'점은 ',result,'입니다.' ,sep='')
# Problem 5: pair upper- and lower-case letters: "Aa", "Bb", ..., "Zz".
Alpha<-paste(LETTERS[1:26],letters[1:26], sep='')
Alpha
| /R_training/실습제출/배윤성/10월 28일 까지/lab_05.R | no_license | BaeYS-marketing/R | R | false | false | 1,013 | r |
# Lab 05: conditional-statement exercises (switch / if-else) on random inputs.

# Problem 1: draw a school year 1-6 and label it lower- or upper-grade.
grade <- sample(1:6,1)
grade <- paste(grade,'학년' ,sep='')  # append the Korean suffix for "grade/year"
grade
# Years 1-3 fall through to "lower grade"; any other value hits the default
# ("upper grade").
result<- switch(EXPR = grade ,"1학년"=,"2학년"=,'3학년'='저학년입니다.',
                '고학년입니다.')
cat( grade,'은 ', result ,sep='')
# Problem 2: pick an operation code 1-5 and apply it to 300 and 50.
# Mapping as written: 1 -> +, 2 -> -, 4 -> /, 3 -> *, otherwise %% (modulo).
choice <- sample(1:5,1)
result<-if(choice ==1){ 300+50
}else if(choice ==2){ 300-50
}else if(choice ==4) { 300/50
}else if(choice ==3) { 300*50
}else {300%%50}
cat('결과값',':',result)
# Problem 3: repeat a decoration character a random number of times
# (1 -> '*', 2 -> '$', otherwise '#').
count <- sample(3:10,1)
deco <- sample(1:3,1)
if(deco==1){
  rep('*',count)
}else if(deco ==2){
  rep('$',count)
}else {
  rep('#',count)}
# a*4 -> error (a character cannot be multiplied by a number)
# Problem 4: map a score of 1-100 to a letter grade via its tens digit:
# '9' -> A, '8' -> B, '7' -> C, '6' -> D, anything else -> F
# (note: '0' also falls through to A as written).
score <- sample(1:100,1)
score2<-as.character(score %/% 10)
score2
result <- switch(EXPR = score2 , '0'=,'9'= 'A 등급',
                 '8'= 'B 등급',
                 '7'= 'C 등급',
                 '6'='D 등급',
                 'F 등급')
result
cat( score,'점은 ',result,'입니다.' ,sep='')
# Problem 5: pair upper- and lower-case letters: "Aa", "Bb", ..., "Zz".
Alpha<-paste(LETTERS[1:26],letters[1:26], sep='')
Alpha
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psy.phd.R
\name{d_ci95}
\alias{d_ci95}
\title{95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)}
\usage{
d_ci95(d, n1, n2)
}
\arguments{
\item{d}{Cohen's d}
\item{n1}{n in group1}
\item{n2}{n in group2}
}
\value{
numeric
}
\description{
95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)
}
\details{
95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)
}
| /man/d_ci95.Rd | permissive | paulsharpeY/psy.phd | R | false | true | 476 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psy.phd.R
\name{d_ci95}
\alias{d_ci95}
\title{95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)}
\usage{
d_ci95(d, n1, n2)
}
\arguments{
\item{d}{Cohen's d}
\item{n1}{n in group1}
\item{n2}{n in group2}
}
\value{
numeric
}
\description{
95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)
}
\details{
95\% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)
}
|
# dark_alleys.R: flag "spooky" street names by US state.
# Pulls street-name counts per ZIP prefix from a MySQL `address` table, marks
# names containing any Halloween word, aggregates to state level, and writes
# the spooky/total ratio per state to spooky_state.csv.
library(RMySQL)
library(zipcode)
library(dplyr)
library(ggmap)
library(jsonlite)  # fix: fromJSON() below was called without loading its package

# Database credentials are kept outside the repo in ../keys.json
keys <- fromJSON("../keys.json")$street_data
con <- dbConnect(RMySQL::MySQL(),
                 host = keys$host,
                 dbname = keys$dbname,
                 user = keys$id,
                 password = keys$pw)

# One Halloween-themed word per row
spooky <- read.csv("halloween_words.csv")
names(spooky) <- "word"
data(zipcode)

# Distinct street names per ZIP prefix, with their frequency
query <- paste0("SELECT leftzip, name, count(*) as 'num' FROM address",
                " GROUP BY leftzip, name")
zip_streets <- dbGetQuery(con, query)
dbDisconnect(con)  # fix: the connection was never closed
zip_streets$name <- tolower(zip_streets$name)

#zsample <- zip_streets[1:50,]
#zsample <- rbind(zs, data.frame(leftzip=0, name='aliens', num=3))

# A street is "spooky" if its name contains any of the Halloween words
zs <- zip_streets %>%
  rowwise() %>%
  mutate(is_spooky = any(sapply(spooky$word, grepl, name)))

zs$leftzip <- clean.zipcodes(zs$leftzip)
zs2 <- merge(zs, zipcode, by.x = "leftzip", by.y = "zip")
zs3 <- aggregate(num ~ state + is_spooky, data = zs2, sum)

# Split spooky / non-spooky street totals and outer-join them per state
state_spook <- zs3[which(zs3$is_spooky == TRUE), c(1, 3)]
names(state_spook) <- c("state", "num_spooky")
state_nospook <- zs3[which(zs3$is_spooky == FALSE), c(1, 3)]
state_nospook <- merge(state_nospook, state_spook, by = c("state"), all = TRUE)
state_ratio <- mutate(state_nospook, ratio = num_spooky / (num + num_spooky))
state_ratio <- mutate(state_ratio, total = num + num_spooky)

# States with no spooky streets come through the outer join as NA -> 0
state_ratio$num_spooky[which(is.na(state_ratio$num_spooky))] <- 0
state_ratio$ratio[which(is.na(state_ratio$ratio))] <- 0

# Drop military mail codes and the Virgin Islands
state_ratio <- state_ratio[which(!(state_ratio$state %in% c("AA", "AE", "AP", "VI"))), ]

#sapply(spooky$word, grepl, "Chatham")
#str(as.data.frame(unlist(lapply(zip_streets$name, function(x){ any(sapply(spooky$word,grepl,x))}))))
# df <- merge(city_ratio,zipcode,by=c('city','state'),all.x=T)
# us<- get_map(location='us')
#
# ggmap(us, extent = "device") +
#   geom_density2d(data = df,
#     aes(x = longitude, y = latitude), size = 0.3) +
#   stat_density2d(data = df,
#     aes(x = longitude, y = latitude, fill = ..level.., alpha = ..level..),
#     size = 0.01,
#     bins = 16, geom = "polygon") +
#   scale_fill_gradient(low = "green", high = "red") +
#   scale_alpha(range = c(0, 0.3), guide = FALSE)

write.csv(state_ratio, "spooky_state.csv")
| /dark_alleys.R | no_license | joeramirez/street_data | R | false | false | 2,212 | r | library(RMySQL)
library(zipcode)
library(dplyr)
library(ggmap)
keys<-fromJSON("../keys.json")$street_data
con <- dbConnect(RMySQL::MySQL(),
host = keys$host,
dbname = keys$dbname,
user = keys$id,
password = keys$pw)
spooky <- read.csv("halloween_words.csv")
names(spooky) <- "word"
data(zipcode)
query <- paste0("SELECT leftzip, name, count(*) as 'num' FROM address",
" GROUP BY leftzip, name")
zip_streets <- dbGetQuery(con,query)
zip_streets$name <- tolower(zip_streets$name)
#zsample <- zip_streets[1:50,]
#zsample <- rbind(zs, data.frame(leftzip=0, name='aliens', num=3))
zs<- zip_streets %>%
rowwise() %>%
mutate(is_spooky = any(sapply(spooky$word, grepl, name)))
zs$leftzip <- clean.zipcodes(zs$leftzip)
zs2<- merge(zs,zipcode, by.x="leftzip",by.y="zip")
zs3 <- aggregate(num ~ state + is_spooky, data = zs2, sum)
state_spook <- zs3[which(zs3$is_spooky == T),c(1,3)]
names(state_spook) <- c("state","num_spooky")
state_nospook <- zs3[which(zs3$is_spooky == F), c(1,3)]
state_nospook <- merge(state_nospook, state_spook, by=c("state"), all=T)
state_ratio <- mutate(state_nospook, ratio = num_spooky/(num+num_spooky))
state_ratio <- mutate(state_ratio, total = num+num_spooky)
state_ratio$num_spooky[which(is.na(state_ratio$num_spooky))] <- 0
state_ratio$ratio[which(is.na(state_ratio$ratio))] <- 0
state_ratio<- state_ratio[which(!(state_ratio$state %in% c("AA","AE","AP","VI"))),]
#sapply(spooky$word, grepl, "Chatham")
#str(as.data.frame(unlist(lapply(zip_streets$name, function(x){ any(sapply(spooky$word,grepl,x))}))))
# df <- merge(city_ratio,zipcode,by=c('city','state'),all.x=T)
# us<- get_map(location='us')
#
# ggmap(us, extent = "device") +
# geom_density2d(data = df,
# aes(x = longitude, y = latitude), size = 0.3) +
# stat_density2d(data = df,
# aes(x = longitude, y = latitude, fill = ..level.., alpha = ..level..),
# size = 0.01,
# bins = 16, geom = "polygon") +
# scale_fill_gradient(low = "green", high = "red") +
# scale_alpha(range = c(0, 0.3), guide = FALSE)
write.csv(state_ratio,"spooky_state.csv")
|
\name{bedr.join.region}
\alias{bedr.join.region}
\title{
join two region objects using a left outer join
}
\description{
join two region objects using a left outer join
}
\usage{
bedr.join.region(
x,
y,
fraction.overlap = 1/1e9,
reciporical = FALSE,
report.n.overlap = FALSE,
check.zero.based = TRUE,
check.chr = TRUE,
check.valid = TRUE,
check.sort = TRUE,
check.merge = TRUE,
verbose = TRUE
)
}
\arguments{
\item{x}{ object a}
\item{y}{ object b}
\item{fraction.overlap}{proportion of overlap to be considered a match}
\item{report.n.overlap}{should the number of overlapping bases be reported}
\item{reciporical}{should the fraction overlap be applied to object b as well}
\item{check.zero.based}{should 0 based coordinates be checked}
\item{check.chr}{should chr prefix be checked}
\item{check.valid}{check if region is valid}
\item{check.sort}{check if region is sorted}
\item{check.merge}{check if overlapping regions are merged}
\item{verbose}{messages and checks}
}
%\details{
%}
%\value{
%}
\references{
\url{http://bedtools.readthedocs.org/en/latest/content/tools/intersect.html}
}
\author{
Daryl Waggott
}
\examples{
if (check.binary("bedtools")) {
index <- get.example.regions();
a <- index[[1]];
b <- index[[2]];
a.sort <- bedr.sort.region(a);
b.sort <- bedr.sort.region(b);
d <- bedr.join.region(a.sort, b.sort);
}
}
\keyword{ join}
| /man/bedr.join.region.Rd | no_license | cran/bedr | R | false | false | 1,391 | rd | \name{bedr.join.region}
\alias{bedr.join.region}
\title{
join two region objects using a left outer join
}
\description{
join two region objects using a left outer join
}
\usage{
bedr.join.region(
x,
y,
fraction.overlap = 1/1e9,
reciporical = FALSE,
report.n.overlap = FALSE,
check.zero.based = TRUE,
check.chr = TRUE,
check.valid = TRUE,
check.sort = TRUE,
check.merge = TRUE,
verbose = TRUE
)
}
\arguments{
\item{x}{ object a}
\item{y}{ object b}
\item{fraction.overlap}{proportion of overlap to be considered a match}
\item{report.n.overlap}{should the number of overlapping bases be reported}
\item{reciporical}{should the fraction overlap be applied to object b as well}
\item{check.zero.based}{should 0 based coordinates be checked}
\item{check.chr}{should chr prefix be checked}
\item{check.valid}{check if region is valid}
\item{check.sort}{check if region is sorted}
\item{check.merge}{check if overlapping regions are merged}
\item{verbose}{messages and checks}
}
%\details{
%}
%\value{
%}
\references{
\url{http://bedtools.readthedocs.org/en/latest/content/tools/intersect.html}
}
\author{
Daryl Waggott
}
\examples{
if (check.binary("bedtools")) {
index <- get.example.regions();
a <- index[[1]];
b <- index[[2]];
a.sort <- bedr.sort.region(a);
b.sort <- bedr.sort.region(b);
d <- bedr.join.region(a.sort, b.sort);
}
}
\keyword{ join}
|
#' Fast optimization of Gaussian Mixture Copula Models
#'
#' Gaussian mixture copula models (GMCM) are a flexible class of statistical
#' models which can be used for unsupervised clustering, meta analysis, and
#' many other things. In meta analysis, GMCMs can be used to
#' quantify and identify which features have been reproduced across
#' multiple experiments. This package provides a fast and general
#' implementation of GMCM cluster analysis and serves as an improvement and
#' extension of the features available in the \code{idr} package.
#'
#' @name GMCM-package
#' @aliases GMCM-package GMCM
#' @details If the meta analysis of Li et al. (2011) is to be performed, the
#' function \code{\link{fit.meta.GMCM}} is used to identify the maximum
#' likelihood estimate of the special Gaussian mixture copula model (GMCM)
#' defined by Li et al. (2011). The function \code{\link{get.IDR}}
#' computes the local and adjusted Irreproducible Discovery Rates defined
#' by Li et al. (2011) to determine the level of reproducibility.
#'
#' Tewari et. al. (2011) proposed using GMCMs as an general unsupervised
#' clustering tool. If such a general unsupervised clustering is needed, like
#' above, the function \code{\link{fit.full.GMCM}} computes the maximum
#' likelihood estimate of the general GMCM. The function
#' \code{\link{get.prob}} is used to estimate the class membership
#' probabilities of each observation.
#'
#' \code{\link{SimulateGMCMData}} provide easy simulation from the GMCMs.
#'
#' @author
#' Anders Ellern Bilgrau,
#' Martin Boegsted,
#' Poul Svante Eriksen
#'
#' Maintainer: Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @docType package
#' @references
#' Anders Ellern Bilgrau, Poul Svante Eriksen, Jakob Gulddahl Rasmussen,
#' Hans Erik Johnsen, Karen Dybkaer, Martin Boegsted (2016). GMCM:
#' Unsupervised Clustering and Meta-Analysis Using Gaussian Mixture Copula
#' Models. Journal of Statistical Software, 70(2), 1-23.
#' doi:10.18637/jss.v070.i02
#'
#' Li, Q., Brown, J. B. J. B., Huang, H., & Bickel, P. J. (2011).
#' Measuring reproducibility of high-throughput experiments. The Annals of
#' Applied Statistics, 5(3), 1752-1779. doi:10.1214/11-AOAS466
#'
#' Tewari, A., Giering, M. J., & Raghunathan, A. (2011). Parametric
#' Characterization of Multimodal Distributions with Non-gaussian Modes.
#' 2011 IEEE 11th International Conference on Data Mining Workshops,
#' 286-292. doi:10.1109/ICDMW.2011.135
#' @seealso
#' Core user functions: \code{\link{fit.meta.GMCM}},
#' \code{\link{fit.full.GMCM}}, \code{\link{get.IDR}},
#' \code{\link{get.prob}}, \code{\link{SimulateGMCMData}},
#' \code{\link{SimulateGMMData}}, \code{\link{rtheta}},
#' \code{\link{Uhat}}, \code{\link{choose.theta}},
#' \code{\link{full2meta}}, \code{\link{meta2full}}
#'
#' Package by Li et. al. (2011): \code{\link[idr:idr-package]{idr}}.
#' @useDynLib GMCM
#' @importFrom Rcpp evalCpp
#' @importFrom stats approxfun cov.wt cov2cor kmeans optim rchisq rnorm runif
#' @importFrom utils flush.console
#' @examples
#' # Loading data
#' data(u133VsExon)
#'
#' # Subsetting data to reduce computation time
#' u133VsExon <- u133VsExon[1:5000, ]
#'
#' # Ranking and scaling,
#' # Remember large values should be critical to the null!
#' uhat <- Uhat(1 - u133VsExon)
#'
#' # Visualizing P-values and the ranked and scaled P-values
#' \dontrun{
#' par(mfrow = c(1,2))
#' plot(u133VsExon, cex = 0.5, pch = 4, col = "tomato", main = "P-values",
#' xlab = "P (U133)", ylab = "P (Exon)")
#' plot(uhat, cex = 0.5, pch = 4, col = "tomato", main = "Ranked P-values",
#' xlab = "rank(1-P) (U133)", ylab = "rank(1-P) (Exon)")
#' }
#'
#' # Fitting using BFGS
#' fit <- fit.meta.GMCM(uhat, init.par = c(0.5, 1, 1, 0.5), pgtol = 1e-2,
#' method = "L-BFGS", positive.rho = TRUE, verbose = TRUE)
#'
#' # Compute IDR values and classify
#' idr <- get.IDR(uhat, par = fit)
#' table(idr$K) # 1 = irreproducible, 2 = reproducible
#'
#' \dontrun{
#' # See clustering results
#' par(mfrow = c(1,2))
#' plot(u133VsExon, cex = 0.5, pch = 4, main = "Classified genes",
#' col = c("tomato", "steelblue")[idr$K],
#' xlab = "P-value (U133)", ylab = "P-value (Exon)")
#' plot(uhat, cex = 0.5, pch = 4, main = "Classified genes",
#' col = c("tomato", "steelblue")[idr$K],
#' xlab = "rank(1-P) (U133)", ylab = "rank(1-P) (Exon)")
#' }
NULL
#' Reproducibility between U133 plus 2 and Exon microarrays
#'
#' This dataset contains a \code{data.frame} of unadjusted P-values for
#' differential expression between germinal center cells and other B-cells
#' within tonsils for two different experiments. The experiments differ
#' primarily in the microarray platform used. The first column corresponds the
#' evidence from the Affymetrix GeneChip Human Genome U133 Plus 2.0 Array.
#' The second column corresponds to the Affymetrix GeneChip Human Exon 1.0 ST
#' Array.
#' @docType data
#' @name u133VsExon
#' @details Further details can be found in Bergkvist et al. (2014) and
#' Rasmussen and Bilgrau et al. (2014).
#' @format The format of the \code{data.frame} is:
#'
#' \code{'data.frame': 19577 obs. of 2 variables:}\cr
#' \code{$ u133: num 0.17561 0.00178 0.005371 0.000669 0.655261 ...}\cr
#' \code{$ exon: num 1.07e-01 6.74e-10 1.51e-03 6.76e-05 3.36e-01 ...}\cr
#'
#' @author Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @references
#' Bergkvist, Kim Steve, Mette Nyegaard, Martin Boegsted, Alexander Schmitz,
#' Julie Stoeve Boedker, Simon Mylius Rasmussen, Martin Perez-Andres et al.
#' (2014). "Validation and Implementation of a Method for Microarray Gene
#' Expression Profiling of Minor B-Cell Subpopulations in Man".
#' BMC immunology, 15(1), 3.
#'
#' Rasmussen SM, Bilgrau AE, Schmitz A, Falgreen S, Bergkvist KS, Tramm AM,
#' Baech J, Jacobsen CL, Gaihede M, Kjeldsen MK, Boedker JS, Dybkaer K,
#' Boegsted M, Johnsen HE (2015). "Stable Phenotype Of B-Cell Subsets Following
#' Cryopreservation and Thawing of Normal Human Lymphocytes Stored in a Tissue
#' Biobank." Cytometry Part B: Clinical Cytometry, 88(1), 40-49.
#' @keywords datasets, data
#' @examples
#' data(u133VsExon)
#' str(u133VsExon)
#'
#' # Plot P-values
#' plot(u133VsExon, cex = 0.5)
#'
#' # Plot ranked and scaled P-values
#' plot(Uhat(1-u133VsExon), cex = 0.5)
NULL
#' Reproducibility between Fresh and Frozen B-cell subtypes
#'
#' This dataset contains a \code{data.frame} of \eqn{t}-scores (from a Linear
#' mixed effects model) and \eqn{p}-values for
#' differential expression between pre (Im, N) and post germinal (M, PB) centre
#' cells within peripheral blood.
#' The first and second column contain the the test for the hypothesis of no
#' differentially expression between pre and post germinal cells for the
#' freshly sorted and gene profiled cells.
#' The third and fourth column contain the the test for the hypothesis of no
#' differentially expression between pre and post germinal cells for the
#' cryopreserved (frozen), thawed, sorted, and gene profiled cells.
#' The fifth and sixth column contain the the test for the hypothesis of no
#' differentially expression between fresh and frozen cells.
#' The used array type was Affymetrix Human Exon 1.0 ST microarray.
#'
#' @docType data
#' @name freshVsFrozen
#' @details Further details can be found in Rasmussen and Bilgrau et al. (2015).
#' @format The format of the \code{data.frame} is:
#'
#' \code{'data.frame': 18708 obs. of 6 variables:}\cr
#' \code{$ PreVsPost.Fresh.tstat : num -1.073 -0.381 -1.105 -0.559 -1.054 ...}\cr
#' \code{$ PreVsPost.Fresh.pval : num 0.283 0.703 0.269 0.576 0.292 ...}\cr
#' \code{$ PreVsPost.Frozen.tstat: num -0.245 -0.731 -0.828 -0.568 -1.083 ...}\cr
#' \code{$ PreVsPost.Frozen.pval : num 0.806 0.465 0.408 0.57 0.279 ...}\cr
#' \code{$ FreshVsFrozen.tstat : num 0.836 1.135 -0.221 0.191 -0.783 ...}\cr
#' \code{$ FreshVsFrozen.pval : num 0.403 0.256 0.825 0.849 0.434 ...}\cr
#'
#' @author Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @references
#' Rasmussen SM, Bilgrau AE, Schmitz A, Falgreen S, Bergkvist KS, Tramm AM,
#' Baech J, Jacobsen CL, Gaihede M, Kjeldsen MK, Boedker JS, Dybkaer K,
#' Boegsted M, Johnsen HE (2015). "Stable Phenotype Of B-Cell Subsets Following
#' Cryopreservation and Thawing of Normal Human Lymphocytes Stored in a Tissue
#' Biobank." Cytometry Part B: Clinical Cytometry, 88(1), 40-49.
#' @keywords datasets, data
#' @examples
#' data(freshVsFrozen)
#' str(freshVsFrozen)
#'
#' # Plot P-values
#' plot(freshVsFrozen[,c(2,4)], cex = 0.5)
#'
#' # Plot ranked and scaled P-values
#' plot(Uhat(abs(freshVsFrozen[,c(1,3)])), cex = 0.5)
NULL
# The following ensures that the DLL is unloaded when the package is unloaded.
# See http://r-pkgs.had.co.nz/src.html
# `libpath` is supplied by R when the namespace is unloaded.
.onUnload <- function(libpath) {
  # Release the compiled code registered under this package's name
  library.dynam.unload("GMCM", libpath)
}
# 6f037e235ba745d55fea4baa9f32caf64ce2d336ac431578a4ce67ee6e1aebf2
| /R/GMCM-package.R | no_license | Guillermogsjc/GMCM | R | false | false | 9,010 | r | #' Fast optimization of Gaussian Mixture Copula Models
#'
#' Gaussian mixture copula models (GMCM) are a flexible class of statistical
#' models which can be used for unsupervised clustering, meta analysis, and
#' many other things. In meta analysis, GMCMs can be used to
#' quantify and identify which features have been reproduced across
#' multiple experiments. This package provides a fast and general
#' implementation of GMCM cluster analysis and serves as an improvement and
#' extension of the features available in the \code{idr} package.
#'
#' @name GMCM-package
#' @aliases GMCM-package GMCM
#' @details If the meta analysis of Li et al. (2011) is to be performed, the
#' function \code{\link{fit.meta.GMCM}} is used to identify the maximum
#' likelihood estimate of the special Gaussian mixture copula model (GMCM)
#' defined by Li et al. (2011). The function \code{\link{get.IDR}}
#' computes the local and adjusted Irreproducible Discovery Rates defined
#' by Li et al. (2011) to determine the level of reproducibility.
#'
#' Tewari et. al. (2011) proposed using GMCMs as an general unsupervised
#' clustering tool. If such a general unsupervised clustering is needed, like
#' above, the function \code{\link{fit.full.GMCM}} computes the maximum
#' likelihood estimate of the general GMCM. The function
#' \code{\link{get.prob}} is used to estimate the class membership
#' probabilities of each observation.
#'
#' \code{\link{SimulateGMCMData}} provide easy simulation from the GMCMs.
#'
#' @author
#' Anders Ellern Bilgrau,
#' Martin Boegsted,
#' Poul Svante Eriksen
#'
#' Maintainer: Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @docType package
#' @references
#' Anders Ellern Bilgrau, Poul Svante Eriksen, Jakob Gulddahl Rasmussen,
#' Hans Erik Johnsen, Karen Dybkaer, Martin Boegsted (2016). GMCM:
#' Unsupervised Clustering and Meta-Analysis Using Gaussian Mixture Copula
#' Models. Journal of Statistical Software, 70(2), 1-23.
#' doi:10.18637/jss.v070.i02
#'
#' Li, Q., Brown, J. B. J. B., Huang, H., & Bickel, P. J. (2011).
#' Measuring reproducibility of high-throughput experiments. The Annals of
#' Applied Statistics, 5(3), 1752-1779. doi:10.1214/11-AOAS466
#'
#' Tewari, A., Giering, M. J., & Raghunathan, A. (2011). Parametric
#' Characterization of Multimodal Distributions with Non-gaussian Modes.
#' 2011 IEEE 11th International Conference on Data Mining Workshops,
#' 286-292. doi:10.1109/ICDMW.2011.135
#' @seealso
#' Core user functions: \code{\link{fit.meta.GMCM}},
#' \code{\link{fit.full.GMCM}}, \code{\link{get.IDR}},
#' \code{\link{get.prob}}, \code{\link{SimulateGMCMData}},
#' \code{\link{SimulateGMMData}}, \code{\link{rtheta}},
#' \code{\link{Uhat}}, \code{\link{choose.theta}},
#' \code{\link{full2meta}}, \code{\link{meta2full}}
#'
#' Package by Li et. al. (2011): \code{\link[idr:idr-package]{idr}}.
#' @useDynLib GMCM
#' @importFrom Rcpp evalCpp
#' @importFrom stats approxfun cov.wt cov2cor kmeans optim rchisq rnorm runif
#' @importFrom utils flush.console
#' @examples
#' # Loading data
#' data(u133VsExon)
#'
#' # Subsetting data to reduce computation time
#' u133VsExon <- u133VsExon[1:5000, ]
#'
#' # Ranking and scaling,
#' # Remember large values should be critical to the null!
#' uhat <- Uhat(1 - u133VsExon)
#'
#' # Visualizing P-values and the ranked and scaled P-values
#' \dontrun{
#' par(mfrow = c(1,2))
#' plot(u133VsExon, cex = 0.5, pch = 4, col = "tomato", main = "P-values",
#' xlab = "P (U133)", ylab = "P (Exon)")
#' plot(uhat, cex = 0.5, pch = 4, col = "tomato", main = "Ranked P-values",
#' xlab = "rank(1-P) (U133)", ylab = "rank(1-P) (Exon)")
#' }
#'
#' # Fitting using BFGS
#' fit <- fit.meta.GMCM(uhat, init.par = c(0.5, 1, 1, 0.5), pgtol = 1e-2,
#' method = "L-BFGS", positive.rho = TRUE, verbose = TRUE)
#'
#' # Compute IDR values and classify
#' idr <- get.IDR(uhat, par = fit)
#' table(idr$K) # 1 = irreproducible, 2 = reproducible
#'
#' \dontrun{
#' # See clustering results
#' par(mfrow = c(1,2))
#' plot(u133VsExon, cex = 0.5, pch = 4, main = "Classified genes",
#' col = c("tomato", "steelblue")[idr$K],
#' xlab = "P-value (U133)", ylab = "P-value (Exon)")
#' plot(uhat, cex = 0.5, pch = 4, main = "Classified genes",
#' col = c("tomato", "steelblue")[idr$K],
#' xlab = "rank(1-P) (U133)", ylab = "rank(1-P) (Exon)")
#' }
NULL
#' Reproducibility between U133 plus 2 and Exon microarrays
#'
#' This dataset contains a \code{data.frame} of unadjusted P-values for
#' differential expression between germinal center cells and other B-cells
#' within tonsils for two different experiments. The experiments differ
#' primarily in the microarray platform used. The first column corresponds the
#' evidence from the Affymetrix GeneChip Human Genome U133 Plus 2.0 Array.
#' The second column corresponds to the Affymetrix GeneChip Human Exon 1.0 ST
#' Array.
#' @docType data
#' @name u133VsExon
#' @details Further details can be found in Bergkvist et al. (2014) and
#' Rasmussen and Bilgrau et al. (2014).
#' @format The format of the \code{data.frame} is:
#'
#' \code{'data.frame': 19577 obs. of 2 variables:}\cr
#' \code{$ u133: num 0.17561 0.00178 0.005371 0.000669 0.655261 ...}\cr
#' \code{$ exon: num 1.07e-01 6.74e-10 1.51e-03 6.76e-05 3.36e-01 ...}\cr
#'
#' @author Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @references
#' Bergkvist, Kim Steve, Mette Nyegaard, Martin Boegsted, Alexander Schmitz,
#' Julie Stoeve Boedker, Simon Mylius Rasmussen, Martin Perez-Andres et al.
#' (2014). "Validation and Implementation of a Method for Microarray Gene
#' Expression Profiling of Minor B-Cell Subpopulations in Man".
#' BMC immunology, 15(1), 3.
#'
#' Rasmussen SM, Bilgrau AE, Schmitz A, Falgreen S, Bergkvist KS, Tramm AM,
#' Baech J, Jacobsen CL, Gaihede M, Kjeldsen MK, Boedker JS, Dybkaer K,
#' Boegsted M, Johnsen HE (2015). "Stable Phenotype Of B-Cell Subsets Following
#' Cryopreservation and Thawing of Normal Human Lymphocytes Stored in a Tissue
#' Biobank." Cytometry Part B: Clinical Cytometry, 88(1), 40-49.
#' @keywords datasets, data
#' @examples
#' data(u133VsExon)
#' str(u133VsExon)
#'
#' # Plot P-values
#' plot(u133VsExon, cex = 0.5)
#'
#' # Plot ranked and scaled P-values
#' plot(Uhat(1-u133VsExon), cex = 0.5)
NULL
#' Reproducibility between Fresh and Frozen B-cell subtypes
#'
#' This dataset contains a \code{data.frame} of \eqn{t}-scores (from a Linear
#' mixed effects model) and \eqn{p}-values for
#' differential expression between pre (Im, N) and post germinal (M, PB) centre
#' cells within peripheral blood.
#' The first and second column contain the the test for the hypothesis of no
#' differentially expression between pre and post germinal cells for the
#' freshly sorted and gene profiled cells.
#' The third and fourth column contain the the test for the hypothesis of no
#' differentially expression between pre and post germinal cells for the
#' cryopreserved (frozen), thawed, sorted, and gene profiled cells.
#' The fifth and sixth column contain the the test for the hypothesis of no
#' differentially expression between fresh and frozen cells.
#' The used array type was Affymetrix Human Exon 1.0 ST microarray.
#'
#' @docType data
#' @name freshVsFrozen
#' @details Further details can be found in Rasmussen and Bilgrau et al. (2015).
#' @format The format of the \code{data.frame} is:
#'
#' \code{'data.frame': 18708 obs. of 6 variables:}\cr
#' \code{$ PreVsPost.Fresh.tstat : num -1.073 -0.381 -1.105 -0.559 -1.054 ...}\cr
#' \code{$ PreVsPost.Fresh.pval : num 0.283 0.703 0.269 0.576 0.292 ...}\cr
#' \code{$ PreVsPost.Frozen.tstat: num -0.245 -0.731 -0.828 -0.568 -1.083 ...}\cr
#' \code{$ PreVsPost.Frozen.pval : num 0.806 0.465 0.408 0.57 0.279 ...}\cr
#' \code{$ FreshVsFrozen.tstat : num 0.836 1.135 -0.221 0.191 -0.783 ...}\cr
#' \code{$ FreshVsFrozen.pval : num 0.403 0.256 0.825 0.849 0.434 ...}\cr
#'
#' @author Anders Ellern Bilgrau <anders.ellern.bilgrau@@gmail.com>
#' @references
#' Rasmussen SM, Bilgrau AE, Schmitz A, Falgreen S, Bergkvist KS, Tramm AM,
#' Baech J, Jacobsen CL, Gaihede M, Kjeldsen MK, Boedker JS, Dybkaer K,
#' Boegsted M, Johnsen HE (2015). "Stable Phenotype Of B-Cell Subsets Following
#' Cryopreservation and Thawing of Normal Human Lymphocytes Stored in a Tissue
#' Biobank." Cytometry Part B: Clinical Cytometry, 88(1), 40-49.
#' @keywords datasets, data
#' @examples
#' data(freshVsFrozen)
#' str(freshVsFrozen)
#'
#' # Plot P-values
#' plot(freshVsFrozen[,c(2,4)], cex = 0.5)
#'
#' # Plot ranked and scaled P-values
#' plot(Uhat(abs(freshVsFrozen[,c(1,3)])), cex = 0.5)
NULL
# The following ensures that the DLL is unloaded when the package is unloaded.
# See http://r-pkgs.had.co.nz/src.html
# `libpath` is supplied by R when the namespace is unloaded.
.onUnload <- function(libpath) {
  # Release the compiled code registered under this package's name
  library.dynam.unload("GMCM", libpath)
}
# 6f037e235ba745d55fea4baa9f32caf64ce2d336ac431578a4ce67ee6e1aebf2
|
library(shiny)
#
# Idea and original code by Pierre Chretien
# Small updates by Michael Kapler
# More mods by iyermobile
#
# Shiny app simulating cash-flow scenarios during retirement
# Shiny server for a retirement cash-flow simulator: Monte Carlo projection of
# inflation-adjusted assets (pension, savings/401k, social security, ongoing
# contributions, monthly withdrawals) plotted against the owner's age.
shinyServer(function(input, output) {
  # Reactive projection. Returns a (monthsObserving x (numSims + 1)) matrix:
  # column 1 is age in years; remaining columns are simulated portfolio values
  # in millions of dollars, NA from the month the money runs out.
  projectRetirement <- reactive({
    yearsObserving = input$n.obs # what period of time do we want to look at?
    monthsObserving = 12 * yearsObserving
    ageNow = input$age.now
    delayYears = input$years.wait
    retireAgeYears = ageNow + delayYears
    ageMonths= ageNow * 12
    retireAgeMonths = retireAgeYears * 12  # NOTE(review): computed but never used below
    numSims = input$n.sim # how many simulations do we want to perform?
    liquidN401Ks = input$liquid.n401Ks
    totalPension = input$total.pension
    numPensionPayouts = input$number.increments
    pensionPayout = totalPension/numPensionPayouts
    # PENSION MATRIX: the pension arrives in numPensionPayouts step increases,
    # each step worth one more pensionPayout, spread evenly over the months.
    if ( (numPensionPayouts > 0) & (totalPension > 0) ) {
      pensionMatrix = matrix(0,1,numSims) #matrix w bogus row1
      for (j in 1:numPensionPayouts) {
        pensionMatrixTmp = matrix(j*pensionPayout, monthsObserving/numPensionPayouts, numSims)
        pensionMatrix = rbind(pensionMatrix,pensionMatrixTmp)
      }
      if (monthsObserving %% numPensionPayouts != 0) { #to avoid having pension drop out at end due to modulo
        pensionMatrixTmp = matrix(numPensionPayouts*pensionPayout, monthsObserving %% numPensionPayouts, numSims)
        pensionMatrix = rbind(pensionMatrix, pensionMatrixTmp)
      }
      pensionMatrix = pensionMatrix[-(1),] #remove bogus row1
    } else { # no pension
      pensionMatrix = matrix(0,monthsObserving, numSims)
    }
    monthlyWithdrawals = input$monthly.withdrawals
    # Age axis: one entry per simulated month, starting at the current age
    ageSeq = seq(from=ageMonths, by=1, length.out=monthsObserving)
    ageVec = matrix(ageSeq)
    ageVecYears = ageVec/12
    # Social security: zero until the start age, then a flat monthly amount
    ssAmount = input$social.security
    ssStartYear = input$social.security.start
    ssStartMonth = ssStartYear * 12
    ssStartDelta = ssStartMonth - ageMonths
    if (ssStartDelta < 0 ) { ssStartDelta = 0 } # not dealing with negative time
    # NOTE(review): if ssStartDelta > monthsObserving, ssMatrixB would get a
    # negative row count and matrix() errors — confirm the UI forbids that.
    ssMatrixA = matrix(0, ssStartDelta, numSims) # two matrices - one before SS starts
    ssMatrixB = matrix(ssAmount, (monthsObserving-ssStartDelta), numSims) # one matrix for social security time
    ssMatrix = rbind(ssMatrixA, ssMatrixB)
    # Ongoing capital contributions for a finite number of years, then zero
    yearlyCapitalContribs = input$capital.contribs
    yearsContributing2capital = input$years.contributing
    if ( (yearlyCapitalContribs > 0) & (yearsContributing2capital > 0) ) { #assuming that capital contribution time finite
      monthlyCapitalContribs = yearlyCapitalContribs / 12
      monthsContributing2capital = yearsContributing2capital * 12
      capitalContribMatrixA = matrix(monthlyCapitalContribs, monthsContributing2capital, numSims)
      capitalContribMatrixB = matrix(0, (monthsObserving-monthsContributing2capital), numSims)
      capitalContribMatrix = rbind(capitalContribMatrixA, capitalContribMatrixB)
    } else {
      capitalContribMatrix = matrix(0, monthsObserving, numSims)
    }
    # Month-0 capital per simulation; rows 2..n are overwritten by the
    # recursion below.
    startCapital = pensionMatrix + liquidN401Ks + ssMatrix + capitalContribMatrix
    # monthly Investment and Inflation assumptions
    annualMeanReturn = input$annual.mean.return/100
    monthlyReturnMean = annualMeanReturn / 12
    annualReturnStdDev = input$annual.ret.std.dev/100
    monthlyReturnStdDev = annualReturnStdDev / sqrt(12)
    # simulate Returns (i.i.d. normal monthly returns)
    investReturnsMatrix = matrix(0, monthsObserving, numSims)
    investReturnsMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyReturnMean, sd = monthlyReturnStdDev)
    annualInflation = input$annual.inflation/100
    monthlyInflation = annualInflation / 12
    annualInflationStdDev = input$annual.inf.std.dev/100
    monthlyInflationStdDev = annualInflationStdDev / sqrt(12)
    # simulate effect of inflation (i.i.d. normal monthly inflation)
    inflationMatrix = matrix(0, monthsObserving, numSims)
    inflationMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyInflation, sd = monthlyInflationStdDev)
    nav = startCapital  # NOTE(review): nav is dead code; only startCapital is updated below
    # Month-by-month recursion: grow by (return - inflation), then withdraw.
    for (j in 1:(monthsObserving-1)) {
      startCapital[j + 1, ] = startCapital[j, ] * (1 + investReturnsMatrix[j, ] - inflationMatrix[j, ]) - monthlyWithdrawals
      #nav[j , ] = nav[j , ] + startCapital/input$number.increments
    }
    #nav = nav[-(monthsObserving+1) , ] # remove that last row we added in
    #for (j in 1:input$number.increments*12) {
    #if (j %% 12 == 0) {
    #}
    #}
    startCapital[ startCapital < 0 ] = NA # once nav is below 0 => run out of money
    Retirement = startCapital / 1000000 # convert to millions
    Retirement=cbind(ageVecYears,Retirement)  # prepend the age-in-years column
    #output$documentationText = renderText({"Adjust the slider bars to reflect the retirement scenario you wish to simulate."})
    output$documentationText = renderText({'... projecting retirement assets over time ...\n'})
    output$sourceText = renderText({"Idea and original code by Pierre Chretien, updated by Michael Kapler, and then IyerMobile. Source at https://github.com/iyermobile/peirod2retire. Comments/complaints to iyermobile@gmail.com"})
    return(Retirement)
  })
  # Plot every simulated asset path (columns 2..n) against age (column 1).
  output$distPlot <- renderPlot({
    Retirement = projectRetirement()
    layout(matrix(c(1,2,1,3),2,2))
    matplot(Retirement[ , 1], Retirement[ , -1 ], type = 'l', las = 1, ylab='Millions', xlab='Age')
  })
})
| /period2retire/server.r | no_license | iyermobile/period2retire | R | false | false | 4,990 | r | library(shiny)
#
# Idea and original code by Pierre Chretien
# Small updates by Michael Kapler
# More mods by iyermobile
#
# Shiny app simulating cash-flow scenarios during retirement
shinyServer(function(input, output) {
# projectRetirement: reactive that runs a Monte Carlo simulation of retirement
# assets. Returns a matrix whose first column is age in years and whose
# remaining columns are one simulated balance path each, expressed in millions
# of dollars (NA once a path has run out of money).
projectRetirement <- reactive({
# --- simulation horizon and basic ages (all converted to months) ---
yearsObserving = input$n.obs # what period of time do we want to look at?
monthsObserving = 12 * yearsObserving
ageNow = input$age.now
delayYears = input$years.wait
retireAgeYears = ageNow + delayYears
ageMonths= ageNow * 12
retireAgeMonths = retireAgeYears * 12 # NOTE(review): currently unused
numSims = input$n.sim # how many simulations do we want to perform?
liquidN401Ks = input$liquid.n401Ks
totalPension = input$total.pension
numPensionPayouts = input$number.increments
pensionPayout = totalPension/numPensionPayouts
# PENSION MATRIX
# Builds a months x sims matrix of pension amounts: each successive tranche
# of rows carries one more pensionPayout step than the previous tranche.
if ( (numPensionPayouts > 0) & (totalPension > 0) ) {
pensionMatrix = matrix(0,1,numSims) #matrix w bogus row1
for (j in 1:numPensionPayouts) {
pensionMatrixTmp = matrix(j*pensionPayout, monthsObserving/numPensionPayouts, numSims)
pensionMatrix = rbind(pensionMatrix,pensionMatrixTmp)
}
if (monthsObserving %% numPensionPayouts != 0) { #to avoid having pension drop out at end due to modulo
pensionMatrixTmp = matrix(numPensionPayouts*pensionPayout, monthsObserving %% numPensionPayouts, numSims)
pensionMatrix = rbind(pensionMatrix, pensionMatrixTmp)
}
pensionMatrix = pensionMatrix[-(1),] #remove bogus row1
} else { # no pension
pensionMatrix = matrix(0,monthsObserving, numSims)
}
monthlyWithdrawals = input$monthly.withdrawals
# Age axis: one row per simulated month, converted back to years for plotting.
ageSeq = seq(from=ageMonths, by=1, length.out=monthsObserving)
ageVec = matrix(ageSeq)
ageVecYears = ageVec/12
# --- social security: zero until the start age, then a flat monthly amount ---
ssAmount = input$social.security
ssStartYear = input$social.security.start
ssStartMonth = ssStartYear * 12
ssStartDelta = ssStartMonth - ageMonths
if (ssStartDelta < 0 ) { ssStartDelta = 0 } # not dealing with negative time
ssMatrixA = matrix(0, ssStartDelta, numSims) # two matrices - one before SS starts
ssMatrixB = matrix(ssAmount, (monthsObserving-ssStartDelta), numSims) # one matrix for social security time
ssMatrix = rbind(ssMatrixA, ssMatrixB)
# --- capital contributions: flat monthly amount for a finite number of years ---
yearlyCapitalContribs = input$capital.contribs
yearsContributing2capital = input$years.contributing
if ( (yearlyCapitalContribs > 0) & (yearsContributing2capital > 0) ) { #assuming that capital contribution time finite
monthlyCapitalContribs = yearlyCapitalContribs / 12
monthsContributing2capital = yearsContributing2capital * 12
capitalContribMatrixA = matrix(monthlyCapitalContribs, monthsContributing2capital, numSims)
capitalContribMatrixB = matrix(0, (monthsObserving-monthsContributing2capital), numSims)
capitalContribMatrix = rbind(capitalContribMatrixA, capitalContribMatrixB)
} else {
capitalContribMatrix = matrix(0, monthsObserving, numSims)
}
# NOTE(review): this sums per-month income amounts with the 401k lump sum in
# every row, but the simulation loop below overwrites rows 2..n from row 1,
# so effectively only row 1 acts as the starting balance -- confirm intent.
startCapital = pensionMatrix + liquidN401Ks + ssMatrix + capitalContribMatrix
# monthly Investment and Inflation assumptions
annualMeanReturn = input$annual.mean.return/100
monthlyReturnMean = annualMeanReturn / 12
annualReturnStdDev = input$annual.ret.std.dev/100
monthlyReturnStdDev = annualReturnStdDev / sqrt(12)
# simulate Returns (one random draw per month per simulation)
investReturnsMatrix = matrix(0, monthsObserving, numSims)
investReturnsMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyReturnMean, sd = monthlyReturnStdDev)
annualInflation = input$annual.inflation/100
monthlyInflation = annualInflation / 12
annualInflationStdDev = input$annual.inf.std.dev/100
monthlyInflationStdDev = annualInflationStdDev / sqrt(12)
# simulate effect of inflation
inflationMatrix = matrix(0, monthsObserving, numSims)
inflationMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyInflation, sd = monthlyInflationStdDev)
nav = startCapital # NOTE(review): nav is dead code below; only startCapital is used
# Propagate each balance forward month by month: apply the real (return minus
# inflation) growth, then subtract the monthly withdrawal.
for (j in 1:(monthsObserving-1)) {
startCapital[j + 1, ] = startCapital[j, ] * (1 + investReturnsMatrix[j, ] - inflationMatrix[j, ]) - monthlyWithdrawals
#nav[j , ] = nav[j , ] + startCapital/input$number.increments
}
#nav = nav[-(monthsObserving+1) , ] # remove that last row we added in
#for (j in 1:input$number.increments*12) {
#if (j %% 12 == 0) {
#}
#}
startCapital[ startCapital < 0 ] = NA # once nav is below 0 => run out of money
Retirement = startCapital / 1000000 # convert to millions
Retirement=cbind(ageVecYears,Retirement)
#output$documentationText = renderText({"Adjust the slider bars to reflect the retirement scenario you wish to simulate."})
output$documentationText = renderText({'... projecting retirement assets over time ...\n'})
output$sourceText = renderText({"Idea and original code by Pierre Chretien, updated by Michael Kapler, and then IyerMobile. Source at https://github.com/iyermobile/peirod2retire. Comments/complaints to iyermobile@gmail.com"})
return(Retirement)
})
# Plot every simulated path (in $MM, columns 2..n) against age in years (column 1).
output$distPlot <- renderPlot({
Retirement = projectRetirement()
layout(matrix(c(1,2,1,3),2,2))
matplot(Retirement[ , 1], Retirement[ , -1 ], type = 'l', las = 1, ylab='Millions', xlab='Age')
})
})
|
# Author: lily-tian
# Date: 20 Aug 2016
# Word frequency writer
# reads in the raw data
# Read the raw word-count data (the file has no header row).
words <- read.delim("data/all_words.csv", header = FALSE)
# Assign column names with a plain character vector; the previous cbind()
# built a 1-row matrix that colnames<- only accepted by coercion.
colnames(words) <- c("word", "year", "freq", "freqs")
# Make matching case-insensitive and express counts in thousands.
words$word <- toupper(words$word)
words$freq <- words$freq / 1000
words$freqs <- words$freqs / 1000
# Aggregate frequencies for each word across all time periods.
words.agg <- aggregate(cbind(freq, freqs) ~ word, data = words, FUN = sum)
# Rank words so that the most frequent word gets rank 1.
words.agg$rank <- rank(max(words.agg$freq) - words.agg$freq, ties.method = "max")
# Write out the table of word frequencies (in thousands).
write.table(words.agg, "wordfreq.txt", row.names = FALSE, col.names = FALSE)
| /wordfreq_writer.R | no_license | lily-tian/words | R | false | false | 669 | r | # Author: lily-tian
# Date: 20 Aug 2016
# Word frequency writer
# reads in the raw data
# Read the raw word-count data (the file has no header row).
words <- read.delim("data/all_words.csv", header = FALSE)
# NOTE(review): cbind() builds a 1-row matrix here; a plain c() vector is
# the idiomatic way to supply column names.
vars <- cbind("word", "year", "freq", "freqs")
colnames(words) <- vars
# Make matching case-insensitive and express counts in thousands.
words$word <- toupper(words$word)
words$freq <- words$freq / 1000
words$freqs <- words$freqs / 1000
# Aggregate frequencies for each word across all time periods.
words.agg <- aggregate(cbind(freq, freqs) ~ word, data = words, FUN = sum)
# Rank words so that the most frequent word gets rank 1.
words.agg$rank <- rank(max(words.agg$freq) - words.agg$freq, ties.method = "max")
# Write out the table of word frequencies (in thousands).
write.table(words.agg, "wordfreq.txt", row.names = FALSE, col.names = FALSE)
|
library(dplyr)
library(ggplot2)
# Begin Exclude Linting
library(gridExtra)
# End Exclude Linting
####################################################
# #
# EXPORTED FUNCTIONS (A-Z #
# #
####################################################
#' question_1a_002_charts_latitude
#'
#' Generate scatterplot charts comparing weather variables with latitude.
#' @examples
#' question_1a_002_charts_latitude()
#' @export
question_1a_002_charts_latitude <- function() {
  # Weather features to chart against latitude, one scatterplot each.
  weather_features <- c(
    "hours_sun",
    "rain_mm",
    "temp_max_degrees_c",
    "temp_min_degrees_c"
  )
  generate_latitude_charts(
    question_1a_001_data(),
    weather_features,
    "weather_station_name"
  )
}
####################################################
# #
# NON EXPORTED FUNCTIONS (A-Z) #
# #
####################################################
#' generate_latitude_chart
#'
#' Generates a single scatterplot chart from a data_frame, comparing "x_variable_name" with "latitude".
#' @param data_frame A data_frame with "x_variable_name" and "latitude" features.
#' @param x_variable_name A string defining the name of the feature to be represent on the x-axis.
#' @param cluster_variable_name A string defining the clustering feature. Not being used at present.
#' @examples
#' generate_latitude_chart(data_frame = my_data_frame, x_variable_name = "x", cluster_variable_name = "region")
generate_latitude_chart <- function(
  data_frame,
  x_variable_name,
  cluster_variable_name
) {
  # Resolve the column name to a symbol so it can be injected into aes().
  x_column <- sym(x_variable_name)
  ggplot(
    data_frame,
    aes(x = !!x_column, y = latitude)
  ) +
    geom_point() +
    ylim(50, 60)
}
#' generate_latitude_charts
#'
#' Generates one latitude scatterplot per feature in `x_variable_names` and
#' arranges them together in a single grid.
#' @param data_frame A data_frame with the named features and a "latitude" feature.
#' @param x_variable_names A character vector of feature names, one chart each.
#' @param cluster_variable_name A string defining the clustering feature. Not being used at present.
generate_latitude_charts <- function(
  data_frame,
  x_variable_names,
  cluster_variable_name
) {
  # Build one chart per requested x variable.
  charts <- lapply(
    x_variable_names,
    function(x_variable_name) {
      generate_latitude_chart(
        data_frame,
        x_variable_name,
        cluster_variable_name
      )
    }
  )
  # Arrange however many charts were built; the previous version hard-coded
  # exactly four charts and failed for any other number of variables.
  do.call(grid.arrange, charts)
}
| /R/question-1a-002-charts-latitude.R | no_license | s1888637/weathr | R | false | false | 2,226 | r | library(dplyr)
library(ggplot2)
# Begin Exclude Linting
library(gridExtra)
# End Exclude Linting
####################################################
# #
# EXPORTED FUNCTIONS (A-Z #
# #
####################################################
#' question_1a_002_charts_latitude
#'
#' Generate scatterplot charts comparing weather variables with latitude.
#' @examples
#' question_1a_002_charts_latitude()
#' @export
question_1a_002_charts_latitude <- function() {
  # Weather features to chart against latitude, one scatterplot each.
  weather_features <- c(
    "hours_sun",
    "rain_mm",
    "temp_max_degrees_c",
    "temp_min_degrees_c"
  )
  generate_latitude_charts(
    question_1a_001_data(),
    weather_features,
    "weather_station_name"
  )
}
####################################################
# #
# NON EXPORTED FUNCTIONS (A-Z) #
# #
####################################################
#' generate_latitude_chart
#'
#' Generates a single scatterplot chart from a data_frame, comparing "x_variable_name" with "latitude".
#' @param data_frame A data_frame with "x_variable_name" and "latitude" features.
#' @param x_variable_name A string defining the name of the feature to be represent on the x-axis.
#' @param cluster_variable_name A string defining the clustering feature. Not being used at present.
#' @examples
#' generate_latitude_chart(data_frame = my_data_frame, x_variable_name = "x", cluster_variable_name = "region")
generate_latitude_chart <- function(
  data_frame,
  x_variable_name,
  cluster_variable_name
) {
  # Resolve the column name to a symbol so it can be injected into aes().
  x_column <- sym(x_variable_name)
  ggplot(
    data_frame,
    aes(x = !!x_column, y = latitude)
  ) +
    geom_point() +
    ylim(50, 60)
}
#' generate_latitude_charts
#'
#' Generates one latitude scatterplot per feature in `x_variable_names` and
#' arranges them together in a single grid.
#' @param data_frame A data_frame with the named features and a "latitude" feature.
#' @param x_variable_names A character vector of feature names, one chart each.
#' @param cluster_variable_name A string defining the clustering feature. Not being used at present.
generate_latitude_charts <- function(
  data_frame,
  x_variable_names,
  cluster_variable_name
) {
  # Build one chart per requested x variable.
  charts <- lapply(
    x_variable_names,
    function(x_variable_name) {
      generate_latitude_chart(
        data_frame,
        x_variable_name,
        cluster_variable_name
      )
    }
  )
  # Arrange however many charts were built; the previous version hard-coded
  # exactly four charts and failed for any other number of variables.
  do.call(grid.arrange, charts)
}
|
#'@name debiasedmcmc-package
#'@aliases debiasedmcmc
#'@docType package
#'@title debiasedmcmc
#'@author Pierre E. Jacob <pierre.jacob.work@@gmail.com>, John O'Leary
#'@description Unbiased MCMC estimators with couplings
#'@details This package contains scripts to reproduce the figures of the
#' paper "Unbiased Markov chain Monte Carlo with couplings" by
#' Pierre E. Jacob, John O'Leary and Yves F. Atchade.
#'@keywords package
#'@useDynLib debiasedmcmc
#'@importFrom Rcpp sourceCpp
NULL
| /R/debiasedmcmc-package.R | no_license | dennisprangle/debiasedmcmc | R | false | false | 485 | r | #'@name debiasedmcmc-package
#'@aliases debiasedmcmc
#'@docType package
#'@title debiasedmcmc
#'@author Pierre E. Jacob <pierre.jacob.work@@gmail.com>, John O'Leary
#'@description Unbiased MCMC estimators with couplings
#'@details This package contains scripts to reproduce the figures of the
#' paper "Unbiased Markov chain Monte Carlo with couplings" by
#' Pierre E. Jacob, John O'Leary and Yves F. Atchade.
#'@keywords package
#'@useDynLib debiasedmcmc
#'@importFrom Rcpp sourceCpp
NULL
|
TrainSuperMDS <-
function(d = NULL, y, alpha = .5, S = 2, x = NULL, nstarts = 5, silent = FALSE) {
  # Fit SuperMDS from several random starts and return the best fit,
  # judged by the smallest criterion value reached in each run.
  fits <- vector("list", nstarts)
  best_crit <- rep(NA, nstarts)
  for (iter in seq_len(nstarts)) {
    if (!silent) cat("Starting iter", iter, fill = TRUE)
    if (iter == 1) {
      # First run lets TrainSuperMDSOnce pick its own initialization.
      z_init <- NULL
    } else {
      # Later runs start from a random configuration, with the class-2
      # points shifted by +5 so the two classes begin separated.
      n <- nrow(fits[[1]]$d)
      z_init <- matrix(rnorm(n * S), nrow = n)
      z_init[y == 2, ] <- z_init[y == 2, ] + 5
    }
    fits[[iter]] <- TrainSuperMDSOnce(d = d, y = y, alpha = alpha, S = S, x = x, z = z_init)
    best_crit[iter] <- min(fits[[iter]]$crits)
  }
  fits[[which.min(best_crit)]]
}
| /R/TrainSuperMDS.R | no_license | cran/superMDS | R | false | false | 676 | r | TrainSuperMDS <-
function(d=NULL, y, alpha=.5, S=2, x=NULL, nstarts=5, silent=FALSE){
# Fit SuperMDS from several random starts and return the fit with the
# smallest criterion value. d: optional dissimilarities; y: class labels;
# S: embedding dimension; nstarts: number of restarts; silent: quiet mode.
outputs <- list()
crits <- rep(NA, nstarts)
if(!silent) cat("Starting iter 1", fill=TRUE)
# First start lets TrainSuperMDSOnce choose its own initialization (z=NULL).
outputs[[1]] <- TrainSuperMDSOnce(d=d,y=y,alpha=alpha,S=S,x=x,z=NULL)
crits[1] <- min(outputs[[1]]$crits)
if(nstarts>1){
for(iter in 2:nstarts){
if(!silent) cat("Starting iter", iter, fill=TRUE)
# Random initial configuration; class-2 points are offset by +5 so the
# two classes start out separated.
z <- matrix(rnorm(nrow(outputs[[1]]$d)*S), nrow=nrow(outputs[[1]]$d))
z[y==2,] <- z[y==2,]+5
outputs[[iter]] <- TrainSuperMDSOnce(d=d,y=y,alpha=alpha,S=S,x=x,z=z)
crits[iter] <- min(outputs[[iter]]$crits)
}
}
# Return the start that achieved the smallest criterion value.
return(outputs[[which.min(crits)]])
}
|
# ## import libraries
library(shiny)
library(ggplot2)
library(dplyr)
library(DT)
library(stringr)
library(shinyBS)
library(shinyjs)
library(ggthemes)
## import functions
source('./functions/helper.R')
source('./functions/plot.R')
source('./functions/aggregate.R')
## import global constants
source('./global_constants.R')
## file size options
# by default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 10GB.
options(shiny.maxRequestSize = 10000*1024^2)
shinyServer(function(input, output, session) {
  # The server logic is split into small files; each is sourced with
  # local=TRUE so the sourced code runs inside this function's environment
  # and can see input, output and session directly.
  ## reactive variables
  source('./reactives/reactives.R', local=TRUE) # general/miscellaneous
  source('./reactives/dataset.R', local=TRUE) # dataset variables
  source('./reactives/plotWidgetVals.R', local=TRUE) # plot widget values
  source('./reactives/plotWidgetNames.R', local=TRUE) # plot widget names
  source('./reactives/plotWidgetOpts.R', local=TRUE) # plot widget options
  source('./reactives/plotWidgetsDisplayCond.R', local=TRUE) # plot widgets display condition
  source('./reactives/plotWidgetsLoadedCond.R', local=TRUE) # plot widgets load conditions
  source('./reactives/plotWidgetSelectedVals.R', local=TRUE) # plot widget selected values
  source('./reactives/plot.R', local=TRUE) # plot
  ## UI controls
  source('./uiWidgets/generalWidgets.R', local=TRUE)
  source('./uiWidgets/fileWidgets.R', local=TRUE)
  source('./uiWidgets/manAggWidgets.R', local=TRUE)
  source('./uiWidgets/plotWidgets.R', local=TRUE)
  ## download handlers
  source('./reactives/download.R', local=TRUE)
  ## observed events
  source('./observeEvents.R', local=TRUE)
})
| /inst/raptR/server.R | permissive | newey/raptR | R | false | false | 1,650 | r | # ## import libraries
library(shiny)
library(ggplot2)
library(dplyr)
library(DT)
library(stringr)
library(shinyBS)
library(shinyjs)
library(ggthemes)
## import functions
source('./functions/helper.R')
source('./functions/plot.R')
source('./functions/aggregate.R')
## import global constants
source('./global_constants.R')
## file size options
# by default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 10GB.
options(shiny.maxRequestSize = 10000*1024^2)
shinyServer(function(input, output, session) {
  # The server logic is split into small files; each is sourced with
  # local=TRUE so the sourced code runs inside this function's environment
  # and can see input, output and session directly.
  ## reactive variables
  source('./reactives/reactives.R', local=TRUE) # general/miscellaneous
  source('./reactives/dataset.R', local=TRUE) # dataset variables
  source('./reactives/plotWidgetVals.R', local=TRUE) # plot widget values
  source('./reactives/plotWidgetNames.R', local=TRUE) # plot widget names
  source('./reactives/plotWidgetOpts.R', local=TRUE) # plot widget options
  source('./reactives/plotWidgetsDisplayCond.R', local=TRUE) # plot widgets display condition
  source('./reactives/plotWidgetsLoadedCond.R', local=TRUE) # plot widgets load conditions
  source('./reactives/plotWidgetSelectedVals.R', local=TRUE) # plot widget selected values
  source('./reactives/plot.R', local=TRUE) # plot
  ## UI controls
  source('./uiWidgets/generalWidgets.R', local=TRUE)
  source('./uiWidgets/fileWidgets.R', local=TRUE)
  source('./uiWidgets/manAggWidgets.R', local=TRUE)
  source('./uiWidgets/plotWidgets.R', local=TRUE)
  ## download handlers
  source('./reactives/download.R', local=TRUE)
  ## observed events
  source('./observeEvents.R', local=TRUE)
})
|
# Build the ImmPortR hex sticker from the local cargo-ship icon.
# p_* style the package name text, h_* style the hexagon fill/border,
# s_* position and size the subplot image inside the hexagon.
hexSticker::sticker(
  subplot = "ImmPortR/icons8-cargo-ship-100.png",
  package = "ImmPortR", p_family = "serif", p_y = 1.35,
  h_fill = "#222222", h_color = "#53309E", h_size = 1.4,
  s_x = 1, s_y = 0.75, s_width = 0.35, s_height = 0.35,
  filename = "ImmPortR/logo.png"
)
# Alternative version using the official ImmPort icon fetched from the web:
# hexSticker::sticker(
#   subplot = "https://www.immport.org/images/header/immport-resources-icon-black-background.png",
#   package = "ImmPortR", p_family = "serif", p_y = 1.35,
#   h_fill = "#222222", h_color = "#53309E", h_size = 1.4,
#   s_x = 1, s_y = 0.75,
#   filename = "ImmPortR/logo_immport.png"
# )
| /ImmPortR/logo_gen.R | permissive | RGLab/sticker | R | false | false | 587 | r | hexSticker::sticker(
subplot = "ImmPortR/icons8-cargo-ship-100.png",
package = "ImmPortR", p_family = "serif", p_y = 1.35,
h_fill = "#222222", h_color = "#53309E", h_size = 1.4,
s_x = 1, s_y = 0.75, s_width = 0.35, s_height = 0.35,
filename = "ImmPortR/logo.png"
)
# hexSticker::sticker(
# subplot = "https://www.immport.org/images/header/immport-resources-icon-black-background.png",
# package = "ImmPortR", p_family = "serif", p_y = 1.35,
# h_fill = "#222222", h_color = "#53309E", h_size = 1.4,
# s_x = 1, s_y = 0.75,
# filename = "ImmPortR/logo_immport.png"
# )
|
# Set the working directory where the data file lives
setwd("~/coursera")

# Read the full dataset; fields are semicolon-separated and "?" marks NA
hpc <- read.table("household_power_consumption.txt",
                  header = TRUE, sep = ";", na.strings = "?")

# Keep only the observations for 1 Feb 2007 and 2 Feb 2007, stacked in order
feb01 <- subset(hpc, hpc$Date == "1/2/2007")
feb02 <- subset(hpc, hpc$Date == "2/2/2007")
feb_data <- rbind(feb01, feb02)

# Draw the histogram and save it as plot1.png (480 x 480 pixels)
png(filename = "plot1.png", width = 480, height = 480)
hist(feb_data$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
| /plot1.R | no_license | WisdomAmet/ExData_Plotting1 | R | false | false | 614 | r | #SETTING WORKING DIRECTORY
# Set the working directory where the data file lives.
setwd("~/coursera")
# Read the full dataset; fields are semicolon-separated and "?" marks NA.
HPC = read.table("household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
# Keep only the observations for 1 Feb 2007 and 2 Feb 2007, stacked in order.
DataOne = subset(HPC, HPC$Date == "1/2/2007")
DataTwo = subset(HPC, HPC$Date == "2/2/2007")
NewData = rbind(DataOne, DataTwo)
# Draw the histogram of global active power and save it as plot1.png
# (480 x 480 pixels).
png(filename ="plot1.png", width = 480, height = 480)
hist(NewData$Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
### ENet Molecular Feature only
### Trains five elastic-net Cox models (one per molecular feature set) on the
### METABRIC training data, reports their training concordance indices, and
### submits each trained model to the competition.
###################################################
### step 1: loadLibraries
###################################################
library(predictiveModeling)
library(BCC)
library(survival)
library(survcomp)
library(MASS)
library(rms)
# SECURITY NOTE(review): credentials are hard-coded here in plain text; they
# should be moved to local configuration (see the commented synapseLogin()
# under step 2) before this script is shared.
synapseLogin("in.sock.jang@sagebase.org","tjsDUD@")
###################################################ac
### step 2: loadData
###################################################
# synapseLogin() ### not required if configured for automatic login
trainingData <- loadMetabricTrainingData()
###################################################
### step 3: call predefined Models' classFile
###################################################
modelClassFile = ("~/COMPBIO/trunk/users/jang/R5/myEnetCoxIterModel.R")
source(modelClassFile)
modelClassFile1 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelCancerCensusM.R")
modelClassFile2 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanMarginalAssociationM.R")
modelClassFile3 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanMetabricClusteringM.R")
modelClassFile4 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanTopvaringHigginsM.R")
modelClassFile5 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanTopvaringM.R")
source(modelClassFile1)
source(modelClassFile2)
source(modelClassFile3)
source(modelClassFile4)
source(modelClassFile5)
###################################################
### step 4: trainModel
### Each model is trained on expression + copy number + clinical features
### against the survival response, then used to predict on the training set.
###################################################
# ENet Grid Setting
alphas =unique(createENetTuneGrid()[,1])
lambdas = createENetTuneGrid(alphas = 1)[,2]
# NOTE(review): the next line overwrites the lambda grid built just above.
lambdas <- exp(seq(-5, 2, length = 50))
CancerCensusM <- metabricENetCoxModelCancerCensusM$new()
CancerCensusM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions1 <- CancerCensusM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
MarginalAssociationM <- metabricENetCoxModelMeanMarginalAssociationM$new()
MarginalAssociationM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions2 <- MarginalAssociationM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
MetabricClusteringM <- metabricENetCoxModelMeanMetabricClusteringM$new()
MetabricClusteringM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions3 <- MetabricClusteringM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
TopvaringHigginsM <- metabricENetCoxModelMeanTopvaringHigginsM$new()
TopvaringHigginsM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions4 <- TopvaringHigginsM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
TopvaringM <- metabricENetCoxModelMeanTopvaringM$new()
TopvaringM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions5 <- TopvaringM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
###################################################
### step 5: computeTrainCIndex
### Concordance index of each model's predictions on the training set.
###################################################
trainPerformance1 <- SurvivalModelPerformance$new(as.numeric(trainPredictions1), trainingData$clinicalSurvData)
trainPerformance2 <- SurvivalModelPerformance$new(as.numeric(trainPredictions2), trainingData$clinicalSurvData)
trainPerformance3 <- SurvivalModelPerformance$new(as.numeric(trainPredictions3), trainingData$clinicalSurvData)
trainPerformance4 <- SurvivalModelPerformance$new(as.numeric(trainPredictions4), trainingData$clinicalSurvData)
trainPerformance5 <- SurvivalModelPerformance$new(as.numeric(trainPredictions5), trainingData$clinicalSurvData)
print(trainPerformance1$getExactConcordanceIndex())
print(trainPerformance2$getExactConcordanceIndex())
print(trainPerformance3$getExactConcordanceIndex())
print(trainPerformance4$getExactConcordanceIndex())
print(trainPerformance5$getExactConcordanceIndex())
###################################################
### step 6: submitModel
###################################################
myModelName1 = "InSock ENet M CancerCensus"
myModelName2 = "InSock ENet M MarginalAssociation"
myModelName3 = "InSock ENet M MetabricClustering"
myModelName4 = "InSock ENet M TopVaringHiggins"
myModelName5 = "InSock ENet M TopVaring"
submitCompetitionModel(modelName = myModelName1, trainedModel=CancerCensusM,rFiles=list(modelClassFile1,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName2, trainedModel=MarginalAssociationM,rFiles=list(modelClassFile2,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName3, trainedModel=MetabricClusteringM,rFiles=list(modelClassFile3,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName4, trainedModel=TopvaringHigginsM,rFiles=list(modelClassFile4,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName5, trainedModel=TopvaringM,rFiles=list(modelClassFile5,modelClassFile), parentDatasetId = "syn308537")
| /survival_analysis/IterCV_Gaurav/ENet_M_submission.R | no_license | insockjang/DrugResponse | R | false | false | 5,789 | r | ### ENet Molecular Feature only
###################################################
### step 1: loadLibraries
### Trains five elastic-net Cox models (one per molecular feature set) on the
### METABRIC training data, reports their training concordance indices, and
### submits each trained model to the competition.
###################################################
library(predictiveModeling)
library(BCC)
library(survival)
library(survcomp)
library(MASS)
library(rms)
# SECURITY NOTE(review): credentials are hard-coded here in plain text; they
# should be moved to local configuration (see the commented synapseLogin()
# under step 2) before this script is shared.
synapseLogin("in.sock.jang@sagebase.org","tjsDUD@")
###################################################ac
### step 2: loadData
###################################################
# synapseLogin() ### not required if configured for automatic login
trainingData <- loadMetabricTrainingData()
###################################################
### step 3: call predefined Models' classFile
###################################################
modelClassFile = ("~/COMPBIO/trunk/users/jang/R5/myEnetCoxIterModel.R")
source(modelClassFile)
modelClassFile1 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelCancerCensusM.R")
modelClassFile2 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanMarginalAssociationM.R")
modelClassFile3 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanMetabricClusteringM.R")
modelClassFile4 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanTopvaringHigginsM.R")
modelClassFile5 = ("~/COMPBIO/trunk/users/jang/survival_analysis/IterCV_Gaurav/ML/metabricENetCoxModelMeanTopvaringM.R")
source(modelClassFile1)
source(modelClassFile2)
source(modelClassFile3)
source(modelClassFile4)
source(modelClassFile5)
###################################################
### step 4: trainModel
### Each model is trained on expression + copy number + clinical features
### against the survival response, then used to predict on the training set.
###################################################
# ENet Grid Setting
alphas =unique(createENetTuneGrid()[,1])
lambdas = createENetTuneGrid(alphas = 1)[,2]
# NOTE(review): the next line overwrites the lambda grid built just above.
lambdas <- exp(seq(-5, 2, length = 50))
CancerCensusM <- metabricENetCoxModelCancerCensusM$new()
CancerCensusM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions1 <- CancerCensusM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
MarginalAssociationM <- metabricENetCoxModelMeanMarginalAssociationM$new()
MarginalAssociationM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions2 <- MarginalAssociationM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
MetabricClusteringM <- metabricENetCoxModelMeanMetabricClusteringM$new()
MetabricClusteringM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions3 <- MetabricClusteringM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
TopvaringHigginsM <- metabricENetCoxModelMeanTopvaringHigginsM$new()
TopvaringHigginsM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions4 <- TopvaringHigginsM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
TopvaringM <- metabricENetCoxModelMeanTopvaringM$new()
TopvaringM$customTrain(trainingData$exprData,trainingData$copyData,trainingData$clinicalFeaturesData,trainingData$clinicalSurvData, alpha = alphas,lambda = lambdas)
trainPredictions5 <- TopvaringM$customPredict(trainingData$exprData, trainingData$copyData, trainingData$clinicalFeaturesData)
###################################################
### step 5: computeTrainCIndex
### Concordance index of each model's predictions on the training set.
###################################################
trainPerformance1 <- SurvivalModelPerformance$new(as.numeric(trainPredictions1), trainingData$clinicalSurvData)
trainPerformance2 <- SurvivalModelPerformance$new(as.numeric(trainPredictions2), trainingData$clinicalSurvData)
trainPerformance3 <- SurvivalModelPerformance$new(as.numeric(trainPredictions3), trainingData$clinicalSurvData)
trainPerformance4 <- SurvivalModelPerformance$new(as.numeric(trainPredictions4), trainingData$clinicalSurvData)
trainPerformance5 <- SurvivalModelPerformance$new(as.numeric(trainPredictions5), trainingData$clinicalSurvData)
print(trainPerformance1$getExactConcordanceIndex())
print(trainPerformance2$getExactConcordanceIndex())
print(trainPerformance3$getExactConcordanceIndex())
print(trainPerformance4$getExactConcordanceIndex())
print(trainPerformance5$getExactConcordanceIndex())
###################################################
### step 6: submitModel
###################################################
myModelName1 = "InSock ENet M CancerCensus"
myModelName2 = "InSock ENet M MarginalAssociation"
myModelName3 = "InSock ENet M MetabricClustering"
myModelName4 = "InSock ENet M TopVaringHiggins"
myModelName5 = "InSock ENet M TopVaring"
submitCompetitionModel(modelName = myModelName1, trainedModel=CancerCensusM,rFiles=list(modelClassFile1,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName2, trainedModel=MarginalAssociationM,rFiles=list(modelClassFile2,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName3, trainedModel=MetabricClusteringM,rFiles=list(modelClassFile3,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName4, trainedModel=TopvaringHigginsM,rFiles=list(modelClassFile4,modelClassFile), parentDatasetId = "syn308537")
submitCompetitionModel(modelName = myModelName5, trainedModel=TopvaringM,rFiles=list(modelClassFile5,modelClassFile), parentDatasetId = "syn308537")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wav2db.R
\name{wav2db}
\alias{wav2db}
\title{Extract pulses and store them in a soundcluster database}
\usage{
wav2db(db, path, recursive = TRUE, make, model, serial = NA_character_,
te_factor = 1, channel = c("left", "right"), max_length = 30,
window_ms = 1, overlap = 0.9, threshold_amplitude = 10,
min_peak_amplitude = 30, dimensions = 32, existing = c("append",
"skip"), frequency_range = c(10000, 130000))
}
\arguments{
\item{db}{A connection to a soundcluster database}
\item{path}{the name of a file or a directory}
\item{recursive}{logical. Should the listing recurse into directories?}
\item{make}{the manufacturer of the device}
\item{model}{the model of the device}
\item{serial}{the optional serial number of the device}
\item{te_factor}{The factor to which the original sound was slowed down prior
to recording}
\item{channel}{Select the left or the right channel}
\item{max_length}{Maximum length of the recording to use in seconds. If the
recording is longer, the last part is ignored.}
\item{window_ms}{The size of the window in milliseconds. Defaults to 1.}
\item{overlap}{The overlap of two windows. Defaults to 0.9.}
\item{threshold_amplitude}{relevant regions have an amplitude above the \code{threshold_amplitude}. Defaults to 10 dB.}
\item{min_peak_amplitude}{the maximum amplitude in a relevant region must be above \code{min_peak_amplitude}. Defaults to 30 dB.}
\item{dimensions}{the number of rows and columns used to resample the shape. Must be a single number and a power of 2. Will be altered to the next power of 2.}
\item{existing}{what to do with existing spectrograms. "append" will add new pulses. "skip" will skip the recording.}
\item{frequency_range}{the range of frequencies to use in Hz. Frequencies below the minimum or above the maximum are removed from the spectrogram. Defaults to 10000 (10 kHz) and 130000 (130 kHz).}
}
\description{
Extract pulses and store them in a soundcluster database
}
| /man/wav2db.Rd | no_license | ThierryO/soundcluster | R | false | true | 2,037 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wav2db.R
\name{wav2db}
\alias{wav2db}
\title{Extract pulses and store them in a soundcluster database}
\usage{
wav2db(db, path, recursive = TRUE, make, model, serial = NA_character_,
te_factor = 1, channel = c("left", "right"), max_length = 30,
window_ms = 1, overlap = 0.9, threshold_amplitude = 10,
min_peak_amplitude = 30, dimensions = 32, existing = c("append",
"skip"), frequency_range = c(10000, 130000))
}
\arguments{
\item{db}{A connection to a soundcluster database}
\item{path}{the name of a file or a directory}
\item{recursive}{logical. Should the listing recurse into directories?}
\item{make}{the manufacturer of the device}
\item{model}{the model of the device}
\item{serial}{the optional serial number of the device}
\item{te_factor}{The factor to which the original sound was slowed down prior
to recording}
\item{channel}{Select the left or the right channel}
\item{max_length}{Maximum length of the recording to use in seconds. If the
recording is longer, the last part is ignored.}
\item{window_ms}{The size of the window in milliseconds. Defaults to 1.}
\item{overlap}{The overlap of two windows. Defaults to 0.9.}
\item{threshold_amplitude}{relevant regions have an amplitude above the \code{threshold_amplitude}. Defaults to 10 dB.}
\item{min_peak_amplitude}{the maximum amplitude in a relevant region must be above \code{min_peak_amplitude}. Defaults to 30 dB.}
\item{dimensions}{the number of rows and columns used to resample the shape. Must be a single number and a power of 2. Will be altered to the next power of 2.}
\item{existing}{what to do with existing spectrograms. "append" will add new pulses. "skip" will skip the recording.}
\item{frequency_range}{the range of frequencies to use in Hz. Frequencies below the minimum or above the maximum are removed from the spectrogram. Defaults to 10000 (10 kHz) and 130000 (130 kHz).}
}
\description{
Extract pulses and store them in a soundcluster database
}
|
# Bagging is a randomized model, so let's set a seed (123) for reproducibility
# (exercise template: the `___` tokens are blanks for the student to fill in).
set.seed(___)
# Train a bagged model on the credit data.
# NOTE(review): bagging() is presumably ipred::bagging here; coob = TRUE
# requests the out-of-bag estimate of the misclassification error.
credit_model <- bagging(formula = default ~ .,
                        data = ___,
                        coob = TRUE)
# Print the model
print(credit_model)
| /exercises/exc_03_03.R | permissive | xiangao/tree-based-models-in-r | R | false | false | 287 | r | # Bagging is a randomized model, so let's set a seed (123) for reproducibility
# Exercise template: the `___` tokens are blanks for the student to fill in
# (the seed value and the training data frame).
set.seed(___)
# Train a bagged model on the credit data.
# NOTE(review): bagging() is presumably ipred::bagging here; coob = TRUE
# requests the out-of-bag estimate of the misclassification error.
credit_model <- bagging(formula = default ~ .,
                        data = ___,
                        coob = TRUE)
# Print the model
print(credit_model)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ReseqCrosstalkCalibration.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{ReseqCrosstalkCalibration}
\docType{class}
\alias{ReseqCrosstalkCalibration}
\title{The ReseqCrosstalkCalibration class}
\description{
Package: aroma.affymetrix \cr
\bold{Class ReseqCrosstalkCalibration}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.core]{AromaTransform}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{Transform}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{ReseqCrosstalkCalibration}\cr
\bold{Directly known subclasses:}\cr
\cr
public static class \bold{ReseqCrosstalkCalibration}\cr
extends \emph{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
This class represents a calibration function that transforms the
probe-level signals such that the signals from the four nucleotides
(A, C, G, T) are orthogonal.
}
\usage{
ReseqCrosstalkCalibration(dataSet=NULL, ..., targetAvg=2200, subsetToAvg=NULL,
mergeGroups=FALSE, flavor=c("sfit", "expectile"), alpha=c(0.1, 0.075, 0.05, 0.03,
0.01), q=2, Q=98)
}
\arguments{
\item{dataSet}{An \code{\link{AffymetrixCelSet}}.}
\item{...}{Arguments passed to the constructor of
\code{\link{ProbeLevelTransform}}.}
\item{targetAvg}{The signal(s) that the average of the sum of the
probe quartets should have after calibration.}
\item{subsetToAvg}{The indices of the cells (taken as the intersect of
existing indices) used to calculate average in order to rescale to
the target average. If \code{\link[base]{NULL}}, all probes are considered.}
\item{mergeGroups}{A \code{\link[base]{logical}} ...}
\item{flavor}{A \code{\link[base]{character}} string specifying what algorithm is used
to fit the crosstalk calibration.}
\item{alpha, q, Q}{Additional arguments passed to
\code{fitMultiDimensionalCone()}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{process} \tab -\cr
}
\bold{Methods inherited from ProbeLevelTransform}:\cr
getRootPath
\bold{Methods inherited from Transform}:\cr
getOutputDataSet, getOutputFiles
\bold{Methods inherited from AromaTransform}:\cr
as.character, findFilesTodo, getAsteriskTags, getExpectedOutputFiles, getExpectedOutputFullnames, getFullName, getInputDataSet, getName, getOutputDataSet, getOutputDataSet0, getOutputFiles, getPath, getRootPath, getTags, isDone, process, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
| /man/ReseqCrosstalkCalibration.Rd | no_license | microarray/aroma.affymetrix | R | false | false | 3,359 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ReseqCrosstalkCalibration.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{ReseqCrosstalkCalibration}
\docType{class}
\alias{ReseqCrosstalkCalibration}
\title{The ReseqCrosstalkCalibration class}
\description{
Package: aroma.affymetrix \cr
\bold{Class ReseqCrosstalkCalibration}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.core]{AromaTransform}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{Transform}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{ReseqCrosstalkCalibration}\cr
\bold{Directly known subclasses:}\cr
\cr
public static class \bold{ReseqCrosstalkCalibration}\cr
extends \emph{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
This class represents a calibration function that transforms the
probe-level signals such that the signals from the four nucleotides
(A, C, G, T) are orthogonal.
}
\usage{
ReseqCrosstalkCalibration(dataSet=NULL, ..., targetAvg=2200, subsetToAvg=NULL,
mergeGroups=FALSE, flavor=c("sfit", "expectile"), alpha=c(0.1, 0.075, 0.05, 0.03,
0.01), q=2, Q=98)
}
\arguments{
\item{dataSet}{An \code{\link{AffymetrixCelSet}}.}
\item{...}{Arguments passed to the constructor of
\code{\link{ProbeLevelTransform}}.}
\item{targetAvg}{The signal(s) that the average of the sum of the
probe quartets should have after calibration.}
\item{subsetToAvg}{The indices of the cells (taken as the intersect of
existing indices) used to calculate average in order to rescale to
the target average. If \code{\link[base]{NULL}}, all probes are considered.}
\item{mergeGroups}{A \code{\link[base]{logical}} ...}
\item{flavor}{A \code{\link[base]{character}} string specifying what algorithm is used
to fit the crosstalk calibration.}
\item{alpha, q, Q}{Additional arguments passed to
\code{fitMultiDimensionalCone()}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{process} \tab -\cr
}
\bold{Methods inherited from ProbeLevelTransform}:\cr
getRootPath
\bold{Methods inherited from Transform}:\cr
getOutputDataSet, getOutputFiles
\bold{Methods inherited from AromaTransform}:\cr
as.character, findFilesTodo, getAsteriskTags, getExpectedOutputFiles, getExpectedOutputFullnames, getFullName, getInputDataSet, getName, getOutputDataSet, getOutputDataSet0, getOutputFiles, getPath, getRootPath, getTags, isDone, process, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
# Auto-extracted example code for ALSM::SBCp (from the package's Rd examples).
library(ALSM)
### Name: SBCp
### Title: Calculate SBC
### Aliases: SBCp
### ** Examples
################## use the Surgical Unit data, page 360
# Fit lny ~ x4 and compute the SBC (Schwarz Bayesian Criterion) for the model.
SBCp(lm(lny~x4,SurgicalUnit))
| /data/genthat_extracted_code/ALSM/examples/9_SBCp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 178 | r | library(ALSM)
### Name: SBCp
### Title: Calculate SBC
### Aliases: SBCp
### ** Examples
################## use the Surgical Unit data, page 360
# Fit lny ~ x4 and compute the SBC (Schwarz Bayesian Criterion) for the model.
# NOTE(review): requires the ALSM package to be attached for SBCp/SurgicalUnit.
SBCp(lm(lny~x4,SurgicalUnit))
|
##########################################################################################################################################
# (Note) -> running this code sequentially is recommended!
# If an error occurs, open an issue under the github Finance/R-Finance-Programming path and it will be looked into.
##########################################################################################################################################
### Data structures
# For simple financial data, plain vectors are usually sufficient.
# Richer data structures include arrays, lists, data frames, factors, etc.
x<-1:5
y<-x^2
m<-lm(y~x)
class(m)
# m is an object of class "lm".
# At this point we do not yet know what the lm class looks like internally.
# However, every class is built on top of one of R's native data structures
# (e.g. vector, matrix, list or data frame), so mode() reveals the
# underlying base structure.
mode(m)
# Run mode() to inspect the underlying structure.
# m is a class built on top of a list,
# so list functions and operators can be used to inspect m's contents.
############################################################ Output (print) #################################################################
# > x<-1:5
# > y<-x^2
# > m<-lm(y~x)
# > class(m)
# [1] "lm"
# > mode(m)
# [1] "list"
##########################################################################################################################################
names(m)
# The first list element is named "coefficients".
# These are the regression coefficients; printing them confirms it.
m$coefficients
# In this way mode() and class() can be combined to analyse what a variable is.
############################################################ Output (print) #################################################################
# > names(m)
# [1] "coefficients"  "residuals"     "effects"       "rank"          "fitted.values" "assign"        "qr"            "df.residual"   "xlevels"       "call"
# [11] "terms"         "model"
# > m$coefficients
# (Intercept)           x
#          -7           6
##########################################################################################################################################
| /R-Finance-Programming/ch02_Data_manipulation/01_class_data_structure.R | no_license | Fintecuriosity11/Finance | R | false | false | 2,571 | r | ##########################################################################################################################################
# (Note) -> running this code sequentially is recommended!
# If an error occurs, open an issue under the github Finance/R-Finance-Programming path and it will be looked into.
##########################################################################################################################################
### Data structures
# For simple financial data, plain vectors are usually sufficient.
# Richer data structures include arrays, lists, data frames, factors, etc.
x<-1:5
y<-x^2
m<-lm(y~x)
class(m)
# m is an object of class "lm".
# At this point we do not yet know what the lm class looks like internally.
# However, every class is built on top of one of R's native data structures
# (e.g. vector, matrix, list or data frame), so mode() reveals the
# underlying base structure.
mode(m)
# Run mode() to inspect the underlying structure.
# m is a class built on top of a list,
# so list functions and operators can be used to inspect m's contents.
############################################################ Output (print) #################################################################
# > x<-1:5
# > y<-x^2
# > m<-lm(y~x)
# > class(m)
# [1] "lm"
# > mode(m)
# [1] "list"
##########################################################################################################################################
names(m)
# The first list element is named "coefficients".
# These are the regression coefficients; printing them confirms it.
m$coefficients
# In this way mode() and class() can be combined to analyse what a variable is.
############################################################ Output (print) #################################################################
# > names(m)
# [1] "coefficients"  "residuals"     "effects"       "rank"          "fitted.values" "assign"        "qr"            "df.residual"   "xlevels"       "call"
# [11] "terms"         "model"
# > m$coefficients
# (Intercept)           x
#          -7           6
##########################################################################################################################################
|
# Compute the distance between two points (default: Euclidean)
# type = {"Euclidian", "Manhattan")
# YOU SHOULD ALSO VERIFY THE INPUT GIVEN BY THE USER TO THE FUNCTION
# Compute the distance between two numeric points.
# type: "Euclidian" (L2-norm, default) or "Manhattan"/"L1" (L1-norm).
# The default spelling "Euclidian" [sic] is kept for backward compatibility.
# FIX: the original header said the input should be verified but never was;
# non-numeric or unequal-length inputs are now rejected explicitly.
myDistance <- function(x, y, type="Euclidian") {
  if (!is.numeric(x) || !is.numeric(y)) {
    stop("'x' and 'y' must be numeric vectors", call. = FALSE)
  }
  if (length(x) != length(y)) {
    stop("'x' and 'y' must have the same length", call. = FALSE)
  }
  if (type == "Manhattan" || type == "L1") { # L1-norm
    dist <- sum(abs(x - y))
  } else { # Euclidian or L2-norm by default
    dist <- sqrt(sum((x - y)^2))
  }
  dist
}
# Verify the computation
x <- c(2, 4); y <- c(3, 2)
myDistance(x, y)
sqrt(5) # must be equal to the previous computation
myDistance(x, y, type="Manhattan") # must be equal to 3
# ----------------------------------------------------------
# NOTE(review): install.packages() re-installs on every run; usually run once
# interactively rather than inside an analysis script.
install.packages("FNN")
library(FNN)
# Computer data: need to predict PRP (and ignore ERP)
myData <- read.table("ComputerData.txt", header=T)
# NOTE(review): attach() is discouraged -- it puts columns on the search path
# where they can silently mask other objects.
attach(myData)
# Remove the variable model (name), vendor (name) and ERP
# NOTE(review): column "CGMIN" -- confirm against the file header (the classic
# CPU-performance dataset names this column CHMIN).
usefulData <- myData[, c("MYCT", "MMIN", "MMAX", "CACH", "CGMIN", "CHMAX", "PRP")]
predictors <- names(usefulData)
# Standardized the values (Z score)
# NOTE(review): `means`/`sd` are lists produced by lapply(); verify that this
# data.frame arithmetic really standardizes column-wise as intended --
# scale(usefulData) is the usual idiom. `sd` also masks stats::sd from here on.
means <- lapply(usefulData, mean)
sd <- lapply(usefulData, sd)
usefulData <- (usefulData - means) / sd
summary(usefulData)
# compute the performance (Using Leaving-One-Out)
for (i in 1:11) {
  Computer.knn <- knn.reg(usefulData, test=NULL, PRP, k = i)
  cat("k: ", i, "press: ", Computer.knn$PRESS, "R^2: ", Computer.knn$R2Pred,"\n")
}
# k= 1 the best choice for the Computer Data
#
# Cars2 data: need to predict mpg
#
myData <- read.table("Cars2Data.txt", header=T)
# Keep only rows with a known horsepower value.
subset <- !(is.na(myData [,"horsepower"]))
# Remove the variable name to infer mpg
usefulData <- myData[subset, c("mpg", "cylinders", "displacement", "horsepower", "weight", "acceleration")]
predictors <- names(usefulData)
attach(usefulData)
# and repeat the previous computation (Using Leaving-One-Out)
for (i in 1:11) {
  Car.knn <- knn.reg(usefulData, test=NULL, mpg, k = i)
  cat("k: ", i, "press: ", Car.knn$PRESS, "R^2: ", Car.knn$R2Pred,"\n")
}
# Multiple regression: the best model is
car.lm3 <- lm(mpg ~ weight+horsepower+I(horsepower^2), data=usefulData)
summary(car.lm3) # R^2 73.76
# Comparing the k-nn and multiple regression
# Simple strategy: repeated hold-out (ten random 40-row test splits; total
# absolute prediction error is accumulated per method and compared at the end)
nb <- dim(usefulData)[1]
nbTest <- 10
diffKnn <- numeric(nbTest)
diffLM <- numeric(nbTest)
for (i in 1:nbTest) { # ten repeated holdout
  subset <- sample(1:nb, 40)
  trainData <- usefulData[-subset,]
  testData <- usefulData[subset,]
  car.knn <- knn.reg(trainData, test=testData, mpg, k=5)
  diffKnn[i] <- sum (abs(car.knn$pred - testData$mpg))
  car.lm <- lm(mpg ~ weight+horsepower+I(horsepower^2), data=trainData)
  car.lm.predict <- predict(car.lm,testData)
  diffLM[i] <- sum (abs(car.lm.predict - testData$mpg))
}
cat("Abs difference knn:", sum(diffKnn)," linear reg:",sum(diffLM)," \n")
| /Ex6/Commands ex.6 and 7.R | no_license | SimuJenni/Statistical-Learning-with-R | R | false | false | 2,797 | r | # Compute the distance between two points (default: Euclidean)
# type = {"Euclidian", "Manhattan")
# YOU SHOULD ALSO VERIFY THE INPUT GIVEN BY THE USER TO THE FUNCTION
# Compute the distance between two numeric points.
# type: "Euclidian" (L2-norm, default) or "Manhattan"/"L1" (L1-norm).
# The default spelling "Euclidian" [sic] is kept for backward compatibility.
# FIX: the original header said the input should be verified but never was;
# non-numeric or unequal-length inputs are now rejected explicitly.
myDistance <- function(x, y, type="Euclidian") {
  if (!is.numeric(x) || !is.numeric(y)) {
    stop("'x' and 'y' must be numeric vectors", call. = FALSE)
  }
  if (length(x) != length(y)) {
    stop("'x' and 'y' must have the same length", call. = FALSE)
  }
  if (type == "Manhattan" || type == "L1") { # L1-norm
    dist <- sum(abs(x - y))
  } else { # Euclidian or L2-norm by default
    dist <- sqrt(sum((x - y)^2))
  }
  dist
}
# Verify the computation
x <- c(2, 4); y <- c(3, 2)
myDistance(x, y)
sqrt(5) # must be equal to the previous computation
myDistance(x, y, type="Manhattan") # must be equal to 3
# ----------------------------------------------------------
# NOTE(review): install.packages() re-installs on every run; usually run once
# interactively rather than inside an analysis script.
install.packages("FNN")
library(FNN)
# Computer data: need to predict PRP (and ignore ERP)
myData <- read.table("ComputerData.txt", header=T)
# NOTE(review): attach() is discouraged -- it puts columns on the search path
# where they can silently mask other objects.
attach(myData)
# Remove the variable model (name), vendor (name) and ERP
# NOTE(review): column "CGMIN" -- confirm against the file header (the classic
# CPU-performance dataset names this column CHMIN).
usefulData <- myData[, c("MYCT", "MMIN", "MMAX", "CACH", "CGMIN", "CHMAX", "PRP")]
predictors <- names(usefulData)
# Standardized the values (Z score)
# NOTE(review): `means`/`sd` are lists produced by lapply(); verify that this
# data.frame arithmetic really standardizes column-wise as intended --
# scale(usefulData) is the usual idiom. `sd` also masks stats::sd from here on.
means <- lapply(usefulData, mean)
sd <- lapply(usefulData, sd)
usefulData <- (usefulData - means) / sd
summary(usefulData)
# compute the performance (Using Leaving-One-Out)
for (i in 1:11) {
  Computer.knn <- knn.reg(usefulData, test=NULL, PRP, k = i)
  cat("k: ", i, "press: ", Computer.knn$PRESS, "R^2: ", Computer.knn$R2Pred,"\n")
}
# k= 1 the best choice for the Computer Data
#
# Cars2 data: need to predict mpg
#
myData <- read.table("Cars2Data.txt", header=T)
# Keep only rows with a known horsepower value.
subset <- !(is.na(myData [,"horsepower"]))
# Remove the variable name to infer mpg
usefulData <- myData[subset, c("mpg", "cylinders", "displacement", "horsepower", "weight", "acceleration")]
predictors <- names(usefulData)
attach(usefulData)
# and repeat the previous computation (Using Leaving-One-Out)
for (i in 1:11) {
  Car.knn <- knn.reg(usefulData, test=NULL, mpg, k = i)
  cat("k: ", i, "press: ", Car.knn$PRESS, "R^2: ", Car.knn$R2Pred,"\n")
}
# Multiple regression: the best model is
car.lm3 <- lm(mpg ~ weight+horsepower+I(horsepower^2), data=usefulData)
summary(car.lm3) # R^2 73.76
# Comparing the k-nn and multiple regression
# Simple strategy: repeated hold-out (ten random 40-row test splits; total
# absolute prediction error is accumulated per method and compared at the end)
nb <- dim(usefulData)[1]
nbTest <- 10
diffKnn <- numeric(nbTest)
diffLM <- numeric(nbTest)
for (i in 1:nbTest) { # ten repeated holdout
  subset <- sample(1:nb, 40)
  trainData <- usefulData[-subset,]
  testData <- usefulData[subset,]
  car.knn <- knn.reg(trainData, test=testData, mpg, k=5)
  diffKnn[i] <- sum (abs(car.knn$pred - testData$mpg))
  car.lm <- lm(mpg ~ weight+horsepower+I(horsepower^2), data=trainData)
  car.lm.predict <- predict(car.lm,testData)
  diffLM[i] <- sum (abs(car.lm.predict - testData$mpg))
}
cat("Abs difference knn:", sum(diffKnn)," linear reg:",sum(diffLM)," \n")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scfind-package.R
\docType{package}
\name{scfind}
\alias{scfind}
\alias{scfind-package}
\title{Minimal Rcpp Module Example}
\description{
scfind
}
\details{
A minimal example of using Rcpp modules in an Rcpp package
}
\author{
Nikolaos Patikas
}
| /man/scfind.Rd | permissive | thjimmylee/scfind | R | false | true | 323 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scfind-package.R
\docType{package}
\name{scfind}
\alias{scfind}
\alias{scfind-package}
\title{Minimal Rcpp Module Example}
\description{
scfind
}
\details{
A minimal example of using Rcpp modules in an Rcpp package
}
\author{
Nikolaos Patikas
}
|
# Set working directory to a folder path previously copied to the clipboard.
# NOTE(review): readClipboard() is Windows-only.
setwd(readClipboard())
## Import SQL library to subset on the read in (only the two target dates are
## loaded from disk, avoiding reading the full dataset into memory).
library(sqldf)
household_data <- read.csv.sql("household_power_consumption.txt", sep = ";", header = TRUE, sql = "select * from file where Date in ('2/1/2007','2/2/2007')")
### Clean out time data and build a POSIXct timestamp per observation
household_data$newdate <- strptime(as.character(household_data$Date), "%m/%d/%Y")
household_data$Date <- format(household_data$newdate, "%Y-%m-%d")
household_data$Global_active_power <- as.numeric(as.character(household_data$Global_active_power))
household_data$datetime <- as.POSIXct(paste(household_data$Date, household_data$Time), format = "%Y-%m-%d %H:%M:%S")
### Save and create plot 3 as .png
# FIX: a png device was never opened, so the plot went to the default screen
# device and the final dev.off() targeted the wrong device.
png("plot3.png", width = 480, height = 480)
plot(household_data$datetime, household_data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", ylim = c(0, 30))
lines(household_data$datetime, household_data$Sub_metering_2, type = "l", col = "red")
lines(household_data$datetime, household_data$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1)
# dev.off() on the following line closes the png device.
dev.off() | /plot3.R | no_license | adam-moreno/ExData_Plotting1 | R | false | false | 1,201 | r | #Set wd
#Copy folder path to clipboard first; readClipboard() is Windows-only.
setwd(readClipboard())
## Import SQL library to subset on the read in (only the two target dates are
## loaded from disk, avoiding reading the full dataset into memory).
library(sqldf)
household_data <- read.csv.sql("household_power_consumption.txt", sep = ";", header = TRUE, sql = "select * from file where Date in ('2/1/2007','2/2/2007')")
### Clean out time data and build a POSIXct timestamp per observation
household_data$newdate <- strptime(as.character(household_data$Date), "%m/%d/%Y")
household_data$Date <- format(household_data$newdate, "%Y-%m-%d")
household_data$Global_active_power <- as.numeric(as.character(household_data$Global_active_power))
household_data$datetime <- as.POSIXct(paste(household_data$Date, household_data$Time), format = "%Y-%m-%d %H:%M:%S")
### Save and create plot 3 as .png
# FIX: a png device was never opened, so the plot went to the default screen
# device and the final dev.off() targeted the wrong device.
png("plot3.png", width = 480, height = 480)
plot(household_data$datetime, household_data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", ylim = c(0, 30))
lines(household_data$datetime, household_data$Sub_metering_2, type = "l", col = "red")
lines(household_data$datetime, household_data$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1)
# dev.off() on the following line closes the png device.
dev.off() |
# Static "description" panel for the Shiny UI: explains the data source
# (ISTAC tourist-expenditure survey) and how the app obtains the data via the
# istacbaser API wrapper. All user-facing text is intentionally in Spanish.
description.mainpanel <- mainPanel(
  fluidPage(
    tags$p("Esta aplicación web tiene por objeto la visualización de los datos públicos obtenidos en la ",
           tags$a(href="http://www.gobiernodecanarias.org/istac/temas_estadisticos/sectorservicios/hosteleriayturismo/demanda/C00028A.html",
                  "Encuesta sobre Gasto Turístico"),
           " del",
           tags$a(href = "http://www.gobiernodecanarias.org/istac/",
                  "Instituto Canario de Estadística (ISTAC)"),
           "."),
    tags$p("Actualmente, recoge datos de gasto y llegada de turistas según países de residencia, perfil y características del viaje."),
    tags$p("Se emplea metodología 2016."),
    tags$p("Los datos se muestran mediante gráficos dinámicos y tablas que pueden descargarse en formato CSV."),
    tags$p("Los datos publicados se han obtenido usando la API base del",
           tags$a(href = "http://www.gobiernodecanarias.org/istac/",
                  "ISTAC"),
           ", a través de la librería ",
           tags$a(href = "https://github.com/rOpenSpain/istacr", "istacbaser"),
           "."),
    tags$h2(""),
    # tags$h6("Aplicación Shiny en proceso de desarrollo... se muestran resultados preliminares."),
    hr(),
    # "Work in progress" footer with the Shiny logo.
    tags$h6("Aplicación",
            img(src = "https://www.rstudio.com/wp-content/uploads/2014/04/shiny.png", height = "30px"),
            "en proceso de desarrollo... se muestran resultados preliminares.")
  ), align = "left")
| /description_elements.R | no_license | jmcartiles/canary_islands_tourism_dashboard | R | false | false | 1,549 | r |
# Static "description" panel for the Shiny UI: explains the data source
# (ISTAC tourist-expenditure survey) and how the app obtains the data via the
# istacbaser API wrapper. All user-facing text is intentionally in Spanish.
description.mainpanel <- mainPanel(
  fluidPage(
    tags$p("Esta aplicación web tiene por objeto la visualización de los datos públicos obtenidos en la ",
           tags$a(href="http://www.gobiernodecanarias.org/istac/temas_estadisticos/sectorservicios/hosteleriayturismo/demanda/C00028A.html",
                  "Encuesta sobre Gasto Turístico"),
           " del",
           tags$a(href = "http://www.gobiernodecanarias.org/istac/",
                  "Instituto Canario de Estadística (ISTAC)"),
           "."),
    tags$p("Actualmente, recoge datos de gasto y llegada de turistas según países de residencia, perfil y características del viaje."),
    tags$p("Se emplea metodología 2016."),
    tags$p("Los datos se muestran mediante gráficos dinámicos y tablas que pueden descargarse en formato CSV."),
    tags$p("Los datos publicados se han obtenido usando la API base del",
           tags$a(href = "http://www.gobiernodecanarias.org/istac/",
                  "ISTAC"),
           ", a través de la librería ",
           tags$a(href = "https://github.com/rOpenSpain/istacr", "istacbaser"),
           "."),
    tags$h2(""),
    # tags$h6("Aplicación Shiny en proceso de desarrollo... se muestran resultados preliminares."),
    hr(),
    # "Work in progress" footer with the Shiny logo.
    tags$h6("Aplicación",
            img(src = "https://www.rstudio.com/wp-content/uploads/2014/04/shiny.png", height = "30px"),
            "en proceso de desarrollo... se muestran resultados preliminares.")
  ), align = "left")
|
# MLB team data: explore which team statistics best explain winning percentage.
mlb <- read.csv(file.choose(), header = TRUE)
# NOTE(review): attach() is discouraged; columns are referenced bare below.
attach(mlb)
# --- Simple regressions: WinPct on each single predictor --------------------
winpct_HR <- lm(WinPct ~ HR)
summary(winpct_HR)
winpct_Doubles <- lm(WinPct ~ Doubles)
summary(winpct_Doubles)
winpct_hitsallowed <- lm(WinPct ~ HitsAllowed)
summary(winpct_hitsallowed)
winpct_strikeouts <- lm(WinPct ~ StrikeOuts)
summary(winpct_strikeouts)
winpct_obp <- lm(WinPct ~ OBP)
summary(winpct_obp)
# --- Two-predictor models: OBP plus one other -------------------------------
winpct_obp_hr <- lm(WinPct ~ OBP + HR)
summary(winpct_obp_hr)
winpct_obp_doubles <- lm(WinPct ~ OBP + Doubles)
summary(winpct_obp_doubles)
winpct_obp_hitsallowed <- lm(WinPct ~ OBP + HitsAllowed)
summary(winpct_obp_hitsallowed)
winpct_obp_strikeouts <- lm(WinPct ~ OBP + StrikeOuts)
summary(winpct_obp_strikeouts)
# --- Three-predictor models built on OBP + HitsAllowed ----------------------
winpct_obp_hitsallowed_HR <- lm(WinPct ~ OBP + HitsAllowed + HR)
summary(winpct_obp_hitsallowed_HR)
winpct_obp_hitsallowed_doubles <- lm(WinPct ~ OBP + HitsAllowed + Doubles)
summary(winpct_obp_hitsallowed_doubles)
winpct_obp_hitsallowed_strikeouts <- lm(WinPct ~ OBP + HitsAllowed + StrikeOuts)
summary(winpct_obp_hitsallowed_strikeouts)
# --- Full model and manual backward elimination -----------------------------
big_model <- lm(WinPct ~ HR + Doubles + HitsAllowed + StrikeOuts + OBP)
summary(big_model)
model2 <- lm(WinPct ~ HR + HitsAllowed + StrikeOuts + OBP)
summary(model2)
model3 <- lm(WinPct ~ HR + HitsAllowed + OBP)
summary(model3)
model4 <- lm(WinPct ~ HitsAllowed + OBP)
summary(model4)
# --- All-subsets selection with leaps: Mallows' Cp and adjusted R^2 ---------
library(leaps)
predictors <- cbind(HR, Doubles, HitsAllowed, StrikeOuts, OBP)
malocp <- leaps(predictors, WinPct, method = 'Cp')
malocp
cbind(malocp$Cp, malocp$which)
adjustedr <- leaps(predictors, WinPct, method = 'adjr2')
cbind(adjustedr$adjr2, adjustedr$which)
| /problemSet2.R | no_license | viren-velacheri/Statistics-Programs | R | false | false | 1,569 | r | mlb <- read.csv(file.choose(), header = TRUE)
# MLB team data: explore which team statistics best explain winning percentage.
# NOTE(review): attach() is discouraged; columns are referenced bare below.
attach(mlb)
# --- Simple regressions: WinPct on each single predictor --------------------
winpct_HR <- lm(WinPct ~ HR)
summary(winpct_HR)
winpct_Doubles <- lm(WinPct ~ Doubles)
summary(winpct_Doubles)
winpct_hitsallowed <- lm(WinPct ~ HitsAllowed)
summary(winpct_hitsallowed)
winpct_strikeouts <- lm(WinPct ~ StrikeOuts)
summary(winpct_strikeouts)
winpct_obp <- lm(WinPct ~ OBP)
summary(winpct_obp)
# --- Two-predictor models: OBP plus one other -------------------------------
winpct_obp_hr <- lm(WinPct ~ OBP + HR)
summary(winpct_obp_hr)
winpct_obp_doubles <- lm(WinPct ~ OBP + Doubles)
summary(winpct_obp_doubles)
winpct_obp_hitsallowed <- lm(WinPct ~ OBP + HitsAllowed)
summary(winpct_obp_hitsallowed)
winpct_obp_strikeouts <- lm(WinPct ~ OBP + StrikeOuts)
summary(winpct_obp_strikeouts)
# --- Three-predictor models built on OBP + HitsAllowed ----------------------
winpct_obp_hitsallowed_HR <- lm(WinPct ~ OBP + HitsAllowed + HR)
summary(winpct_obp_hitsallowed_HR)
winpct_obp_hitsallowed_doubles <- lm(WinPct ~ OBP + HitsAllowed + Doubles)
summary(winpct_obp_hitsallowed_doubles)
winpct_obp_hitsallowed_strikeouts <- lm(WinPct ~ OBP + HitsAllowed + StrikeOuts)
summary(winpct_obp_hitsallowed_strikeouts)
# --- Full model and manual backward elimination -----------------------------
big_model <- lm(WinPct ~ HR + Doubles + HitsAllowed + StrikeOuts + OBP)
summary(big_model)
model2 <- lm(WinPct ~ HR + HitsAllowed + StrikeOuts + OBP)
summary(model2)
model3 <- lm(WinPct ~ HR + HitsAllowed + OBP)
summary(model3)
model4 <- lm(WinPct ~ HitsAllowed + OBP)
summary(model4)
# --- All-subsets selection with leaps: Mallows' Cp and adjusted R^2 ---------
library(leaps)
predictors <- cbind(HR, Doubles, HitsAllowed, StrikeOuts, OBP)
malocp <- leaps(predictors, WinPct, method = 'Cp')
malocp
cbind(malocp$Cp, malocp$which)
adjustedr <- leaps(predictors, WinPct, method = 'adjr2')
cbind(adjustedr$adjr2, adjustedr$which)
|
# Tests for sparklyr's PCA wrapper (ml_pca) and sdf_project().
context("pca")
sc <- testthat_spark_connection()
# Small 3 x 5 example matrix shared by all tests below; each test ships it to
# Spark via testthat_tbl("mat").
# NOTE(review): data_frame() is soft-deprecated in dplyr; tibble() is the
# modern replacement -- kept as-is here to avoid behavior changes.
mat <- data_frame(
  V1 = c(0, 2, 4),
  V2 = c(1, 0, 0),
  V3 = c(0, 3, 0),
  V4 = c(7, 4, 6),
  V5 = c(0, 5, 7))
test_that("ml_pca() agrees with Scala result", {
  test_requires("dplyr")
  # Reference values produced by running the equivalent pipeline directly in
  # Spark/Scala (kept verbatim so the expected numbers are reproducible):
  # import org.apache.spark.ml.feature.PCA
  # import org.apache.spark.ml.linalg.Vectors
  #
  # val data = Array(
  #   Vectors.sparse(5, Seq((1, 1.0), (3, 7.0))),
  #   Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
  #   Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
  # )
  # val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
  # val pca = new PCA()
  #   .setInputCol("features")
  #   .setOutputCol("pcaFeatures")
  #   .setK(3)
  #   .fit(df)
  # val pcaDF = pca.transform(df)
  # val result = pcaDF.select("pcaFeatures")
  # result.collect()
  #
  # res1: Array[org.apache.spark.sql.Row] =
  #   Array([[1.6485728230883807,-4.013282700516296,-5.524543751369388]],
  #   [[-4.645104331781534,-1.1167972663619026,-5.524543751369387]],
  #   [[-6.428880535676489,-5.337951427775355,-5.524543751369389]])
  # Expected projections: one row per input row, columns PC1..PC3.
  s <- data.frame(
    PC1 = c(1.6485728230883807, -4.645104331781534, -6.428880535676489),
    PC2 = c(-4.013282700516296, -1.1167972663619026, -5.337951427775355),
    PC3 = c(-5.524543751369388, -5.524543751369387, -5.524543751369389)
  )
  mat_tbl <- testthat_tbl("mat")
  # Fit PCA with k = 3 in Spark, project the data, and pull back the PCs.
  r <- mat_tbl %>%
    ml_pca(k = 3) %>%
    sdf_project() %>%
    select(dplyr::starts_with("PC")) %>%
    collect() %>%
    as.data.frame()
  expect_equal(s, r)
})
test_that("sdf_project() returns correct number of columns", {
  mat_tbl <- testthat_tbl("mat")
  for (n_components in 1:2) {
    # Fit PCA with the requested number of components and project the data.
    projected <- mat_tbl %>%
      ml_pca(k = n_components) %>%
      sdf_project() %>%
      select(dplyr::starts_with("PC")) %>%
      collect()
    # One output column per requested principal component.
    expect_equal(ncol(projected), n_components)
  }
})
test_that("sdf_project() takes newdata argument", {
  mat_tbl <- testthat_tbl("mat")
  # Projecting the fitted data implicitly or passing it explicitly as
  # `newdata` must yield identical results.
  implicit <- mat_tbl %>% ml_pca(k = 3) %>% sdf_project() %>% collect()
  explicit <- mat_tbl %>% ml_pca(k = 3) %>% sdf_project(mat_tbl) %>% collect()
  expect_equal(implicit, explicit)
})
| /tests/testthat/test-ml-pca.R | permissive | leosouzadias/sparklyr | R | false | false | 2,200 | r | context("pca")
# Tests for sparklyr's PCA wrapper (ml_pca) and sdf_project().
sc <- testthat_spark_connection()
# Small 3 x 5 example matrix shared by all tests below; each test ships it to
# Spark via testthat_tbl("mat").
# NOTE(review): data_frame() is soft-deprecated in dplyr; tibble() is the
# modern replacement -- kept as-is here to avoid behavior changes.
mat <- data_frame(
  V1 = c(0, 2, 4),
  V2 = c(1, 0, 0),
  V3 = c(0, 3, 0),
  V4 = c(7, 4, 6),
  V5 = c(0, 5, 7))
test_that("ml_pca() agrees with Scala result", {
  test_requires("dplyr")
  # Reference values produced by running the equivalent pipeline directly in
  # Spark/Scala (kept verbatim so the expected numbers are reproducible):
  # import org.apache.spark.ml.feature.PCA
  # import org.apache.spark.ml.linalg.Vectors
  #
  # val data = Array(
  #   Vectors.sparse(5, Seq((1, 1.0), (3, 7.0))),
  #   Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
  #   Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
  # )
  # val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
  # val pca = new PCA()
  #   .setInputCol("features")
  #   .setOutputCol("pcaFeatures")
  #   .setK(3)
  #   .fit(df)
  # val pcaDF = pca.transform(df)
  # val result = pcaDF.select("pcaFeatures")
  # result.collect()
  #
  # res1: Array[org.apache.spark.sql.Row] =
  #   Array([[1.6485728230883807,-4.013282700516296,-5.524543751369388]],
  #   [[-4.645104331781534,-1.1167972663619026,-5.524543751369387]],
  #   [[-6.428880535676489,-5.337951427775355,-5.524543751369389]])
  # Expected projections: one row per input row, columns PC1..PC3.
  s <- data.frame(
    PC1 = c(1.6485728230883807, -4.645104331781534, -6.428880535676489),
    PC2 = c(-4.013282700516296, -1.1167972663619026, -5.337951427775355),
    PC3 = c(-5.524543751369388, -5.524543751369387, -5.524543751369389)
  )
  mat_tbl <- testthat_tbl("mat")
  # Fit PCA with k = 3 in Spark, project the data, and pull back the PCs.
  r <- mat_tbl %>%
    ml_pca(k = 3) %>%
    sdf_project() %>%
    select(dplyr::starts_with("PC")) %>%
    collect() %>%
    as.data.frame()
  expect_equal(s, r)
})
test_that("sdf_project() returns correct number of columns", {
  mat_tbl <- testthat_tbl("mat")
  for (n_components in 1:2) {
    # Fit PCA with the requested number of components and project the data.
    projected <- mat_tbl %>%
      ml_pca(k = n_components) %>%
      sdf_project() %>%
      select(dplyr::starts_with("PC")) %>%
      collect()
    # One output column per requested principal component.
    expect_equal(ncol(projected), n_components)
  }
})
# sdf_project() without newdata should project the training data itself,
# i.e. be equivalent to passing the original table explicitly.
test_that("sdf_project() takes newdata argument", {
  mat_tbl <- testthat_tbl("mat")
  expect_equal(mat_tbl %>%
                 ml_pca(k = 3) %>%
                 sdf_project() %>%
                 collect(),
               mat_tbl %>% ml_pca(k = 3) %>%
                 sdf_project(mat_tbl) %>%
                 collect())
})
|
# Read a Sentinel-5P NO2 NetCDF granule and inspect its variables.
# Fixed: library(??ncdf4) -- '??' is the help-search operator and makes
# the library() call fail; the package name must be given bare.
library(ncdf4)
ncin <- nc_open("~/Downloads/S5P_NRTI_L2__NO2____20181017T045013_20181017T045513_05233_01_010100_20181017T053427.nc")
# `%>%` was used without loading magrittr/dplyr; call str() directly.
str(ncin$var)
ncvar_get(ncin, "vals")  # NOTE(review): confirm the variable name 'vals' exists in this product
| /R/Special Projects/sentinel 5.R | permissive | UrbanMatrixOne/rumo | R | false | false | 180 | r | library(??ncdf4)
# Read a Sentinel-5P NO2 NetCDF granule and inspect its variables.
ncin <- nc_open("~/Downloads/S5P_NRTI_L2__NO2____20181017T045013_20181017T045513_05233_01_010100_20181017T053427.nc")
# `%>%` was used without loading magrittr/dplyr; call str() directly.
str(ncin$var)
ncvar_get(ncin, "vals")  # NOTE(review): confirm the variable name 'vals' exists in this product
|
## Tasks:
## ---------------------
## CLASSIFICATION
# Build the list of binary classification tasks selected by the external
# 'config' object (fields: $type in {"oml", "script", "mlr"} and $task).
if (! "config" %in% ls()) stop("No config file given")
suppressMessages(requireNamespace("mlr3oml"))

tasks_classif <- list()
if (config$type == "oml") {
  # Download the OpenML task; failures are tolerated and the task skipped.
  e <- try({
    tsk("oml", task_id = as.integer(as.character(config$task)))
  }, silent = TRUE)
  if (!inherits(e, "try-error")) {
    # keep only two-class tasks whose data is not entirely missing
    if ("twoclass" %in% e$properties) {
      if (! all(is.na(e$data()))) tasks_classif[[as.character(config$task)]] <- e
    }
  } else {
    cat(e)
  }
}
if (config$type == "script") {
  # a load-<task>.R script is expected to define the object 'ts_file'
  source(paste0("load-", as.character(config$task), ".R"))
  tasks_classif[[as.character(config$task)]] <- ts_file
}
if (config$type == "mlr") {
  tasks_classif[[as.character(config$task)]] <- tsk(as.character(config$task))
}
| /src/tasks.R | no_license | schalkdaniel/cacb-benchmark | R | false | false | 756 | r | ## Tasks:
## ---------------------
## CLASSIFICATION
# Build the list of binary classification tasks selected by the external
# 'config' object (fields: $type in {"oml", "script", "mlr"} and $task).
if (! "config" %in% ls()) stop("No config file given")
suppressMessages(requireNamespace("mlr3oml"))

tasks_classif <- list()
if (config$type == "oml") {
  # Download the OpenML task; failures are tolerated and the task skipped.
  e <- try({
    tsk("oml", task_id = as.integer(as.character(config$task)))
  }, silent = TRUE)
  if (!inherits(e, "try-error")) {
    # keep only two-class tasks whose data is not entirely missing
    if ("twoclass" %in% e$properties) {
      if (! all(is.na(e$data()))) tasks_classif[[as.character(config$task)]] <- e
    }
  } else {
    cat(e)
  }
}
if (config$type == "script") {
  # a load-<task>.R script is expected to define the object 'ts_file'
  source(paste0("load-", as.character(config$task), ".R"))
  tasks_classif[[as.character(config$task)]] <- ts_file
}
if (config$type == "mlr") {
  tasks_classif[[as.character(config$task)]] <- tsk(as.character(config$task))
}
|
# Animate several BC administrative / natural-resource layer sets over
# the provincial boundary and upload the resulting GIF.
library(sf)
library(dplyr)
library(ggplot2)
library(gganimate)
library(bcmaps)
library(rmapshaper)
library(lwgeom)

# Simplified, validated provincial boundary used to clip every layer.
bc <- bc_bound() %>%
  ms_simplify() %>%
  st_make_valid()
make_sf_for_bc_animation <- function(sf, name, simplify = TRUE) {
  # Prepare one layer for the animation: optionally simplify its geometry,
  # tag it with a "state" label (the animation frame) and a row id, then
  # clip it to the provincial boundary `bc` and keep only those columns.
  if (simplify) {
    sf <- ms_simplify(sf)
  }
  sf <- mutate(sf, state = name)
  sf <- mutate(sf, id = seq_len(n()))
  sf <- st_intersection(sf, st_geometry(bc))
  sf <- st_make_valid(sf)
  select(sf, state, id)
}
# Named list of layers to animate; the "state" labels become the
# animation frames. (Fixed label typo "Reginal" -> "Regional"; use <-
# rather than = for top-level assignment.)
layer_list <- list(
  rd = combine_nr_rd() %>%
    make_sf_for_bc_animation("Regional Districts"),
  nr = nr_regions() %>%
    make_sf_for_bc_animation("NR Regions"),
  nrd = nr_districts() %>%
    make_sf_for_bc_animation("NR Districts"),
  az = airzones() %>%
    make_sf_for_bc_animation("Airzones", simplify = FALSE),
  # eco = ecosections() %>%
  #   make_sf_for_bc_animation("Ecosections"),
  hz = hydrozones() %>%
    make_sf_for_bc_animation("Hydrozones", simplify = FALSE)
)
# Stack all layers into one sf object; the "state" column distinguishes them.
all <- do.call(rbind, layer_list)

# Grey neighbour polygons underneath, the animated layer on top,
# transitioning between layer sets via the "state" column.
p <- ggplot() +
  geom_sf(data = st_geometry(bc_neighbours()), colour = "white", alpha = 0.5) +
  geom_sf(data = all, aes(fill = id), colour = "white", alpha = 0.5) +
  coord_sf(datum = NA) +
  scale_fill_distiller(palette = "Oranges", guide = "none") +
  theme(panel.background = element_rect(fill = "white"),
        axis.text = element_blank()) +
  # labs(title = "{closest_state}") +
  transition_states(state, state_length = 1, transition_length = 1)

# animate(p)
# Render and save the animation, then upload it to imgur and record the URL.
anim_save("bc_layers.gif", animate(p, width = 1000, height = 800))
cat(knitr::imgur_upload("bc_layers.gif"), file = "imgur_url", append = FALSE)
| /2018/ateucher_sf_bcmaps/src/R/make_bc_gif.R | permissive | bcgov/bcgov-useR | R | false | false | 1,582 | r | library(sf)
# Animate several BC administrative / natural-resource layer sets over
# the provincial boundary (library(sf) is loaded on the preceding line).
library(dplyr)
library(ggplot2)
library(gganimate)
library(bcmaps)
library(rmapshaper)
library(lwgeom)

# Simplified, validated provincial boundary used to clip every layer.
bc <- bc_bound() %>%
  ms_simplify() %>%
  st_make_valid()
make_sf_for_bc_animation <- function(sf, name, simplify = TRUE) {
  # Prepare one layer for the animation: optionally simplify its geometry,
  # tag it with a "state" label (the animation frame) and a row id, then
  # clip it to the provincial boundary `bc` and keep only those columns.
  if (simplify) {
    sf <- ms_simplify(sf)
  }
  sf <- mutate(sf, state = name)
  sf <- mutate(sf, id = seq_len(n()))
  sf <- st_intersection(sf, st_geometry(bc))
  sf <- st_make_valid(sf)
  select(sf, state, id)
}
# Named list of layers to animate; the "state" labels become the
# animation frames. (Fixed label typo "Reginal" -> "Regional"; use <-
# rather than = for top-level assignment.)
layer_list <- list(
  rd = combine_nr_rd() %>%
    make_sf_for_bc_animation("Regional Districts"),
  nr = nr_regions() %>%
    make_sf_for_bc_animation("NR Regions"),
  nrd = nr_districts() %>%
    make_sf_for_bc_animation("NR Districts"),
  az = airzones() %>%
    make_sf_for_bc_animation("Airzones", simplify = FALSE),
  # eco = ecosections() %>%
  #   make_sf_for_bc_animation("Ecosections"),
  hz = hydrozones() %>%
    make_sf_for_bc_animation("Hydrozones", simplify = FALSE)
)
# Stack all layers into one sf object; the "state" column distinguishes them.
all <- do.call(rbind, layer_list)

# Grey neighbour polygons underneath, the animated layer on top,
# transitioning between layer sets via the "state" column.
p <- ggplot() +
  geom_sf(data = st_geometry(bc_neighbours()), colour = "white", alpha = 0.5) +
  geom_sf(data = all, aes(fill = id), colour = "white", alpha = 0.5) +
  coord_sf(datum = NA) +
  scale_fill_distiller(palette = "Oranges", guide = "none") +
  theme(panel.background = element_rect(fill = "white"),
        axis.text = element_blank()) +
  # labs(title = "{closest_state}") +
  transition_states(state, state_length = 1, transition_length = 1)

# animate(p)
# Render and save the animation, then upload it to imgur and record the URL.
anim_save("bc_layers.gif", animate(p, width = 1000, height = 800))
cat(knitr::imgur_upload("bc_layers.gif"), file = "imgur_url", append = FALSE)
|
extract_effects <- function(formula, ..., family = NA,
                            check_response = TRUE) {
  # Extract fixed and random effects from a formula
  #
  # Args:
  #   formula: An object of class "formula" using mostly the syntax
  #            of the \code{lme4} package
  #   ...: Additional objects of class "formula"
  #   family: the model family
  #   check_response: check if the response part is non-empty?
  #
  # Returns:
  #   A named list of the following elements:
  #   fixed: An object of class "formula" that contains the fixed effects
  #          including the dependent variable.
  #   random: A list of formulas containing the random effects per grouping variable.
  #   group: A vector of names of the grouping variables.
  #   weights, se, cens, trials, cat: information on possible addition arguments
  #   all: A formula that contains every variable mentioned in formula and ...
  term_labels <- rename(attr(terms(formula), "term.labels"), " ", "")
  formula <- formula2string(formula)
  # drop addition terms (y | se(x) ~ ...) from the left hand side
  fixed <- gsub("\\|+[^~]*~", "~", formula)
  re_terms <- term_labels[grepl("\\|", term_labels)]
  if (length(re_terms)) {
    re_terms <- paste0("(", re_terms, ")")
    # make sure that + before random terms are also removed
    extended_re_terms <- c(paste0("+", re_terms), re_terms)
    fixed <- rename(fixed, extended_re_terms, "")
  }
  if (substr(fixed, nchar(fixed), nchar(fixed)) == "~") {
    fixed <- paste0(fixed, "1")
  }
  if (grepl("|", x = fixed, fixed = TRUE)) {
    stop("Random effects terms should be enclosed in brackets", call. = FALSE)
  }
  fixed <- formula(fixed)
  if (!is.na(family[[1]]))
    family <- check_family(family)
  if (is.ordinal(family))
    fixed <- update.formula(fixed, . ~ . + 1)
  if (check_response && length(fixed) < 3)
    stop("Invalid formula: response variable is missing", call. = FALSE)
  # extract random effects parts
  form <- lapply(get_matches("\\([^\\|]*", re_terms), function(r)
    formula(paste0("~ ", substr(r, 2, nchar(r)))))
  group <- get_matches("\\|[^\\)]*", re_terms)
  group_formula <- lapply(group, get_group_formula)
  group <- ulapply(group_formula, function(g)
    paste0(all.vars(g), collapse = ":"))
  # "||" marks uncorrelated random effects
  cor <- ulapply(get_matches("\\|[^\\)]*", re_terms),
                 function(g) substr(g, 1, 2) != "||")
  random <- data.frame(group = group, cor = cor,
                       stringsAsFactors = FALSE)
  # ensure that all REs of the same gf are next to each other
  if (nrow(random)) {
    random$form <- form
    random <- random[order(random$group), ]
  }
  x <- nlist(fixed, random)
  # handle addition arguments
  fun <- c("se", "weights", "trials", "cat", "cens", "trunc")
  add_vars <- list()
  if (!is.na(family[[1]])) {
    add <- get_matches("\\|[^~]*~", formula)[1]
    add <- substr(add, 2, nchar(add)-1)
    # families for which each addition argument is allowed
    families <- list(se = c("gaussian", "student", "cauchy"),
                     weights = "all",
                     trials = c("binomial", "zero_inflated_binomial"),
                     cat = c("categorical", "cumulative",
                             "cratio", "sratio", "acat"),
                     cens = c("gaussian", "student", "cauchy",
                              "inverse.gaussian", "binomial",
                              "poisson", "geometric", "negbinomial",
                              "exponential", "weibull", "gamma"),
                     trunc = c("gaussian", "student", "cauchy", "binomial",
                               "poisson", "geometric", "negbinomial",
                               "exponential", "weibull", "gamma"))
    for (f in fun) {
      x[[f]] <- get_matches(paste0(f, "\\([^\\|]*\\)"), add)[1]
      add <- gsub(paste0(f,"\\([^~|\\|]*\\)\\|*"), "", add)
      # fix: the original assigned this if-chain to an unused variable
      # 'add_present'; the dead assignment has been removed
      if (is.na(x[[f]])) {
        x[[f]] <- NULL
      } else if (family$family %in% families[[f]] ||
                 families[[f]][1] == "all") {
        args <- substr(x[[f]], nchar(f) + 2, nchar(x[[f]]) - 1)
        try_numeric <- suppressWarnings(as.numeric(args))
        if (f %in% c("trials", "cat") && !is.na(try_numeric)) {
          x[[f]] <- try_numeric
        } else {
          x[[f]] <- as.formula(paste0("~ .", x[[f]]))
          if (length(all.vars(x[[f]]))) {
            form <- paste("~", paste(all.vars(x[[f]]), collapse = "+"))
            add_vars[[f]] <- as.formula(form)
          }
        }
      } else {
        stop(paste("Argument", f, "in formula is not supported",
                   "by family", family$family), call. = FALSE)
      }
    }
    if (nchar(gsub("\\|", "", add)) > 0 && !is.na(add))
      stop(paste("Invalid addition part of formula.",
                 "Please see the 'Details' section of help(brm)"),
           call. = FALSE)
  }
  # make a formula containing all required variables (element 'all')
  plus_rh <- function(x) {
    # take the right hand side of a formula and add a +
    if (is.formula(x)) {
      paste0("+", Reduce(paste, deparse(x[[2]])))
    } else ""
  }
  formula_list <- c(random$form, group_formula, add_vars, ...)
  new_formula <- ulapply(formula_list, plus_rh)
  new_formula <- paste0("update(", Reduce(paste, deparse(fixed)),
                        ", ~ .", collapse(new_formula), ")")
  x$all <- eval(parse(text = new_formula))
  environment(x$all) <- globalenv()
  # extract response variables
  if (check_response) {
    x$respform <- update(x$all, . ~ 1)
    x$response <- gather_response(x$respform)
    # forked families model an auxiliary response alongside the main one
    if (is.hurdle(family)) {
      x$response <- c(x$response, paste0("hu_", x$response))
    } else if (is.zero_inflated(family)) {
      x$response <- c(x$response, paste0("zi_", x$response))
    } else if (is.2PL(family)) {
      x$response <- c(x$response, paste0("logDisc_", x$response))
    }
    if (length(x$response) > 1) {
      if (!(is.null(x$cens) && is.null(x$se) && is.null(x$trunc))
          && is.linear(family)) {
        stop(paste("Multivariate models currently allow",
                   "only weights as addition arguments"),
             call. = FALSE)
      }
      x$fixed <- update(x$fixed, response ~ .)
      x$all <- update(x$all, response ~ .)
    }
  }
  x
}
extract_time <- function(formula) {
  # Extract time and grouping variables for a correlation structure.
  #
  # Args:
  #   formula: a one sided formula of the form ~ time|group,
  #            typically taken from a cor_brms object
  #
  # Returns:
  #   a list with elements time, group, and all, where all contains a
  #   formula with all variables in formula
  if (is.null(formula)) {
    return(NULL)
  }
  fstring <- gsub(" ", "", Reduce(paste, deparse(formula)))
  # everything between "~" and "|" is the time part
  time_part <- gsub("~|\\|[[:print:]]*", "", fstring)
  time <- all.vars(as.formula(paste("~", time_part)))
  if (length(time) > 1) {
    stop("Autocorrelation structures may only contain 1 time variable",
         call. = FALSE)
  }
  out <- list(time = if (length(time)) time else "")
  group <- get_group_formula(sub("~[^\\|]*", "", fstring))
  out$group <- paste0(all.vars(group), collapse = ":")
  out$all <- formula(paste("~", paste(c("1", time, all.vars(group)),
                                      collapse = "+")))
  out
}
update_formula <- function(formula, data = NULL, addition = NULL,
                           partial = NULL) {
  # Incorporate (deprecated) addition arguments and category specific
  # effects into a model formula.
  #
  # Args:
  #   formula: a model formula
  #   data: a data.frame or NULL
  #   addition: a list with one sided formulas taken from the addition
  #             arguments in brm
  #   partial: a one sided formula containing category specific effects
  #
  # Returns:
  #   an updated formula containing the addition and category specific effects
  lhs_names <- names(addition)
  addition <- lapply(addition, formula2string, rm = 1)
  new_spec <- "."
  if (length(addition)) {
    warning("Argument addition is deprecated. See help(brm) for further details.",
            call. = FALSE)
    # append each addition term as y | fun(arg) on the left hand side
    for (i in seq_along(addition)) {
      new_spec <- paste0(new_spec, " | ", lhs_names[i], "(", addition[[i]], ")")
    }
  }
  new_spec <- paste(new_spec, "~ .")
  if (is.formula(partial)) {
    partial <- formula2string(partial, rm = 1)
    new_spec <- paste(new_spec, "+ partial(", partial, ")")
  }
  # expanding terms() with data allows the '.' symbol in formula
  formula <- formula(terms(formula, data = data))
  if (new_spec == ". ~ .") {
    formula
  } else {
    update.formula(formula, formula(new_spec))
  }
}
get_group_formula <- function(g) {
  # Transform a grouping term into a one-sided formula.
  #
  # Args:
  #   g: a grouping term (possibly prefixed with "|" or "||")
  #
  # Returns:
  #   the formula ~ g if g is valid and else an error;
  #   ~1 if g is empty after stripping the "|" prefix
  grp <- sub("^\\|*", "", g)
  # anything left after removing valid name syntax and ':' is illegal
  leftover <- gsub(":|[^([:digit:]|[:punct:])][[:alnum:]_\\.]*", "", grp)
  if (nchar(leftover) > 0) {
    stop(paste("Illegal grouping term:", grp, "\n",
               "may contain only variable names combined by the symbol ':'"),
         call. = FALSE)
  }
  if (nchar(grp) == 0) {
    return(~1)
  }
  formula(paste("~", grp))
}
check_re_formula <- function(re_formula, old_ranef, data) {
  # validate the re_formula argument as passed to predict and fitted
  #
  # Args:
  #   re_formula: see predict.brmsfit for documentation
  #   old_ranef: named list containing the RE names
  #              of each grouping factor in the original model
  #   data: data supplied by the user
  #
  # Returns:
  #   named list containing the RE names of each grouping factor
  #   as defined in re_formula; or NULL if re_formula is NA or ~ 1
  if (is.null(re_formula)) {
    # NULL means: keep all random effects of the original model
    new_ranef <- old_ranef
  } else if (is.formula(re_formula)) {
    if (!is.data.frame(data)) {
      stop("argument re_formula requires models fitted with brms > 0.5.0",
           call. = FALSE)
    }
    if (length(re_formula) == 3) {
      # a two-sided formula has length 3 (lhs, ~, rhs)
      stop("re_formula must be one-sided", call. = FALSE)
    }
    ee <- extract_effects(re_formula, check_response = FALSE)
    if (length(all.vars(ee$fixed))) {
      stop("fixed effects are not allowed in re_formula", call. = FALSE)
    }
    if (!nrow(ee$random)) {
      # if no RE terms are present in re_formula
      return(NULL)
    }
    # the true family doesn't matter here
    data <- update_data(data, family = NA, effects = ee)
    new_ranef <- gather_ranef(random = ee$random, data = data)
    new_ranef <- combine_duplicates(new_ranef)
    # every requested grouping factor must exist in the original fit
    invalid_gf <- setdiff(names(new_ranef), names(old_ranef))
    if (length(invalid_gf)) {
      stop(paste("Invalid grouping factors detected:",
                 paste(invalid_gf, collapse = ", ")), call. = FALSE)
    }
    # ... and the same holds for the individual random effects
    for (gf in names(new_ranef)) {
      invalid_re <- setdiff(new_ranef[[gf]], old_ranef[[gf]])
      if (length(invalid_re)) {
        stop(paste0("Invalid random effects detected for grouping factor ",
                    gf, ": ", paste(invalid_re, collapse = ", ")),
             call. = FALSE)
      }
    }
  } else if (is.na(re_formula)) {
    # NA means: drop all random effects
    new_ranef <- NULL
  } else {
    stop("invalid re_formula argument", call. = FALSE)
  }
  new_ranef
}
update_re_terms <- function(formula, re_formula = NULL) {
  # remove RE terms in formula and add RE terms of re_formula
  #
  # Args:
  #   formula: model formula to be updated
  #   re_formula: formula containing new RE terms
  #
  # Returns:
  #   a formula with updated RE terms
  # re_formula = NA is treated as ~ 1, i.e. all RE terms are removed
  if (suppressWarnings(anyNA(re_formula))) {
    re_formula <- ~ 1
  }
  if (is.formula(re_formula)) {
    formula <- formula2string(formula)
    re_formula <- formula2string(re_formula)
    # strip all "(... | ...)" terms (with leading/trailing '+') from formula
    fixef_formula <- gsub(paste0("\\([^(\\||~)]*\\|[^\\)]*\\)\\+",
                                 "|\\+\\([^(\\||~)]*\\|[^\\)]*\\)",
                                 "|\\([^(\\||~)]*\\|[^\\)]*\\)"),
                          "", formula)
    # carry over the RE terms found in re_formula
    new_re_terms <- get_matches("\\([^\\|\\)]*\\|[^\\)]*\\)", re_formula)
    new_formula <- paste(c(fixef_formula, new_re_terms), collapse = "+")
    new_formula <- formula(new_formula)
  } else if (is.null(re_formula)) {
    # NULL means: keep the RE terms of the original formula
    new_formula <- formula
  } else {
    stop("invalid re_formula argument", call. = FALSE)
  }
  new_formula
}
amend_terms <- function(x, rm_intercept = FALSE, is_forked = FALSE) {
  # Amend a terms object (or one that can be coerced to it)
  # to be used in get_model_matrix.
  #
  # Args:
  #   x: any R object; if not a formula or terms, NULL is returned
  #   rm_intercept: a flag indicating if the intercept column
  #                 should be removed from the model.matrix.
  #                 Primarily useful for ordinal models
  #   is_forked: a flag indicating if the model is forked into
  #              two parts (e.g., a hurdle model)
  #
  # Returns:
  #   a (possibly amended) terms object or NULL
  # inherits() is base R and preferred over methods::is() for S3 checks
  if (is.formula(x) || inherits(x, "terms")) {
    x <- terms(x)
  } else {
    return(NULL)
  }
  attr(x, "rm_intercept") <- as.logical(rm_intercept)
  if (is_forked) {
    # ensure that interactions with main and spec won't
    # cause automatic cell mean coding of factors
    term_labels <- attr(x, "term.labels")
    if (any(grepl("(^|:)(main|spec)($|:)", term_labels))) {
      if (any(grepl("(^|:)trait($|:)", term_labels))) {
        stop(paste("formula may not contain variable 'trait'",
                   "when using variables 'main' or 'spec'"),
             call. = FALSE)
      }
      if (attr(x, "intercept")) {
        stop(paste("formula may not contain an intercept",
                   "when using variables 'main' or 'spec'"),
             call. = FALSE)
      }
      # force an intercept into the terms object and flag it for later
      # removal so factors keep treatment (not cell mean) coding
      attr(x, "intercept") <- 1
      attr(x, "rm_intercept") <- TRUE
    }
  }
  x
}
gather_response <- function(formula) {
  # Gather the names of the response variables from a formula that
  # contains only the model response.
  #
  # Args:
  #   formula: a formula containing only the model response
  #
  # Returns:
  #   a character vector of names of the response variables (columns)
  stopifnot(is.formula(formula))
  resp_vars <- all.vars(formula)
  if (!length(resp_vars)) {
    stop("formula must contain at least one response variable", call. = FALSE)
  }
  # Build a one-row pseudo data.frame so that model.frame() can evaluate
  # the response expression (e.g. cbind()) without real data.
  pseudo_data <- as.data.frame(setNames(as.list(rep(1, length(resp_vars))),
                                        resp_vars))
  pseudo_resp <- model.response(
    model.frame(formula, data = pseudo_data, na.action = NULL)
  )
  if (is.null(dim(pseudo_resp))) {
    # a vector response corresponds to a single response variable
    response <- resp_vars[1]
  } else if (length(dim(pseudo_resp)) == 2) {
    # a matrix response (e.g. via cbind) defines multiple responses;
    # unnamed columns get generic "response<i>" names
    response <- colnames(pseudo_resp)
    unnamed <- which(!nchar(response))
    if (length(unnamed)) {
      response[unnamed] <- paste0("response", unnamed)
    }
  }
  response
}
gather_ranef <- function(random, data = NULL, ...) {
  # gathers helpful information on the random effects
  #
  # Args:
  #   random: data.frame with columns 'form', 'group' and 'cor'
  #           (the 'random' element returned by extract_effects)
  #   data: data passed to brm after updating
  #   ...: Further arguments passed to get_model_matrix
  #
  # Returns:
  #   A named list with one element per grouping factor; each element
  #   holds the RE (column) names with attributes "levels", "group", "cor"
  Z <- lapply(random$form, get_model_matrix, data = data, ...)
  ranef <- setNames(lapply(Z, colnames), random$group)
  for (i in seq_along(ranef)) {
    # NOTE(review): get() with 'data' as second argument relies on
    # as.environment(data); this assumes random$group[[i]] is a single
    # column name in 'data' -- verify behavior for composite "a:b" groups.
    attr(ranef[[i]], "levels") <-
      levels(as.factor(get(random$group[[i]], data)))
    attr(ranef[[i]], "group") <- names(ranef)[i]
    attr(ranef[[i]], "cor") <- random$cor[[i]]
  }
  ranef
}
check_brm_input <- function(x) {
  # misc checks on brm arguments
  #
  # Args:
  #   x: A named list of arguments passed to brm
  #
  # Returns:
  #   invisible(NULL); called for its side effects (errors / warnings)
  if (x$chains %% x$cluster != 0) {
    stop("chains must be a multiple of cluster", call. = FALSE)
  }
  family <- check_family(x$family)
  # families that are known to be sensitive to initial values / priors
  if (family$family %in% c("exponential", "weibull") &&
      x$inits == "random") {
    warning(paste("Families exponential and weibull may not work well",
                  "with default initial values. \n",
                  " It is thus recommended to set inits = '0'"),
            call. = FALSE)
  }
  if (family$family == "inverse.gaussian") {
    warning(paste("inverse gaussian models require carefully chosen",
                  "prior distributions to ensure convergence of the chains"),
            call. = FALSE)
  }
  if (family$link == "sqrt") {
    warning(paste(family$family, "model with sqrt link may not be",
                  "uniquely identified"), call. = FALSE)
  }
  invisible(NULL)
}
exclude_pars <- function(formula, ranef = TRUE) {
  # List irrelevant parameters NOT to be saved by Stan.
  #
  # Args:
  #   formula: a model formula
  #   ranef: logical; should random effects of each level be saved?
  #
  # Returns:
  #   a character vector of parameters to be excluded
  ee <- extract_effects(formula)
  base_pars <- c("eta", "etap", "eta_2PL", "Eta",
                 "temp_Intercept1", "temp_Intercept",
                 "Lrescor", "Rescor", "Sigma", "LSigma",
                 "p", "q", "e", "E", "res_cov_matrix",
                 "lp_pre", "hs_local", "hs_global")
  # per-grouping-factor helper parameters; r_<i> is dropped as well
  # unless random effects estimates should be kept
  group_pars <- unlist(lapply(seq_along(ee$random$group), function(i) {
    pars <- paste0(c("pre_", "L_", "Cor_"), i)
    if (!ranef) {
      pars <- c(pars, paste0("r_", i))
    }
    pars
  }))
  c(base_pars, group_pars)
}
remove_chains <- function(i, sflist) {
  # remove chains that produce errors leaving the other chains untouched
  #
  # Args:
  #   i: an index between 1 and length(sflist)
  #   sflist: list of stanfit objects as returned by parLapply
  #
  # Returns:
  #   sflist[[i]] if it is a valid stanfit object containing samples,
  #   otherwise NULL (with a warning)
  if (!is(sflist[[i]], "stanfit") || length(sflist[[i]]@sim$samples) == 0) {
    warning(paste("chain", i, "did not contain samples",
                  "and was removed from the fitted model"))
    return(NULL)
  } else {
    return(sflist[[i]])
  }
} | /R/validate.R | no_license | obaidpervaizgill/brms | R | false | false | 17,214 | r | extract_effects <- function(formula, ..., family = NA,
check_response = TRUE) {
# Extract fixed and random effects from a formula
#
# Args:
# formula: An object of class "formula" using mostly the syntax
# of the \code{lme4} package
# ...: Additional objects of class "formula"
# family: the model family
# check_response: check if the response part is non-empty?
#
# Returns:
# A named list of the following elements:
# fixed: An object of class "formula" that contains the fixed effects
# including the dependent variable.
# random: A list of formulas containing the random effects per grouping variable.
# group: A vector of names of the grouping variables.
# weights, se, cens, trials, cat: information on possible addition arguments
# all: A formula that contains every variable mentioned in formula and ...
term_labels <- rename(attr(terms(formula), "term.labels"), " ", "")
formula <- formula2string(formula)
fixed <- gsub("\\|+[^~]*~", "~", formula)
re_terms <- term_labels[grepl("\\|", term_labels)]
if (length(re_terms)) {
re_terms <- paste0("(", re_terms, ")")
# make sure that + before random terms are also removed
extended_re_terms <- c(paste0("+", re_terms), re_terms)
fixed <- rename(fixed, extended_re_terms, "")
}
if (substr(fixed, nchar(fixed), nchar(fixed)) == "~") {
fixed <- paste0(fixed, "1")
}
if (grepl("|", x = fixed, fixed = TRUE)) {
stop("Random effects terms should be enclosed in brackets", call. = FALSE)
}
fixed <- formula(fixed)
if (!is.na(family[[1]]))
family <- check_family(family)
if (is.ordinal(family))
fixed <- update.formula(fixed, . ~ . + 1)
if (check_response && length(fixed) < 3)
stop("Invalid formula: response variable is missing", call. = FALSE)
# extract random effects parts
form <- lapply(get_matches("\\([^\\|]*", re_terms), function(r)
formula(paste0("~ ", substr(r, 2, nchar(r)))))
group <- get_matches("\\|[^\\)]*", re_terms)
group_formula <- lapply(group, get_group_formula)
group <- ulapply(group_formula, function(g)
paste0(all.vars(g), collapse = ":"))
cor <- ulapply(get_matches("\\|[^\\)]*", re_terms),
function(g) substr(g, 1, 2) != "||")
random <- data.frame(group = group, cor = cor,
stringsAsFactors = FALSE)
# ensure that all REs of the same gf are next to each other
if (nrow(random)) {
random$form <- form
random <- random[order(random$group), ]
}
x <- nlist(fixed, random)
# handle addition arguments
fun <- c("se", "weights", "trials", "cat", "cens", "trunc")
add_vars <- list()
if (!is.na(family[[1]])) {
add <- get_matches("\\|[^~]*~", formula)[1]
add <- substr(add, 2, nchar(add)-1)
families <- list(se = c("gaussian", "student", "cauchy"),
weights = "all",
trials = c("binomial", "zero_inflated_binomial"),
cat = c("categorical", "cumulative",
"cratio", "sratio", "acat"),
cens = c("gaussian", "student", "cauchy",
"inverse.gaussian", "binomial",
"poisson", "geometric", "negbinomial",
"exponential", "weibull", "gamma"),
trunc = c("gaussian", "student", "cauchy", "binomial",
"poisson", "geometric", "negbinomial",
"exponential", "weibull", "gamma"))
for (f in fun) {
x[[f]] <- get_matches(paste0(f, "\\([^\\|]*\\)"), add)[1]
add <- gsub(paste0(f,"\\([^~|\\|]*\\)\\|*"), "", add)
add_present <-
if (is.na(x[[f]])) {
x[[f]] <- NULL
} else if (family$family %in% families[[f]] ||
families[[f]][1] == "all") {
args <- substr(x[[f]], nchar(f) + 2, nchar(x[[f]]) - 1)
try_numeric <- suppressWarnings(as.numeric(args))
if (f %in% c("trials", "cat") && !is.na(try_numeric)) {
x[[f]] <- try_numeric
} else {
x[[f]] <- as.formula(paste0("~ .", x[[f]]))
if (length(all.vars(x[[f]]))) {
form <- paste("~", paste(all.vars(x[[f]]), collapse = "+"))
add_vars[[f]] <- as.formula(form)
}
}
} else {
stop(paste("Argument", f, "in formula is not supported",
"by family", family$family), call. = FALSE)
}
}
if (nchar(gsub("\\|", "", add)) > 0 && !is.na(add))
stop(paste("Invalid addition part of formula.",
"Please see the 'Details' section of help(brm)"),
call. = FALSE)
}
# make a formula containing all required variables (element 'all')
plus_rh <- function(x) {
# take the right hand side of a formula and add a +
if (is.formula(x)) {
paste0("+", Reduce(paste, deparse(x[[2]])))
} else ""
}
formula_list <- c(random$form, group_formula, add_vars, ...)
new_formula <- ulapply(formula_list, plus_rh)
new_formula <- paste0("update(",Reduce(paste, deparse(fixed)),
", ~ .", collapse(new_formula), ")")
x$all <- eval(parse(text = new_formula))
environment(x$all) <- globalenv()
# extract response variables
if (check_response) {
x$respform <- update(x$all, . ~ 1)
x$response <- gather_response(x$respform)
if (is.hurdle(family)) {
x$response <- c(x$response, paste0("hu_", x$response))
} else if (is.zero_inflated(family)) {
x$response <- c(x$response, paste0("zi_", x$response))
} else if (is.2PL(family)) {
x$response <- c(x$response, paste0("logDisc_", x$response))
}
if (length(x$response) > 1) {
if (!(is.null(x$cens) && is.null(x$se) && is.null(x$trunc))
&& is.linear(family)) {
stop(paste("Multivariate models currently allow",
"only weights as addition arguments"),
call. = FALSE)
}
x$fixed <- update(x$fixed, response ~ .)
x$all <- update(x$all, response ~ .)
}
}
x
}
extract_time <- function(formula) {
  # Extract time and grouping variables for a correlation structure.
  #
  # Args:
  #   formula: a one sided formula of the form ~ time|group,
  #            typically taken from a cor_brms object
  #
  # Returns:
  #   a list with elements time, group, and all, where all contains a
  #   formula with all variables in formula
  if (is.null(formula)) {
    return(NULL)
  }
  fstring <- gsub(" ", "", Reduce(paste, deparse(formula)))
  # everything between "~" and "|" is the time part
  time_part <- gsub("~|\\|[[:print:]]*", "", fstring)
  time <- all.vars(as.formula(paste("~", time_part)))
  if (length(time) > 1) {
    stop("Autocorrelation structures may only contain 1 time variable",
         call. = FALSE)
  }
  out <- list(time = if (length(time)) time else "")
  group <- get_group_formula(sub("~[^\\|]*", "", fstring))
  out$group <- paste0(all.vars(group), collapse = ":")
  out$all <- formula(paste("~", paste(c("1", time, all.vars(group)),
                                      collapse = "+")))
  out
}
update_formula <- function(formula, data = NULL, addition = NULL,
                           partial = NULL) {
  # Incorporate (deprecated) addition arguments and category specific
  # effects into a model formula.
  #
  # Args:
  #   formula: a model formula
  #   data: a data.frame or NULL
  #   addition: a list with one sided formulas taken from the addition
  #             arguments in brm
  #   partial: a one sided formula containing category specific effects
  #
  # Returns:
  #   an updated formula containing the addition and category specific effects
  lhs_names <- names(addition)
  addition <- lapply(addition, formula2string, rm = 1)
  new_spec <- "."
  if (length(addition)) {
    warning("Argument addition is deprecated. See help(brm) for further details.",
            call. = FALSE)
    # append each addition term as y | fun(arg) on the left hand side
    for (i in seq_along(addition)) {
      new_spec <- paste0(new_spec, " | ", lhs_names[i], "(", addition[[i]], ")")
    }
  }
  new_spec <- paste(new_spec, "~ .")
  if (is.formula(partial)) {
    partial <- formula2string(partial, rm = 1)
    new_spec <- paste(new_spec, "+ partial(", partial, ")")
  }
  # expanding terms() with data allows the '.' symbol in formula
  formula <- formula(terms(formula, data = data))
  if (new_spec == ". ~ .") {
    formula
  } else {
    update.formula(formula, formula(new_spec))
  }
}
get_group_formula <- function(g) {
  # Transform a grouping term into a one-sided formula.
  #
  # Args:
  #   g: a grouping term (possibly prefixed with "|" or "||")
  #
  # Returns:
  #   the formula ~ g if g is valid and else an error;
  #   ~1 if g is empty after stripping the "|" prefix
  grp <- sub("^\\|*", "", g)
  # anything left after removing valid name syntax and ':' is illegal
  leftover <- gsub(":|[^([:digit:]|[:punct:])][[:alnum:]_\\.]*", "", grp)
  if (nchar(leftover) > 0) {
    stop(paste("Illegal grouping term:", grp, "\n",
               "may contain only variable names combined by the symbol ':'"),
         call. = FALSE)
  }
  if (nchar(grp) == 0) {
    return(~1)
  }
  formula(paste("~", grp))
}
check_re_formula <- function(re_formula, old_ranef, data) {
  # validate the re_formula argument as passed to predict and fitted
  #
  # Args:
  #   re_formula: see predict.brmsfit for documentation
  #   old_ranef: named list containing the RE names
  #              of each grouping factor in the original model
  #   data: data supplied by the user
  #
  # Returns:
  #   named list containing the RE names of each grouping factor
  #   as defined in re_formula; or NULL if re_formula is NA or ~ 1
  if (is.null(re_formula)) {
    # NULL means: keep all random effects of the original model
    new_ranef <- old_ranef
  } else if (is.formula(re_formula)) {
    if (!is.data.frame(data)) {
      stop("argument re_formula requires models fitted with brms > 0.5.0",
           call. = FALSE)
    }
    if (length(re_formula) == 3) {
      # a two-sided formula has length 3 (lhs, ~, rhs)
      stop("re_formula must be one-sided", call. = FALSE)
    }
    ee <- extract_effects(re_formula, check_response = FALSE)
    if (length(all.vars(ee$fixed))) {
      stop("fixed effects are not allowed in re_formula", call. = FALSE)
    }
    if (!nrow(ee$random)) {
      # if no RE terms are present in re_formula
      return(NULL)
    }
    # the true family doesn't matter here
    data <- update_data(data, family = NA, effects = ee)
    new_ranef <- gather_ranef(random = ee$random, data = data)
    new_ranef <- combine_duplicates(new_ranef)
    # every requested grouping factor must exist in the original fit
    invalid_gf <- setdiff(names(new_ranef), names(old_ranef))
    if (length(invalid_gf)) {
      stop(paste("Invalid grouping factors detected:",
                 paste(invalid_gf, collapse = ", ")), call. = FALSE)
    }
    # ... and the same holds for the individual random effects
    for (gf in names(new_ranef)) {
      invalid_re <- setdiff(new_ranef[[gf]], old_ranef[[gf]])
      if (length(invalid_re)) {
        stop(paste0("Invalid random effects detected for grouping factor ",
                    gf, ": ", paste(invalid_re, collapse = ", ")),
             call. = FALSE)
      }
    }
  } else if (is.na(re_formula)) {
    # NA means: drop all random effects
    new_ranef <- NULL
  } else {
    stop("invalid re_formula argument", call. = FALSE)
  }
  new_ranef
}
update_re_terms <- function(formula, re_formula = NULL) {
  # remove RE terms in formula and add RE terms of re_formula
  #
  # Args:
  #   formula: model formula to be updated
  #   re_formula: formula containing new RE terms
  #
  # Returns:
  #   a formula with updated RE terms
  # re_formula = NA is treated as ~ 1, i.e. all RE terms are removed
  if (suppressWarnings(anyNA(re_formula))) {
    re_formula <- ~ 1
  }
  if (is.formula(re_formula)) {
    formula <- formula2string(formula)
    re_formula <- formula2string(re_formula)
    # strip all "(... | ...)" terms (with leading/trailing '+') from formula
    fixef_formula <- gsub(paste0("\\([^(\\||~)]*\\|[^\\)]*\\)\\+",
                                 "|\\+\\([^(\\||~)]*\\|[^\\)]*\\)",
                                 "|\\([^(\\||~)]*\\|[^\\)]*\\)"),
                          "", formula)
    # carry over the RE terms found in re_formula
    new_re_terms <- get_matches("\\([^\\|\\)]*\\|[^\\)]*\\)", re_formula)
    new_formula <- paste(c(fixef_formula, new_re_terms), collapse = "+")
    new_formula <- formula(new_formula)
  } else if (is.null(re_formula)) {
    # NULL means: keep the RE terms of the original formula
    new_formula <- formula
  } else {
    stop("invalid re_formula argument", call. = FALSE)
  }
  new_formula
}
amend_terms <- function(x, rm_intercept = FALSE, is_forked = FALSE) {
  # Amend a terms object (or one that can be coerced to it)
  # to be used in get_model_matrix.
  #
  # Args:
  #   x: any R object; if not a formula or terms, NULL is returned
  #   rm_intercept: a flag indicating if the intercept column
  #                 should be removed from the model.matrix.
  #                 Primarily useful for ordinal models
  #   is_forked: a flag indicating if the model is forked into
  #              two parts (e.g., a hurdle model)
  #
  # Returns:
  #   a (possibly amended) terms object or NULL
  # inherits() is base R and preferred over methods::is() for S3 checks
  if (is.formula(x) || inherits(x, "terms")) {
    x <- terms(x)
  } else {
    return(NULL)
  }
  attr(x, "rm_intercept") <- as.logical(rm_intercept)
  if (is_forked) {
    # ensure that interactions with main and spec won't
    # cause automatic cell mean coding of factors
    term_labels <- attr(x, "term.labels")
    if (any(grepl("(^|:)(main|spec)($|:)", term_labels))) {
      if (any(grepl("(^|:)trait($|:)", term_labels))) {
        stop(paste("formula may not contain variable 'trait'",
                   "when using variables 'main' or 'spec'"),
             call. = FALSE)
      }
      if (attr(x, "intercept")) {
        stop(paste("formula may not contain an intercept",
                   "when using variables 'main' or 'spec'"),
             call. = FALSE)
      }
      # force an intercept into the terms object and flag it for later
      # removal so factors keep treatment (not cell mean) coding
      attr(x, "intercept") <- 1
      attr(x, "rm_intercept") <- TRUE
    }
  }
  x
}
gather_response <- function(formula) {
  # Gather the names of the response variables from a formula that
  # contains only the model response.
  #
  # Args:
  #   formula: a formula containing only the model response
  #
  # Returns:
  #   a character vector of names of the response variables (columns)
  stopifnot(is.formula(formula))
  resp_vars <- all.vars(formula)
  if (!length(resp_vars)) {
    stop("formula must contain at least one response variable", call. = FALSE)
  }
  # Build a one-row pseudo data.frame so that model.frame() can evaluate
  # the response expression (e.g. cbind()) without real data.
  pseudo_data <- as.data.frame(setNames(as.list(rep(1, length(resp_vars))),
                                        resp_vars))
  pseudo_resp <- model.response(
    model.frame(formula, data = pseudo_data, na.action = NULL)
  )
  if (is.null(dim(pseudo_resp))) {
    # a vector response corresponds to a single response variable
    response <- resp_vars[1]
  } else if (length(dim(pseudo_resp)) == 2) {
    # a matrix response (e.g. via cbind) defines multiple responses;
    # unnamed columns get generic "response<i>" names
    response <- colnames(pseudo_resp)
    unnamed <- which(!nchar(response))
    if (length(unnamed)) {
      response[unnamed] <- paste0("response", unnamed)
    }
  }
  response
}
gather_ranef <- function(random, data = NULL, ...) {
  # Collect helpful information on the random effects, one list element
  # per grouping factor.
  #
  # Args:
  #   random: the random-effects part of extract_effects() output
  #   data: data passed to brm after updating
  #   ...: further arguments passed to get_model_matrix
  #
  # Returns:
  #   A named list with one element per grouping factor, each holding the
  #   random-effect coefficient names with attributes "levels", "group"
  #   and "cor"
  design_mats <- lapply(random$form, get_model_matrix, data = data, ...)
  ranef <- setNames(lapply(design_mats, colnames), random$group)
  for (j in seq_along(ranef)) {
    attr(ranef[[j]], "levels") <-
      levels(as.factor(get(random$group[[j]], data)))
    attr(ranef[[j]], "group") <- names(ranef)[j]
    attr(ranef[[j]], "cor") <- random$cor[[j]]
  }
  ranef
}
check_brm_input <- function(x) {
  # Run miscellaneous sanity checks on the brm() argument list.
  #
  # Args:
  #   x: A named list (expects elements chains, cluster, family, inits)
  #
  # Returns:
  #   invisible(NULL); called for its side effects (errors and warnings)
  if (x$chains %% x$cluster != 0) {
    stop("chains must be a multiple of cluster", call. = FALSE)
  }
  family <- check_family(x$family)
  fam_name <- family$family
  # survival-type families are known to be sensitive to random inits
  if (fam_name %in% c("exponential", "weibull") && x$inits == "random") {
    warning(paste("Families exponential and weibull may not work well",
                  "with default initial values. \n",
                  " It is thus recommended to set inits = '0'"),
            call. = FALSE)
  }
  if (fam_name == "inverse.gaussian") {
    warning(paste("inverse gaussian models require carefully chosen",
                  "prior distributions to ensure convergence of the chains"),
            call. = FALSE)
  }
  if (family$link == "sqrt") {
    warning(paste(fam_name, "model with sqrt link may not be",
                  "uniquely identified"), call. = FALSE)
  }
  invisible(NULL)
}
exclude_pars <- function(formula, ranef = TRUE) {
  # List irrelevant parameters NOT to be saved by Stan.
  #
  # Args:
  #   formula: a model formula
  #   ranef: logical; should random effects of each level be saved?
  #
  # Returns:
  #   a character vector of parameters to be excluded
  ee <- extract_effects(formula)
  # parameters that are always internal helpers
  base_pars <- c("eta", "etap", "eta_2PL", "Eta",
                 "temp_Intercept1", "temp_Intercept",
                 "Lrescor", "Rescor", "Sigma", "LSigma",
                 "p", "q", "e", "E", "res_cov_matrix",
                 "lp_pre", "hs_local", "hs_global")
  # per-grouping-factor helpers; order matches one group at a time, as before
  group_pars <- unlist(lapply(seq_along(ee$random$group), function(i) {
    pars <- c(paste0("pre_", i), paste0("L_", i), paste0("Cor_", i))
    if (!ranef) {
      pars <- c(pars, paste0("r_", i))
    }
    pars
  }))
  c(base_pars, group_pars)
}
remove_chains <- function(i, sflist) {
  # Remove a chain that produced errors, leaving the other chains untouched.
  #
  # Args:
  #   i: an index between 1 and length(sflist)
  #   sflist: list of stanfit objects as returned by parLapply
  #
  # Returns:
  #   sflist[[i]] when it is a stanfit object containing samples,
  #   otherwise NULL (with a warning)
  fit <- sflist[[i]]
  if (is(fit, "stanfit") && length(fit@sim$samples) > 0) {
    return(fit)
  }
  warning(paste("chain", i, "did not contain samples",
                "and was removed from the fitted model"))
  NULL
}
#' Plot a series forecast distributions of dressed ensembles
#'
#' @param dressed.ens An object of class `dressed.ens`. See ?DressEnsemble for details.
#' @param add logical, default=FALSE. If TRUE, no new plotting device is created and everything is added to an existing device.
#' @param obs A vector of length N, default=NULL. The verifying observations corresponding to the individual ensemble forecasts. If a vector of length N is provided (N = nrow(dressed.ens[["ens"]])), the values are added to the plot as markers.
#' @param plot.ens logical, default=FALSE. If TRUE, the centers of the individual dressing kernels are indicated by markers.
#' @param plot.ker logical, default=FALSE. If TRUE, the individual dressing kernels are plotted.
#' @return none
#' @examples
#' data(eurotempforecast)
#' d.ens <- DressEnsemble(ens)
#' PlotDressedEns(d.ens, add=FALSE, obs=obs, plot.ens=FALSE, plot.ker=TRUE)
#' @seealso DressEnsemble
#' @export
# plot a dressed ensemble
PlotDressedEns <- function(dressed.ens, add=FALSE, obs=NULL, plot.ens=FALSE, plot.ker=FALSE) {
  # Draw one normalized forecast-density "violin" per forecast instance of a
  # dressed ensemble, optionally overlaying observations, the raw ensemble
  # members, and the individual dressing kernels.
  #
  # Changes: 1:N style index sequences replaced by seq_len() (an empty
  # dimension now yields zero iterations instead of c(1, 0)), and sapply()
  # replaced by vapply() with an explicit numeric(N.val) template so the
  # result shape is guaranteed.
  ens <- dressed.ens[["ens"]]        # N.fcst x K matrix of kernel centers
  k.wd <- dressed.ens[["ker.wd"]]    # matching matrix of kernel widths
  N.fcst <- nrow(ens)
  K <- ncol(ens)
  # grid of x values per forecast, covering +/- 3 maximum kernel widths
  N.val <- 100
  x.vals <- t(vapply(seq_len(N.fcst),
              function(i) {
                seq(min(ens[i, ]) - 3*max(k.wd[i, ]),
                    max(ens[i, ]) + 3*max(k.wd[i, ]),
                    length.out=N.val)
              }, numeric(N.val)))
  # calculate forecast distributions on the grid
  fd.vals <- GetDensity(dressed.ens, x.vals)
  # normalize each density to a maximum half-width of 0.7 for plotting
  fd.vals <- t(apply(fd.vals, 1, function(x) x / max(x) * 0.7))
  # initialize axes unless the caller adds to an existing plot
  if (!add) {
    xlims <- c(1, N.fcst+1)
    ylims <- range(ens) + max(k.wd) * c(-3.5,3.5)
    plot(NULL, xlim=xlims, ylim=ylims, axes=FALSE, xlab=NA, ylab=NA)
    axis(side=1, at=pretty(xlims))
    axis(side=2, at=pretty(ylims), las=2)
    box()
  }
  # plot pdfs as closed polygons rooted at integer position i
  for (i in seq_len(N.fcst)) {
    polygon(c(fd.vals[i, ], 0, 0, fd.vals[i, 1]) + i,
            c(x.vals[i, ], tail(x.vals[i, ], 1), x.vals[i, 1], x.vals[i, 1]),
            col=gray(.5))
  }
  # plot obs if provided
  if (!is.null(obs)) {
    stopifnot(length(obs) == nrow(dressed.ens[["ens"]]))
    points(seq_len(N.fcst), obs, pch=15)
  }
  # plot ensemble members if desired
  if (plot.ens) {
    for (i in seq_len(N.fcst)) {
      points(rep(i, K), ens[i, ], pch=16, cex=.5)
    }
  }
  # plot individual dressing kernels if desired: each kernel is evaluated on
  # its own +/- 3 width grid and rescaled to the height of the full density
  if (plot.ker) {
    for (i in seq_len(N.fcst)) {
      for (k in seq_len(K)) {
        d <- dressed.ens
        d[["ens"]] <- ens[i, k, drop=FALSE]
        d[["ker.wd"]] <- k.wd[i, k, drop=FALSE]
        x <- seq(ens[i, k] - 3*k.wd[i, k], ens[i, k] + 3*k.wd[i, k], length.out=50)
        d <- GetDensity(d, matrix(x, nrow=1))
        d <- d / max(d) * max(fd.vals[i, ])
        lines(x=i+d, y=x)
        lines(x=c(i, i+max(d)), y=rep(ens[i,k], 2))
      }
    }
  }
}
| /R/SpecsVerification2/R/PlotDressedEns.R | no_license | sieste/SpecsVerification2 | R | false | false | 2,887 | r | #' Plot a series forecast distributions of dressed ensembles
#'
#' @param dressed.ens An object of class `dressed.ens`. See ?DressEnsemble for details.
#' @param add logical, default=FALSE. If TRUE, no new plotting device is created and everything is added to an existing device.
#' @param obs A vector of length N, default=NULL. The verifying observations corresponding to the individual ensemble forecasts. If a vector of length N is provided (N = nrow(dressed.ens[["ens"]])), the values are added to the plot as markers.
#' @param plot.ens logical, default=FALSE. If TRUE, the centers of the individual dressing kernels are indicated by markers.
#' @param plot.ker logical, default=FALSE. If TRUE, the individual dressing kernels are plotted.
#' @return none
#' @examples
#' data(eurotempforecast)
#' d.ens <- DressEnsemble(ens)
#' PlotDressedEns(d.ens, add=FALSE, obs=obs, plot.ens=FALSE, plot.ker=TRUE)
#' @seealso DressEnsemble
#' @export
# plot a dressed ensemble
PlotDressedEns <- function(dressed.ens, add=FALSE, obs=NULL, plot.ens=FALSE, plot.ker=FALSE) {
  # Draw one forecast-density "violin" per forecast instance of a dressed
  # ensemble. Each density is normalized and drawn as a gray polygon at
  # integer x-position i; observations, raw members and single kernels can
  # be overlaid.
  ens <- dressed.ens[["ens"]]      # N.fcst x K matrix of kernel centers
  k.wd <- dressed.ens[["ker.wd"]]  # matching matrix of kernel widths
  N.fcst <- nrow(ens)
  K <- ncol(ens)
  # init matrices of "x values" and corresponding
  # forecast distributions; each row spans +/- 3 max kernel widths
  N.val <- 100
  x.vals <- t(sapply(1:N.fcst,
              function(i) {
                seq(min(ens[i, ]) - 3*max(k.wd[i, ]),
                    max(ens[i, ]) + 3*max(k.wd[i, ]),
                    length.out=N.val)
              }))
  # calculate forecast distributions on the grid
  fd.vals <- GetDensity(dressed.ens, x.vals)
  # normalize for plotting: each density peaks at half-width 0.7
  fd.vals <- t(apply(fd.vals, 1, function(x) x / max(x) * 0.7))
  # initialize the plot unless adding to an existing device
  if (!add) {
    xlims <- c(1, N.fcst+1)
    ylims <- range(ens) + max(k.wd) * c(-3.5,3.5)
    plot(NULL, xlim=xlims, ylim=ylims, axes=FALSE, xlab=NA, ylab=NA)
    axis(side=1, at=pretty(xlims))
    axis(side=2, at=pretty(ylims), las=2)
    box()
  }
  # plot pdfs as closed polygons (the two extra 0 points close the shape)
  for (i in 1:N.fcst) {
    polygon(c(fd.vals[i, ], 0, 0, fd.vals[i, 1]) + i,
            c(x.vals[i, ], tail(x.vals[i, ], 1), x.vals[i, 1], x.vals[i, 1]),
            col=gray(.5))
  }
  # plot obs if provided (must match the number of forecasts)
  if (!is.null(obs)) {
    stopifnot(length(obs) == nrow(dressed.ens[["ens"]]))
    points(1:N.fcst, obs, pch=15)
  }
  # plot raw ensemble members if desired
  if (plot.ens) {
    for (i in 1:N.fcst) {
      points(rep(i, K), ens[i, ], pch=16, cex=.5)
    }
  }
  # plot individual dressing kernels if desired: each kernel is re-evaluated
  # on its own grid and rescaled to the height of the full density
  if (plot.ker) {
    for (i in 1:N.fcst) {
      for (k in 1:K) {
        d <- dressed.ens
        d[["ens"]] <- ens[i, k, drop=FALSE]
        d[["ker.wd"]] <- k.wd[i, k, drop=FALSE]
        x <- seq(ens[i, k] - 3*k.wd[i, k], ens[i, k] + 3*k.wd[i, k], length.out=50)
        d <- GetDensity(d, matrix(x, nrow=1))
        d <- d / max(d) * max(fd.vals[i, ])
        lines(x=i+d, y=x)
        lines(x=c(i, i+max(d)), y=rep(ens[i,k], 2))
      }
    }
  }
}
|
# - - - - - - - - - - EXPLORATIVE DATA ANALYSIS - PARTNER EDUCATION - - - - - - - - - - #
# Explores the employment indicator (pdwrk) against partner education
# (eduptre) for respondents in South Europe, using survey weights.
# NOTE(review): 'citizienship' is a misspelling of 'citizenship' used
# consistently below; flagged here rather than renamed everywhere.
#### load the dataset ####
# assumes the working directory is the project root -- TODO confirm
work <- read.csv('Data_Cleaning/data_work.csv', header = T)
# Focus on south Europe
work <- work[work$rgn == 'South Europe', ]
work$rgn <- NULL
# Focus on people with a partner.working
work <- work[work$prtnr == 1, ]
work$prtnr <- NULL
# Extract vector containing the countries labels
work$cntry <- as.factor(work$cntry)
work$cntry <- droplevels(work$cntry)
country <- levels(work$cntry)
# Merge native and autochthonous
work$ctzmod[work$ctzmod == 'native'] <- 'autochthonous'
# Convert ctzmod in factor and drop levels
work$ctzmod <- as.factor(work$ctzmod)
work$ctzmod <- droplevels(work$ctzmod)
citizienship <- levels(work$ctzmod)
# Convert eduptre in factor and drop levels
work$edutre <- as.factor(work$edutre)
work$eduptre <- as.factor(work$eduptre)
#### Define the weights design ####
library(survey)
# complex survey design: primary sampling units, strata, analysis weights
design <- svydesign(ids = ~psu, strata = ~stratum, weights = ~anweight, nest = T,
                    data = work)
#### Employment rate by gender in South Europe ####
# weighted cross tables of partner education vs paid-work flag, by gender
partner.education.male.tab <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male'))
partner.education.female.tab <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female'))
# column 2 counts people in paid work; divide by row totals for a rate in %
partner.education.male <- partner.education.male.tab[,2]/rowSums(partner.education.male.tab)*100
partner.education.female <- partner.education.female.tab[,2]/rowSums(partner.education.female.tab)*100
rm(partner.education.male.tab, partner.education.female.tab)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(education,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
par(mfrow = c(1,1))
plot(1:3, partner.education.male, type = 'o', col = 'dodgerblue3', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Occupation rate in South Europe', side=3, line = 1)
axis(1, at=1:3, names(partner.education.male))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.female, type = 'o', col = 'red', lwd = 3)
legend('bottomleft', legend = c('Men', 'Women'),
       fill = c('dodgerblue3','red'),
       border = NA,
       cex = 1.2,
       bty = 'n')
#### eduptre: Difference between countries WITHOUT GENDER ####
# one column of employment rates (by partner-education level) per country
partner.education.south.NOgender <- NULL
for( cnt in country){
  tabnostd <- svytable(~eduptre+pdwrk, subset(design, cntry == cnt))
  tabnostd <- tabnostd[,2]/rowSums(tabnostd)*100
  partner.education.south.NOgender <- cbind(partner.education.south.NOgender, tabnostd)
}
colnames(partner.education.south.NOgender) <- country
rm(tabnostd)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.south,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
par(mfrow = c(1,1))
# one line per partner-education level (rows 1..3), x axis = country
plot(1:5, partner.education.south.NOgender[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Occupation rate in South Europe by country', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.NOgender))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.NOgender[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.NOgender[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.NOgender),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
#### eduptre: Difference between countries WITH GENDER ####
partner.education.south.male <- partner.education.south.female <- NULL
for( cnt in country){
  tabnostd.male <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male' & cntry == cnt))
  tabnostd.male <- tabnostd.male[,2]/rowSums(tabnostd.male)*100
  partner.education.south.male <- cbind(partner.education.south.male, tabnostd.male)
  tabnostd.female <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female' & cntry == cnt))
  tabnostd.female <- tabnostd.female[,2]/rowSums(tabnostd.female)*100
  partner.education.south.female <- cbind(partner.education.south.female, tabnostd.female)
}
colnames(partner.education.south.male) <- country
colnames(partner.education.south.female) <- country
rm(tabnostd.male, tabnostd.female)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.south,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
# side-by-side panels: men (left) and women (right)
par(mfrow = c(1,2))
plot(1:5, partner.education.south.male[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('MEN', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.male))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.male[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.male[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.male),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:5, partner.education.south.female[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('WOMEN', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.female))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.female[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.female[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.female),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
mtext('Occupation rate in South Europe by country', outer = T, side=3, line = -1.5)
#### eduptre: Difference between countries WITH CITIZIENSHIP ####
partner.education.citizienship.NOgender <- NULL
for( ctz in citizienship){
  tabnostd <- svytable(~eduptre+pdwrk, subset(design, ctzmod == ctz))
  tabnostd <- tabnostd[,2]/rowSums(tabnostd)*100
  partner.education.citizienship.NOgender <- cbind(partner.education.citizienship.NOgender, tabnostd)
}
colnames(partner.education.citizienship.NOgender) <- citizienship
partner.education.citizienship.male <- partner.education.citizienship.female <- NULL
for( ctz in citizienship){
  tabnostd.male <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male' & ctzmod == ctz))
  tabnostd.male <- tabnostd.male[,2]/rowSums(tabnostd.male)*100
  partner.education.citizienship.male <- cbind(partner.education.citizienship.male, tabnostd.male)
  tabnostd.female <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female' & ctzmod == ctz))
  tabnostd.female <- tabnostd.female[,2]/rowSums(tabnostd.female)*100
  partner.education.citizienship.female <- cbind(partner.education.citizienship.female, tabnostd.female)
}
colnames(partner.education.citizienship.male) <- citizienship
colnames(partner.education.citizienship.female) <- citizienship
rm(tabnostd, tabnostd.male, tabnostd.female)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.citizienship,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
# three panels: whole population, men, women; x axis = citizenship group
par(mfrow = c(1,3))
plot(1:3, partner.education.citizienship.NOgender[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Whole population', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.NOgender))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.NOgender[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.NOgender[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.NOgender),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:3, partner.education.citizienship.male[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('MEN', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.male))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.male[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.male[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.male),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:3, partner.education.citizienship.female[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('WOMEN', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.female))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.female[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.female[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.female),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
mtext('Occupation rate in South Europe by country', outer = T, side=3, line = -1.5)
rm(cnt, ctz)
#### Relationship with edu3 ####
# weighted contingency table of own education (edutre) vs partner education
svytable(~edutre+eduptre, design)
# eduptre
# edutre 1 2 3
# 1 801.85774 250.52794 80.87445
# 2 282.70706 557.89797 198.47491
# 3 93.63130 266.20877 511.88877
| /Explorative_Data_Analysis/EDA_SE_partner_education.R | no_license | Gilda95/Intersectionality-Bayesian-Analysis | R | false | false | 9,803 | r | # - - - - - - - - - - EXPLORATIVE DATA ANALYSIS - PARTNER EDUCATION - - - - - - - - - - #
# Duplicate copy of the partner-education EDA script (employment vs partner
# education in South Europe, survey-weighted).
#### load the dataset ####
# assumes the working directory is the project root -- TODO confirm
work <- read.csv('Data_Cleaning/data_work.csv', header = T)
# Focus on south Europe
work <- work[work$rgn == 'South Europe', ]
work$rgn <- NULL
# Focus on people with a partner.working
work <- work[work$prtnr == 1, ]
work$prtnr <- NULL
# Extract vector containing the countries labels
work$cntry <- as.factor(work$cntry)
work$cntry <- droplevels(work$cntry)
country <- levels(work$cntry)
# Merge native and autochthonous
work$ctzmod[work$ctzmod == 'native'] <- 'autochthonous'
# Convert ctzmod in factor and drop levels
work$ctzmod <- as.factor(work$ctzmod)
work$ctzmod <- droplevels(work$ctzmod)
citizienship <- levels(work$ctzmod)
# Convert eduptre in factor and drop levels
work$edutre <- as.factor(work$edutre)
work$eduptre <- as.factor(work$eduptre)
#### Define the weights design ####
library(survey)
# complex survey design: primary sampling units, strata, analysis weights
design <- svydesign(ids = ~psu, strata = ~stratum, weights = ~anweight, nest = T,
                    data = work)
#### Employment rate by gender in South Europe ####
# share of respondents in paid work (column 2 of the weighted table), in %
partner.education.male.tab <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male'))
partner.education.female.tab <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female'))
partner.education.male <- partner.education.male.tab[,2]/rowSums(partner.education.male.tab)*100
partner.education.female <- partner.education.female.tab[,2]/rowSums(partner.education.female.tab)*100
rm(partner.education.male.tab, partner.education.female.tab)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(education,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
par(mfrow = c(1,1))
plot(1:3, partner.education.male, type = 'o', col = 'dodgerblue3', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Occupation rate in South Europe', side=3, line = 1)
axis(1, at=1:3, names(partner.education.male))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.female, type = 'o', col = 'red', lwd = 3)
legend('bottomleft', legend = c('Men', 'Women'),
       fill = c('dodgerblue3','red'),
       border = NA,
       cex = 1.2,
       bty = 'n')
#### eduptre: Difference between countries WITHOUT GENDER ####
# one column of employment rates per country
partner.education.south.NOgender <- NULL
for( cnt in country){
  tabnostd <- svytable(~eduptre+pdwrk, subset(design, cntry == cnt))
  tabnostd <- tabnostd[,2]/rowSums(tabnostd)*100
  partner.education.south.NOgender <- cbind(partner.education.south.NOgender, tabnostd)
}
colnames(partner.education.south.NOgender) <- country
rm(tabnostd)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.south,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
par(mfrow = c(1,1))
plot(1:5, partner.education.south.NOgender[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Occupation rate in South Europe by country', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.NOgender))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.NOgender[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.NOgender[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.NOgender),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
#### eduptre: Difference between countries WITH GENDER ####
partner.education.south.male <- partner.education.south.female <- NULL
for( cnt in country){
  tabnostd.male <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male' & cntry == cnt))
  tabnostd.male <- tabnostd.male[,2]/rowSums(tabnostd.male)*100
  partner.education.south.male <- cbind(partner.education.south.male, tabnostd.male)
  tabnostd.female <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female' & cntry == cnt))
  tabnostd.female <- tabnostd.female[,2]/rowSums(tabnostd.female)*100
  partner.education.south.female <- cbind(partner.education.south.female, tabnostd.female)
}
colnames(partner.education.south.male) <- country
colnames(partner.education.south.female) <- country
rm(tabnostd.male, tabnostd.female)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.south,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
# two panels: men (left), women (right)
par(mfrow = c(1,2))
plot(1:5, partner.education.south.male[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('MEN', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.male))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.male[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.male[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.male),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:5, partner.education.south.female[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('WOMEN', side=3, line = 1)
axis(1, at=1:5, colnames(partner.education.south.female))
abline(v=1:5, col = 'lightgrey')
points(1:5, partner.education.south.female[2,], type = 'o', col = 'gold', lwd = 3)
points(1:5, partner.education.south.female[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.south.female),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
mtext('Occupation rate in South Europe by country', outer = T, side=3, line = -1.5)
#### eduptre: Difference between countries WITH CITIZIENSHIP ####
partner.education.citizienship.NOgender <- NULL
for( ctz in citizienship){
  tabnostd <- svytable(~eduptre+pdwrk, subset(design, ctzmod == ctz))
  tabnostd <- tabnostd[,2]/rowSums(tabnostd)*100
  partner.education.citizienship.NOgender <- cbind(partner.education.citizienship.NOgender, tabnostd)
}
colnames(partner.education.citizienship.NOgender) <- citizienship
partner.education.citizienship.male <- partner.education.citizienship.female <- NULL
for( ctz in citizienship){
  tabnostd.male <- svytable(~eduptre+pdwrk, subset(design, gndr == 'male' & ctzmod == ctz))
  tabnostd.male <- tabnostd.male[,2]/rowSums(tabnostd.male)*100
  partner.education.citizienship.male <- cbind(partner.education.citizienship.male, tabnostd.male)
  tabnostd.female <- svytable(~eduptre+pdwrk, subset(design, gndr == 'female' & ctzmod == ctz))
  tabnostd.female <- tabnostd.female[,2]/rowSums(tabnostd.female)*100
  partner.education.citizienship.female <- cbind(partner.education.citizienship.female, tabnostd.female)
}
colnames(partner.education.citizienship.male) <- citizienship
colnames(partner.education.citizienship.female) <- citizienship
rm(tabnostd, tabnostd.male, tabnostd.female)
# PLOT
# colors.south <- c('darkgreen','darkred', 'darkblue', 'darkorange2')
# barplot(partner.education.citizienship,
#         ylim = c(0,100),
#         beside = T,
#         col = colors.south)
# title('education ratio of men and women in South Europe')
# three panels: whole population, men, women
par(mfrow = c(1,3))
plot(1:3, partner.education.citizienship.NOgender[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('Whole population', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.NOgender))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.NOgender[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.NOgender[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.NOgender),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:3, partner.education.citizienship.male[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('MEN', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.male))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.male[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.male[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.male),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
plot(1:3, partner.education.citizienship.female[1,], type = 'o', col = 'red', lwd = 3,
     ylim = c(0,100), xlab = '', ylab = '', xaxt = 'n')
mtext('WOMEN', side=3, line = 1)
axis(1, at=1:3, colnames(partner.education.citizienship.female))
abline(v=1:3, col = 'lightgrey')
points(1:3, partner.education.citizienship.female[2,], type = 'o', col = 'gold', lwd = 3)
points(1:3, partner.education.citizienship.female[3,], type = 'o', col = 'darkgreen', lwd = 3)
legend('bottomleft', legend = rownames(partner.education.citizienship.female),
       fill = c('red', 'gold', 'darkgreen'),
       border = NA,
       cex = 1.2,
       bty = 'n')
mtext('Occupation rate in South Europe by country', outer = T, side=3, line = -1.5)
rm(cnt, ctz)
#### Relationship with edu3 ####
# weighted contingency table of own education (edutre) vs partner education
svytable(~edutre+eduptre, design)
# eduptre
# edutre 1 2 3
# 1 801.85774 250.52794 80.87445
# 2 282.70706 557.89797 198.47491
# 3 93.63130 266.20877 511.88877
|
#' Automatically create a ggplot for objects obtained from fit_gam()
#'
#' @description Takes an object produced by \code{fit_gam()}, and plots the fitted GAM.
#'
#' @param x fitgam object produced by \code{fit_gam}
#' @param conf_int determines whether 95\% confidence intervals will be plotted. The default is \code{conf_int = FALSE}
#' @param color_gam a color can be specified either by name (e.g.: "red") or by hexadecimal code (e.g. : "#FF1234") (default is "steelblue")
#' @param x_stepsize set step size for labels horizontal axis
#' @param show_observations add observed frequency/severity points for each level of the variable for which tariff classes are constructed
#' @param size_points size for points (1 is default)
#' @param color_points change the color of the points in the graph ("black" is default)
#' @param rotate_labels rotate x-labels 45 degrees (this might be helpful for overlapping x-labels)
#' @param remove_outliers do not show observations above this number in the plot. This might be helpful for outliers.
#'
#' @return a ggplot object
#'
#' @import ggplot2
#'
#' @examples
#' \dontrun{
#' library(ggplot2)
#' library(dplyr)
#' fit_gam(MTPL, nclaims = nclaims, x = age_policyholder, exposure = exposure) %>%
#' autoplot(., show_observations = TRUE)
#' }
#'
#' @author Martin Haringa
#'
#' @export
autoplot.fitgam <- function(x, conf_int = FALSE, color_gam = "steelblue", show_observations = FALSE,
                            x_stepsize = NULL, size_points = 1, color_points = "black", rotate_labels = FALSE,
                            remove_outliers = NULL){
  # Build a ggplot of the fitted GAM stored in a 'fitgam' object.
  # Fix: scalar if() conditions now use the short-circuiting && instead of
  # the elementwise &, matching R's conventions for control-flow conditions.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE)
  }
  if (!inherits(x, "fitgam")) {
    stop("autoplot.fitgam requires a fitgam object, use x = object")
  }
  # unpack the fitgam list: predictions, x label, response type, raw points
  prediction <- x[[1]]
  xlab <- x[[2]]
  ylab <- x[[3]]
  points <- x[[4]]
  # warn when the 95% bounds explode; the ribbon is suppressed below
  if(isTRUE(conf_int) && sum(prediction$upr_95 > 1e9) > 0){
    message("The confidence bounds are too large to show.")
  }
  # optionally drop extreme observations before plotting them
  if(is.numeric(remove_outliers) && isTRUE(show_observations)) {
    if (ylab == "frequency") points <- points[points$frequency < remove_outliers, ]
    if (ylab == "severity") points <- points[points$avg_claimsize < remove_outliers, ]
    if (ylab == "burning") points <- points[points$avg_premium < remove_outliers, ]
  }
  gam_plot <- ggplot(data = prediction, aes(x = x, y = predicted)) +
    geom_line(color = color_gam) +
    theme_bw(base_size = 12) +
    {if(isTRUE(conf_int) && sum(prediction$upr_95 > 1e9) == 0) geom_ribbon(aes(ymin = lwr_95, ymax = upr_95), alpha = 0.12)} +
    {if(is.numeric(x_stepsize)) scale_x_continuous(breaks = seq(floor(min(prediction$x)), ceiling(max(prediction$x)), by = x_stepsize))} +
    {if(isTRUE(show_observations) && ylab == "frequency") geom_point(data = points, aes(x = x, y = frequency), size = size_points, color = color_points)} +
    {if(isTRUE(show_observations) && ylab == "severity") geom_point(data = points, aes(x = x, y = avg_claimsize), size = size_points, color = color_points)} +
    {if(isTRUE(show_observations) && ylab == "burning") geom_point(data = points, aes(x = x, y = avg_premium), size = size_points, color = color_points)} +
    {if(ylab == "severity") scale_y_continuous(labels = scales::comma)} +
    {if(isTRUE(rotate_labels)) theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) } +
    labs(y = paste0("Predicted ", ylab), x = xlab)
  return(gam_plot)
}
| /R/plot_fitgam.R | no_license | emailhy/insurancerating | R | false | false | 3,523 | r | #' Automatically create a ggplot for objects obtained from fit_gam()
#'
#' @description Takes an object produced by \code{fit_gam()}, and plots the fitted GAM.
#'
#' @param x fitgam object produced by \code{fit_gam}
#' @param conf_int determines whether 95\% confidence intervals will be plotted. The default is \code{conf_int = FALSE}
#' @param color_gam a color can be specified either by name (e.g.: "red") or by hexadecimal code (e.g. : "#FF1234") (default is "steelblue")
#' @param x_stepsize set step size for labels horizontal axis
#' @param show_observations add observed frequency/severity points for each level of the variable for which tariff classes are constructed
#' @param size_points size for points (1 is default)
#' @param color_points change the color of the points in the graph ("black" is default)
#' @param rotate_labels rotate x-labels 45 degrees (this might be helpful for overlapping x-labels)
#' @param remove_outliers do not show observations above this number in the plot. This might be helpful for outliers.
#'
#' @return a ggplot object
#'
#' @import ggplot2
#'
#' @examples
#' \dontrun{
#' library(ggplot2)
#' library(dplyr)
#' fit_gam(MTPL, nclaims = nclaims, x = age_policyholder, exposure = exposure) %>%
#' autoplot(., show_observations = TRUE)
#' }
#'
#' @author Martin Haringa
#'
#' @export
autoplot.fitgam <- function(x, conf_int = FALSE, color_gam = "steelblue", show_observations = FALSE,
                            x_stepsize = NULL, size_points = 1, color_points = "black", rotate_labels = FALSE,
                            remove_outliers = NULL){
  # Plot method for 'fitgam' objects (see roxygen header above for details).
  # Fix: control-flow conditions are scalar, so && replaces the elementwise
  # & (short-circuits and satisfies R >= 4.3 length-one checks).
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE)
  }
  if (!inherits(x, "fitgam")) {
    stop("autoplot.fitgam requires a fitgam object, use x = object")
  }
  # fitgam layout: [[1]] predictions, [[2]] x label, [[3]] response type,
  # [[4]] observed points per level
  prediction <- x[[1]]
  xlab <- x[[2]]
  ylab <- x[[3]]
  points <- x[[4]]
  # oversized confidence bounds cannot be displayed meaningfully
  if(isTRUE(conf_int) && sum(prediction$upr_95 > 1e9) > 0){
    message("The confidence bounds are too large to show.")
  }
  # filter out extreme observed values when requested
  if(is.numeric(remove_outliers) && isTRUE(show_observations)) {
    if (ylab == "frequency") points <- points[points$frequency < remove_outliers, ]
    if (ylab == "severity") points <- points[points$avg_claimsize < remove_outliers, ]
    if (ylab == "burning") points <- points[points$avg_premium < remove_outliers, ]
  }
  gam_plot <- ggplot(data = prediction, aes(x = x, y = predicted)) +
    geom_line(color = color_gam) +
    theme_bw(base_size = 12) +
    {if(isTRUE(conf_int) && sum(prediction$upr_95 > 1e9) == 0) geom_ribbon(aes(ymin = lwr_95, ymax = upr_95), alpha = 0.12)} +
    {if(is.numeric(x_stepsize)) scale_x_continuous(breaks = seq(floor(min(prediction$x)), ceiling(max(prediction$x)), by = x_stepsize))} +
    {if(isTRUE(show_observations) && ylab == "frequency") geom_point(data = points, aes(x = x, y = frequency), size = size_points, color = color_points)} +
    {if(isTRUE(show_observations) && ylab == "severity") geom_point(data = points, aes(x = x, y = avg_claimsize), size = size_points, color = color_points)} +
    {if(isTRUE(show_observations) && ylab == "burning") geom_point(data = points, aes(x = x, y = avg_premium), size = size_points, color = color_points)} +
    {if(ylab == "severity") scale_y_continuous(labels = scales::comma)} +
    {if(isTRUE(rotate_labels)) theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) } +
    labs(y = paste0("Predicted ", ylab), x = xlab)
  return(gam_plot)
}
|
# config.R for minimal example
# character vector of soil names to show in profile plot
# NOTE(review): presumably drawn in this order in the profile plot -- confirm
# against the report template that consumes this config.
soils <- c('cecil', 'altavista', 'lloyd', 'wickham', 'woodbridge', 'chewacla', 'congaree')
# single series name to fetch siblings for example
siblings_of <- 'Amador' | /inst/reports/templates/minimal/config.R | no_license | ncss-tech/soilReports | R | false | false | 256 | r | # config.R for minimal example
# character vector of soil names to show in profile plot
soils <- c('cecil', 'altavista', 'lloyd', 'wickham', 'woodbridge', 'chewacla', 'congaree')
# single series name to fetch siblings for example
siblings_of <- 'Amador' |
##############################################################################
# Read data: Downloading, unzipping and reading the UCI HAR dataset
##############################################################################
# Downloading
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# mode = "wb" is required for binary downloads: the default text mode
# corrupts zip archives on Windows.
download.file(fileUrl, destfile = "./data/Dataset.zip", mode = "wb")
# Unzip dataSet to /data directory
unzip(zipfile="./data/Dataset.zip", exdir="./data")
# Reading training tables:
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# Reading testing tables:
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
# Reading feature vector (feature index + feature name):
features <- read.table("./data/UCI HAR Dataset/features.txt")
# Reading activity labels (activity code + activity name):
activityLabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
activityLabels[, 2] <- as.character(activityLabels[, 2])
##############################################################################
# Step 1 - Merge the training and the test sets to create one data set
##############################################################################
# Assigning column names:
colnames(x_train) <- features[, 2]
colnames(y_train) <- "activity"
colnames(subject_train) <- "subject"
colnames(x_test) <- features[, 2]
colnames(y_test) <- "activity"
colnames(subject_test) <- "subject"
# BUG FIX: the first column of activity_labels.txt is the numeric activity
# code, not a subject id -- it was previously mislabelled "subject".
# (Downstream code indexes activityLabels positionally, so the rename is safe.)
colnames(activityLabels) <- c("activityId", "activity")
# Merging train data in one set:
mrg_train <- cbind(subject_train, y_train, x_train)
sum(is.na(mrg_train))
# Merging test data in one set:
mrg_test <- cbind(subject_test, y_test, x_test)
sum(is.na(mrg_test))
# Merging all data in one set "mergedata":
mergedata <- rbind(mrg_train, mrg_test)
sum(is.na(mergedata))
write.table(mergedata, "mergedata.txt", row.names = FALSE, quote = FALSE)
View(mergedata)
# cleanup environment: remove intermediate objects no longer needed
remove(x_train, y_train, subject_train, x_test, y_test, subject_test, mrg_train, mrg_test, fileUrl)
##############################################################################
# Step 2 - Extracting only the measurements on the mean and standard deviation
# for each measurement
##############################################################################
# NOTE: this pattern also keeps meanFreq() and angle(...Mean) style columns;
# tighten to "mean\\(\\)|std\\(\\)" if only the plain mean/std signals are wanted.
columnsToKeep <- grepl("subject|activity|mean|std", colnames(mergedata))
mergedataMeanStd <- mergedata[, columnsToKeep]
View(mergedataMeanStd)
##############################################################################
# Step 3 - Use descriptive activity names to name the activities in the data
# set
##############################################################################
mergedataMeanStd$activity <- factor(mergedataMeanStd$activity, levels = activityLabels[, 1], labels = activityLabels[, 2])
mergedataMeanStd$subject <- as.factor(mergedataMeanStd$subject)
dataMeanStd <- mergedataMeanStd
View(dataMeanStd)
##############################################################################
# Step 4 - Label the data set with descriptive variable names
##############################################################################
# searching for mean/std feature names (same set kept in Step 2)
featuresWanted <- grep(".*mean.*|.*std.*", features[, 2])
featuresWanted.names <- features[featuresWanted, 2]
# expand the abbreviated feature-name components
featuresWanted.names <- gsub("-mean", "Mean", featuresWanted.names)
featuresWanted.names <- gsub("-std", "Std", featuresWanted.names)
featuresWanted.names <- gsub("^t", "time", featuresWanted.names)
featuresWanted.names <- gsub("^f", "frequency", featuresWanted.names)
featuresWanted.names <- gsub("Acc", "Accelerometer", featuresWanted.names)
featuresWanted.names <- gsub("Gyro", "Gyroscope", featuresWanted.names)
featuresWanted.names <- gsub("Mag", "Magnitude", featuresWanted.names)
featuresWanted.names <- gsub("BodyBody", "Body", featuresWanted.names)
featuresWanted.names <- gsub("[-()]", "", featuresWanted.names)
colnames(dataMeanStd) <- c("subject", "activity", featuresWanted.names)
View(dataMeanStd)
write.table(dataMeanStd, "dataMeanStd.txt", row.names = FALSE, quote = FALSE)
##############################################################################
# Step 5 - Create a second, independent tidy set with the average of each
# variable for each activity and each subject
##############################################################################
# 5.1 Making second tidy data set
tidy <- aggregate(. ~ subject + activity, dataMeanStd, mean)
tidy <- tidy[order(tidy$subject, tidy$activity), ]
sum(is.na(tidy))
# 5.2 Writing second tidy data set in txt file (row.names spelled out; the
# original relied on partial matching of "row.name")
write.table(tidy, "tidy.txt", row.names = FALSE, quote = FALSE)
View(tidy)
| /run_analysis.R | no_license | elwali6/Peer-graded-Assignment-Getting-and-Cleaning-Data-Course-Project | R | false | false | 5,132 | r |
##############################################################################
# Read data: Downloading,unzipping and reading data
##############################################################################
# Downloading
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
# Unzip dataSet to /data directory
unzip(zipfile="./data/Dataset.zip",exdir="./data")
# Reading trainings tables:
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# Reading testing tables:
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
# Reading feature vector:
features <- read.table('./data/UCI HAR Dataset/features.txt')
# Reading activity labels:
activityLabels = read.table('./data/UCI HAR Dataset/activity_labels.txt')
activityLabels[,2] <- as.character(activityLabels[,2])
##############################################################################
# Step 1 - Merge the training and the test sets to create one data set
##############################################################################
# Assigning column names:
colnames(x_train) <- features[,2]
colnames(y_train) <-"activity"
colnames(subject_train) <- "subject"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activity"
colnames(subject_test) <- "subject"
colnames(activityLabels) <- c('subject','activity')
# Merging train data in one set:
mrg_train <- cbind(subject_train, y_train, x_train)
sum(is.na(mrg_train))
# Merging test data in one set:
mrg_test <- cbind(subject_test,y_test, x_test)
sum(is.na(mrg_test))
# Merging all data in one set "mergedata":
mergedata <- rbind(mrg_train, mrg_test)
sum(is.na(mergedata))
write.table(mergedata, "mergedata.txt", row.names = FALSE, quote = FALSE)
View(mergedata)
# cleanup enviroment. remove unused data
remove(x_train, y_train, subject_train, x_test, y_test, subject_test, mrg_train, mrg_test,fileUrl)
##############################################################################
# Step 2 - Extracting only the measurements on the mean and standard deviation
# for each measurement
##############################################################################
# Extract only the data on mean and standard deviation
columnsToKeep <- grepl("subject|activity|mean|std", colnames(mergedata))
mergedataMeanStd <- mergedata[, columnsToKeep]
View(mergedataMeanStd)
##############################################################################
# Step 3 - Use descriptive activity names to name the activities in the data
# set
##############################################################################
mergedataMeanStd$activity <- factor(mergedataMeanStd$activity, levels = activityLabels[,1], labels = activityLabels[,2])
mergedataMeanStd$subject <- as.factor(mergedataMeanStd$subject)
dataMeanStd<-mergedataMeanStd
View(dataMeanStd)
##############################################################################
# step 4. Using descriptive activity names to name the activities in the data set
##############################################################################
#searching for matches
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
#replacement
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names = gsub("^t", "time", featuresWanted.names)
featuresWanted.names = gsub("^f", "frequency", featuresWanted.names)
featuresWanted.names = gsub("Acc", "Accelerometer", featuresWanted.names)
featuresWanted.names = gsub("Gyro", "Gyroscope", featuresWanted.names)
featuresWanted.names = gsub("Mag", "Magnitude", featuresWanted.names)
featuresWanted.names = gsub("BodyBody", "Body", featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
colnames(dataMeanStd) <- c("subject", "activity", featuresWanted.names)
View(dataMeanStd)
write.table(dataMeanStd, "dataMeanStd.txt", row.names = FALSE, quote = FALSE)
##############################################################################
# Step 5 - Create a second, independent tidy set with the average of each
# variable for each activity and each subject
##############################################################################
#5.1 Making second tidy data set
tidy <- aggregate(. ~subject + activity, dataMeanStd, mean)
tidy <- tidy[order(tidy$subject, tidy$activity),]
sum(is.na(tidy))
#5.2 Writing second tidy data set in txt file
write.table(tidy, "tidy.txt", row.name=FALSE,quote = FALSE)
View(tidy)
|
# Additive two-way ANOVA: poison consumption modelled by location and flavor
# (no interaction term).  RatPoison is assumed to ship with the package's
# datasets -- TODO confirm.
rat.lm <- lm(consumption ~ location + flavor, data = RatPoison)
anova(rat.lm)
# Diagnostic plots 1 (residuals vs fitted), 2 (normal Q-Q) and 5 (residuals
# vs leverage); note `w` relies on partial matching of plot.lm's `which`.
plot(rat.lm, w=c(1, 2, 5))
# Jittered raw observations per flavor, colored by location, overlaid with
# per-location mean +/- standard-error summary lines.
gf_point(consumption ~ flavor, color = ~ location, data = RatPoison,
         width = 0.15, height = 0) %>%
  gf_line(stat = "summary", group = ~ location, fun.data = mean_se)
| /inst/snippet/rat01-fig.R | no_license | rpruim/fastR2 | R | false | false | 284 | r | rat.lm <- lm(consumption ~ location + flavor, data = RatPoison)
anova(rat.lm)
plot(rat.lm, w=c(1, 2, 5))
gf_point(consumption ~ flavor, color = ~ location, data = RatPoison,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ location, fun.data = mean_se)
|
library(tidyverse)
library(lubridate)
library(ggplot2)
library(plyr)
library(scales)
# Read in data that has already been filtered through full_gloss, then format dates.
dat <- read.csv("data.csv", stringsAsFactors = FALSE)
dat$DATEposted <- as.Date(dat$DATEposted)
dat <- as_tibble(dat)
# Add new categories from glossary terms here
antisemitic <- c("Israel", "isreal", "izreal", "joos", "sionist", "zog", "j3w$", "jevv", "shlomo", "israhell", "kike", "pissrael")
as_virulent <- c("zionazi", "shlomo")
# Filter for specific rhetoric using relevant gloss terms--when adding a new target of concern per client's protected group designation, grey out unneeded lists and update as needed.
dat1 <- dat %>%
  select(DATEposted, term) %>%
  unnest(term) %>%
  filter(term %in% antisemitic) # Replace object after %in% with list of terms needed.
# Changes comment publication date to YYYY-MM format. Adjust as needed for daily, yearly, or hourly analysis.
dat$DATEposted <- floor_date(dat$DATEposted, unit="week")
dat1$DATEposted <- floor_date(dat1$DATEposted, unit="week")
# NOTE(review): plyr is attached after the tidyverse at the top of this
# script, so the count() calls below resolve to plyr::count (the vars=
# interface) -- this behavior depends on library load order.
# Frequency of each term's use in a given month.
termfreq <- count(dat1, vars=c("DATEposted", "term"))
# Frequency of total occurrances of targeted vocabulary in a given month.
monthfreq <- count(dat$DATEposted)
monthfreq1 <- count(dat1$DATEposted)
# NOTE(review): the division below assumes monthfreq and monthfreq1 cover the
# same weeks in the same order; a week with zero matching terms would silently
# misalign the ratio -- consider joining on the date instead.  TODO confirm.
# Use for tracking density of comments in a given category--un-grey lines as appropriate
dens <- monthfreq1$freq/monthfreq$freq
density <- cbind(monthfreq, dens)
# Plots a frequency chart based on monthly totals.
ggplot(density, aes(x, dens)) +
  geom_point() +
  ggtitle("Long-term Density of Anti-Semitic Sentiment") +
  xlab("Date") +
  ylab("Density of Term Usage") +
  scale_x_date(breaks = date_breaks("1 month")) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  geom_smooth()
# Plots a bar graph based on weekly totals.
ggplot(density, aes(x, dens)) +
  geom_bar(stat="identity") +
  ggtitle("Long-term Density of Anti-Semitic Sentiment") +
  xlab("Date") +
  ylab("Density of Term Usage") +
  scale_x_date(breaks = date_breaks("1 week")) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) #+
  #geom_smooth()
# Plots a frequency chart that specifies term usage.
ggplot(termfreq, aes(DATEposted, freq)) +
  geom_point() +
  geom_text(aes(label=term)) +
  geom_smooth()
write.csv(dat, file="data_FINAL.csv")
| /custom_wordlist_trend_analysis.R | no_license | secondenrique/data_scripts | R | false | false | 2,446 | r | library(tidyverse)
library(lubridate)
library(ggplot2)
library(plyr)
library(scales)
# Read in data that has already been filtered through full_gloss, then format dates.
dat <- read.csv("data.csv", stringsAsFactors = FALSE)
dat$DATEposted <- as.Date(dat$DATEposted)
dat <- as_tibble(dat)
# Add new categories from glossary terms here
antisemitic <- c("Israel", "isreal", "izreal", "joos", "sionist", "zog", "j3w$", "jevv", "shlomo", "israhell", "kike", "pissrael")
as_virulent <- c("zionazi", "shlomo")
# Filter for specific rhetoric using relevant gloss terms--when adding a new target of concern per client's protected group designation, grey out unneeded lists and update as needed.
dat1 <- dat %>%
select(DATEposted, term) %>%
unnest(term) %>%
filter(term %in% antisemitic) # Replace object after %in% with list of terms needed.
# Changes comment publication date to YYYY-MM format. Adjust as needed for daily, yearly, or hourly analysis.
dat$DATEposted <- floor_date(dat$DATEposted, unit="week")
dat1$DATEposted <- floor_date(dat1$DATEposted, unit="week")
# Frequency of each term's use in a given month.
termfreq <- count(dat1, vars=c("DATEposted", "term"))
# Frequency of total occurrances of targeted vocabulary in a given month.
monthfreq <- count(dat$DATEposted)
monthfreq1 <- count(dat1$DATEposted)
# Use for tracking density of comments in a given category--un-grey lines as appropriate
dens <- monthfreq1$freq/monthfreq$freq
density <- cbind(monthfreq, dens)
# Plots a frequency chart based on monthly totals.
ggplot(density, aes(x, dens)) +
geom_point() +
ggtitle("Long-term Density of Anti-Semitic Sentiment") +
xlab("Date") +
ylab("Density of Term Usage") +
scale_x_date(breaks = date_breaks("1 month")) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
geom_smooth()
# Plots a bar graph based on weekly totals.
ggplot(density, aes(x, dens)) +
geom_bar(stat="identity") +
ggtitle("Long-term Density of Anti-Semitic Sentiment") +
xlab("Date") +
ylab("Density of Term Usage") +
scale_x_date(breaks = date_breaks("1 week")) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) #+
#geom_smooth()
# Plots a frequency chart that specifies term usage.
ggplot(termfreq, aes(DATEposted, freq)) +
geom_point() +
geom_text(aes(label=term)) +
geom_smooth()
write.csv(dat, file="data_FINAL.csv")
|
library(tidyverse)
library(rvest)
library(glue)
# League configuration used throughout the scraping/scoring pipeline.
settings <- list(
  # roster: number of teams and starting slots per position
  teams = 12,
  qbs = 1,
  wrs = 2,
  rbs = 2,
  tes = 1,
  # scoring: yardage entries are yards-per-point divisors,
  # everything else is points per event
  passing_yards = 25,
  passing_td = 6,
  int = -2,
  rushing_yards = 10,
  rushing_td = 6,
  reception = 1,
  receiving_yards = 10,
  receiving_td = 6,
  # NOTE(review): return_td and off_fmbl_return_td are defined but never read
  # by calculate_league_scoring -- confirm whether they should be scored.
  return_td = 6,
  two_pt = 2,
  fmbl = -2,
  off_fmbl_return_td = 6
)
# Fetch the Pro-Football-Reference fantasy stats page for one season and
# return it as parsed HTML (an xml2 document).
scrape_season_html <- function(season = 2019) {
  page_url <- paste0(
    "https://www.pro-football-reference.com/years/", season,
    "/fantasy.htm#fantasy::none"
  )
  read_html(page_url)
}
# Collapse the two header rows of a raw Pro-Football-Reference table into
# single column names.  Rows 1-2 hold a grouped header (row 1: category such
# as "Passing"; row 2: stat such as "Yds"); they are transposed, glued as
# "<category>_<stat>", then trimmed to the leading word token
# (e.g. "Passing_Yds").
get_column_names <- function(tbl) {
  cols <- tbl %>%
    slice(1:2) %>%
    rownames_to_column() %>%
    pivot_longer(-rowname) %>%
    pivot_wider(names_from = rowname, values_from = value) %>%
    unite(`1`, `2`, col = "col_names", sep = "_") %>%
    select(col_names)
  # keep only the leading "Word" / "Word_Word0" prefix, dropping punctuation
  cols <- cols$col_names %>%
    str_extract("[A-Za-z]+\\_?[A-Za-z0-9]*")
  return(cols)
}
# Strip site decorations (e.g. trailing "*" / "+") from player names by
# extracting the first "First Last" token pair.  Entries with no match
# (including suffix-only oddities) become NA.
clean_names <- function(names) {
  return(
    names %>%
      str_extract("[A-Za-z\'\\.\\-]+\\s[A-Za-z\\.\\-]+")
  )
}
# Convert raw season HTML into a data frame: extract the first <table>,
# promote the two header rows to column names, drop those header rows plus
# the repeated mid-table "Player" header lines, and clean player names.
format_season <- function(season_html) {
  season_l <- season_html %>%
    html_nodes("table") %>%
    html_table(header = FALSE)
  # the fantasy page carries a single stats table
  season_tbl <- season_l[[1]]
  colnames(season_tbl) <- season_tbl %>% get_column_names
  season_tbl <- season_tbl %>%
    slice(3:n()) %>%            # rows 1-2 were the header rows
    filter(Player != "Player")  # repeated in-table header rows
  season_tbl$Player <- season_tbl$Player %>% clean_names
  return(season_tbl)
}
# Compute each player's fantasy score under the league's scoring settings and
# attach it as the league_score column.  Yardage settings are yards-per-point
# divisors; every other setting is a per-event point multiplier.
calculate_league_scoring <- function(season, settings) {
  season$league_score <- with(season,
    Passing_Yds / settings$passing_yards +
      Passing_TD * settings$passing_td +
      Passing_Int * settings$int +
      Rushing_Yds / settings$rushing_yards +
      Rushing_TD * settings$rushing_td +
      Receiving_Rec * settings$reception +
      Receiving_Yds / settings$receiving_yards +
      Receiving_TD * settings$receiving_td +
      Fumbles_FL * settings$fmbl +
      Scoring_2PM * settings$two_pt +
      Scoring_2PP * settings$two_pt
  )
  season
}
# Value Based Drafting: each player's league_score minus the score of the
# "replacement" player at the same position -- the player ranked at
# (teams * starting slots) within that position.  Replaces four copy-pasted
# filter/mutate stanzas with a single loop over positions; rows are bound in
# the original QB, RB, WR, TE order.
calculate_league_vbd <- function(season, settings) {
  slots <- c(QB = settings$qbs, RB = settings$rbs, WR = settings$wrs, TE = settings$tes)
  scored <- lapply(names(slots), function(pos) {
    replacement_rank <- settings$teams * slots[[pos]]
    # replacement-level player for this position
    replacement <- season %>%
      filter(FantPos == pos & league_pos_rank == replacement_rank)
    season %>%
      filter(FantPos == pos) %>%
      mutate(
        league_vbd = league_score - replacement$league_score
      )
  })
  season <- bind_rows(scored)
  return(season)
}
# Add an overall rank (league_rank) and a within-position rank
# (league_pos_rank), both by descending league_score.
calculate_league_rank <- function(season) {
  season <- season %>%
    mutate(
      league_rank = row_number(desc(league_score))
    )
  # NOTE(review): the result is returned still grouped by FantPos; downstream
  # calculate_league_vbd tolerates this, but an explicit ungroup() would be
  # safer -- confirm before relying on the returned grouping.
  season <- season %>%
    group_by(FantPos) %>%
    mutate(
      league_pos_rank = row_number(desc(league_score))
    )
  return(season)
}
# End-to-end pipeline: scrape one season from Pro-Football-Reference and
# return a data frame with league score, ranks, VBD and a season column.
# Uses the module-level `settings` list for scoring and roster slots.
scrape_season <- function(season = 2019) {
  season_data <- scrape_season_html(season) %>%
    format_season %>%
    type_convert %>%
    # NOTE(review): replace_na(0) is applied across *every* column, including
    # character columns -- confirm tidyr accepts the type mismatch here.
    mutate(
      across(everything(), ~replace_na(.x, 0))
    ) %>%
    calculate_league_scoring(settings) %>%
    calculate_league_rank %>%
    calculate_league_vbd(settings)
  season_data$season <- season
  return(season_data)
}
| /helper_functions.R | no_license | brianlawrence2/ffb | R | false | false | 3,871 | r | library(tidyverse)
library(rvest)
library(glue)
settings <- list(
teams = 12,
qbs = 1,
wrs = 2,
rbs = 2,
tes = 1,
passing_yards = 25,
passing_td = 6,
int = -2,
rushing_yards = 10,
rushing_td = 6,
reception = 1,
receiving_yards = 10,
receiving_td = 6,
return_td = 6,
two_pt = 2,
fmbl = -2,
off_fmbl_return_td = 6
)
scrape_season_html <- function(season = 2019) {
url = glue("https://www.pro-football-reference.com/years/{season}/fantasy.htm#fantasy::none", sep = "")
season_stats <- read_html(url)
return(season_stats)
}
get_column_names <- function(tbl) {
cols <- tbl %>%
slice(1:2) %>%
rownames_to_column() %>%
pivot_longer(-rowname) %>%
pivot_wider(names_from = rowname, values_from = value) %>%
unite(`1`, `2`, col = "col_names", sep = "_") %>%
select(col_names)
cols <- cols$col_names %>%
str_extract("[A-Za-z]+\\_?[A-Za-z0-9]*")
return(cols)
}
clean_names <- function(names) {
return(
names %>%
str_extract("[A-Za-z\'\\.\\-]+\\s[A-Za-z\\.\\-]+")
)
}
format_season <- function(season_html) {
season_l <- season_html %>%
html_nodes("table") %>%
html_table(header = FALSE)
season_tbl <- season_l[[1]]
colnames(season_tbl) <- season_tbl %>% get_column_names
season_tbl <- season_tbl %>%
slice(3:n()) %>%
filter(Player != "Player")
season_tbl$Player <- season_tbl$Player %>% clean_names
return(season_tbl)
}
calculate_league_scoring <- function(season, settings) {
season$league_score <- season$Passing_Yds / settings$passing_yards +
season$Passing_TD * settings$passing_td +
season$Passing_Int * settings$int +
season$Rushing_Yds / settings$rushing_yards +
season$Rushing_TD * settings$rushing_td +
season$Receiving_Rec * settings$reception +
season$Receiving_Yds / settings$receiving_yards +
season$Receiving_TD * settings$receiving_td +
season$Fumbles_FL * settings$fmbl +
season$Scoring_2PM * settings$two_pt +
season$Scoring_2PP * settings$two_pt
return(season)
}
calculate_league_vbd <- function(season, settings) {
rep_qb <- season %>%
filter(FantPos == 'QB' & league_pos_rank == settings$teams * settings$qbs)
rep_rb <- season %>%
filter(FantPos == 'RB' & league_pos_rank == settings$teams * settings$rbs)
rep_wr <- season %>%
filter(FantPos == 'WR' & league_pos_rank == settings$teams * settings$wrs)
rep_te <- season %>%
filter(FantPos == 'TE' & league_pos_rank == settings$teams * settings$tes)
season_qb <- season %>%
filter(FantPos == 'QB') %>%
mutate(
league_vbd = league_score - rep_qb$league_score
)
season_rb <- season %>%
filter(FantPos == 'RB') %>%
mutate(
league_vbd = league_score - rep_rb$league_score
)
season_wr <- season %>%
filter(FantPos == 'WR') %>%
mutate(
league_vbd = league_score - rep_wr$league_score
)
season_te <- season %>%
filter(FantPos == 'TE') %>%
mutate(
league_vbd = league_score - rep_te$league_score
)
season <- season_qb %>%
bind_rows(season_rb, season_wr, season_te)
return(season)
}
calculate_league_rank <- function(season) {
season <- season %>%
mutate(
league_rank = row_number(desc(league_score))
)
season <- season %>%
group_by(FantPos) %>%
mutate(
league_pos_rank = row_number(desc(league_score))
)
return(season)
}
scrape_season <- function(season = 2019) {
season_data <- scrape_season_html(season) %>%
format_season %>%
type_convert %>%
mutate(
across(everything(), ~replace_na(.x, 0))
) %>%
calculate_league_scoring(settings) %>%
calculate_league_rank %>%
calculate_league_vbd(settings)
season_data$season <- season
return(season_data)
}
|
#' calcMinkowskiDistanceVotersAlts
#' Calculate distance using minkowskiDistanceSets(), an Rcpp-generated function.
#'
#' Takes a voters data frame and an alternatives data frame, strips out the
#' relevant columns, and feeds them to minkowskiDistanceSets().
#' @param votersCalcMinkowskiDistanceVotersAlts A voters data.frame.  Variable order does not matter, but it must contain at least the following named variables:
#'
#' ID: A numeric identifier unique to the voter.
#' xLocation: The x coordinate of the voter's ideal point.
#' yLocation: The y coordinate of the voter's ideal point.
#' minkoOrder: The Minkowski order of the voter's Minkowski-metric utility function. 1 is city block, 2 is Euclidean, 100 approximates Chebyshev. See ?Minkowski.
#' xSalience: The salience of the x dimension for the voter.  The dimension with the lowest salience is normalized to 1 and serves as the numeraire; the salience of the other dimension is measured in units of the numeraire.
#' ySalience: The salience of the y dimension for the voter, measured the same way.
#' lossOrder: The loss order for the voter's utility function.  See the parameter lossOrderVector in ?minkowskiUtilitySets().
#'
#' @param alternativesCalcMinkowskiDistanceVotersAlts An alternatives data frame generated by genAlternatives() or in the same format.
#' @return A numVoters by numAlternatives matrix containing the Minkowski distance of each voter from each alternative.
#' @export
calcMinkowskiDistanceVotersAlts <- function(votersCalcMinkowskiDistanceVotersAlts, alternativesCalcMinkowskiDistanceVotersAlts){
    voters <- votersCalcMinkowskiDistanceVotersAlts
    alts <- alternativesCalcMinkowskiDistanceVotersAlts
    # Assemble the matrices the Rcpp routine expects.
    ideal_points <- as.matrix(dplyr::select(voters, xLocation, yLocation))
    alt_points <- as.matrix(dplyr::select(alts, xLocation, yLocation))
    minko_orders <- as.matrix(dplyr::select(voters, minkoOrder))
    saliences <- as.matrix(dplyr::select(voters, xSalience, ySalience))
    distance_matrix <- minkowskiDistanceSets(idealsMatrix = ideal_points,
                                             altsMatrix = alt_points,
                                             minkoOrderVector = minko_orders,
                                             salienceMatrix = saliences)
    # Label rows by voter ID and columns by alternative ID.
    rownames(distance_matrix) <- voters$ID
    colnames(distance_matrix) <- as.vector(alts$ID)
    distance_matrix
}
| /R/calcMinkowskiDistanceVotersAlts.R | no_license | robiRagan/voteR | R | false | false | 3,303 | r | #' calcMinkowskiDistanceVotersAlts
#' Calulate distance using minkowskiDistanceSets() which is an Rcpp generated function.
#'
#' Takes a voters dataframe and an alternatives dataframe and strips out the relevant parts and feeds them to minkowskiDistanceSets() which is an Rcpp generated function.
#' @param votersCalcMinkowskiDistanceVotersAlts The voters data frame must have a specific format, and it must be an R data.frame object. There must be at least these 6 variables and they must have the following names. The order of the variables in the data.frame is not important as long as the variables have the proper names.
#'
#' ID: A numeric identifier unique to the voter.
#' xLocation: The x coordinate of the voter's ideal point.
#' yLocation: The y coordinate of the voter's ideal point.
#' minkoOrder: The Minkowski order of the voters MInkowski metric based utility function. = 1, is City Block. = 2 is Euclidian and 100 = is See ?Minkowski.
#' xSalience: The salience of the x dimension for the voter. The dimension with the lowest salience is normalized to 1 and it is the numerarier, the salience of other dimension is measured in units of the numerarire.
#' ySalience: The salience of the y dimension for the voter. he dimension with the lowest salience is normalized to 1 and it is the numerarier, the salience of other dimension is measured in units of the numerarire.
#' lossOrder: The loss order for the agents utility function. See the parameter lossOrderVector in ?minkowskiUtilitySets().
#'
#' @param alternativesCalcMinkowskiDistanceVotersAlts alternatives data frame generated from genAlternatives() or in the same format.
#' @return A numVoters by numAlternitives matrix containing the minkowski distance each voter is from each alternitive.
#' @export
calcMinkowskiDistanceVotersAlts <- function(votersCalcMinkowskiDistanceVotersAlts, alternativesCalcMinkowskiDistanceVotersAlts){
# ## FOR TESTING ###
# votersCalcMinkowskiDistanceVotersAlts <- data.frame(pointType = rep(x = "voter", 3), ID = c("V-1", "V-2", "V-3"), xLocation=c(-1/8, 7/8, 4/8), yLocation=c(3/8, 4/8, -3/8), minkoOrder=c(1, 2, 100), xSalience = c(1, 1, 1), ySalience = c(1, 1, 1), lossOrder = c(2, 2, 2) )
#
# alternativesCalcMinkowskiDistanceVotersAlts <- data.frame(pointType = rep(x = "alternative", 3), ID = c("A-1", "A-2", "A-3"), xLocation=c(-3/8, 1/8, 2/8), yLocation=c(-3/8, 1/8, 7/8) )
# ## FOR TESTING ##
minkoDistOut <- minkowskiDistanceSets( idealsMatrix = as.matrix (dplyr::select(votersCalcMinkowskiDistanceVotersAlts, xLocation, yLocation) ),
altsMatrix = as.matrix( dplyr::select(alternativesCalcMinkowskiDistanceVotersAlts, xLocation, yLocation) ),
minkoOrderVector = as.matrix( dplyr::select(votersCalcMinkowskiDistanceVotersAlts, minkoOrder) ),
salienceMatrix = as.matrix( dplyr::select(votersCalcMinkowskiDistanceVotersAlts, xSalience, ySalience) )
)
rownames(minkoDistOut) <- votersCalcMinkowskiDistanceVotersAlts$ID
colnames(minkoDistOut) <- as.vector(alternativesCalcMinkowskiDistanceVotersAlts$ID)
minkoDistOut
}
|
# Auto-generated fuzzing regression case (AFL/valgrind harness input); the
# degenerate values (NULL m, 0 repetitions, denormal doubles) are intentional.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108271e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Invoke the internal sampler with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615771614-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108271e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Trump Insulting Tweets -----
library(foreign)
library(psych)
library(lsa)
# Interactively pick trump_insult_tweets_new.csv
trumpinsult_tweet <- read.csv(file.choose(), header = TRUE, encoding="UTF-8", stringsAsFactors=FALSE) # file.choose()
# trump_insult_tweets_new.csv
dim(trumpinsult_tweet)
head(trumpinsult_tweet)
class(trumpinsult_tweet$date)
trumpinsult_tweet$date <- as.Date(trumpinsult_tweet$date,"%d/%m/%Y")
names(trumpinsult_tweet)
# DataframeSource() requires columns named doc_id and text
names(trumpinsult_tweet)[names(trumpinsult_tweet) == 'X'] <- 'doc_id'
names(trumpinsult_tweet)[names(trumpinsult_tweet) == 'tweet'] <- 'text'
library(NLP)
library(tm)
tweets_trump_docs <- subset(trumpinsult_tweet, select = c("doc_id", "text"))
tweets_trump_VCorpus <- VCorpus(DataframeSource(tweets_trump_docs))
# to inspect the contents
tweets_trump_VCorpus[[1]]$content
# to convert to all lower cases
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(tolower)) # tm_map: to apply transformation functions (also denoted as mappings) to corpora
# to remove URLs
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(removeURL))
# to remove anything other than English letters and whitespace
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(removeNumPunct))
# to remove stopwords, (note: one can define their own myStopwords)
stopwords("english")
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, removeWords, stopwords("english"))
# to remove extra whitespaces
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, stripWhitespace)
# to remove punctuations
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, removePunctuation)
# Stemming: to remove plurals and action suffixes (please use it with caution: some hypertextual elements such as @mentions, #hashtags, and URLs are removed)
library(SnowballC)
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, stemDocument)
tweets_trump_VCorpus[[1]]$content
# TF and TF-IDF
# converting to Document-Term Matrix (DTM)
tweets_trump_dtm <- DocumentTermMatrix(tweets_trump_VCorpus, control = list(removePunctuation = TRUE, stopwords=TRUE))
tweets_trump_dtm
# A high sparsity means terms are not repeated often among different documents.
inspect(tweets_trump_dtm) # a sample of the matrix
# TF: term_freq_trump is already a named numeric vector of per-term totals,
# so it can be sorted directly (the previous rowSums(as.matrix(...)) wrapper
# was a redundant round-trip through a one-column matrix).
term_freq_trump <- colSums(as.matrix(tweets_trump_dtm))
write.csv(as.data.frame(sort(term_freq_trump, decreasing = TRUE)), file="tweets_trump_dtm_tf.csv")
# TF-IDF
tweets_trump_dtm_tfidf <- DocumentTermMatrix(tweets_trump_VCorpus, control = list(weighting = weightTfIdf)) # DTM is for TF-IDF calculation
print(tweets_trump_dtm_tfidf)
tweets_trump_dtm_tfidf2 <- removeSparseTerms(tweets_trump_dtm_tfidf, 0.99)
print(tweets_trump_dtm_tfidf2)
write.csv(as.data.frame(sort(colSums(as.matrix(tweets_trump_dtm_tfidf2)), decreasing=TRUE)), file="tweets_trump_dtm_tfidf.csv")
# topic modeling with LDA
#install.packages("topicmodels")
library(topicmodels)
# drop documents that became empty after cleaning (LDA requires non-zero rows)
rowTotals_trump <- apply(tweets_trump_dtm , 1, sum) # sum of words in each document
tweets_trump_dtm_nonzero <- tweets_trump_dtm[rowTotals_trump > 0, ]
library(ldatuning)
library(slam)
# score candidate topic counts with four model-selection diagnostics
result <- FindTopicsNumber(
  tweets_trump_dtm_nonzero,
  topics = seq(from = 2, to = 15, by = 1),
  metrics = c("CaoJuan2009", "Arun2010", "Deveaud2014", 'Griffiths2004'),
  method = "Gibbs",
  control = list(seed = 77),
  mc.cores = 2L,
  verbose = TRUE
)
FindTopicsNumber_plot(result)
# after finding "the optimal K" topics, fit the final model
tweets_trump_dtm_6topics <- LDA(tweets_trump_dtm_nonzero, k = 6, method = "Gibbs", control = list(iter=2000, seed = 2000))
tweets_trump_dtm_6topics_10words <- terms(tweets_trump_dtm_6topics, 10) # get top 10 words of every topic
tweets_trump_dtm_6topics_10words
###################################################### the end of the codes ---
| /code/Trump_insulting_tweets.R | no_license | mic-lin/trumpinsult_tweet | R | false | false | 3,947 | r | # Trump Insulting Tweets -----
# Trump insulting tweets: load the scraped tweet table and build a tm
# corpus for the text-mining steps below.
library(foreign)
library(psych)
library(lsa)
# Interactively pick the input file (expected: trump_insult_tweets_new.csv).
trumpinsult_tweet <- read.csv(file.choose(), header = TRUE, encoding="UTF-8", stringsAsFactors=FALSE) # file.choose() opens an interactive file picker
# trump_insult_tweets_new.csv
dim(trumpinsult_tweet)
head(trumpinsult_tweet)
class(trumpinsult_tweet$date)
# Parse the date strings (day/month/year) into Date objects.
trumpinsult_tweet$date <- as.Date(trumpinsult_tweet$date,"%d/%m/%Y")
names(trumpinsult_tweet)
# DataframeSource() requires the columns to be named doc_id and text.
names(trumpinsult_tweet)[names(trumpinsult_tweet) == 'X'] <- 'doc_id'
names(trumpinsult_tweet)[names(trumpinsult_tweet) == 'tweet'] <- 'text'
library(NLP)
library(tm)
tweets_trump_docs <- subset(trumpinsult_tweet, select = c("doc_id", "text"))
tweets_trump_VCorpus <- VCorpus(DataframeSource(tweets_trump_docs))
# Inspect the first document's raw text.
tweets_trump_VCorpus[[1]]$content
# Normalise case so that term counting is case-insensitive.
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(tolower)) # tm_map applies a transformation function to every document in the corpus
# Remove URLs from each document.
# Drop URLs: delete "http" followed by any run of non-whitespace characters.
removeURL <- function(x) {
  gsub("http[^[:space:]]*", "", x)
}
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(removeURL))
# Strip everything that is not a letter or whitespace (digits, punctuation, symbols).
# Keep only alphabetic characters and whitespace; everything else is deleted.
removeNumPunct <- function(x) {
  gsub("[^[:alpha:][:space:]]*", "", x)
}
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, content_transformer(removeNumPunct))
# Remove English stopwords (a custom myStopwords vector could be used instead).
stopwords("english")
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, removeWords, stopwords("english"))
# Collapse the extra whitespace left behind by the removals above.
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, stripWhitespace)
# Remove any remaining punctuation.
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, removePunctuation)
# Stemming: reduce words to their stem (use with caution: hypertextual elements such as @mentions, #hashtags, and URLs are also affected).
library(SnowballC)
tweets_trump_VCorpus <- tm_map(tweets_trump_VCorpus, stemDocument)
# Spot-check the first document after all transformations.
tweets_trump_VCorpus[[1]]$content
# TF and TF-IDF
# Convert the corpus to a document-term matrix (rows = tweets, columns = terms).
tweets_trump_dtm <- DocumentTermMatrix(tweets_trump_VCorpus, control = list(removePunctuation = TRUE, stopwords=TRUE))
tweets_trump_dtm
# A high sparsity means terms are not repeated often among different documents.
inspect(tweets_trump_dtm) # a sample of the matrix
# TF: total frequency of each term across all tweets.
term_freq_trump <- colSums(as.matrix(tweets_trump_dtm))
# Write term frequencies sorted from most to least frequent.
# term_freq_trump is already a named numeric vector, so it can be sorted
# directly; the original wrapped it in rowSums(as.matrix(...)), a no-op
# that only obscured the intent (and was inconsistent with the colSums
# call used for the TF-IDF export below).
write.csv(as.data.frame(sort(term_freq_trump, decreasing=TRUE)), file="tweets_trump_dtm_tf.csv")
# TF-IDF weighting of the same corpus.
tweets_trump_dtm_tfidf <- DocumentTermMatrix(tweets_trump_VCorpus, control = list(weighting = weightTfIdf)) # DTM is for TF-IDF calculation
print(tweets_trump_dtm_tfidf)
# Drop terms absent from more than 99% of documents.
tweets_trump_dtm_tfidf2 = removeSparseTerms(tweets_trump_dtm_tfidf, 0.99)
print(tweets_trump_dtm_tfidf2)
write.csv(as.data.frame(sort(colSums(as.matrix(tweets_trump_dtm_tfidf2)), decreasing=TRUE)), file="tweets_trump_dtm_tfidf.csv")
# Topic modeling with LDA.
#install.packages("topicmodels")
library(topicmodels)
# LDA cannot handle empty documents: keep only rows with at least one term.
rowTotals_trump <- apply(tweets_trump_dtm , 1, sum) #Find the sum of words in each Document
tweets_trump_dtm_nonzero <- tweets_trump_dtm[rowTotals_trump> 0, ]
library(ldatuning)
library(slam)
# Score candidate topic counts (2..15) with four model-selection metrics.
result <- FindTopicsNumber(
tweets_trump_dtm_nonzero,
topics = seq(from = 2, to = 15, by = 1),
metrics = c("CaoJuan2009", "Arun2010", "Deveaud2014",'Griffiths2004'),
method = "Gibbs",
control = list(seed = 77),
mc.cores = 2L,
verbose = TRUE
)
FindTopicsNumber_plot(result)
# After choosing the optimal K from the plot, fit the final LDA model (K = 6 here).
tweets_trump_dtm_6topics <- LDA(tweets_trump_dtm_nonzero, k = 6, method = "Gibbs", control = list(iter=2000, seed = 2000))
tweets_trump_dtm_6topics_10words <- terms(tweets_trump_dtm_6topics, 10) # get top 10 words of every topic
tweets_trump_dtm_6topics_10words
###################################################### the end of the codes ---
|
## Plot 2: line chart of Global Active Power over 2007-02-01..2007-02-02,
## written to plot2.png.
## open the png plotting device
png("plot2.png")
## read input data ("?" marks missing readings in this dataset)
ip <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
## keep only the two days of interest (Date is a dd/mm/yyyy string here)
ip_filt <- subset(ip, Date == '1/2/2007' | Date == '2/2/2007')
## free the memory held by the full table; only the subset is needed
ip <- NULL
## combine date and time, then parse the result into a POSIXlt datetime
ip_filt$DtTm <- strptime(paste(ip_filt$Date, ip_filt$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
## draw the line chart; with() already evaluates names inside ip_filt,
## so the columns are referenced directly instead of via ip_filt$
with(ip_filt, plot(DtTm, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "", main = ""))
## close the device so the PNG file is flushed to disk
dev.off()
| /ExData_Plotting1/plot2.R | no_license | joellieser/ExploratoryDataAnalysis | R | false | false | 641 | r | ## open png plotting device
## Plot 2: Global Active Power vs datetime, saved to plot2.png.
png("plot2.png")
## read input data ("?" marks missing readings in this dataset)
ip <- read.table("household_power_consumption.txt",sep=";",header=TRUE,na.strings="?")
## keep only the two days of interest (Date is a dd/mm/yyyy string here)
ip_filt <- subset(ip,Date == '1/2/2007' | Date == '2/2/2007')
## free up memory used by the full table
ip <- NULL
## concat date and time columns into one string
ip_filt$DtTm <- paste(ip_filt$Date,ip_filt$Time,sep=" ")
## parse the combined string into a POSIXlt datetime
ip_filt$DtTm <- strptime(ip_filt$DtTm, format="%d/%m/%Y %H:%M:%S")
## line chart of global active power over the two days
with(ip_filt,plot(ip_filt$DtTm,ip_filt$Global_active_power, type = "l", ylab="Global Active Power (kilowatts)", xlab = "", main=""))
## close the device so the PNG is written to disk
dev.off()
|
# Auto-generated example script extracted from the tangram package docs.
library(tangram)
### Name: custom_css
### Title: Return a CSS file as a string
### Aliases: custom_css
### ** Examples
# Load the "lancet.css" stylesheet scoped to the element id "tbl1".
custom_css("lancet.css", "tbl1")
| /data/genthat_extracted_code/tangram/examples/custom_css.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 159 | r | library(tangram)
### Name: custom_css
### Title: Return a CSS file as a string
### Aliases: custom_css
### ** Examples
# Load the "lancet.css" stylesheet scoped to the element id "tbl1".
custom_css("lancet.css", "tbl1")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpPlotCoverage.R
\name{kpPlotCoverage}
\alias{kpPlotCoverage}
\title{kpCoverage}
\usage{
kpPlotCoverage(karyoplot, data, data.panel=1, r0=NULL, r1=NULL, col="#0e87eb", ymax=NULL, ...)
}
\arguments{
\item{karyoplot}{(a \code{KaryoPlot} object) This is the first argument to all data plotting functions of \code{karyoploteR}. A KaryoPlot object referring to the currently active plot.}
\item{data}{(a \code{GRanges}) A GRanges object from which the coverage will be computed or a \code{SimpleRleList} resulting from computing the coverage.}
\item{data.panel}{(numeric) The identifier of the data panel where the data is to be plotted. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{col}{(color) The background color of the regions. (defaults to "#0e87eb")}
\item{ymax}{(numeric) The maximum value to be plotted on the data.panel. If NULL the maximum coverage is used. (defaults to NULL)}
\item{...}{The ellipsis operator can be used to specify any additional graphical parameters. Any additional parameter will be passed to the internal calls to the R base plotting functions.}
}
\value{
Returns the original karyoplot object, unchanged.
}
\description{
Given a \code{GRanges} object, plot the coverage along the genome.
}
\details{
This is one of the high-level, or specialized, plotting functions of karyoploteR.
It takes a \code{GRanges} object and plots it's coverage, that is, the number of regions
overlapping each genomic position. The input can also be a \code{SimpleRleList} resulting
from computing the coverage with \code{coverage(data)}. In contrast with the low-level
functions such as \code{\link{kpRect}}, it is not possible to specify the data using
independent numeric vectors and the function only takes in the expected object types.
}
\examples{
set.seed(1000)
#Example 1: create 20 sets of non-overlapping random regions and plot them all. Add a coverage plot on top.
kp <- plotKaryotype("hg19", plot.type=1, chromosomes=c("chr1", "chr2"))
all.regs <- GRanges()
nreps <- 20
for(i in 1:nreps) {
regs <- createRandomRegions(nregions = 100, length.mean = 10000000, length.sd = 1000000,
non.overlapping = TRUE, genome = "hg19", mask=NA)
all.regs <- c(all.regs, regs)
kpPlotRegions(kp, regs, r0 = (i-1)*(0.8/nreps), r1 = (i)*(0.8/nreps), col="#AAAAAA")
}
kpPlotCoverage(kp, all.regs, ymax = 20, r0=0.8, r1=1, col="#CCCCFF")
kpAxis(kp, ymin = 0, ymax= 20, numticks = 2, r0 = 0.8, r1=1)
#Example 2: Do the same with a single bigger set of possibly overlapping regions
kp <- plotKaryotype("hg19", plot.type=1, chromosomes=c("chr1", "chr2"))
regs <- createRandomRegions(nregions = 1000, length.mean = 10000000, length.sd = 1000000,
non.overlapping = FALSE, genome = "hg19", mask=NA)
kpPlotRegions(kp, regs, r0 = 0, r1 = 0.8, col="#AAAAAA")
kpPlotCoverage(kp, regs, ymax = 20, r0=0.8, r1=1, col="#CCCCFF")
kpAxis(kp, ymin = 0, ymax= 20, numticks = 2, r0 = 0.8, r1=1)
}
\seealso{
\code{\link{plotKaryotype}}, \code{\link{kpPlotRegions}}, \code{\link{kpBars}}
\code{\link[IRanges]{coverage}}
}
| /man/kpPlotCoverage.Rd | no_license | YTLogos/karyoploteR | R | false | true | 3,992 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpPlotCoverage.R
\name{kpPlotCoverage}
\alias{kpPlotCoverage}
\title{kpCoverage}
\usage{
kpPlotCoverage(karyoplot, data, data.panel=1, r0=NULL, r1=NULL, col="#0e87eb", ymax=NULL, ...)
}
\arguments{
\item{karyoplot}{(a \code{KaryoPlot} object) This is the first argument to all data plotting functions of \code{karyoploteR}. A KaryoPlot object referring to the currently active plot.}
\item{data}{(a \code{GRanges}) A GRanges object from wich the coverage will be computed or a \code{SimpleRleList} result of computing the coverage.}
\item{data.panel}{(numeric) The identifier of the data panel where the data is to be plotted. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot differents data. If NULL, they are set to the min and max of the data panel, it is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot differents data. If NULL, they are set to the min and max of the data panel, it is, to use all the available space. (defaults to NULL)}
\item{col}{(color) The background color of the regions. (defaults to "#0e87eb")}
\item{ymax}{(numeric) The maximum value to be plotted on the data.panel. If NULL the maximum coverage is used. (defaults to NULL)}
\item{...}{The ellipsis operator can be used to specify any additional graphical parameters. Any additional parameter will be passed to the internal calls to the R base plotting functions.}
}
\value{
Returns the original karyoplot object, unchanged.
}
\description{
Given a \code{GRanges} object, plot the coverage along the genome.
}
\details{
This is one of the high-level, or specialized, plotting functions of karyoploteR.
It takes a \code{GRanges} object and plots it's coverage, that is, the number of regions
overlapping each genomic position. The input can also be a \code{SimpleRleList} resulting
from computing the coverage with \code{coverage(data)}. In contrast with the low-level
functions such as \code{\link{kpRect}}, it is not possible to specify the data using
independent numeric vectors and the function only takes in the expected object types.
}
\examples{
set.seed(1000)
#Example 1: create 20 sets of non-overlapping random regions and plot them all. Add a coverage plot on top.
kp <- plotKaryotype("hg19", plot.type=1, chromosomes=c("chr1", "chr2"))
all.regs <- GRanges()
nreps <- 20
for(i in 1:nreps) {
regs <- createRandomRegions(nregions = 100, length.mean = 10000000, length.sd = 1000000,
non.overlapping = TRUE, genome = "hg19", mask=NA)
all.regs <- c(all.regs, regs)
kpPlotRegions(kp, regs, r0 = (i-1)*(0.8/nreps), r1 = (i)*(0.8/nreps), col="#AAAAAA")
}
kpPlotCoverage(kp, all.regs, ymax = 20, r0=0.8, r1=1, col="#CCCCFF")
kpAxis(kp, ymin = 0, ymax= 20, numticks = 2, r0 = 0.8, r1=1)
#Example 2: Do the same with a single bigger set of possibly overlapping regions
kp <- plotKaryotype("hg19", plot.type=1, chromosomes=c("chr1", "chr2"))
regs <- createRandomRegions(nregions = 1000, length.mean = 10000000, length.sd = 1000000,
non.overlapping = FALSE, genome = "hg19", mask=NA)
kpPlotRegions(kp, regs, r0 = 0, r1 = 0.8, col="#AAAAAA")
kpPlotCoverage(kp, regs, ymax = 20, r0=0.8, r1=1, col="#CCCCFF")
kpAxis(kp, ymin = 0, ymax= 20, numticks = 2, r0 = 0.8, r1=1)
}
\seealso{
\code{\link{plotKaryotype}}, \code{\link{kpPlotRegions}}, \code{\link{kpBars}}
\code{\link[IRanges]{coverage}}
}
|
# One-off setup; normally install.packages() would not live in a script.
install.packages("tidytext")
library(twitteR)
## copied from: https://gist.github.com/earino/65faaa4388193204e1c93b8eb9773c1c
library(tidyverse)
library(tidytext)
# library(broom)
# Authenticate against the Twitter API; credentials come from env vars.
setup_twitter_oauth(
consumer_key = Sys.getenv("TWITTER_CONSUMER_KEY"),
consumer_secret = Sys.getenv("TWITTER_CONSUMER_SECRET"),
access_token = Sys.getenv("TWITTER_ACCESS_TOKEN"),
access_secret = Sys.getenv("TWITTER_ACCESS_SECRET")
)
# Pull up to 3200 recent timeline tweets per account.
trump <- userTimeline('realDonaldTrump', n = 3200)
obama <- userTimeline('BarackObama', n = 3200)
raw_tweets <- bind_rows(twListToDF(trump), twListToDF(obama))
# Tokenise: one row per (tweet, word).
words <- raw_tweets %>%
unnest_tokens(word, text) #global, should work for hungarian as well
data("stop_words")
# Drop stopwords and tokens containing digits.
words <- words %>%
anti_join(stop_words, by = "word") %>%
filter(! str_detect(word, "\\d"))
# Twitter-specific noise tokens left over after tokenisation.
# NOTE(review): data_frame() is deprecated in favour of tibble().
words_to_ignore <- data_frame(word = c("https", "amp", "t.co"))
words <- words %>%
anti_join(words_to_ignore, by = "word")
# Wide one-hot matrix: one row per tweet, one 0/1 column per word,
# plus the label column tweet_by_trump.
tweets <- words %>%
group_by(screenName, id, word) %>%
summarise(contains = 1) %>%
ungroup() %>%
spread(key = word, value = contains, fill = 0) %>%
mutate(tweet_by_trump = as.integer(screenName == "realDonaldTrump")) %>%
select(-screenName, -id)
library(glmnet)
# Cross-validated L1-regularised logistic regression: which words best
# separate the two accounts?
fit <- cv.glmnet(
x = tweets %>% select(-tweet_by_trump) %>% as.matrix(),
y = tweets$tweet_by_trump,
family = "binomial"
)
# Coefficients at a fixed penalty (lambda = e^-3).
temp <- coef(fit, s = exp(-3)) %>% as.matrix()
coefficients <- data.frame(word = row.names(temp), beta = temp[, 1])
# Keep only words with non-zero weight, ordered by effect size.
data <- coefficients %>%
filter(beta != 0) %>%
filter(word != "(Intercept)") %>%
arrange(desc(beta)) %>%
mutate(i = row_number())
# Diverging bar chart: positive betas indicate Trump, negative Obama.
ggplot(data, aes(x = i, y = beta, fill = ifelse(beta > 0, "Trump", "Obama"))) +
geom_bar(stat = "identity", alpha = 0.75) +
scale_x_continuous(breaks = data$i, labels = data$word, minor_breaks = NULL) +
xlab("") +
ylab("Coefficient Estimate") +
coord_flip() +
scale_fill_manual(
guide = guide_legend(title = "Word typically used by:"),
values = c("#446093", "#bc3939")
) +
theme_bw() +
theme(legend.position = "top")
library(wordcloud)
# Top-20 word clouds, one per account.
words %>%
filter(screenName == "realDonaldTrump") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
words %>%
filter(screenName == "BarackObama") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
# Tweet timing: when does each account post?
ggplot(raw_tweets, aes(x = created, y = screenName)) +
geom_jitter(width = 0) +
theme_bw() +
ylab("") +
xlab("") | /motivating_example.R | no_license | tomiaJO/CEU_TEXT_MINING | R | false | false | 2,437 | r | install.packages("tidytext")
library(twitteR)
## copied from: https://gist.github.com/earino/65faaa4388193204e1c93b8eb9773c1c
library(tidyverse)
library(tidytext)
# library(broom)
# Authenticate against the Twitter API; credentials come from env vars.
setup_twitter_oauth(
consumer_key = Sys.getenv("TWITTER_CONSUMER_KEY"),
consumer_secret = Sys.getenv("TWITTER_CONSUMER_SECRET"),
access_token = Sys.getenv("TWITTER_ACCESS_TOKEN"),
access_secret = Sys.getenv("TWITTER_ACCESS_SECRET")
)
# Pull up to 3200 recent timeline tweets per account.
trump <- userTimeline('realDonaldTrump', n = 3200)
obama <- userTimeline('BarackObama', n = 3200)
raw_tweets <- bind_rows(twListToDF(trump), twListToDF(obama))
# Tokenise: one row per (tweet, word).
words <- raw_tweets %>%
unnest_tokens(word, text) #global, should work for hungarian as well
data("stop_words")
# Drop stopwords and tokens containing digits.
words <- words %>%
anti_join(stop_words, by = "word") %>%
filter(! str_detect(word, "\\d"))
# Twitter-specific noise tokens left over after tokenisation.
# NOTE(review): data_frame() is deprecated in favour of tibble().
words_to_ignore <- data_frame(word = c("https", "amp", "t.co"))
words <- words %>%
anti_join(words_to_ignore, by = "word")
# Wide one-hot matrix: one row per tweet, one 0/1 column per word,
# plus the label column tweet_by_trump.
tweets <- words %>%
group_by(screenName, id, word) %>%
summarise(contains = 1) %>%
ungroup() %>%
spread(key = word, value = contains, fill = 0) %>%
mutate(tweet_by_trump = as.integer(screenName == "realDonaldTrump")) %>%
select(-screenName, -id)
library(glmnet)
# Cross-validated L1-regularised logistic regression: which words best
# separate the two accounts?
fit <- cv.glmnet(
x = tweets %>% select(-tweet_by_trump) %>% as.matrix(),
y = tweets$tweet_by_trump,
family = "binomial"
)
# Coefficients at a fixed penalty (lambda = e^-3).
temp <- coef(fit, s = exp(-3)) %>% as.matrix()
coefficients <- data.frame(word = row.names(temp), beta = temp[, 1])
# Keep only words with non-zero weight, ordered by effect size.
data <- coefficients %>%
filter(beta != 0) %>%
filter(word != "(Intercept)") %>%
arrange(desc(beta)) %>%
mutate(i = row_number())
# Diverging bar chart: positive betas indicate Trump, negative Obama.
ggplot(data, aes(x = i, y = beta, fill = ifelse(beta > 0, "Trump", "Obama"))) +
geom_bar(stat = "identity", alpha = 0.75) +
scale_x_continuous(breaks = data$i, labels = data$word, minor_breaks = NULL) +
xlab("") +
ylab("Coefficient Estimate") +
coord_flip() +
scale_fill_manual(
guide = guide_legend(title = "Word typically used by:"),
values = c("#446093", "#bc3939")
) +
theme_bw() +
theme(legend.position = "top")
library(wordcloud)
# Top-20 word clouds, one per account.
words %>%
filter(screenName == "realDonaldTrump") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
words %>%
filter(screenName == "BarackObama") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
# Tweet timing: when does each account post?
ggplot(raw_tweets, aes(x = created, y = screenName)) +
geom_jitter(width = 0) +
theme_bw() +
ylab("") +
xlab("") |
########################################
# This script precomputes cross correlation coefficients based on SARIMA model
# residuals with lags up to -14
########################################
# Imports
library(dplyr)
library(forecast)
library(tidyverse)
# Get Google Trends data: stack the per-keyword CSV exports into one table.
dat = data.frame(keyword=character(),
date=numeric(),
geo=character(),
hits=character())
for (f in c('GT/covid data.csv','GT/mask data.csv','GT/qanon data.csv','GT/social distancing data.csv','GT/vaccine data.csv','GT/vaccine near me data.csv')){
newdat = read_csv(f)
dat = rbind(dat,newdat)
}
rm(newdat, f)
# Drop the exported row-index column X1.
dat = subset(dat, select=-c(X1))
dat$hits = as.numeric(dat$hits)
# One column per keyword; missing keyword/date combinations become 0.
dat = dat %>% pivot_wider(names_from = keyword, values_from = hits, values_fill = 0)
dat = rename(dat, Date = date, State = geo)
write_csv(dat, 'search_data.csv')
# Get outcome data (cases, deaths, vaccinations) from this date onwards.
start_date = '2021-01-01'
# Pull the per-state vaccination time series and align its naming with the
# other outcome tables (Date / Province_State), keeping rows from start_date on.
vaccines = read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv")
vaccines = vaccines %>% rename(Date = date, Province_State = location)
vaccines = vaccines %>% filter(Date >= start_date)
# Harmonise the state label with the JHU tables. The original code ran
# gsub() over every column via data.frame(lapply(...)), which coerced all
# numeric outcome columns to character and would break the downstream
# auto.arima() fits; only the state-name column needs the substitution.
vaccines$Province_State = gsub("New York State", "New York", vaccines$Province_State)
# JHU confirmed-case counts: wide date columns -> long (Date, Cases) rows.
cases = read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv")
cases = cases %>% pivot_longer(names_to="Date", values_to="Cases", cols=colnames(cases[12:length(cases)]))
cases = cases %>% mutate(Date = as.Date(cases$Date, format ="%m/%d/%y"))
cases = cases %>% filter(Date >= start_date)
# Collapse county-level rows to one total per state and day.
cases = cases[,c('Date', 'Province_State', 'Cases')] %>% group_by(Date,Province_State) %>% summarise(cases = sum(Cases))
# Same reshaping for the deaths time series (13 fixed columns in this file).
deaths = read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv")
deaths = deaths %>% pivot_longer(names_to="Date", values_to="Deaths", cols=colnames(deaths[13:length(deaths)]))
deaths = deaths %>% mutate(Date = as.Date(deaths$Date, format ="%m/%d/%y"))
deaths = deaths %>% filter(Date >= start_date)
deaths = deaths[,c('Date', 'Province_State', 'Deaths')] %>% group_by(Date,Province_State) %>% summarise(deaths = sum(Deaths))
# Join the three outcome tables on date and state (full outer joins).
dat = merge(cases,deaths,by=c('Date','Province_State'), all = TRUE)
dat = merge(dat,vaccines,by=c('Date','Province_State'), all = TRUE)
dat = rename(dat, State = Province_State)
dat = dat %>% select(Date, State, everything())
write_csv(dat, 'outcomes.csv')
# Read the inputs back in so this section can also be run standalone.
states = state.name
search_data = read_csv('search_data.csv')
covid = read_csv('outcomes.csv')
# Merge into one data frame keyed by date and state.
timeseries = merge(search_data, covid, by=c('Date','State'))
# Results containers (grown inside the loops below).
state = c()
term = c()
outcome = c()
croscor = c()
lagd = c()
# For each state
for (s in states){
state_dat = timeseries %>% filter(State == s)
# For each search-term column (the first two columns are Date and State)
for (p in colnames(search_data)[-1:-2]){
# For each outcome
for (o in c('daily_vaccinations', 'daily_vaccinations_per_million')){
# Fit ARIMA models to prewhiten both series before cross-correlating.
xaa = auto.arima(ts(state_dat[,p]))
yaa = auto.arima(ts(state_dat[,o]))
# Cross-correlation of the residual series; `lag` partial-matches lag.max.
croscor_this = ccf(xaa$residuals, yaa$residuals, lag = 14, na.action = na.contiguous, pl = FALSE)
# For each lag from -14 up to 0
for (l in seq(-14,0)){
# Look up the cross-correlation value at this lag.
cvalue = croscor_this$acf[match(l,croscor_this$lag)]
# Append this (state, term, outcome, lag) result.
state = c(state, s)
term = c(term, p)
outcome = c(outcome, o)
croscor = c(croscor, cvalue)
lagd = c(lagd,l)
}
}
}
print(paste(s,'done'))
}
# Save model results
models = data.frame(state,
term,
outcome,
croscor,
lagd)
write.csv(models, 'models.csv')
write.csv(timeseries, 'timeseries.csv')
| /Time Series Analysis/precompute.R | no_license | nanand7/DS-Project-2021 | R | false | false | 3,936 | r | ########################################
# This script precomputes cross correlation coefficients based on SARIMA model
# residuals with lags up to -14
########################################
# Imports
library(dplyr)
library(forecast)
library(tidyverse)
# Get Google Trends data
dat = data.frame(keyword=character(),
date=numeric(),
geo=character(),
hits=character())
for (f in c('GT/covid data.csv','GT/mask data.csv','GT/qanon data.csv','GT/social distancing data.csv','GT/vaccine data.csv','GT/vaccine near me data.csv')){
newdat = read_csv(f)
dat = rbind(dat,newdat)
}
rm(newdat, f)
dat = subset(dat, select=-c(X1))
dat$hits = as.numeric(dat$hits)
dat = dat %>% pivot_wider(names_from = keyword, values_from = hits, values_fill = 0)
dat = rename(dat, Date = date, State = geo)
write_csv(dat, 'search_data.csv')
# Get outcome data
start_date = '2021-01-01'
vaccines = read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv")
vaccines = vaccines %>% rename(Date = date, Province_State = location)
vaccines = vaccines %>% filter(Date >= start_date)
vaccines = data.frame(lapply(vaccines, function(x) {gsub("New York State", "New York", x)}))
cases = read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv")
cases = cases %>% pivot_longer(names_to="Date", values_to="Cases", cols=colnames(cases[12:length(cases)]))
cases = cases %>% mutate(Date = as.Date(cases$Date, format ="%m/%d/%y"))
cases = cases %>% filter(Date >= start_date)
cases = cases[,c('Date', 'Province_State', 'Cases')] %>% group_by(Date,Province_State) %>% summarise(cases = sum(Cases))
deaths = read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv")
deaths = deaths %>% pivot_longer(names_to="Date", values_to="Deaths", cols=colnames(deaths[13:length(deaths)]))
deaths = deaths %>% mutate(Date = as.Date(deaths$Date, format ="%m/%d/%y"))
deaths = deaths %>% filter(Date >= start_date)
deaths = deaths[,c('Date', 'Province_State', 'Deaths')] %>% group_by(Date,Province_State) %>% summarise(deaths = sum(Deaths))
dat = merge(cases,deaths,by=c('Date','Province_State'), all = TRUE)
dat = merge(dat,vaccines,by=c('Date','Province_State'), all = TRUE)
dat = rename(dat, State = Province_State)
dat = dat %>% select(Date, State, everything())
write_csv(dat, 'outcomes.csv')
# Read inputs
states = state.name
search_data = read_csv('search_data.csv')
covid = read_csv('outcomes.csv')
# Merge into one data frame
timeseries = merge(search_data, covid, by=c('Date','State'))
# Results containers
state = c()
term = c()
outcome = c()
croscor = c()
lagd = c()
# For each state
for (s in states){
state_dat = timeseries %>% filter(State == s)
# For each pred
for (p in colnames(search_data)[-1:-2]){
# For each outcome
for (o in c('daily_vaccinations', 'daily_vaccinations_per_million')){
# Get ARIMA models
xaa = auto.arima(ts(state_dat[,p]))
yaa = auto.arima(ts(state_dat[,o]))
# Get ccf
croscor_this = ccf(xaa$residuals, yaa$residuals, lag = 14, na.action = na.contiguous, pl = FALSE)
# For each lag
for (l in seq(-14,0)){
# Get ccf at lag
cvalue = croscor_this$acf[match(l,croscor_this$lag)]
# Add to results
state = c(state, s)
term = c(term, p)
outcome = c(outcome, o)
croscor = c(croscor, cvalue)
lagd = c(lagd,l)
}
}
}
print(paste(s,'done'))
}
# Save model results
models = data.frame(state,
term,
outcome,
croscor,
lagd)
write.csv(models, 'models.csv')
write.csv(timeseries, 'timeseries.csv')
|
# Course project "run_analysis": merge the train/test splits of the UCI
# HAR dataset, keep mean/std features, label activities, clean up the
# variable names, and write the per-subject/activity feature means.
# NOTE(review): x_train/x_test, y_train/y_test, subject_train/subject_test
# and the `activities` lookup table must be loaded before this block runs.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)
# Keep the identifiers plus every column whose name mentions mean or std.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))
# Replace the numeric activity code with its descriptive label.
TidyData$code <- activities[TidyData$code, 2]
names(TidyData)[2] <- "activity"
# Expand the terse feature-name abbreviations into readable words.
names(TidyData) <- gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData) <- gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData) <- gsub("BodyBody", "Body", names(TidyData))
names(TidyData) <- gsub("Mag", "Magnitude", names(TidyData))
names(TidyData) <- gsub("^t", "Time", names(TidyData))
names(TidyData) <- gsub("^f", "Frequency", names(TidyData))
names(TidyData) <- gsub("tBody", "TimeBody", names(TidyData))
names(TidyData) <- gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("angle", "Angle", names(TidyData))
names(TidyData) <- gsub("gravity", "Gravity", names(TidyData))
# Average every feature for each subject/activity pair.
# summarise_all(mean) replaces the deprecated funs(mean) wrapper.
FinalData <- TidyData %>%
  group_by(subject, activity) %>%
  summarise_all(mean)
# Spell out row.names in full (the original relied on partial matching of
# the argument name "row.name").
write.table(FinalData, "FinalData.txt", row.names = FALSE)
| /run_analysis.R | no_license | mose85/jhu-datacleaned_project | R | false | false | 1,275 | r |
# Merge the UCI HAR train/test splits and summarise mean/std features.
# NOTE(review): x_train, y_train, subject_train (and the test counterparts)
# plus the `activities` lookup table must be loaded before this block runs.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)
# Keep identifiers plus every mean/std feature column.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))
# Map the numeric activity code to its descriptive label.
TidyData$code <- activities[TidyData$code, 2]
names(TidyData)[2] = "activity"
# Expand abbreviated feature names into readable words.
names(TidyData)<-gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData)<-gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData)<-gsub("BodyBody", "Body", names(TidyData))
names(TidyData)<-gsub("Mag", "Magnitude", names(TidyData))
names(TidyData)<-gsub("^t", "Time", names(TidyData))
names(TidyData)<-gsub("^f", "Frequency", names(TidyData))
names(TidyData)<-gsub("tBody", "TimeBody", names(TidyData))
names(TidyData)<-gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData)<-gsub("angle", "Angle", names(TidyData))
names(TidyData)<-gsub("gravity", "Gravity", names(TidyData))
# Average every feature per subject/activity pair.
# NOTE(review): funs() is deprecated; summarise_all(mean) is the modern form.
FinalData <- TidyData %>%
group_by(subject, activity) %>%
summarise_all(funs(mean))
write.table(FinalData, "FinalData.txt", row.name=FALSE)
|
# Working speeds recorded for five data analysts.
speed_vector <- c("Fast", "Slow", "Slow", "Fast", "Ultra-fast")
# ordered() builds an ordinal factor directly: Slow < Fast < Ultra-fast.
factor_speed_vector <- ordered(speed_vector, levels = c("Slow", "Fast", "Ultra-fast"))
# Your code below
# Ordered factors support ranking comparisons between their elements.
compare_them <- factor_speed_vector[2] > factor_speed_vector[5]
# Is data analyst 2 faster than data analyst 5?
compare_them
| /introduction-to-r/chapter-4-factors/Comparing ordered factors.R | no_license | oliverwreath/R_Practise | R | false | false | 301 | r | speed_vector <- c("Fast","Slow","Slow","Fast","Ultra-fast")
# Encode the speeds as an ordered factor: Slow < Fast < Ultra-fast.
factor_speed_vector <- factor(speed_vector, ordered=TRUE, levels=c("Slow","Fast","Ultra-fast"))
# Your code below
# TRUE when analyst 2's speed level ranks above analyst 5's.
compare_them <- factor_speed_vector[2] > factor_speed_vector[5]
# Is data analyst 2 faster than data analyst 5?
compare_them
|
## Sales Figures chart, written to city.pdf.
pdf("city.pdf")
citysales<-read.csv("citysales.csv")
# Horizontal grouped bars: one group per product column (2:4), one bar per city.
# NOTE(review): brewer.pal() belongs to RColorBrewer, which is not attached
# anywhere in this view -- confirm library(RColorBrewer) runs beforehand.
barplot(as.matrix(citysales[,2:4]), beside=TRUE,horiz=TRUE,
        legend.text=citysales$City, args.legend=list(bty="n"),
        col=brewer.pal(5,"Set1"),border="white",
        xlim=c(0,100), xlab="Sales Revenue (1,000's of USD)",
        main="Sales Figures")
dev.off() | /R可视化/orientation of bar.r | no_license | rogerjms/R_data_mining | R | false | false | 301 | r | pdf("city.pdf")
# Read per-city sales data; columns 2:4 hold the product revenue figures.
citysales <- read.csv("citysales.csv")
# Horizontal grouped bar chart of sales revenue by city.
# brewer.pal() is namespace-qualified so the script works even when
# RColorBrewer is installed but never attached via library().
barplot(as.matrix(citysales[, 2:4]), beside = TRUE, horiz = TRUE,
        legend.text = citysales$City, args.legend = list(bty = "n"),
        col = RColorBrewer::brewer.pal(5, "Set1"), border = "white",
        xlim = c(0, 100), xlab = "Sales Revenue (1,000's of USD)",
        main = "Sales Figures")
# Close the PDF device opened earlier so the file is written to disk.
dev.off()
\name{Ridit-package}
\alias{Ridit-package}
\alias{Ridit}
\docType{package}
\title{
Ridit Analysis (An extension of the Kruskal-Wallis Test.)
}
\description{
An extension of the Kruskal-Wallis test that allows selection of an arbitrary reference group. It also provides the mean ridit for each group. The mean ridit of a group is an estimate of the probability that a random observation from that group will be greater than or equal to a random observation from the reference group.
}
\details{
\tabular{ll}{
Package: \tab Ridit\cr
Type: \tab Package\cr
Version: \tab 1.1\cr
Date: \tab 2012-10-15\cr
License: \tab GPL-2 | GPL-3\cr
}
}
\author{
SeyedMahmood TaghaviShahri
Maintainer: SeyedMahmood TaghaviShahri <taghavi_m@razi.tums.ac.ir>
}
\references{
Fleiss, J. L., (1986), The Design and Analysis of Clinical Experiments. New York: John Wiley & Sons.
}
\keyword{ package }
| /man/Ridit-package.Rd | no_license | cran/Ridit | R | false | false | 880 | rd | \name{Ridit-package}
\alias{Ridit-package}
\alias{Ridit}
\docType{package}
\title{
Ridit Analysis (An extension of the Kruskal-Wallis Test.)
}
\description{
An extension of the Kruskal-Wallis test that allows selection of an arbitrary reference group. It also provides the mean ridit for each group. The mean ridit of a group is an estimate of the probability that a random observation from that group will be greater than or equal to a random observation from the reference group.
}
\details{
\tabular{ll}{
Package: \tab Ridit\cr
Type: \tab Package\cr
Version: \tab 1.1\cr
Date: \tab 2012-10-15\cr
License: \tab GPL-2 | GPL-3\cr
}
}
\author{
SeyedMahmood TaghaviShahri
Maintainer: SeyedMahmood TaghaviShahri <taghavi_m@razi.tums.ac.ir>
}
\references{
Fleiss, J. L., (1986), The Design and Analysis of Clinical Experiments. New York: John Wiley & Sons.
}
\keyword{ package }
|
#Hierarchy (in order to ease searching via Strg + f):
# Part I - xxx
# Chapter 1 - Introduction
# 1.1.3 -------------------------------------------------------------------
# Part III - Program
# Chapter 19 - Functions
# 19.2.1 ------------------------------------------------------------------
#1
#Rescale a numeric vector to the [0, 1] interval, ignoring NA and
#infinite values when determining the observed range.
rescale01 <- function(x) {
  lims <- range(x, na.rm = TRUE, finite = TRUE)
  (x - lims[1]) / (lims[2] - lims[1])
}
rescale01(rnorm(10))
#TRUE is not necessarily an argument to the function because it just specifies the behavior of the called 'range' function
#Users of rescale01 needn't control the behavior differently than the default.
#theoretically, you can rewrite the function so that it gives control over the handling of NA's and Infinite Values.
#This isn't sensible but still, the execution is shown below.
#Variant of rescale01 that exposes range()'s na.rm and finite flags
#to the caller (defaults reproduce rescale01's behavior).
rescale01_alt <- function(x, na = TRUE, fin = TRUE) {
  lims <- range(x, na.rm = na, finite = fin)
  (x - lims[1]) / (lims[2] - lims[1])
}
rescale01_alt(c(rnorm(10), NA), na = FALSE)
#If na.rm is specified as FALSE, the code still works due to the finite argument being stronger
#However, setting the finite argument to FALSE additionally results in all values being returned as NA
rescale01_alt(c(rnorm(10), NA), na = FALSE, fin = FALSE)
#2
x <- c(rnorm(10), NA, Inf, -Inf)
#Rescale to [0, 1] while mapping Inf -> 1, -Inf -> 0 and keeping NA as NA.
#range(..., finite = TRUE) ignores NA and infinite entries, so rng is the
#range of the finite observations only.
rescale01_var <- function(x) {
rng <- range(x, na.rm = TRUE, finite = TRUE)
#case_when (dplyr must be attached) uses the first matching condition per
#element; the infinite/NA branches must come before the catch-all rescale.
case_when(
x == Inf ~ 1,
x == -Inf ~ 0,
is.na(x) ~ NA_real_,
TRUE ~ (x - rng[1]) / (rng[2] - rng[1])
)
}
rescale01(x)
rescale01_var(x)
#3
x <- c(rnorm(10), NA)
#a)
#Proportion of missing (NA) elements in x: the mean of a logical vector
#is the fraction of TRUE values.
perc_na <- function(x) {
  mean(is.na(x))
}
#This function returns the amount of NA's in a vector
perc_na(x)
#b)
#Normalise x so the (NA-ignoring) total is 1; each element becomes its
#share of the overall sum.
portion_of_sum <- function(x) {
  total <- sum(x, na.rm = TRUE)
  x / total
}
portion_of_sum(1:5)
#This function normalises a vector so that it sums to 1
#c
#Relative standard deviation (coefficient of variation): sd / mean,
#both computed with NA values removed.
rsd <- function(x) {
  stdev <- sd(x, na.rm = TRUE)
  avg <- mean(x, na.rm = TRUE)
  stdev / avg
}
rsd(1:5)
#This function returns the relative standard deviation
#4
library(tidyverse)
#the mpg dataset (and mostly the hwy & cty variables) from the tidyverse is used
hwy <- mpg$hwy; cty <- mpg$cty
#Standard error of the mean: sqrt(var(x) / n).
#Fixed: the original bound the variance to a local named `c`, shadowing
#base::c() inside the function body -- a classic readability/bug hazard.
se <- function(x) {
  v <- var(x)   #sample variance
  n <- length(x)
  sqrt(v / n)
}
se(hwy); se(cty)
#Sample variance computed from first principles: mean squared deviation
#with Bessel's correction (divide by n - 1).
variance <- function(x) {
  n <- length(x)
  deviations <- x - mean(x)
  (1 / (n - 1)) * sum(deviations^2)
}
variance(hwy); var(hwy); variance(1:10)
x <- cty
#Sample skewness: sum of cubed deviations over (n - 2) * sd^3.
#Symmetric data yields 0; positive values indicate a right tail.
skew <- function(x) {
  n <- length(x)
  centered <- x - mean(x)
  sum(centered^3) / ((n - 2) * sd(x)^3)
}
skew(cty); skew(hwy); skew(c(1,2,5,100))
#5
#Question is a little ambiguous.
#a) Either the function shall return the total number of missing values
#b) Or the function shall return the position within each vector
#Both interpretations will be implemented. The following vectors are used for demonstration.
x <- c(NA, 1:10, NA, NA)
y <- c(0, NA, 1:10, NA)
#a)
#Total count of NA positions across x and y. The elementwise `+` keeps the
#original's recycling semantics when x and y differ in length.
both_na_a <- function(x, y) {
  na_counts <- is.na(x) + is.na(y)
  sum(na_counts)
}
both_na_a(x,y)
#b)
#Positions of the NA entries in each input, returned as a two-element
#list (first x's NA indices, then y's).
both_na_b <- function(x, y) {
  list(which(is.na(x)), which(is.na(y)))
}
both_na_b(x,y)
#6
#TRUE if path x refers to a directory (NA when the path does not exist,
#mirroring file.info()).
is_directory <- function(x) {
  file.info(x)[["isdir"]]
}
#the function gives an info whether or not a particular path is a directory
#Using this function has 2 benefits over just using file.info("path")$isdir
#a) the name of the function is clearer which makes the code more readable
#b) it just returns the information we're interested in
is_directory("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/College.csv")
#TRUE if the current user has read permission on path x
#(file.access mode 4 = read; 0 means the access test succeeded).
is_readable <- function(x) {
  file.access(x, mode = 4) == 0
}
#the function gives an info whether or not a particular path is readable
#Using this function has 2 benefits over using file.access(x, 4) == 0
#a) It's more readable
#b) It's reasonably faster to write the command line
is_readable("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/College.csv")
#7
#TBD
# 19.3.1 ------------------------------------------------------------------
#1
#Vectorised prefix test: TRUE for each element of `string` that begins
#with `prefix`.
has_prefix <- function(string, prefix) {
  prefix_len <- nchar(prefix)
  substr(string, 1, prefix_len) == prefix
}
#f1 checks each element of string for having a specified prefix
#a better name than f1 would be 'has_prefix'
has_prefix(c("fin_table1", "fin_table2", "metadata"), "fin_")
#Drop the final element of x; vectors of length 0 or 1 yield NULL
#(guard clause preserved from the original).
truncate_last <- function(x) {
  n <- length(x)
  if (n <= 1) {
    return(NULL)
  }
  x[-n]
}
#f2 truncates the last element of a vector.
#a better name than f2 would be 'truncate_last'
truncate_last(1:8); truncate_last(c("here", "I", "am", "Rock", "you", "like", "a", "hurricane"))
#Repeat (or recycle/truncate) y until it has exactly as many elements
#as x; x contributes only its length.
count_and_rep <- function(x, y) {
  n_times <- length(x)
  rep(y, length.out = n_times)
}
#f3 counts the number of elements of x and returns y that many times
#a better name than f3 would be 'count_and_rep' (?)
count_and_rep(1:10, "PARTY!")
#2
#TBD
#3
?rnorm
?MASS::mvrnorm
#rnorm draws random values from a normal distribution
#mvrnorm draws values from a multivariate normal distribution
#They could be made more consistent by:
#having the same argument names (n, mean/mu, sd/Sigma)
#having the same default argument values(n vs. n = 1 | mean = 0 vs. mu | sd = 0 vs. Sigma)
#4
#norm_r and norm_d have the same prefix. By typing 'norm' and then hitting tab, all functions are immediately available
#rnorm, and dnorm are a little more intuitive names because 'random normal distribution' rolls better off the tongue than 'normal distribution random'
# 19.4.4 ------------------------------------------------------------------
#1
?`if`
?ifelse
#if tests a single condition
#ifelse tests a condition for each element within a vector
x <- -1:4
if(x < 0) {NA} else {sqrt(x)}
ifelse(x < 1, NA, sqrt(x))
sqrt(ifelse(x<1, NA, x))
#2
#Return a greeting string appropriate for the current local hour.
#Fixed: the original called library(lubridate) inside the function body,
#which attaches a package globally as a side effect; namespaced calls
#avoid that. case_when picks the first matching branch, so the
#conditions must run from latest to earliest hour.
greeting <- function() {
  x <- lubridate::hour(lubridate::now())
  dplyr::case_when(
    x >= 18 ~ "Good Evening",
    x >= 12 ~ "Good Afternoon",
    x >= 0 ~ "Good Morning"
  )
}
greeting()
#3
#Vectorised fizzbuzz: "fizzbuzz" for multiples of both 3 and 5 (i.e. 15),
#"buzz" for multiples of 5, "fizz" for multiples of 3, otherwise the
#number itself as a character string.
fizzbuzz <- function(x) {
  case_when(
    x %% 15 == 0 ~ "fizzbuzz",
    x %% 5 == 0 ~ "buzz",
    x %% 3 == 0 ~ "fizz",
    TRUE ~ as.character(x)
  )
}
fizzbuzz(1:50)
#4
?cut
temp <- seq(-5, 35, by = 5); temp
cut(temp,
breaks = c(-Inf, 0, 10, 20, 30, Inf),
labels = c("freezing", "cold", "cool", "warm", "hot"))
#in order to mimic the behavior of the if-statement with '<' instead of '<=', the right argument can be switched
cut(temp,
breaks = c(-Inf, 0, 10, 20, 30, Inf),
labels = c("freezing", "cold", "cool", "warm", "hot"),
right = FALSE)
#There are 2 advantages of cut over 'if'
#a) It's easier to read
#b) it can process single values as well as vectors containing more than one element
#5
?switch
#TBD
#6
#TBD
# 19.5.5 ------------------------------------------------------------------
#1
commas <- function(...) stringr::str_c(..., collapse = ", ")
commas(letters, collapse = "-")
#it gives an error because collapse is already specified in the function
#2
library(stringr)
#Print a title followed by a horizontal rule padded out toward the
#console width. The ... pieces are concatenated to form the title; pad is
#a (possibly multi-character) pattern repeated to fill the remaining width.
rule <- function(..., pad = "-") {
title <- paste0(...)
#-5 leaves room for the separating space plus a small right margin
width <- getOption("width") - nchar(title) - 5
#str_dup repeats pad width/str_length(pad) times so multi-character pads
#still fill roughly the intended width
cat(title, " ", stringr::str_dup(pad, width / str_length(pad)), "\n", sep = "")
}
rule("Important output")
rule("Title", pad = "-+")
rule("Important", pad = "!.+-=")
#3
?mean
#it excludes some extreme values for the calculation. This might be helpful when working with strong outliers or infinite values
mean(c(-Inf, rnorm(20), Inf)) ; mean(c(-Inf, rnorm(20), Inf), trim = 0.1)
#4
?cor
#"Pearson", "Kendall" & "Spearman" reflect different ways of computing the covariance/correlation coefficient
#If not specified manually, Pearson is chosen as default.
#The other two are merely written down to show their availability
# Chapter 20 - Vectors
# 20.3.5 ------------------------------------------------------------------
#1
?is.finite
is.finite(c(0, Inf, -Inf, NA, NaN))
#is.finite checks just for finite numbers, i.e. Inf, -Inf, NA & NaN are not finite
!is.infinite(c(0, Inf, -Inf, NA, NaN))
#!is.infinite checks just for infinite values, i.e. NA, NaN & any real number is not infinite
#2
dplyr::near
#dplyr::near tests whether the absolute difference between two values is smaller than some threshold
#The threshold can be specified manually. By default it's eight 0's.
#3
#Theory would suggest that both integers and doubles can take an infinite number of values.
#However, there probably will be some computational limits both left and right to the decimal point.
#4
do <- c(1, 1.3, 1.5, 1.7, 2.5)
?as.integer; as.integer(do) #converts real numbers truncating everything behind the decimal point
as.integer(round(do)) #using appropriate rounding
ceiling(do)
floor(do)
#5
str_count(words) %>% str() #str_count returns an integer vector
#There are others, e.g. str_length & str_locate
str_detect(words, "th") %>% str() #str_detect returns a logical vector
# 20.4.6 ------------------------------------------------------------------
#1
x <- c(1:96, NA, Inf, -Inf, NaN)
#mean(is.na(x)) gives the proportion of NAs (& NaNs) in a vector
mean(is.na(x))
#sum(!is.finite(x)) returns the total amount of NAs, NaNs, Infs & -Infs
sum(!is.finite(x))
#2
?is.vector
#is vector returns FALSE for augmented vectors (like tibbles or data frames)
is.vector(1); is.vector("hi")
is.vector(tibble(1)); is.vector(as.Date("2019-01-01"))
?is.atomic
#is atomic doesn't agree with the prevailing definition because it returns TRUE for NULL
is.atomic(NULL)
#3
?setNames
?purrr::set_names
#set_names returns an error when the vector to name has a different length as the vector of names
setNames(1:3, c("a", "b"))
purrr::set_names(1:3, c("a", "b"))
#and a lot of other stuff (check https://jrnold.github.io/r4ds-exercise-solutions/vectors.html)
#4
test <- c(10:1)
#a)
#Return the final element of x (empty input yields an empty vector).
last <- function(x) {
  n <- length(x)
  x[n]
}
last(test)
#b)
#Select the elements at even positions (2, 4, ...). Note seq() errors on
#vectors shorter than 2, exactly like the original.
even_positions <- function(x) {
  idx <- seq(2, length(x), by = 2)
  x[idx]
}
even_positions(test)
#c)
#Return x without its final element (empty input stays empty, since
#negative-zero indexing is a no-op selection).
drop_last <- function(x) {
  n <- length(x)
  x[-n]
}
drop_last(test)
#d)
#Keep only the even-valued elements of x.
even_values <- function(x) {
  keep <- x %% 2 == 0
  x[keep]
}
even_values(test)
#5
test[which(test > 0)]
#this code returns all elements with a value greater than 0
test[test <= 0]
#this code returns all elements with a value smaller or equal to 0
#6
test2 <- c("a" = 1, "b" = 2)
test2[3]
#subsetting with a higher number than the length of the vector returns NA
#Fixed: the demo below used the unnamed vector `test`; the named vector
#`test2` is what the non-existent-name example is meant to exercise.
test2["c"]
#subsetting with a non-existent name also returns NA
# 20.5.4 ------------------------------------------------------------------
#1
#see paper or solution: https://jrnold.github.io/r4ds-exercise-solutions/vectors.html
#2
diamonds[1]; diamonds["cut"]; diamonds[-3]
#subsetting with [] also returns a list
diamonds[[1]]; diamonds[["cut"]]
#subsetting with [[]] removes the tibble property and returns a single vector
diamonds$cut
#subsetting with $ does the same as [[]]
#the key difference between a list and a tibble is that the columns of a tibble must have the same length
# 20.7.4 ------------------------------------------------------------------
#1
library(lubridate)
hour1 <- hms::hms(3600); hour1
#it returns 1 hour in the corresponding format
typeof(hour1)
#it's built on the primitive vector 'double'
attributes(hour1)
#there is a unit attribute (seconds) which interprets 3600 as 3600 seconds = 60 minutes = 1 hour
#the classes hms (format) and difftime are associated with this augmented vector
#2
tibble(c(1:10), c(11:30))
#There is an error message. Exceptions are single values
tibble(c(1:10), "single value")
#3
tibble(list(1))
# Chapter 21 - Iteration
library(tidyverse)
#1
#a)
str(mtcars) #numeric columns exclusively
res_a <- vector("double", ncol(mtcars))
for (i in seq_along(mtcars)) {
res_a[i] <- mean(mtcars[,i])
names(mtcars[i])
}
(res_a <- set_names(res_a, names(mtcars)))
#b)
library(nycflights13)
res_b <- vector("character", ncol(flights))
for (i in seq_along(flights)) {
res_b[i] <- typeof(flights[[i]])
}
(res_b <- set_names(res_b, names(flights)))
#c)
res_c <- vector("integer", ncol(iris))
for (i in seq_along(iris)) {
res_c[i] <- length(unique(iris[,i]))
}
(res_c <- set_names(res_c, names(iris)))
#d)
means <- c(-10, 0, 10, 100)
draws <- 10
res_d <- matrix(nrow = draws, ncol = length(means))
for (i in seq_along(means)) {
res_d[,i] <- rnorm(draws, means[i])
}
res_d
#2
#a)
library(stringr)
#Fixed: the assignment inside the loop was split across two lines
#("out" on one line, "<- stringr::str_c(out, x)" on the next), which is
#a parse error in R. Rejoined into a single assignment; the loop builds
#the alphabet string one letter at a time.
out <- ""
for (x in letters) {
  out <- stringr::str_c(out, x)
}
out
#use str_c with the collapse argument
str_c(letters, collapse = "")
#b)
x <- sample(100)
sd <- 0
for (i in seq_along(x)) {
sd <- sd + (x[i] - mean(x)) ^ 2
}
sd <- sqrt(sd / (length(x) - 1))
sd
#use sd or manual vector computation instead
sd(x)
sqrt(sum((x - mean(x))^2) * (1/(length(x) - 1)))
#c)
x <- runif(100)
out <- vector("numeric", length(x))
out[1] <- x[1]
for (i in 2:length(x)) {
out[i] <- out[i - 1] + x[i]
}
out
#use cumsum instead
cumsum(x)
#3
#TBD
#4
x <- rnorm(1e+5)
#concatenating the output vector
output <- vector("integer", 0)
library(lubridate)
start_c <- now(); for (i in seq_along(x)) {
output <- c(output, lengths(x[[i]]))
}; end_c <- now()
(diff_c <- end_c - start_c)
length(output); head(output)
#overwriting elements of a fully prepared output vector
output2 <- vector("integer", length(x))
start_v <- now(); for (i in seq_along(x)) {
output2[i] <- lengths(x[[i]])
}; end_v <- now()
(diff_v <- end_v - start_v)
length(output2); head(output2)
#difference between both approaches
diff_c - diff_v
# 21.3.5 ------------------------------------------------------------------
#1
library(tidyverse); library(stringr)
#Identify all files in a certain directory
files <- dir("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/", pattern = "^table", full.names = TRUE)
#Read all identified files
res <- vector("list", length(files)) #1. prepare output
for (i in seq_along(res)) { #2. define sequence
res[[i]] <- read_csv(files[[i]]) #3. execute body: read files
}
all_names <- vector("list", length(res)) #1. prepare output: all available names
for(i in seq_along(res)) { #2.define sequence
#3. execute body: rename 'Samplesize' and extract column names
#rename the column 'Samplesize' so that it matches across dataframes
names(res[[i]]) <- str_replace(names(res[[i]]), "\\[.*\\]", "")
#extract all the column names and attach them to the list
all_names[[i]] <- str_c(names(res[[i]]))
}
#get unique column names across all df's
unique_names <- unique(unlist(all_names))
for(i in seq_along(res)) {
#check what column names are not available in the specific df
missing_columns <- setdiff(unique_names, names(res[[i]]))
#add missing columns to specific df and fill them with NA
res[[i]][,missing_columns] <- NA_character_
}
#union all single tables into a single tibble
data <- Reduce(dplyr::union_all, res)
data
#2
demo1 <- list(c(3,4), c(4,5))
demo2 <- list(a = c(3,4), c(4,5))
demo3 <- list(a = c(3,4), c(8,9))
for (nm in names(demo1)) {
print(demo1[[nm]])
}
#if the object has no names, nothing will happen
for (nm in names(demo2)) {
print(demo2[[nm]])
}
#if not every object has names, the loop functions properly for named elements and returns NULL for non-names ones
for (nm in names(demo3)) {
print(demo3[[nm]])
}
#if some elements have the same name, the loop functions properly for the first and treats the second as not named
#3
head(iris)
x <- iris[2]
df <- iris
#Print the mean of every numeric column of df, one per line, with the
#means aligned in a common column. Returns nothing useful; output goes
#to the console via cat().
#NOTE(review): x[,1] collapses a one-column base data frame to a vector;
#for a tibble it would stay a tibble and is.numeric() would be FALSE --
#confirm callers pass base data frames (iris here is one).
show_mean <- function(df) {
#longest column name determines the padding needed for alignment
max_str_length <- max(str_length(names(df)))
for (i in seq_along(df)) {
x <- df[i]  #single-bracket subsetting keeps a one-column data frame with its name
var_name <- names(x)
if (is.numeric(x[,1])) {
#pad ": " on the right so every mean starts in the same column
whitespace <- str_pad(": ", width = (max_str_length - str_length(var_name) + 2), side = "right")
str_mean <- as.character(round(mean(x[,1]), 2))
cat(str_c(var_name, whitespace, str_mean, sep = ""), "\n")
}
}
}
show_mean(iris)
#4
trans <- list(
disp = function(x) x * 0.0163871,
am = function(x) {
factor(x, labels = c("auto", "manual"))
}
)
for (var in names(trans)) {
mtcars[[var]] <- trans[[var]](mtcars[[var]])
}
mtcars
# 21.4.1 ------------------------------------------------------------------
#1
?apply
#for the 2d case (dataframes/tibbles), apply generalises applying functions to each row or column
demo_matrix <- matrix(c(rnorm(10, -10), rnorm(10), rnorm(10,10)), nrow = 10)
#a)calculating the mean over rows works by..
#i) either using a for loop
res <- vector("double", nrow(demo_matrix))
for (i in seq_along(res)) {
res[i] <- mean(demo_matrix[i,])
}
res
#ii) or the apply function with MARGIN = 1
apply(demo_matrix, MARGIN = 1, FUN = mean)
#b) calculating the mean over columns works by
#i) either using a for loop
res <- vector("double", ncol(demo_matrix))
for (i in seq_along(res)) {
res[i] <- mean(demo_matrix[,i])
}
res
#ii) or the apply function with MARGIN = 2
apply(demo_matrix, MARGIN = 2, FUN = mean)
#2
demo_tibble <- tibble(a = rnorm(10,-10), b = rnorm(10), c = rnorm(10,10))
#Apply fun (which must return one value per column) to every numeric
#column of df; non-numeric columns are dropped first. Returns a named
#double vector, one entry per surviving column.
col_summary <- function(df, fun) {
  is_num <- vapply(df, is.numeric, logical(1))
  df <- df[is_num]
  out <- numeric(length(df))
  for (j in seq_along(df)) {
    out[j] <- fun(df[[j]])
  }
  names(out) <- names(df)
  out
}
col_summary(demo_tibble, fun = mean)
col_summary(iris, fun = median)
col_summary(mpg, fun = max)
# 21.5.3 ------------------------------------------------------------------
#1
#a)
map_dbl(mtcars, mean)
#b)
library(nycflights13)
map_chr(flights, typeof)
#c)
iris %>%
map(unique) %>%
map_int(length)
#d)
c(-10, 0, 10, 100) %>%
map(~rnorm(10, .))
#2
demo_factor <- tibble(f = factor(c("ab", "cd", "ab"), levels = c("ab", "cd", "ef")),
b = 1:3,
c = str_c(3:1, " check"))
map_lgl(demo_factor, is.factor)
map_lgl(iris, is.factor)
#3
#applying map to vectors that aren't lists will apply the specified function to every element within the vector
map(1:5, runif)
#here runif takes every element of 1:5 and uses is at it's first argument.
#An alternative approach could be
map_dbl(1:5, ~runif(1, min = ., max = (.+1)))
#4
map(-2:2, rnorm, n = 5)
#This map randomly draws five times from the normal distribution for each mapping
#Every element of the vector -2:2 is passed to the mean-argument (n is already specified: 5)
map_dbl(-2:2, rnorm, n = 5)
#This map_dbl tries the same as the above map. But it can't return hierarchical elements and thus,
#throws an error.
#5
mtcars %>%
split(.$cyl) %>%
map(function(df) lm(mpg ~ wt, data = df))
mtcars %>%
split(.$cyl) %>%
map(~lm(mpg ~ wt, data = .))
x <- list(1, "a", 3)
mu <- c(5, 10, -3)
sigma <- c(1,2,3)
seq_along(mu) %>%
map(~rnorm(10, mean = mu[[.]] , sd = sigma[[.]] )) %>%
str()
map2(mu, sigma, rnorm, n = 10) %>% str()
n <- c(1, 3, 5)
args1 <- list(n, mu, sigma)
args1 %>%
pmap(rnorm) %>%
str()
dfs <- list(
age = tibble(name = "John", age = 30),
sex = tibble(name = c("John", "Mary"), sex = c("M", "F")),
trt = tibble(name = "Mary", treatment = "A")
)
# 21.9.3 ------------------------------------------------------------------
#1
x <- list(1:5, letters, list(10)) #to test the functions behavior compared to purrr's 'every'
#Hand-rolled version of purrr::every(): TRUE only when FUN holds for all
#elements of x. mean() of the logical results is 1 exactly when every
#element passed; floor() then maps anything below 1 to 0.
every2 <- function(x, FUN) {
  res <- vector("logical", length(x))
  for (idx in seq_along(x)) {
    res[[idx]] <- FUN(x[[idx]])
  }
  as.logical(floor(mean(res)))
}
every2(x, is.vector); every(x, is.vector)
every2(x, is.list); every(x, is.list)
?every
#every takes more alternatives at the second argument (e.g. a logical vector) and passes further arguments to the function
#2
#Apply fun to every numeric column of df and return the results as an
#unnamed double vector (non-numeric columns are dropped first via purrr).
col_summary <- function(df, fun) {
df <- keep(df, map_lgl(df, is.numeric)) #this line is added to throw out all non numeric vectors
out <- vector("double", length(df))
for (i in seq_along(df)) {
out[i] <- fun(df[[i]])  #fun must return a single value per column
}
out
}
col_summary(iris, mean)
#3
#Apply f to every numeric column of df, always returning a numeric vector
#named after the columns (numeric(0) for zero numeric columns).
#Fixes the two demonstrated problems:
# - sapply() is type-unstable: on a zero-column data frame it returns an
#   empty *list*, which broke the subsetting step; vapply() is type-stable.
# - df[, is_num] without drop = FALSE collapsed a single selected column
#   to a bare vector, so f was then applied elementwise instead of once.
col_sum3 <- function(df, f) {
  is_num <- vapply(df, is.numeric, logical(1))
  df_num <- df[, is_num, drop = FALSE]
  #as.numeric() keeps the promise of a double result even when f returns
  #an integer (e.g. max on an integer column)
  vapply(df_num, function(col) as.numeric(f(col)), numeric(1))
}
df <- tibble(
x = 1:3,
y = 3:1,
z = c("a", "b", "c")
)
# OK
col_sum3(df, mean)
# Has problems: don't always return numeric vector
col_sum3(df[1:2], mean)
col_sum3(df[1], mean)
col_sum3(df[0], mean)
| /R for Data Science - Exercises III_Program.R | no_license | MalteJe/R-for-Data-Science---Notes | R | false | false | 20,577 | r | #Hierarchy (in order to ease searching via Strg + f):
# Part I - xxx
# Chapter 1 - Introduction
# 1.1.3 -------------------------------------------------------------------
# Part III - Program
# Chapter 19 - Functions
# 19.2.1 ------------------------------------------------------------------
#1
rescale01 <- function(x) {
rng <- range(x, na.rm = TRUE, finite = TRUE)
(x - rng[1]) / (rng[2] - rng[1])
}
rescale01(rnorm(10))
#TRUE is not necessarily an argument to the function because it just specifies the behavior of the called 'range' function
#Users of rescale01 needn't control the behavior differently than the default.
#theoretically, you can rewrite the function so that it gives control over the handling of NA's and Infinite Values.
#This isn't sensible but still, the execution is shown below.
rescale01_alt <- function(x, na = TRUE, fin = TRUE) {
rng <- range(x, na.rm = na, finite = fin)
(x - rng[1]) / (rng[2] - rng[1])
}
rescale01_alt(c(rnorm(10), NA), na = FALSE)
#If na.rm is specified as FALSE, the code still works due to the finite argument being stronger
#However, setting the finite argument to FALSE additionally results in all values being returned as NA
rescale01_alt(c(rnorm(10), NA), na = FALSE, fin = FALSE)
#2
x <- c(rnorm(10), NA, Inf, -Inf)
rescale01_var <- function(x) {
rng <- range(x, na.rm = TRUE, finite = TRUE)
case_when(
x == Inf ~ 1,
x == -Inf ~ 0,
is.na(x) ~ NA_real_,
TRUE ~ (x - rng[1]) / (rng[2] - rng[1])
)
}
rescale01(x)
rescale01_var(x)
#3
x <- c(rnorm(10), NA)
#a)
perc_na <- function(x) {
all.na <- is.na(x)
mean(all.na)
}
#This function returns the amount of NA's in a vector
perc_na(x)
#b)
portion_of_sum <- function(x) {
sum.vec <- sum(x, na.rm = TRUE)
x / sum.vec
}
portion_of_sum(1:5)
#This function normalises a vector so that it sums to 1
#c
rsd <- function(x) {
sd(x, na.rm = TRUE) / mean(x, na.rm = TRUE)
}
rsd(1:5)
#This function returns the relative standard deviation
#4
library(tidyverse)
#the mpg dataset (and mostly the hwy & cty variables) from the tidyverse is used
hwy <- mpg$hwy; cty <- mpg$cty
#Standard error of the mean: sqrt(var(x) / n).
#Fixed: the original bound the variance to a local named `c`, shadowing
#base::c() inside the function body -- a classic readability/bug hazard.
se <- function(x) {
  v <- var(x)   #sample variance
  n <- length(x)
  sqrt(v / n)
}
se(hwy); se(cty)
variance <- function(x) {
n <- length(x)
m <- mean(x)
(1 / (n - 1)) * sum((x - m)^2)
}
variance(hwy); var(hwy); variance(1:10)
x <- cty
skew <- function(x) {
n <- length(x)
m <- mean(x)
sde <- sd(x)
numerator <- sum((x - m)^3)
denominator <- (n-2) * sde^3
numerator / denominator
}
skew(cty); skew(hwy); skew(c(1,2,5,100))
#5
#Question is a little ambiguous.
#a) Either the function shall return the total number of missing values
#b) Or the function shall return the position within each vector
#Both interpretations will be implemented. The following vectors are used for demonstration.
x <- c(NA, 1:10, NA, NA)
y <- c(0, NA, 1:10, NA)
#a)
both_na_a <- function(x,y) {
sum(is.na(x) + is.na(y))
}
both_na_a(x,y)
#b)
both_na_b <- function(x,y) {
na.x <- which(is.na(x))
na.y <- which(is.na(y))
list(na.x, na.y)
}
both_na_b(x,y)
#6
is_directory <- function(x) file.info(x)$isdir
#the function gives an info whether or not a particular path is a directory
#Using this function has 2 benefits over just using file.info("path")$isdir
#a) the name of the function is clearer which makes the code more readable
#b) it just returns the information we're interested in
is_directory("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/College.csv")
is_readable <- function(x) file.access(x, 4) == 0
#the function gives an info whether or not a particular path is readable
#Using this function has 2 benefits over using file.access(x, 4) == 0
#a) It's more readable
#b) It's reasonably faster to write the command line
is_readable("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/College.csv")
#7
#TBD
# 19.3.1 ------------------------------------------------------------------
#1
has_prefix <- function(string, prefix) {
substr(string, 1, nchar(prefix)) == prefix
}
#f1 checks each element of string on having a specfified prefix
#a better name than f1 would be 'has_prefix'
has_prefix(c("fin_table1", "fin_table2", "metadata"), "fin_")
truncate_last <- function(x) {
if (length(x) <= 1) return(NULL)
x[-length(x)]
}
#f2 truncates the last element of a vector.
#a better name than f2 would be 'truncate_last'
truncate_last(1:8); truncate_last(c("here", "I", "am", "Rock", "you", "like", "a", "hurricane"))
count_and_rep <- function(x, y) {
rep(y, length.out = length(x))
}
#f3 counts the number of elements of x and returns y that many times
#a better name than f3 would be 'count_and_rep' (?)
count_and_rep(1:10, "PARTY!")
#2
#TBD
#3
?rnorm
?MASS::mvrnorm
#rnorm draws random values from a normal distribution
#mvrnorm draws values from a multivariate normal distribution
#They could be made more consistent by:
#having the same argument names (n, mean/mu, sd/Sigma)
#having the same default argument values(n vs. n = 1 | mean = 0 vs. mu | sd = 0 vs. Sigma)
#4
#norm_r and norm_d have the same prefix. By typing 'norm' and then hitting tab, all functions are immediately available
#rnorm, and dnorm are a little more intuitive names because 'random normal distribution' rolls better off the tongue than 'normal distribution random'
# 19.4.4 ------------------------------------------------------------------
#1
?`if`
?ifelse
#if tests a single condition
#ifelse tests a condition for each element within a vector
x <- -1:4
if(x < 0) {NA} else {sqrt(x)}
ifelse(x < 1, NA, sqrt(x))
sqrt(ifelse(x<1, NA, x))
#2
greeting <- function() {
library(lubridate)
x <- hour(now())
case_when(x>=18 ~ "Good Evening",
x>=12 ~ "Good Afternoon",
x>=0 ~ "Good Morning"
)
}
greeting()
#3
fizzbuzz <- function(x) {
case_when((x %% 5 == 0 & x %% 3 == 0) ~ "fizzbuzz",
x %% 5 == 0 ~ "buzz",
x %% 3 == 0 ~ "fizz",
TRUE ~ as.character(x))
}
fizzbuzz(1:50)
#4
?cut
temp <- seq(-5, 35, by = 5); temp
cut(temp,
breaks = c(-Inf, 0, 10, 20, 30, Inf),
labels = c("freezing", "cold", "cool", "warm", "hot"))
#in order to mimic the behavior of the if-statement with '<' instead of '<=', the right argument can be switched
cut(temp,
breaks = c(-Inf, 0, 10, 20, 30, Inf),
labels = c("freezing", "cold", "cool", "warm", "hot"),
right = FALSE)
#There are 2 advantages of cut over 'if'
#a) It's easier to read
#b) it can process single values as well as vectors containing more than one element
#5
?switch
#TBD
#6
#TBD
# 19.5.5 ------------------------------------------------------------------
#1
commas <- function(...) stringr::str_c(..., collapse = ", ")
commas(letters, collapse = "-")
#it gives an error because collapse is already specified in the function
#2
library(stringr)
rule <- function(..., pad = "-") {
title <- paste0(...)
width <- getOption("width") - nchar(title) - 5
cat(title, " ", stringr::str_dup(pad, width / str_length(pad)), "\n", sep = "")
}
rule("Important output")
rule("Title", pad = "-+")
rule("Important", pad = "!.+-=")
#3
?mean
#it excludes some extreme values for the calculation. This might be helpful when working with strong outliers or infinite values
mean(c(-Inf, rnorm(20), Inf)) ; mean(c(-Inf, rnorm(20), Inf), trim = 0.1)
#4
?cor
#"Pearson", "Kendall" & "Spearman" reflect different ways of computing the covariance/correlation coefficient
#If not specified manually, Pearson is chosen as default.
#The other two are merely written down to show their availability
# Chapter 20 - Vectors
# 20.3.5 ------------------------------------------------------------------
#1
?is.finite
is.finite(c(0, Inf, -Inf, NA, NaN))
#is.finite checks just for numbers, i.e. Inf, -Inf, NA & NaN don't are not finite
!is.infinite(c(0, Inf, -Inf, NA, NaN))
#!is.infinite checks just for infinite values, i.e. NA, NaN & any real number is not infinite
#2
dplyr::near
#dplyr::near tests whether the absolute difference between two values is smaller than some threshold
#The threshold can be specified manually. By default it's eight 0's.
#3
#Theory would suggest that both doubles integers and doubles can take an infinite amount of values.
#However, there probably will be some computational limits both left and right to the decimal point.
#4
do <- c(1, 1.3, 1.5, 1.7, 2.5)
?as.integer; as.integer(do) #converts real numbers truncating everything behind the decimal point
as.integer(round(do)) #using appropriate rounding
ceiling(do)
floor(do)
#5
str_count(words) %>% str() #str_count returns an integer vector
#There are others, e.g. str_length & str_locate
str_detect(words, "th") %>% str() #str_detect returns a logical vector
# 20.4.6 ------------------------------------------------------------------
#1
x <- c(1:96, NA, Inf, -Inf, NaN)
#mean(is.na(x)) gives the proportion of NAs (& NaNs) in a vector
mean(is.na(x))
#sum(!is.finite(x)) returns the total amount of NAs, NaNs, Infs & -Infs
sum(!is.finite(x))
#2
?is.vector
#is vector returns FALSE for augmented vectors (like tibbles or data frames)
is.vector(1); is.vector("hi")
is.vector(tibble(1)); is.vector(as.Date("2019-01-01"))
?is.atomic
#is atomic doesn't agree with the prevailing definition because it returns TRUE for NULL
is.atomic(NULL)
#3
?setNames
?purrr::set_names
#set_names returns an error when the vector to name has a different length as the vector of names
setNames(1:3, c("a", "b"))
purrr::set_names(1:3, c("a", "b"))
#and a lot of other stuff (check https://jrnold.github.io/r4ds-exercise-solutions/vectors.html)
#4
test <- c(10:1)
#a)
last <- function(x) { x[length(x)] }
last(test)
#b)
even_positions <- function(x) {x[seq(2, length(x), by = 2)]}
even_positions(test)
#c)
drop_last <- function(x) { x[-length(x)]}
drop_last(test)
#d)
even_values <- function(x) { x[x %% 2 == 0] }
even_values(test)
#5
test[which(test > 0)]
#this code returns all elements with a value greater than 0
test[test <= 0]
#this code returns all elements with a value smaller or equal to 0
#6
test2 <- c("a" = 1, "b" = 2)
test2[3]
#subsetting with a higher number than the length of the vector returns NA
test["c"]
#subsetting with a non-existant name also returns NA
# 20.5.4 ------------------------------------------------------------------
#1
#see paper or solution: https://jrnold.github.io/r4ds-exercise-solutions/vectors.html
#2
diamonds[1]; diamonds["cut"]; diamonds[-3]
#subsetting with [] also returns a list
diamonds[[1]]; diamonds[["cut"]]
#subsetting with [[]] removes the tibble property and returns a single vector
diamonds$cut
#subsetting with does the same as [[]]
#the key difference between a list and a tibble is that the columns of a tibble must have the same length
# 20.7.4 ------------------------------------------------------------------
#1
library(lubridate)
hour1 <- hms::hms(3600); hour1
#it returns 1 hour in the corresponding format
typeof(hour1)
#it's built on the primitive vector 'double'
attributes(hour1)
#there is a unit attribute (seconds) which interprets 3600 as 3600 seconds = 60 minutes = 1 hour
#the classes hms (format) and difftime are associated with this augmented vector
#2
tibble(c(1:10), c(11:30))
#There is an error message. Exceptions are single values
tibble(c(1:10), "single value")
#3
tibble(list(1))
# Chapter 21 - Iteration
library(tidyverse)
#1
#a)
str(mtcars) #numeric columns exclusively
res_a <- vector("double", ncol(mtcars))
for (i in seq_along(mtcars)) {
res_a[i] <- mean(mtcars[,i])
names(mtcars[i])
}
(res_a <- set_names(res_a, names(mtcars)))
#b)
library(nycflights13)
res_b <- vector("character", ncol(flights))
for (i in seq_along(flights)) {
res_b[i] <- typeof(flights[[i]])
}
(res_b <- set_names(res_b, names(flights)))
#c)
res_c <- vector("integer", ncol(iris))
for (i in seq_along(iris)) {
res_c[i] <- length(unique(iris[,i]))
}
(res_c <- set_names(res_c, names(iris)))
#d)
means <- c(-10, 0, 10, 100)
draws <- 10
res_d <- matrix(nrow = draws, ncol = length(means))
for (i in seq_along(means)) {
res_d[,i] <- rnorm(draws, means[i])
}
res_d
#2
#a)
library(stringr)
#Fixed: the assignment inside the loop was split across two lines
#("out" on one line, "<- stringr::str_c(out, x)" on the next), which is
#a parse error in R. Rejoined into a single assignment; the loop builds
#the alphabet string one letter at a time.
out <- ""
for (x in letters) {
  out <- stringr::str_c(out, x)
}
out
#use str_c with the collapse argument
str_c(letters, collapse = "")
#b)
x <- sample(100)
sd <- 0
for (i in seq_along(x)) {
sd <- sd + (x[i] - mean(x)) ^ 2
}
sd <- sqrt(sd / (length(x) - 1))
sd
#use sd or manual vector computation instead
sd(x)
sqrt(sum((x - mean(x))^2) * (1/(length(x) - 1)))
#c)
x <- runif(100)
out <- vector("numeric", length(x))
out[1] <- x[1]
for (i in 2:length(x)) {
out[i] <- out[i - 1] + x[i]
}
out
#use cumsum instead
cumsum(x)
#3
#TBD
#4
x <- rnorm(1e+5)
#concatenating the output vector
output <- vector("integer", 0)
library(lubridate)
start_c <- now(); for (i in seq_along(x)) {
output <- c(output, lengths(x[[i]]))
}; end_c <- now()
(diff_c <- end_c - start_c)
length(output); head(output)
#overwriting elements of a fully prepared output vector
output2 <- vector("integer", length(x))
start_v <- now(); for (i in seq_along(x)) {
output2[i] <- lengths(x[[i]])
}; end_v <- now()
(diff_v <- end_v - start_v)
length(output2); head(output2)
#difference between both approaches
diff_c - diff_v
# 21.3.5 ------------------------------------------------------------------
#1
library(tidyverse); library(stringr)
#Identify all files in a certain directory
files <- dir("C:/Users/psymo/OneDrive/Studium/Statistik/Daten/", pattern = "^table", full.names = TRUE)
#Read all identified files
res <- vector("list", length(files)) #1. prepare output
for (i in seq_along(res)) { #2. define sequence
res[[i]] <- read_csv(files[[i]]) #3. execute body: read files
}
all_names <- vector("list", length(res)) #1. prepare output: all available names
for(i in seq_along(res)) { #2.define sequence
#3. execute body: rename 'Samplesize' and extract column names
#rename the column 'Samplesize' so that it matches across dataframes
names(res[[i]]) <- str_replace(names(res[[i]]), "\\[.*\\]", "")
#extract all the column names and attach them to the list
all_names[[i]] <- str_c(names(res[[i]]))
}
#get unique column names across all df's
unique_names <- unique(unlist(all_names))
for(i in seq_along(res)) {
#check what column names are not available in the specific df
missing_columns <- setdiff(unique_names, names(res[[i]]))
#add missing columns to specific df and fill them with NA
res[[i]][,missing_columns] <- NA_character_
}
#union all single tables into a single tibble
data <- Reduce(dplyr::union_all, res)
data
#2
demo1 <- list(c(3,4), c(4,5))
demo2 <- list(a = c(3,4), c(4,5))
demo3 <- list(a = c(3,4), c(8,9))
for (nm in names(demo1)) {
print(demo1[[nm]])
}
#if the object has no names, nothing will happen
for (nm in names(demo2)) {
print(demo2[[nm]])
}
#if not every object has names, the loop functions properly for named elements and returns NULL for non-names ones
for (nm in names(demo3)) {
print(demo3[[nm]])
}
#if some elements have the same name, the loop functions properly for the first and treats the second as not named
#3
head(iris)
x <- iris[2]
df <- iris
# Print "<name>: <mean>" for every numeric column of df, with the colons
# aligned by padding each label out to the longest column name.
show_mean <- function(df) {
# Longest column name; drives the padding width used below.
max_str_length <- max(str_length(names(df)))
for (i in seq_along(df)) {
# Single-bracket indexing keeps x as a one-column data frame, so names(x)
# still returns the column's name.
x <- df[i]
var_name <- names(x)
# NOTE(review): x[,1] drops to a vector for a data.frame but stays a tibble
# for tibble input, where is.numeric() would be FALSE and nothing would be
# printed -- confirm intended input type (the call below passes iris).
if (is.numeric(x[,1])) {
# Pad ": " to the right so all printed means start in the same column.
whitespace <- str_pad(": ", width = (max_str_length - str_length(var_name) + 2), side = "right")
str_mean <- as.character(round(mean(x[,1]), 2))
cat(str_c(var_name, whitespace, str_mean, sep = ""), "\n")
}
}
}
show_mean(iris)
#4
trans <- list(
disp = function(x) x * 0.0163871,
am = function(x) {
factor(x, labels = c("auto", "manual"))
}
)
for (var in names(trans)) {
mtcars[[var]] <- trans[[var]](mtcars[[var]])
}
mtcars
# 21.4.1 ------------------------------------------------------------------
#1
?apply
#for the 2d case (dataframes/tibbles), apply generalises apllying functions to each row or column
demo_matrix <- matrix(c(rnorm(10, -10), rnorm(10), rnorm(10,10)), nrow = 10)
#a)calculating the mean over rows works by..
#i) either using a for loop
res <- vector("double", nrow(demo_matrix))
for (i in seq_along(res)) {
res[i] <- mean(demo_matrix[i,])
}
res
#ii) or the apply function with MARGIN = 1
apply(demo_matrix, MARGIN = 1, FUN = mean)
#b) calculating the mean over columns works by
#i) either using a for loop
res <- vector("double", ncol(demo_matrix))
for (i in seq_along(res)) {
res[i] <- mean(demo_matrix[,i])
}
res
#ii) or the apply function with MARGIN = 2
apply(demo_matrix, MARGIN = 2, FUN = mean)
#2
demo_tibble <- tibble(a = rnorm(10,-10), b = rnorm(10), c = rnorm(10,10))
# Summarise every numeric column of `df` with `fun`, returning a named
# double vector; non-numeric columns are dropped first.
col_summary <- function(df, fun) {
  keep_numeric <- vapply(df, is.numeric, logical(1))
  numeric_df <- df[keep_numeric]
  # vapply() applies `fun` per column and carries the column names along,
  # replacing the preallocate-and-loop pattern.
  vapply(numeric_df, fun, numeric(1))
}
col_summary(demo_tibble, fun = mean)
col_summary(iris, fun = median)
col_summary(mpg, fun = max)
# 21.5.3 ------------------------------------------------------------------
#1
#a)
map_dbl(mtcars, mean)
#b)
library(nycflights13)
map_chr(flights, typeof)
#c)
iris %>%
map(unique) %>%
map_int(length)
#d)
c(-10, 0, 10, 100) %>%
map(~rnorm(10, .))
#2
demo_factor <- tibble(f = factor(c("ab", "cd", "ab"), levels = c("ab", "cd", "ef")),
b = 1:3,
c = str_c(3:1, " check"))
map_lgl(demo_factor, is.factor)
map_lgl(iris, is.factor)
#3
#applying map to vectors that aren't lists will apply the specified function to every element within the vector
map(1:5, runif)
#here runif takes every element of 1:5 and uses it as its first argument.
#An alternative approach could be
map_dbl(1:5, ~runif(1, min = ., max = (.+1)))
#4
map(-2:2, rnorm, n = 5)
#This map randomly draws five times from the normal distribution for each mapping
#Every element of the vector -2:2 is passed to the mean-argument (n is already specified: 5)
map_dbl(-2:2, rnorm, n = 5)
#This map_dbl tries the same as the above map. But it can't return hierarchical elements and thus,
#throws an error.
#5
mtcars %>%
split(.$cyl) %>%
map(function(df) lm(mpg ~ wt, data = df))
mtcars %>%
split(.$cyl) %>%
map(~lm(mpg ~ wt, data = .))
x <- list(1, "a", 3)
mu <- c(5, 10, -3)
sigma <- c(1,2,3)
seq_along(mu) %>%
map(~rnorm(10, mean = mu[[.]] , sd = sigma[[.]] )) %>%
str()
map2(mu, sigma, rnorm, n = 10) %>% str()
n <- c(1, 3, 5)
args1 <- list(n, mu, sigma)
args1 %>%
pmap(rnorm) %>%
str()
dfs <- list(
age = tibble(name = "John", age = 30),
sex = tibble(name = c("John", "Mary"), sex = c("M", "F")),
trt = tibble(name = "Mary", treatment = "A")
)
# 21.9.3 ------------------------------------------------------------------
#1
x <- list(1:5, letters, list(10)) #to test the functions behavior compared to purrr's 'every'
# every2: TRUE iff FUN(x[[i]]) is TRUE for every element of x -- a
# hand-rolled version of purrr::every().
#
# The original funnelled the logical results through
# mean() %>% floor() %>% as.logical(), which obscures the intent and
# returns NA for zero-length input (mean(logical(0)) is NaN). all()
# expresses the intent directly and returns TRUE on empty input,
# matching purrr::every().
every2 <- function(x, FUN) {
  # vapply guarantees each per-element result is a single logical value.
  res <- vapply(x, FUN, logical(1))
  all(res)
}
every2(x, is.vector); every(x, is.vector)
every2(x, is.list); every(x, is.list)
?every
#every takes more alternatives at the second argument (e.g. a logical vector) and passes further arguments to the function
#2
# Summarise each numeric column of `df` with `fun`; non-numeric columns
# are discarded first. Returns an unnamed double vector, one entry per
# retained column.
col_summary <- function(df, fun) {
  # Drop every non-numeric column before summarising.
  numeric_cols <- keep(df, map_lgl(df, is.numeric))
  summaries <- vector("double", length(numeric_cols))
  for (idx in seq_along(numeric_cols)) {
    summaries[idx] <- fun(numeric_cols[[idx]])
  }
  summaries
}
col_summary(iris, mean)
#3
# Apply `f` to each numeric column of `df`.
# NOTE(review): kept as-is deliberately -- the calls below exist to
# demonstrate its problems. sapply() has an unstable return type: with zero
# numeric columns it returns an empty list rather than a numeric vector,
# and `df[, is_num]` can behave differently depending on the input's class
# -- confirm against the surrounding exercise text before "fixing".
col_sum3 <- function(df, f) {
is_num <- sapply(df, is.numeric)
df_num <- df[, is_num]
sapply(df_num, f)
}
df <- tibble(
x = 1:3,
y = 3:1,
z = c("a", "b", "c")
)
# OK
col_sum3(df, mean)
# Has problems: don't always return numeric vector
col_sum3(df[1:2], mean)
col_sum3(df[1], mean)
col_sum3(df[0], mean)
|
\name{mean.fts}
\alias{mean.fts}
\title{Mean functions for functional time series}
\description{
Computes mean of functional time series at each variable.
}
\usage{
\method{mean}{fts}(x, method = c("coordinate", "FM", "mode", "RP", "RPD", "radius"),
na.rm = TRUE, alpha, beta, weight, ...)
}
\arguments{
\item{x}{An object of class \code{fts}.}
\item{method}{Method for computing the mean function.}
\item{na.rm}{A logical value indicating whether NA values should be stripped before the computation proceeds.}
\item{alpha}{Tuning parameter when \code{method="radius"}.}
\item{beta}{Trimming percentage, by default it is 0.25, when \code{method="radius"}.}
\item{weight}{Hard thresholding or soft thresholding.}
\item{...}{Other arguments.}
}
\details{
If \code{method = "coordinate"}, it computes the coordinate-wise functional mean.
If \code{method = "FM"}, it computes the mean of trimmed functional data ordered by the functional depth of Fraiman and Muniz (2001).
If \code{method = "mode"}, it computes the mean of trimmed functional data ordered by \eqn{h}-modal functional depth.
If \code{method = "RP"}, it computes the mean of trimmed functional data ordered by random projection depth.
If \code{method = "RPD"}, it computes the mean of trimmed functional data ordered by random projection derivative depth.
If \code{method = "radius"}, it computes the mean of trimmed functional data ordered by the notion of alpha-radius.
}
\value{A list containing \code{x} = variables and \code{y} = mean rates.}
\references{
O. Hossjer and C. Croux (1995) "Generalized univariate signed rank statistics for testing and estimating a multivariate location parameter", \emph{Journal of Nonparametric Statistics}, \bold{4}(3), 293-308.
A. Cuevas and M. Febrero and R. Fraiman (2006) "On the use of bootstrap for estimating functions with functional data", \emph{Computational Statistics and Data Analysis}, \bold{51}(2), 1063-1074.
A. Cuevas and M. Febrero and R. Fraiman (2007), "Robust estimation and classification for functional data via projection-based depth notions", \emph{Computational Statistics}, \bold{22}(3), 481-496.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2007) "A functional analysis of NOx levels: location and scale estimation and outlier detection", \emph{Computational Statistics}, \bold{22}(3), 411-427.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2008) "Outlier detection in functional data by depth measures, with application to identify abnormal NOx levels", \emph{Environmetrics}, \bold{19}(4), 331-345.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2010) "Measures of influence for the functional linear model with scalar response", \emph{Journal of Multivariate Analysis}, \bold{101}(2), 327-339.
J. A. Cuesta-Albertos and A. Nieto-Reyes (2010) "Functional classification and the random Tukey depth. Practical issues", Combining Soft Computing and Statistical Methods in Data Analysis, \emph{Advances in Intelligent and Soft Computing}, \bold{77}, 123-130.
D. Gervini (2012) "Outlier detection and trimmed estimation in general functional spaces", \emph{Statistica Sinica}, \bold{22}(4), 1639-1660.
}
\author{Rob J Hyndman, Han Lin Shang}
\seealso{\code{\link[ftsa]{median.fts}}, \code{\link[ftsa]{var.fts}}, \code{\link[ftsa]{sd.fts}}, \code{\link[ftsa]{quantile.fts}}}
\examples{
# Calculate the mean function by the different depth measures.
mean(x = ElNino_ERSST_region_1and2, method = "coordinate")
mean(x = ElNino_ERSST_region_1and2, method = "FM")
mean(x = ElNino_ERSST_region_1and2, method = "mode")
mean(x = ElNino_ERSST_region_1and2, method = "RP")
mean(x = ElNino_ERSST_region_1and2, method = "RPD")
mean(x = ElNino_ERSST_region_1and2, method = "radius",
alpha = 0.5, beta = 0.25, weight = "hard")
mean(x = ElNino_ERSST_region_1and2, method = "radius",
alpha = 0.5, beta = 0.25, weight = "soft")
}
\keyword{methods}
| /man/mean.fts.Rd | no_license | cran/ftsa | R | false | false | 3,895 | rd | \name{mean.fts}
\alias{mean.fts}
\title{Mean functions for functional time series}
\description{
Computes mean of functional time series at each variable.
}
\usage{
\method{mean}{fts}(x, method = c("coordinate", "FM", "mode", "RP", "RPD", "radius"),
na.rm = TRUE, alpha, beta, weight, ...)
}
\arguments{
\item{x}{An object of class \code{fts}.}
\item{method}{Method for computing the mean function.}
\item{na.rm}{A logical value indicating whether NA values should be stripped before the computation proceeds.}
\item{alpha}{Tuning parameter when \code{method="radius"}.}
\item{beta}{Trimming percentage, by default it is 0.25, when \code{method="radius"}.}
\item{weight}{Hard thresholding or soft thresholding.}
\item{...}{Other arguments.}
}
\details{
If \code{method = "coordinate"}, it computes the coordinate-wise functional mean.
If \code{method = "FM"}, it computes the mean of trimmed functional data ordered by the functional depth of Fraiman and Muniz (2001).
If \code{method = "mode"}, it computes the mean of trimmed functional data ordered by \eqn{h}-modal functional depth.
If \code{method = "RP"}, it computes the mean of trimmed functional data ordered by random projection depth.
If \code{method = "RPD"}, it computes the mean of trimmed functional data ordered by random projection derivative depth.
If \code{method = "radius"}, it computes the mean of trimmed functional data ordered by the notion of alpha-radius.
}
\value{A list containing \code{x} = variables and \code{y} = mean rates.}
\references{
O. Hossjer and C. Croux (1995) "Generalized univariate signed rank statistics for testing and estimating a multivariate location parameter", \emph{Journal of Nonparametric Statistics}, \bold{4}(3), 293-308.
A. Cuevas and M. Febrero and R. Fraiman (2006) "On the use of bootstrap for estimating functions with functional data", \emph{Computational Statistics and Data Analysis}, \bold{51}(2), 1063-1074.
A. Cuevas and M. Febrero and R. Fraiman (2007), "Robust estimation and classification for functional data via projection-based depth notions", \emph{Computational Statistics}, \bold{22}(3), 481-496.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2007) "A functional analysis of NOx levels: location and scale estimation and outlier detection", \emph{Computational Statistics}, \bold{22}(3), 411-427.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2008) "Outlier detection in functional data by depth measures, with application to identify abnormal NOx levels", \emph{Environmetrics}, \bold{19}(4), 331-345.
M. Febrero and P. Galeano and W. Gonzalez-Manteiga (2010) "Measures of influence for the functional linear model with scalar response", \emph{Journal of Multivariate Analysis}, \bold{101}(2), 327-339.
J. A. Cuesta-Albertos and A. Nieto-Reyes (2010) "Functional classification and the random Tukey depth. Practical issues", Combining Soft Computing and Statistical Methods in Data Analysis, \emph{Advances in Intelligent and Soft Computing}, \bold{77}, 123-130.
D. Gervini (2012) "Outlier detection and trimmed estimation in general functional spaces", \emph{Statistica Sinica}, \bold{22}(4), 1639-1660.
}
\author{Rob J Hyndman, Han Lin Shang}
\seealso{\code{\link[ftsa]{median.fts}}, \code{\link[ftsa]{var.fts}}, \code{\link[ftsa]{sd.fts}}, \code{\link[ftsa]{quantile.fts}}}
\examples{
# Calculate the mean function by the different depth measures.
mean(x = ElNino_ERSST_region_1and2, method = "coordinate")
mean(x = ElNino_ERSST_region_1and2, method = "FM")
mean(x = ElNino_ERSST_region_1and2, method = "mode")
mean(x = ElNino_ERSST_region_1and2, method = "RP")
mean(x = ElNino_ERSST_region_1and2, method = "RPD")
mean(x = ElNino_ERSST_region_1and2, method = "radius",
alpha = 0.5, beta = 0.25, weight = "hard")
mean(x = ElNino_ERSST_region_1and2, method = "radius",
alpha = 0.5, beta = 0.25, weight = "soft")
}
\keyword{methods}
|
#
# Conditionally download, unzip and prepare the data for plotting
#
if (!dir.exists('data/')) {
    dir.create('data/')
}
if (!file.exists('data/NEI_data.zip')) {
    # mode = 'wb' is required so the binary zip is not corrupted on Windows
    download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip',
                  'data/NEI_data.zip', mode = 'wb')
}
# Extract whenever either .rds file is missing -- previously unzip() only ran
# immediately after a fresh download, so an already-downloaded zip with
# missing .rds files was never re-extracted. The archive is extracted into
# the working directory, which is where readRDS() below looks.
if (!file.exists('summarySCC_PM25.rds') || !file.exists('Source_Classification_Code.rds')) {
    unzip('data/NEI_data.zip')
}
if (!exists("NEI")) {
    NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
    SCC <- readRDS("Source_Classification_Code.rds")
}
| /project/load-data.R | no_license | scrain/exploratory-data-analysis | R | false | false | 444 | r | #
# Conditionally download, unzip and prepare the data for plotting
#
if (!dir.exists('data/')) {
dir.create('data/')
}
if (!file.exists('data/NEI_data.zip')) {
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip', 'data/NEI_data.zip')
unzip('data/NEI_data.zip')
}
if (!exists("NEI")) {
NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
SCC <- readRDS("Source_Classification_Code.rds")
}
|
## makeCacheMatrix: bundle a matrix with a cache slot for its inverse.
## Returns a list of four accessor closures sharing one enclosing environment:
##   setmatrix(y)      -- store a new matrix and invalidate the cached inverse
##   getmatrix()       -- retrieve the stored matrix
##   setinvmatrix(inv) -- store a computed inverse in the cache
##   getinvmatrix()    -- retrieve the cached inverse (NULL when not yet set)
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL marks "not computed yet".
    cached_inverse <- NULL
    # Replace the stored matrix. Any previously cached inverse is now stale,
    # so it is reset to NULL. `<<-` writes into the shared enclosing
    # environment rather than creating locals.
    setmatrix <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    getmatrix <- function() x
    # Called by cacheSolve() once the inverse has been computed.
    setinvmatrix <- function(solvedinvmtx) cached_inverse <<- solvedinvmtx
    getinvmatrix <- function() cached_inverse
    # Expose the four accessors by name.
    list(setmatrix = setmatrix,
         getmatrix = getmatrix,
         setinvmatrix = setinvmatrix,
         getinvmatrix = getinvmatrix)
}
## cacheSolve: return the inverse of the matrix stored in a makeCacheMatrix
## object, computing it only on the first call and serving the cached copy
## on subsequent calls.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of the matrix held in 'x'
    m <- x$getinvmatrix()
    ## if 'm' is not NULL the inverse was already computed: report the cache
    ## hit and return it without calling solve() again
    if(!is.null(m)) {
        message("getting cached inverse matrix data")
        return(m)
    }
    ## cache miss: retrieve the stored matrix from 'x' into 'data'
    data <- x$getmatrix()
    ## use solve() to compute the inverse of 'data'; extra arguments in
    ## '...' are forwarded to solve()
    m <- solve(data, ...)
    ## store 'm' back into 'x' via setinvmatrix() so the next call can reuse it
    x$setinvmatrix(m)
    ## return 'm' - the freshly computed inverse matrix
    m
} | /cachematrix.R | no_license | ghutty/ProgrammingAssignment2 | R | false | false | 1,855 | r | ## makeCacheMatrix function takes a matrix and sets it as a Cached Matrix object
makeCacheMatrix <- function(x = matrix()) {
## initialize 'm' variable as the Inverse Matrix Cache with NULL value
m <- NULL
## setmatrix() function definition
setmatrix <- function(y) {
## call to setmatrix replaces 'x' variable Matrix Cached Value with the passed matrix
## searches 'x' in the parent environment and redefines the value
x <<- y
## call to setmatrix replaces 'm' variable Inverse Matrix Cached value to NULL
## searches 'm' in the parent environment and redefines the value to NULL
m <<- NULL
}
## returns 'x' Cached Matrix Value
getmatrix <- function() x
## sets 'm' Cached Inverse Matrix Value with the passed solved inverse matrix value from cacheSolve
setinvmatrix <- function(solvedinvmtx) m <<- solvedinvmtx
## returns 'm' Cached Inverse Matrix Value
getinvmatrix <- function() m
##if no arguments are passed, show list of Cache Matrix functions
list(setmatrix = setmatrix, getmatrix = getmatrix,
setinvmatrix = setinvmatrix,
getinvmatrix = getinvmatrix)
}
## cachSolve function takes a Cached Matrix Object and returns the Inverse of the Cached Matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinvmatrix()
## if 'm' is not NULL, exit and return the cached inverse matrix retrieved from 'x'
if(!is.null(m)) {
message("getting cached inverse matrix data")
return(m)
}
## if 'm' is NULL, retrieve the cached matrix in 'x' to 'data'
data <- x$getmatrix()
## use solve() to get the inverse matrix of 'data' and assign to 'm'
m <- solve(data, ...)
## set the inverse matrix of 'x' with the 'm' solved value using setinvmatrix()
x$setinvmatrix(m)
## return 'a' value - inverse matrix
m
} |
# histprod: draw one histogram per sensory descriptor (columns
# firstvar..lastvar of `donnee`), overlaid with a fitted normal curve
# (dashed) and a kernel density estimate (red), arranged on pages of
# numr x numc panels. All panels share common x/y limits so the
# descriptors are visually comparable. `adjust` is passed to density().
histprod<-function(donnee,firstvar,lastvar=ncol(donnee),numr = 2,numc = 2,adjust=1) {
# number of descriptor columns to plot
nbdesc <- lastvar-firstvar+1
xquant<-donnee[,firstvar:lastvar]
# common x upper limit: largest observed value across all descriptors
xrange<-max(apply(xquant,2,max,na.rm=TRUE))
#yrange<-max(hist(donnee[,firstvar],plot=FALSE)$density)
# common y upper limit: tallest kernel-density peak across all descriptors
yrange<-max(density(donnee[,firstvar], na.rm = TRUE,adjust=adjust)$y)
# NOTE(review): when nbdesc == 1 this loop runs over 2:1 = c(2, 1) and
# reads one column past the requested range -- confirm callers always
# pass at least two descriptor columns.
for (i in 2:nbdesc){
#yrangeinter<-max(hist(donnee[,i+firstvar-1],plot=FALSE)$density)
yrangeinter<-max(density(donnee[,i+firstvar-1], na.rm = TRUE,adjust=adjust)$y)
yrange<-max(yrange,yrangeinter)
}
# mult = index of the last page (0-based); decremented when the panel grid
# divides nbdesc exactly so no empty trailing page is opened
mult <- nbdesc %/% (numr*numc)
if (nbdesc==(nbdesc %/% (numr*numc))*(numr*numc)) mult=mult-1
for (m in 0:mult) {
par(mfrow = c(numr,numc))
for (nbd in 1:(numr*numc)) {
# nb: absolute descriptor index for panel nbd of page m
nb <- (m*(numr*numc)+nbd)
if (nb <= nbdesc) {
hist(donnee[,nb+firstvar-1],col=grey(0.9),border = grey(0.8),xlab=names(donnee[nb+firstvar-1]),main = paste("Histogram of" , names(donnee[nb+firstvar-1])),xlim=c(0,xrange),ylim=c(0,yrange),proba=TRUE)
# dashed overlay: normal density using the descriptor's sample mean and sd
step <- seq(from = 0, to = xrange, length = 100)
lines(step, dnorm(step, mean(donnee[,nb+firstvar-1], na.rm = TRUE),sd(donnee[,nb+firstvar-1], na.rm = TRUE)),lty=2)
lines(density(donnee[,nb+firstvar-1], na.rm = TRUE,adjust=adjust), lwd = 1,col="red")
}
}
# open a fresh graphics device for every page except the last
if (m < mult) dev.new()
} #for (m in 0:mult) {
}
| /SensoMineR/R/histprod.R | no_license | ingted/R-Examples | R | false | false | 1,418 | r | histprod<-function(donnee,firstvar,lastvar=ncol(donnee),numr = 2,numc = 2,adjust=1) {
nbdesc <- lastvar-firstvar+1
xquant<-donnee[,firstvar:lastvar]
xrange<-max(apply(xquant,2,max,na.rm=TRUE))
#yrange<-max(hist(donnee[,firstvar],plot=FALSE)$density)
yrange<-max(density(donnee[,firstvar], na.rm = TRUE,adjust=adjust)$y)
for (i in 2:nbdesc){
#yrangeinter<-max(hist(donnee[,i+firstvar-1],plot=FALSE)$density)
yrangeinter<-max(density(donnee[,i+firstvar-1], na.rm = TRUE,adjust=adjust)$y)
yrange<-max(yrange,yrangeinter)
}
mult <- nbdesc %/% (numr*numc)
if (nbdesc==(nbdesc %/% (numr*numc))*(numr*numc)) mult=mult-1
for (m in 0:mult) {
par(mfrow = c(numr,numc))
for (nbd in 1:(numr*numc)) {
nb <- (m*(numr*numc)+nbd)
if (nb <= nbdesc) {
hist(donnee[,nb+firstvar-1],col=grey(0.9),border = grey(0.8),xlab=names(donnee[nb+firstvar-1]),main = paste("Histogram of" , names(donnee[nb+firstvar-1])),xlim=c(0,xrange),ylim=c(0,yrange),proba=TRUE)
step <- seq(from = 0, to = xrange, length = 100)
lines(step, dnorm(step, mean(donnee[,nb+firstvar-1], na.rm = TRUE),sd(donnee[,nb+firstvar-1], na.rm = TRUE)),lty=2)
lines(density(donnee[,nb+firstvar-1], na.rm = TRUE,adjust=adjust), lwd = 1,col="red")
}
}
if (m < mult) dev.new()
} #for (m in 0:mult) {
}
|
##############################################################
# Simulation configuration.
#
# Defines: the underlying phenology parameters for the simulated data,
# the sampling design (sample sizes, clustering), and the parameter
# grids used by each estimation method in the simulation study.
##############################################################

####################################
# Global paths
####################################
photo_folder <- '/home/shawn/data/phenology_gradients/photos/'

###################################
# Runtime / resource settings consumed by run_estimators.R
###################################
n_cores <- 2
n_bootstrap <- 1

###################################
# Underlying phenology parameters
###################################
flowering_lengths <- c(15, 30, 45, 60)
flowering_gradients <- c(
  # slope of lm(sos~latitude) from Melaas et al. 2018 / scale of simulated scale
  3.36 / 0.1,
  # double the above
  6.72 / 0.1,
  # half the above, representing a relatively uniform spatial gradient
  1.68 / 0.1)
spatial_gradient_types <- c('linear', 'non-linear')

###################################
# Sampling parameters
###################################
sample_sizes <- c(150, 300, 600, 1200)
clustering <- c(TRUE, FALSE)

########################################
# Spatial model parameters: full factorial grid, one row per model,
# tagged with a sequential model_id.
######################################
#n_boxess = c(200)
weibull_model_parameters <- expand.grid(
  box_size = c(0.2, 0.4),
  num_boxes = c(5, 10, 20, 40),
  stratum_size = c(0.1, 0.2, 0.5)
)
weibull_model_parameters$model_id <- seq_len(nrow(weibull_model_parameters))

# # linear model
# linear_model_parameters = expand.grid(
#   quantile = c(0.9, 0.95, 0.99)
# )
# linear_model_parameters$model_id = 1:nrow(linear_model_parameters)
# 
| /config.R | no_license | sdtaylor/phenology_gradients | R | false | false | 1,667 | r | ##############################################################
# This file holds:
# -the paramters for the simulated data, ie the underlying phenology parameters, sample sizes, etc.
# -the different parameters used for each of the methods in the simulation study
####################################
# global config stuff
####################################
photo_folder = '/home/shawn/data/phenology_gradients/photos/'
###################################
# stuff affecting runtim and resources in run_estimators.R
###################################
n_cores = 2
n_bootstrap = 1
###################################
# underlying phenology parameters
###################################
flowering_lengths = c(15,30,45,60)
flowering_gradients = c(
# slope of lm(sos~latitude) from Melaas et al. 2018 / scale of simulated scale
3.36/0.1,
# double the above
6.72/0.1,
# Half the above, representing a relativly uniform spatial gradient
1.68/0.1)
spatial_gradient_types = c('linear','non-linear')
###################################
# sampling parameters
###################################
sample_sizes = c(150, 300, 600, 1200)
clustering = c(TRUE, FALSE)
########################################
# Spatial model parameters
######################################
#n_boxess = c(200)
weibull_model_parameters = expand.grid(
box_size = c(0.2, 0.4),
num_boxes = c(5,10,20,40),
stratum_size = c(0.1, 0.2, 0.5)
)
weibull_model_parameters$model_id = 1:nrow(weibull_model_parameters)
# # linear model
# linear_model_parameters = expand.grid(
# quantile = c(0.9, 0.95, 0.99)
# )
# linear_model_parameters$model_id = 1:nrow(linear_model_parameters)
#
|
testlist <- list(x = c(1280061267L, -2687190L, -7012353L, -27393L, -1L, -2686977L, -144085427L, 1634752105L, 1702308136L, 1397053520L, 6516590L, 405405516L, -162783703L, 1936992588L, 506068991L, -2731494L, 791235625L, -232L, 704632575L, 570414327L, -24673L, -1616928865L, -1612720385L, -10726L, -8857857L, 692857302L, -14070743L, -704643072L, 522846207L, -16757711L, 909522687L), y = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962502-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 867 | r | testlist <- list(x = c(1280061267L, -2687190L, -7012353L, -27393L, -1L, -2686977L, -144085427L, 1634752105L, 1702308136L, 1397053520L, 6516590L, 405405516L, -162783703L, 1936992588L, 506068991L, -2731494L, 791235625L, -232L, 704632575L, 570414327L, -24673L, -1616928865L, -1612720385L, -10726L, -8857857L, 692857302L, -14070743L, -704643072L, 522846207L, -16757711L, 909522687L), y = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
#' Tests if there is a significant difference in abundance between selected conditions
#'
#' A nice long description
#'
#' @usage amp_test_species(data, group)
#'
#' @param data (required) A phyloseq object including sample data.
#' @param group (required) The group to test against.
#' @param sig Significance treshold (default: 0.01).
#' @param fold Log2fold filter default for displaying significant results (default: 0)
#' @param tax.aggregate Group data at specific taxonomic level (default: "OTU").
#' @param tax.class Converts a specific phyla to class level instead (e.g. "p__Proteobacteria").
#' @param tax.empty Either "remove" OTUs without taxonomic information at X level, with "best" classification or add the "OTU" name (default: best).
#' @param tax.display Display additional taxonomic levels in the plot output e.g. "Genus".
#' @param label Label the significant entries with tax.display (default:F).
#' @param plot.type Either "boxplot" or "point" (default: point)
#' @param plot.show Display the X most significant results.
#' @param plot.point.size The size of the plotted points.
#' @param plot.theme Chose different standard layouts choose from "normal" or "clean" (default: "normal").
#' @param parallel To run DESeq2 in parallel mode (default: F)
#' @param adjust.zero Keep 0 abundances in ggplot2 median calculations by adding a small constant to these.
#'
#' @return A p-value for each comparison.
#'
#' @export
#' @import ggplot2
#' @import dplyr
#' @import reshape2
#' @import phyloseq
#' @import grid
#' @import data.table
#'
#' @author Mads Albertsen \email{MadsAlbertsen85@@gmail.com}
amp_test_species <- function(data, group, tax.aggregate = "OTU", tax.add = NULL, test = "Wald", fitType = "parametric", sig = 0.01, fold = 0, tax.class = NULL, tax.empty = "best", label = F, plot.type = "point", plot.show = NULL, plot.point.size = 2, plot.theme = "normal", parallel = F , adjust.zero = NULL){
  ## Split the phyloseq object into plain data frames: abundance counts,
  ## taxonomy (with OTU ids attached), and sample metadata.
  ## NOTE(review): the `label` argument is documented but not referenced anywhere
  ## in this body -- confirm whether labelling was ever implemented.
  data <- list(abund = as.data.frame(otu_table(data)@.Data),
               tax = data.frame(tax_table(data)@.Data, OTU = rownames(tax_table(data))),
               sample = suppressWarnings(as.data.frame(as.matrix(sample_data(data)))))
  ## Clean up the taxonomy (empty/ambiguous entries handled per tax.empty)
  data <- amp_rename(data = data, tax.class = tax.class, tax.empty = tax.empty, tax.level = tax.aggregate)
  ## Extract the data into separate objects for readability
  abund <- data[["abund"]]
  tax <- data[["tax"]]
  sample <- data[["sample"]]
  ## Make a name variable ("Display") that can be used instead of tax.aggregate
  ## to show multiple taxonomic levels at once (e.g. "Genus; OTU").
  suppressWarnings(
    if (!is.null(tax.add)){
      if (tax.add != tax.aggregate) {
        tax <- data.frame(tax, Display = apply(tax[,c(tax.add,tax.aggregate)], 1, paste, collapse="; "))
      }
    } else {
      tax <- data.frame(tax, Display = tax[,tax.aggregate])
    }
  )
  ## Aggregate counts to the chosen taxonomic level: melt to long format, then
  ## sum reads for each Display x Sample combination.
  abund3 <- cbind.data.frame(Display = tax[,"Display"], abund) %>%
    melt(id.var = "Display", value.name= "Abundance", variable.name = "Sample")
  abund3 <- data.table(abund3)[, sum:=sum(Abundance), by=list(Display, Sample)] %>%
    setkey(Display, Sample) %>%
    unique() %>%
    as.data.frame()
  ## Convert to a DESeq2-style count matrix (taxa as rows, samples as columns)
  abund4 <- dcast(abund3, formula = Display~Sample, value.var = "sum")
  rownames(abund4) <- abund4$Display
  abund4 <- abund4[,-1]
  groupF <- as.formula(paste("~", group, sep=""))
  data_deseq <- DESeqDataSetFromMatrix(countData = abund4,
                                       colData = sample,
                                       design = groupF)
  #data_deseq = phyloseq_to_deseq2(physeq=data, design=groupF)
  ## Test for significant differential abundance with DESeq2
  data_deseq_test = DESeq(data_deseq, test=test, fitType=fitType, parallel = parallel)
  ## Extract the results; cooksCutoff = FALSE keeps p-values that DESeq2 would
  ## otherwise set to NA for Cook's-distance outliers.
  res = results(data_deseq_test, cooksCutoff = FALSE)
  res_tax = cbind(as.data.frame(res), Tax = rownames(res))
  ## Significant hits: adjusted p below `sig` AND |log2FC| above `fold`
  res_tax_sig = subset(res_tax, padj < sig & fold < abs(log2FoldChange)) %>%
    arrange(padj)
  ## Plot the data
  ### MA plot: mean abundance vs log2 fold change, significant hits in red
  res_tax$Significant <- ifelse(rownames(res_tax) %in% res_tax_sig$Tax , "Yes", "No")
  res_tax$Significant[is.na(res_tax$Significant)] <- "No"
  p1 <- ggplot(data = res_tax, aes(x = baseMean, y = log2FoldChange, color = Significant)) +
    geom_point(size = plot.point.size) +
    scale_x_log10() +
    scale_color_manual(values=c("black", "red")) +
    labs(x = "BaseMean read abundance", y = "Log2 fold change")
  ### Per-sample relative abundance (%) for the significant entries
  abund5 <- mutate(abund4, Tax = rownames(abund4)) %>%
    melt(id.vars=c("Tax"),value.name="Count", variable.name="Sample") %>%
    group_by(Sample) %>%
    mutate(Abundance = Count / sum(Count)*100)
  abund6 <- merge(abund5, res_tax, by = "Tax") %>%
    filter(padj < sig & fold < abs(log2FoldChange)) %>%
    arrange(padj)
  if(!is.null(adjust.zero)){
    ## Replace exact zeros with a small constant so they survive median/log
    ## computations in ggplot2.
    abund6$Abundance[abund6$Abundance==0] <- adjust.zero
  }
  colnames(sample)[1] <- "Sample"
  sample <- sample[c("Sample",group)]
  colnames(sample)[2] <- "Group"
  point_df <- merge(x = abund6, y = sample, by = "Sample") %>%
    group_by(Sample) %>%
    arrange(padj)
  ## NOTE(review): renaming column 12 by position assumes a fixed column layout
  ## after the merge; fragile if the metadata gains or loses columns.
  colnames(point_df)[12] <- group
  clean_temp <- point_df
  if(!is.null(plot.show)){
    point_df <- subset(point_df, Tax %in% as.character(unique(point_df$Tax))[1:plot.show])
    tax_levels <- as.character(unique(point_df$Tax))[1:plot.show]
  } else {
    ## BUGFIX: the factor levels were previously always built with
    ## [1:plot.show], which errors when plot.show is NULL (the default)
    ## because 1:NULL is not a valid sequence. With no limit, keep all taxa.
    tax_levels <- as.character(unique(point_df$Tax))
  }
  point_df$Tax <- factor(point_df$Tax, levels = rev(tax_levels))
  p2 <-ggplot(data = point_df, aes_string(x = "Tax", y = "Abundance", color = group)) +
    labs(x = "", y = "Read Abundance (%)") +
    coord_flip()
  if (plot.type == "point"){
    p2 <- p2 + geom_jitter(position = position_jitter(width = .05), size = plot.point.size)
  } else{
    p2 <- p2 + geom_boxplot(outlier.size=1)
  }
  ## Compact result table: mean relative abundance per group for each taxon
  clean_res0 <- merge(abund5, res_tax, by = "Tax") %>%
    merge(y = sample, by = "Sample") %>%
    group_by(Sample) %>%
    arrange(padj)
  colnames(clean_res0)[12] <- "group"
  clean_res <- mutate(clean_res0, padj = signif(padj, 2),
                      Log2FC = signif(log2FoldChange, 2),
                      Taxonomy = Tax) %>%
    group_by(group, Taxonomy, padj, Log2FC) %>%
    summarise(Avg = round(mean(Abundance), 3)) %>%
    dcast(Taxonomy+padj+Log2FC~group, value.var = "Avg") %>%
    arrange(padj)
  ## Optional minimalist ("clean") theme for both plots
  if(plot.theme == "clean"){
    p1 <- p1 + theme(axis.ticks.length = unit(1, "mm"),
                     axis.ticks = element_line(color = "black"),
                     text = element_text(size = 10, color = "black"),
                     axis.text = element_text(size = 8, color = "black"),
                     plot.margin = unit(c(0,0,0,0), "mm"),
                     panel.grid.major = element_line(color = "grey95"),
                     panel.grid.minor = element_blank(),
                     legend.key = element_blank(),
                     panel.background = element_blank(),
                     axis.line = element_line(color = "black")
    )
    p2 <- p2 + theme(axis.ticks.length = unit(1, "mm"),
                     axis.ticks = element_line(color = "black"),
                     text = element_text(size = 10, color = "black"),
                     axis.text = element_text(size = 8, color = "black"),
                     plot.margin = unit(c(0,0,0,0), "mm"),
                     panel.grid.major = element_line(color = "grey95"),
                     panel.grid.minor = element_blank(),
                     legend.key = element_blank(),
                     panel.background = element_blank(),
                     axis.line = element_line(color = "black")
    )
  }
  out <- list(results = res, plot_MA = p1, sig_res = res_tax_sig, plot_sig = p2 , sig_res_plot_data = point_df, clean_res = clean_res)
  return(out)
}
| /R/amp_test_species.R | no_license | giriarteS/ampvis | R | false | false | 7,902 | r | #' Tests if there is a significant difference in abundance between selected conditions
#'
#' A nice long description
#'
#' @usage amp_test_species(data, group)
#'
#' @param data (required) A phyloseq object including sample data.
#' @param group (required) The group to test against.
#' @param sig Significance treshold (default: 0.01).
#' @param fold Log2fold filter default for displaying significant results (default: 0)
#' @param tax.aggregate Group data at specific taxonomic level (default: "OTU").
#' @param tax.class Converts a specific phyla to class level instead (e.g. "p__Proteobacteria").
#' @param tax.empty Either "remove" OTUs without taxonomic information at X level, with "best" classification or add the "OTU" name (default: best).
#' @param tax.display Display additional taxonomic levels in the plot output e.g. "Genus".
#' @param label Label the significant entries with tax.display (default:F).
#' @param plot.type Either "boxplot" or "point" (default: point)
#' @param plot.show Display the X most significant results.
#' @param plot.point.size The size of the plotted points.
#' @param plot.theme Chose different standard layouts choose from "normal" or "clean" (default: "normal").
#' @param parallel To run DESeq2 in parallel mode (default: F)
#' @param adjust.zero Keep 0 abundances in ggplot2 median calculations by adding a small constant to these.
#'
#' @return A p-value for each comparison.
#'
#' @export
#' @import ggplot2
#' @import dplyr
#' @import reshape2
#' @import phyloseq
#' @import grid
#' @import data.table
#'
#' @author Mads Albertsen \email{MadsAlbertsen85@@gmail.com}
amp_test_species <- function(data, group, tax.aggregate = "OTU", tax.add = NULL, test = "Wald", fitType = "parametric", sig = 0.01, fold = 0, tax.class = NULL, tax.empty = "best", label = F, plot.type = "point", plot.show = NULL, plot.point.size = 2, plot.theme = "normal", parallel = F , adjust.zero = NULL){
  ## Split the phyloseq object into plain data frames: abundance counts,
  ## taxonomy (with OTU ids attached), and sample metadata.
  ## NOTE(review): the `label` argument is documented but not referenced anywhere
  ## in this body -- confirm whether labelling was ever implemented.
  data <- list(abund = as.data.frame(otu_table(data)@.Data),
               tax = data.frame(tax_table(data)@.Data, OTU = rownames(tax_table(data))),
               sample = suppressWarnings(as.data.frame(as.matrix(sample_data(data)))))
  ## Clean up the taxonomy (empty/ambiguous entries handled per tax.empty)
  data <- amp_rename(data = data, tax.class = tax.class, tax.empty = tax.empty, tax.level = tax.aggregate)
  ## Extract the data into separate objects for readability
  abund <- data[["abund"]]
  tax <- data[["tax"]]
  sample <- data[["sample"]]
  ## Make a name variable ("Display") that can be used instead of tax.aggregate
  ## to show multiple taxonomic levels at once (e.g. "Genus; OTU").
  suppressWarnings(
    if (!is.null(tax.add)){
      if (tax.add != tax.aggregate) {
        tax <- data.frame(tax, Display = apply(tax[,c(tax.add,tax.aggregate)], 1, paste, collapse="; "))
      }
    } else {
      tax <- data.frame(tax, Display = tax[,tax.aggregate])
    }
  )
  ## Aggregate counts to the chosen taxonomic level: melt to long format, then
  ## sum reads for each Display x Sample combination.
  abund3 <- cbind.data.frame(Display = tax[,"Display"], abund) %>%
    melt(id.var = "Display", value.name= "Abundance", variable.name = "Sample")
  abund3 <- data.table(abund3)[, sum:=sum(Abundance), by=list(Display, Sample)] %>%
    setkey(Display, Sample) %>%
    unique() %>%
    as.data.frame()
  ## Convert to a DESeq2-style count matrix (taxa as rows, samples as columns)
  abund4 <- dcast(abund3, formula = Display~Sample, value.var = "sum")
  rownames(abund4) <- abund4$Display
  abund4 <- abund4[,-1]
  groupF <- as.formula(paste("~", group, sep=""))
  data_deseq <- DESeqDataSetFromMatrix(countData = abund4,
                                       colData = sample,
                                       design = groupF)
  #data_deseq = phyloseq_to_deseq2(physeq=data, design=groupF)
  ## Test for significant differential abundance with DESeq2
  data_deseq_test = DESeq(data_deseq, test=test, fitType=fitType, parallel = parallel)
  ## Extract the results; cooksCutoff = FALSE keeps p-values that DESeq2 would
  ## otherwise set to NA for Cook's-distance outliers.
  res = results(data_deseq_test, cooksCutoff = FALSE)
  res_tax = cbind(as.data.frame(res), Tax = rownames(res))
  ## Significant hits: adjusted p below `sig` AND |log2FC| above `fold`
  res_tax_sig = subset(res_tax, padj < sig & fold < abs(log2FoldChange)) %>%
    arrange(padj)
  ## Plot the data
  ### MA plot: mean abundance vs log2 fold change, significant hits in red
  res_tax$Significant <- ifelse(rownames(res_tax) %in% res_tax_sig$Tax , "Yes", "No")
  res_tax$Significant[is.na(res_tax$Significant)] <- "No"
  p1 <- ggplot(data = res_tax, aes(x = baseMean, y = log2FoldChange, color = Significant)) +
    geom_point(size = plot.point.size) +
    scale_x_log10() +
    scale_color_manual(values=c("black", "red")) +
    labs(x = "BaseMean read abundance", y = "Log2 fold change")
  ### Per-sample relative abundance (%) for the significant entries
  abund5 <- mutate(abund4, Tax = rownames(abund4)) %>%
    melt(id.vars=c("Tax"),value.name="Count", variable.name="Sample") %>%
    group_by(Sample) %>%
    mutate(Abundance = Count / sum(Count)*100)
  abund6 <- merge(abund5, res_tax, by = "Tax") %>%
    filter(padj < sig & fold < abs(log2FoldChange)) %>%
    arrange(padj)
  if(!is.null(adjust.zero)){
    ## Replace exact zeros with a small constant so they survive median/log
    ## computations in ggplot2.
    abund6$Abundance[abund6$Abundance==0] <- adjust.zero
  }
  colnames(sample)[1] <- "Sample"
  sample <- sample[c("Sample",group)]
  colnames(sample)[2] <- "Group"
  point_df <- merge(x = abund6, y = sample, by = "Sample") %>%
    group_by(Sample) %>%
    arrange(padj)
  ## NOTE(review): renaming column 12 by position assumes a fixed column layout
  ## after the merge; fragile if the metadata gains or loses columns.
  colnames(point_df)[12] <- group
  clean_temp <- point_df
  if(!is.null(plot.show)){
    point_df <- subset(point_df, Tax %in% as.character(unique(point_df$Tax))[1:plot.show])
    tax_levels <- as.character(unique(point_df$Tax))[1:plot.show]
  } else {
    ## BUGFIX: the factor levels were previously always built with
    ## [1:plot.show], which errors when plot.show is NULL (the default)
    ## because 1:NULL is not a valid sequence. With no limit, keep all taxa.
    tax_levels <- as.character(unique(point_df$Tax))
  }
  point_df$Tax <- factor(point_df$Tax, levels = rev(tax_levels))
  p2 <-ggplot(data = point_df, aes_string(x = "Tax", y = "Abundance", color = group)) +
    labs(x = "", y = "Read Abundance (%)") +
    coord_flip()
  if (plot.type == "point"){
    p2 <- p2 + geom_jitter(position = position_jitter(width = .05), size = plot.point.size)
  } else{
    p2 <- p2 + geom_boxplot(outlier.size=1)
  }
  ## Compact result table: mean relative abundance per group for each taxon
  clean_res0 <- merge(abund5, res_tax, by = "Tax") %>%
    merge(y = sample, by = "Sample") %>%
    group_by(Sample) %>%
    arrange(padj)
  colnames(clean_res0)[12] <- "group"
  clean_res <- mutate(clean_res0, padj = signif(padj, 2),
                      Log2FC = signif(log2FoldChange, 2),
                      Taxonomy = Tax) %>%
    group_by(group, Taxonomy, padj, Log2FC) %>%
    summarise(Avg = round(mean(Abundance), 3)) %>%
    dcast(Taxonomy+padj+Log2FC~group, value.var = "Avg") %>%
    arrange(padj)
  ## Optional minimalist ("clean") theme for both plots
  if(plot.theme == "clean"){
    p1 <- p1 + theme(axis.ticks.length = unit(1, "mm"),
                     axis.ticks = element_line(color = "black"),
                     text = element_text(size = 10, color = "black"),
                     axis.text = element_text(size = 8, color = "black"),
                     plot.margin = unit(c(0,0,0,0), "mm"),
                     panel.grid.major = element_line(color = "grey95"),
                     panel.grid.minor = element_blank(),
                     legend.key = element_blank(),
                     panel.background = element_blank(),
                     axis.line = element_line(color = "black")
    )
    p2 <- p2 + theme(axis.ticks.length = unit(1, "mm"),
                     axis.ticks = element_line(color = "black"),
                     text = element_text(size = 10, color = "black"),
                     axis.text = element_text(size = 8, color = "black"),
                     plot.margin = unit(c(0,0,0,0), "mm"),
                     panel.grid.major = element_line(color = "grey95"),
                     panel.grid.minor = element_blank(),
                     legend.key = element_blank(),
                     panel.background = element_blank(),
                     axis.line = element_line(color = "black")
    )
  }
  out <- list(results = res, plot_MA = p1, sig_res = res_tax_sig, plot_sig = p2 , sig_res_plot_data = point_df, clean_res = clean_res)
  return(out)
}
|
#' Gives values for naked DNA BSWF (SETLOW) as a function of wavelength
#'
#' This function gives a set of numeric multipliers that can be used
#' as a weight to calculate effective doses and irradiances.
#'
#' @param w.length numeric array of w.length (nm)
#'
#' @return a numeric array of the same length as \code{w.length} with values for
#' the BSWF normalized as in the original source. The returned values are
#' based on quantum effectiveness units.
#'
#' @note The digitized data as used in the TUV model covers the wavelength range
#' from 256 nm to 364 nm. For longer wavelengths the value is set to zero, and
#' for shorter wavelengths \code{NA} is returned (no extrapolation is performed).
#'
#'
#' @export
#' @examples
#' DNA_N_q_fun(293:400)
DNA_N_q_fun <-
function(w.length){
# TRUE for wavelengths inside the digitized Setlow/TUV data range.
wl.within <- w.length >= 256 & w.length <= 364
spectral_weights <- numeric(length(w.length))
# Below the data range there is no digitized data: mark as missing.
spectral_weights[w.length < 256] <- NA
if (any(wl.within)) { # avoids error in spline when xout is empty
# Interpolate the tabulated quantum response at the requested wavelengths.
spectral_weights[wl.within] <-
stats::spline(photobiologyWavebands::SetlowTUV.spct$w.length,
photobiologyWavebands::SetlowTUV.spct$s.q.response,
xout = w.length[wl.within])$y
}
# Beyond the upper end of the data range the weight is defined as zero.
spectral_weights[w.length > 364] <- 0.0
return(spectral_weights)
}
| /photobiologyWavebands/R/dna.n.q.fun.r | no_license | ingted/R-Examples | R | false | false | 1,371 | r | #' Gives values for naked DNA BSWF (SETLOW) as a function of wavelength
#'
#' This function gives a set of numeric multipliers that can be used
#' as a weight to calculate effective doses and irradiances.
#'
#' @param w.length numeric array of w.length (nm)
#'
#' @return a numeric array of the same length as \code{w.length} with values for
#' the BSWF normalized as in the original source. The returned values are
#' based on quantum effectiveness units.
#'
#' @note The digitized data as used in the TUV model covers the wavelength range
#' from 256 nm to 364 nm. For longer wavelengths we set the value to zero, and
#' for shorter wavelengths we extrapolate the value for 256 nm.
#'
#'
#' @export
#' @examples
#' DNA_N_q_fun(293:400)
DNA_N_q_fun <-
function(w.length){
wl.within <- w.length >= 256 & w.length <= 364
spectral_weights <- numeric(length(w.length))
spectral_weights[w.length < 256] <- NA # the value at 256 nm
if (any(wl.within)) { # avoids error in spline when xout is empty
spectral_weights[wl.within] <-
stats::spline(photobiologyWavebands::SetlowTUV.spct$w.length,
photobiologyWavebands::SetlowTUV.spct$s.q.response,
xout = w.length[wl.within])$y
}
spectral_weights[w.length > 364] <- 0.0
return(spectral_weights)
}
|
library(qgtools)
### Name: ad.simudata
### Title: An R function to generate an AD model simulated data set
### Aliases: ad.simudata
### Keywords: AD model cotton simulated data cotf2
### ** Examples
library(qgtools)
# Load the example cotton F2 data set shipped with qgtools.
data(cotf2)
# The first five columns hold the design/pedigree information; the remaining
# columns are the observed traits.
Ped <- cotf2[, 1:5]
Y <- cotf2[, -(1:5)]
# Generate 10 simulated data sets under the AD model (see ?ad.simudata for the
# meaning of the v and b parameters).
YS <- ad.simudata(Y, Ped, v = rep(20, 7), b = c(100), SimuNum = 10)
##End
| /data/genthat_extracted_code/qgtools/examples/ad.simudata.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 342 | r | library(qgtools)
### Name: ad.simudata
### Title: An R function to generate an AD model simulated data set
### Aliases: ad.simudata
### Keywords: AD model cotton simuated data cotf2
### ** Examples
library(qgtools)
data(cotf2)
Ped=cotf2[,c(1:5)]
Y=cotf2[,-c(1:5)]
YS=ad.simudata(Y,Ped,v=rep(20,7),b=c(100),SimuNum=10)
##End
|
# Auto-generated regression input (valgrind/fuzz harness) for meteor:::ET0_Makkink.
# The arguments deliberately contain degenerate values -- an empty Rs vector,
# NA/NaN entries, -Inf, and extreme-magnitude doubles -- to exercise edge cases.
testlist <- list(Rs = numeric(0), atmp = c(9.53708019597101e-228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, NaN, NaN, -7.31045680883776e+303 ), temp = -Inf)
# Invoke the (internal, unexported) function with the fuzzed argument list.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615860225-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 351 | r | testlist <- list(Rs = numeric(0), atmp = c(9.53708019597101e-228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, NaN, NaN, -7.31045680883776e+303 ), temp = -Inf)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot_gradient}
\alias{plot_gradient}
\title{Gradient Descent Algorithm - Plotting the Gradient Function}
\usage{
plot_gradient(obj)
}
\arguments{
\item{obj}{Object containing the results of a gradient descent implementation}
}
\description{
\code{plot_gradient} plots the norm of the gradient function for an object containing the results of a gradient descent implementation
}
\examples{
# Generate some data for a simple bivariate example
set.seed(12345)
x <- sample(seq(from = -1, to = 1, by = 0.1), size = 50, replace = TRUE)
y <- 2*x + rnorm(50)
# Components required for gradient descent
X <- as.matrix(x)
y <- as.vector(y)
f <- function(X,y,b) {
(1/2)*norm(y-X\%*\%b,"F")^{2}
}
grad_f <- function(X,y,b) {
t(X)\%*\%(X\%*\%b - y)
}
# Run a simple gradient descent example
simple_ex <- gdescent(f,grad_f,X,y,alpha=0.01)
# Plot the norm of the gradient function
plot_gradient(simple_ex)
}
\author{
Jocelyn T. Chi
}
| /man/plot_gradient.Rd | no_license | cran/gettingtothebottom | R | false | false | 994 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot_gradient}
\alias{plot_gradient}
\title{Gradient Descent Algorithm - Plotting the Gradient Function}
\usage{
plot_gradient(obj)
}
\arguments{
\item{obj}{Object containing the results of a gradient descent implementation}
}
\description{
\code{plot_gradient} Plots the norm of the gradient function of an object containing the results of a gradient descent object implementation
}
\examples{
# Generate some data for a simple bivariate example
set.seed(12345)
x <- sample(seq(from = -1, to = 1, by = 0.1), size = 50, replace = TRUE)
y <- 2*x + rnorm(50)
# Components required for gradient descent
X <- as.matrix(x)
y <- as.vector(y)
f <- function(X,y,b) {
(1/2)*norm(y-X\%*\%b,"F")^{2}
}
grad_f <- function(X,y,b) {
t(X)\%*\%(X\%*\%b - y)
}
# Run a simple gradient descent example
simple_ex <- gdescent(f,grad_f,X,y,alpha=0.01)
# Plot the norm of the gradient function
plot_gradient(simple_ex)
}
\author{
Jocelyn T. Chi
}
|
# Unroot a Newick tree with the ape package (the unrooted tree is written out
# on the following line of this script).
library(ape)
# Read the input tree; the file is expected in the working directory.
testtree <- read.tree("1684_5.txt")
# Remove the root; presumably required by a downstream tool (the file path
# suggests PAML/codeml) -- confirm against the pipeline.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1684_5_unrooted.txt") | /codeml_files/newick_trees_processed/1684_5/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("1684_5.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1684_5_unrooted.txt") |
#' create and save the `project_io` table
# For every NIH project number, compute total cost plus counts of linked
# publications and patents, then save the combined table as package data.
source('data-raw/common.R')
# Total cost (summed over fiscal years) per project, restricted to the NIH
# institutes listed in nih.institutes (defined in common.R).
project_costs <- projects %>%
filter(institute %in% nih.institutes) %>%
select(project.num, fy.cost) %>%
group_by(project.num) %>%
summarize(total.cost = sum(fy.cost, na.rm = TRUE))
# Publications linked to each project.
# NOTE(review): left_join() keeps projects with no matching publication as a
# single all-NA row, so n() reports 1 (not 0) for them -- confirm intended.
pub_output <- project_costs %>%
left_join(publinks) %>%
group_by(project.num) %>%
summarize(n.pubs = n())
# Patents linked to each project (same counting caveat as above).
patent_output <- project_costs %>%
left_join(patents) %>%
group_by(project.num) %>%
summarize(n.patents = n())
# Combine counts with costs; drop empty project numbers and hyphenated
# (sub)project identifiers.
project_io <- pub_output %>%
inner_join(patent_output) %>%
left_join(project_costs) %>%
filter(project.num != '') %>%
filter(!grepl('-', project.num)) %>%
arrange(project.num)
use_data(project_io, compress = 'xz')
| /data-raw/project_io.R | no_license | davebraze/nihexporter | R | false | false | 719 | r | #' create and save the `project_io` table
source('data-raw/common.R')
project_costs <- projects %>%
filter(institute %in% nih.institutes) %>%
select(project.num, fy.cost) %>%
group_by(project.num) %>%
summarize(total.cost = sum(fy.cost, na.rm = TRUE))
pub_output <- project_costs %>%
left_join(publinks) %>%
group_by(project.num) %>%
summarize(n.pubs = n())
patent_output <- project_costs %>%
left_join(patents) %>%
group_by(project.num) %>%
summarize(n.patents = n())
project_io <- pub_output %>%
inner_join(patent_output) %>%
left_join(project_costs) %>%
filter(project.num != '') %>%
filter(!grepl('-', project.num)) %>%
arrange(project.num)
use_data(project_io, compress = 'xz')
|
# slight re-definition of the bicor function
bicor = function(x, y = NULL, robustX = TRUE, robustY = TRUE, use = 'all.obs', maxPOutliers = 1, quick = 0,
                 pearsonFallback = "individual",
                 cosine = FALSE,
                 cosineX = cosine, cosineY = cosine,
                 nThreads = 0, verbose = 0, indent = 0)
{
  # Biweight midcorrelation of the columns of x (and of y, if given), computed
  # in compiled WGCNA C code. Returns a correlation matrix with dimnames taken
  # from the column names of the inputs. With robustX = FALSE and y = NULL the
  # computation falls back to ordinary Pearson correlation via cor().
  Cerrors = c("Memory allocation error")
  nKnownErrors = length(Cerrors);
  # Only complete-data or pairwise-complete NA handling is supported.
  na.method = pmatch(use, c("all.obs", "pairwise.complete.obs"))
  if (is.na(na.method))
    stop(paste("Unrecognized parameter 'use'. Recognized values are \n",
               "'all.obs', 'pairwise.complete.obs'"))
  if (na.method==1)
  {
    if (sum(is.na(x))> 0)
      stop("Missing values present in input variable 'x'. Consider using use = 'pairwise.complete.obs'.");
    if (!is.null(y))
    {
      if (sum(is.na(y)) > 0)
        stop("Missing values present in input variable 'y'. Consider using use = 'pairwise.complete.obs'.");
    }
  }
  fallback = pmatch(pearsonFallback, .pearsonFallbacks)
  # BUGFIX: this validation previously re-tested is.na(na.method), which can
  # never be NA at this point (an invalid 'use' already stopped above), so an
  # unrecognized 'pearsonFallback' slipped through as NA. Test the pmatch
  # result for the fallback itself.
  if (is.na(fallback))
    stop(paste("Unrecognized 'pearsonFallback'. Recognized values are (unique abbreviations of)\n",
               paste(.pearsonFallbacks, collapse = ", ")))
  if (quick < 0) stop("quick must be non-negative.");
  if (nThreads < 0) stop("nThreads must be non-negative.");
  if (is.null(nThreads) || (nThreads==0)) nThreads = .useNThreads();
  x = as.matrix(x);
  if (prod(dim(x))==0) stop("'x' has a zero dimension.");
  storage.mode(x) = "double";
  # Status flags filled in by the C routines.
  nNA = 0L;
  err = 0L;
  warnX = 0L;
  warnY = 0L;
  # Coerce arguments to the storage types the C interface expects.
  quick = as.double(quick);
  maxPOutliers = as.double(maxPOutliers);
  fallback = as.integer(fallback);
  cosineX = as.integer(cosineX);
  robustX = as.integer(robustX);
  nThreads = as.integer(nThreads);
  verbose = as.integer(verbose); indent = as.integer(indent)
  if (is.null(y))
  {
    if (!robustX)
    {
      # Non-robust single-matrix case reduces to Pearson correlation.
      res = cor(x, use = use)
    } else {
      res = .Call("bicor1_call", x,
                  maxPOutliers,
                  quick,
                  fallback,
                  cosineX,
                  nNA, err, warnX,
                  nThreads, verbose, indent,
                  PACKAGE = "WGCNA");
    }
    if (!is.null(colnames(x))) dimnames(res) = list(colnames(x), colnames(x));
    if (warnX > 0)
    {
      # For now have only one warning
      warning(paste("bicor: zero MAD in variable 'x'.", .zeroMADWarnings[fallback]));
    }
  } else {
    y = as.matrix(y);
    storage.mode(y) = "double";
    if (prod(dim(y))==0) stop("'y' has a zero dimension.");
    if (nrow(x)!=nrow(y))
      stop("'x' and 'y' have incompatible dimensions (unequal numbers of rows).");
    cosineY = as.integer(cosineY);
    robustY = as.integer(robustY);
    res = .Call("bicor2_call", x, y,
                robustX, robustY,
                maxPOutliers,
                quick,
                fallback,
                cosineX,
                cosineY,
                nNA, err,
                warnX, warnY,
                nThreads,
                verbose, indent,
                PACKAGE = "WGCNA");
    if (!is.null(dimnames(x)[[2]]) || !is.null(dimnames(y)[[2]]))
      dimnames(res) = list(dimnames(x)[[2]], dimnames(y)[[2]]);
    if (warnX > 0)
      warning(paste("bicor: zero MAD in variable 'x'.", .zeroMADWarnings[fallback]));
    if (warnY > 0)
      warning(paste("bicor: zero MAD in variable 'y'.", .zeroMADWarnings[fallback]));
  }
  # Translate error codes reported by the C code into R errors.
  if (err > 0)
  {
    if (err > nKnownErrors)
    {
      stop(paste("An error occurred in compiled code. Error code is", err));
    } else {
      stop(paste(Cerrors[err], "occurred in compiled code. "));
    }
  }
  if (nNA > 0)
  {
    warning(paste("Missing values generated in calculation of bicor.",
                  "Likely cause: too many missing entries, zero median absolute deviation, or zero variance."));
  }
  res;
}
# Code to call my implementation of correlation
# For less than 100 correlations, use stats::cor since that is usually faster, particularly when no missing
# data are present, likely due to the complicated threading I do in the WGCNA correlations.
cor = function(x, y = NULL, use = "all.obs", method = c("pearson", "kendall", "spearman"),
               quick = 0,
               cosine = FALSE,
               cosineX = cosine, cosineY = cosine,
               drop = FALSE,
               nThreads = 0, verbose = 0, indent = 0)
{
  # Fast Pearson correlation via compiled WGCNA C routines; any other method
  # or NA-handling mode is delegated to stats::cor. Interface is a superset of
  # stats::cor (extra args: quick, cosine*, drop, nThreads, verbose, indent).
  na.method <- pmatch(use, c("all.obs", "complete.obs", "pairwise.complete.obs",
                             "everything", "na.or.complete"), nomatch = 0)
  method <- match.arg(method)
  x = as.matrix(x);
  nx = ncol(x);
  if (!is.null(y))
  {
    y = as.matrix(y);
    ny = ncol(y);
  } else ny = nx;
  # The fast path only handles Pearson with "all.obs" (1) or
  # "pairwise.complete.obs" (3).
  if ((method=="pearson") && ( (na.method==1) || (na.method==3) ))
  {
    Cerrors = c("Memory allocation error")
    nKnownErrors = length(Cerrors);
    na.method = pmatch(use, c("all.obs", "pairwise.complete.obs"))
    if (is.na(na.method))
      stop(paste("Unrecognized parameter 'use'. Recognized values are \n",
                 "'all.obs', 'pairwise.complete.obs'"))
    if (na.method==1)
    {
      if (sum(is.na(x))> 0)
        stop("Missing values present in input variable 'x'. Consider using use = 'pairwise.complete.obs'.");
      if (!is.null(y))
      {
        if (sum(is.na(y)) > 0)
          stop("Missing values present in input variable 'y'. Consider using use = 'pairwise.complete.obs'.");
      }
    }
    if (quick < 0) stop("quick must be non-negative.");
    if (nThreads < 0) stop("nThreads must be non-negative.");
    if (is.null(nThreads) || (nThreads==0)) nThreads = .useNThreads();
    if (prod(dim(x))==0) stop("'x' has a zero dimension.");
    storage.mode(x)= "double";
    # Status flags filled in by the C routines.
    nNA = 0L
    err = 0L
    cosineX = as.integer(cosineX);
    nThreads = as.integer(nThreads);
    verbose = as.integer(verbose);
    indent = as.integer(indent);
    if (is.null(y))
    {
      # BUGFIX: the raw logical 'cosine' used to be passed here while the
      # coerced 'cosineX' was computed and then ignored, so an explicitly
      # supplied cosineX had no effect in the single-matrix case and the C
      # code received an un-coerced logical. Pass the coerced cosineX.
      res = .Call("cor1Fast_call", x,
                  quick, cosineX,
                  nNA, err, nThreads,
                  verbose, indent, PACKAGE = "WGCNA");
      if (!is.null(dimnames(x)[[2]])) dimnames(res) = list(dimnames(x)[[2]], dimnames(x)[[2]] );
    } else {
      y = as.matrix(y);
      storage.mode(y)= "double";
      cosineY = as.integer(cosineY);
      if (prod(dim(y))==0) stop("'y' has a zero dimension.");
      if (nrow(x)!=nrow(y))
        stop("'x' and 'y' have incompatible dimensions (unequal numbers of rows).");
      res = .Call("corFast_call", x, y,
                  quick,
                  cosineX,
                  cosineY,
                  nNA, err,
                  nThreads,
                  verbose, indent,
                  PACKAGE = "WGCNA");
      if (!is.null(dimnames(x)[[2]]) || !is.null(dimnames(y)[[2]]))
        dimnames(res) = list(dimnames(x)[[2]], dimnames(y)[[2]]);
    }
    # Translate error codes reported by the C code into R errors.
    if (err > 0)
    {
      if (err > nKnownErrors)
      {
        stop(paste("An error occurred in compiled code. Error code is", err));
      } else {
        stop(paste(Cerrors[err], "occurred in compiled code. "));
      }
    }
    if (nNA > 0)
    {
      warning(paste("Missing values generated in calculation of cor.",
                    "Likely cause: too many missing entries or zero variance."));
    }
    if (drop) res[, , drop = TRUE] else res;
  } else {
    # Any other method / NA handling: delegate to base R.
    stats::cor(x,y, use, method);
  }
}
# Wrappers for compatibility with older scripts
# Backward-compatibility wrapper kept for older scripts: single-matrix
# correlation with the extended cor() interface defined above.
cor1 = function(x, use = "all.obs", verbose = 0, indent = 0) {
  cor(x, use = use, verbose = verbose, indent = indent)
}
# Backward-compatibility wrapper kept for older scripts: fast Pearson
# correlation via the extended cor() defined above.
corFast = function(x, y = NULL, use = "all.obs",
                   quick = 0, nThreads = 0, verbose = 0, indent = 0)
{
  # BUGFIX: the original forwarded quick, nThreads, verbose and indent
  # positionally, but cor()'s signature places cosine, cosineX, cosineY and
  # drop between quick and nThreads, so nThreads/verbose/indent were silently
  # bound to the cosine flags. Forward by name instead.
  cor(x, y, use, method = "pearson", quick = quick, nThreads = nThreads,
      verbose = verbose, indent = indent)
}
| /R/corFunctions.R | no_license | joshuamwang/WGCNA | R | false | false | 7,815 | r | # slight re-definition of the bicor function
bicor = function(x, y = NULL, robustX = TRUE, robustY = TRUE, use = 'all.obs', maxPOutliers = 1, quick = 0,
pearsonFallback = "individual",
cosine = FALSE,
cosineX = cosine, cosineY = cosine,
nThreads = 0, verbose = 0, indent = 0)
{
Cerrors = c("Memory allocation error")
nKnownErrors = length(Cerrors);
na.method = pmatch(use, c("all.obs", "pairwise.complete.obs"))
if (is.na(na.method))
stop(paste("Unrecognized parameter 'use'. Recognized values are \n",
"'all.obs', 'pairwise.complete.obs'"))
if (na.method==1)
{
if (sum(is.na(x))> 0)
stop("Missing values present in input variable 'x'. Consider using use = 'pairwise.complete.obs'.");
if (!is.null(y))
{
if (sum(is.na(y)) > 0)
stop("Missing values present in input variable 'y'. Consider using use = 'pairwise.complete.obs'.");
}
}
fallback = pmatch(pearsonFallback, .pearsonFallbacks)
if (is.na(na.method))
stop(paste("Unrecognized 'pearsonFallback'. Recognized values are (unique abbreviations of)\n",
paste(.pearsonFallbacks, collapse = ", ")))
if (quick < 0) stop("quick must be non-negative.");
if (nThreads < 0) stop("nThreads must be non-negative.");
if (is.null(nThreads) || (nThreads==0)) nThreads = .useNThreads();
x = as.matrix(x);
if (prod(dim(x))==0) stop("'x' has a zero dimension.");
storage.mode(x) = "double";
nNA = 0L;
err = 0L;
warnX = 0L;
warnY = 0L;
quick = as.double(quick);
maxPOutliers = as.double(maxPOutliers);
fallback = as.integer(fallback);
cosineX = as.integer(cosineX);
robustX = as.integer(robustX);
nThreads = as.integer(nThreads);
verbose = as.integer(verbose); indent = as.integer(indent)
if (is.null(y))
{
if (!robustX)
{
res = cor(x, use = use)
} else {
res = .Call("bicor1_call", x,
maxPOutliers,
quick,
fallback,
cosineX,
nNA, err, warnX,
nThreads, verbose, indent,
PACKAGE = "WGCNA");
}
if (!is.null(colnames(x))) dimnames(res) = list(colnames(x), colnames(x));
if (warnX > 0)
{
# For now have only one warning
warning(paste("bicor: zero MAD in variable 'x'.", .zeroMADWarnings[fallback]));
}
} else {
y = as.matrix(y);
storage.mode(y) = "double";
if (prod(dim(y))==0) stop("'y' has a zero dimension.");
if (nrow(x)!=nrow(y))
stop("'x' and 'y' have incompatible dimensions (unequal numbers of rows).");
cosineY = as.integer(cosineY);
robustY = as.integer(robustY);
res = .Call("bicor2_call", x, y,
robustX, robustY,
maxPOutliers,
quick,
fallback,
cosineX,
cosineY,
nNA, err,
warnX, warnY,
nThreads,
verbose, indent,
PACKAGE = "WGCNA");
if (!is.null(dimnames(x)[[2]]) || !is.null(dimnames(y)[[2]]))
dimnames(res) = list(dimnames(x)[[2]], dimnames(y)[[2]]);
if (warnX > 0)
warning(paste("bicor: zero MAD in variable 'x'.", .zeroMADWarnings[fallback]));
if (warnY > 0)
warning(paste("bicor: zero MAD in variable 'y'.", .zeroMADWarnings[fallback]));
}
if (err > 0)
{
if (err > nKnownErrors)
{
stop(paste("An error occurred in compiled code. Error code is", err));
} else {
stop(paste(Cerrors[err], "occurred in compiled code. "));
}
}
if (nNA > 0)
{
warning(paste("Missing values generated in calculation of bicor.",
"Likely cause: too many missing entries, zero median absolute deviation, or zero variance."));
}
res;
}
# Code to call my implementation of correlation
# For less than 100 correlations, use stats::cor since that is usually faster, particularly when no missing
# data are present, likely due to the complicated threading I do in the WGCNA correlations.
cor = function(x, y = NULL, use = "all.obs", method = c("pearson", "kendall", "spearman"),
quick = 0,
cosine = FALSE,
cosineX = cosine, cosineY = cosine,
drop = FALSE,
nThreads = 0, verbose = 0, indent = 0)
{
na.method <- pmatch(use, c("all.obs", "complete.obs", "pairwise.complete.obs",
"everything", "na.or.complete"), nomatch = 0)
method <- match.arg(method)
x = as.matrix(x);
nx = ncol(x);
if (!is.null(y))
{
y = as.matrix(y);
ny = ncol(y);
} else ny = nx;
if ((method=="pearson") && ( (na.method==1) || (na.method==3) ))
{
Cerrors = c("Memory allocation error")
nKnownErrors = length(Cerrors);
na.method = pmatch(use, c("all.obs", "pairwise.complete.obs"))
if (is.na(na.method))
stop(paste("Unrecognized parameter 'use'. Recognized values are \n",
"'all.obs', 'pairwise.complete.obs'"))
if (na.method==1)
{
if (sum(is.na(x))> 0)
stop("Missing values present in input variable 'x'. Consider using use = 'pairwise.complete.obs'.");
if (!is.null(y))
{
if (sum(is.na(y)) > 0)
stop("Missing values present in input variable 'y'. Consider using use = 'pairwise.complete.obs'.");
}
}
if (quick < 0) stop("quick must be non-negative.");
if (nThreads < 0) stop("nThreads must be non-negative.");
if (is.null(nThreads) || (nThreads==0)) nThreads = .useNThreads();
if (prod(dim(x))==0) stop("'x' has a zero dimension.");
storage.mode(x)= "double";
nNA = 0L
err = 0L
cosineX = as.integer(cosineX);
nThreads = as.integer(nThreads);
verbose = as.integer(verbose);
indent = as.integer(indent);
if (is.null(y))
{
res = .Call("cor1Fast_call", x,
quick, cosine,
nNA, err, nThreads,
verbose, indent, PACKAGE = "WGCNA");
if (!is.null(dimnames(x)[[2]])) dimnames(res) = list(dimnames(x)[[2]], dimnames(x)[[2]] );
} else {
y = as.matrix(y);
storage.mode(y)= "double";
cosineY = as.integer(cosineY);
if (prod(dim(y))==0) stop("'y' has a zero dimension.");
if (nrow(x)!=nrow(y))
stop("'x' and 'y' have incompatible dimensions (unequal numbers of rows).");
res = .Call("corFast_call", x, y,
quick,
cosineX,
cosineY,
nNA, err,
nThreads,
verbose, indent,
PACKAGE = "WGCNA");
if (!is.null(dimnames(x)[[2]]) || !is.null(dimnames(y)[[2]]))
dimnames(res) = list(dimnames(x)[[2]], dimnames(y)[[2]]);
}
if (err > 0)
{
if (err > nKnownErrors)
{
stop(paste("An error occurred in compiled code. Error code is", err));
} else {
stop(paste(Cerrors[err], "occurred in compiled code. "));
}
}
if (nNA > 0)
{
warning(paste("Missing values generated in calculation of cor.",
"Likely cause: too many missing entries or zero variance."));
}
if (drop) res[, , drop = TRUE] else res;
} else {
stats::cor(x,y, use, method);
}
}
# Wrappers for compatibility with older scripts
cor1 = function(x, use = "all.obs", verbose = 0, indent = 0)
{
cor(x, use = use, verbose = verbose, indent = indent);
}
corFast = function(x, y = NULL, use = "all.obs",
quick = 0, nThreads = 0, verbose = 0, indent = 0)
{
cor(x,y, use, method = "pearson", quick, nThreads, verbose, indent)
}
|
library(caret)
# Timestamp tag for this regression-test run.
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
# Name of the caret test scenario exercised by this script.
model <- "rfe_train"
#########################################################################
#' Simulate `n` rows of the SLC14_1 nonlinear regression benchmark:
#' 20 Gaussian predictors (sd = 3) and an outcome built from a fixed
#' nonlinear signal plus Gaussian noise (sd = 3).
SLC14_1 <- function(n = 100) {
  predictors <- matrix(rnorm(n * 20, sd = 3), ncol = 20)
  # Row-wise nonlinear signal; accumulation order matches the original
  # left-to-right expression exactly (bitwise-identical results).
  signal <- function(v) {
    s <- v[1] + sin(v[2]) + log(abs(v[3])) + v[4]^2 + v[5] * v[6]
    s <- s + ifelse(v[7] * v[8] * v[9] < 0, 1, 0)
    s <- s + ifelse(v[10] > 0, 1, 0) + v[11] * ifelse(v[11] > 0, 1, 0)
    s <- s + sqrt(abs(v[12])) + cos(v[13]) + 2 * v[14] + abs(v[15])
    s <- s + ifelse(v[16] < -1, 1, 0) + v[17] * ifelse(v[17] < -1, 1, 0)
    s - 2 * v[18] - v[19] * v[20]
  }
  sim <- as.data.frame(predictors)
  colnames(sim) <- paste0("Var", seq_len(ncol(sim)))
  sim$y <- apply(sim[, 1:20], 1, signal) + rnorm(n, sd = 3)
  sim
}
# Simulate the training and held-out regression data sets.
set.seed(2)
training <- SLC14_1(275)
testing <- SLC14_1(500)
trainX <- training[, -ncol(training)]
trainY <- training$y
# BUG FIX: the test-set predictors/response were previously taken from the
# *training* data (testX <- trainX[, -ncol(training)]; testY <- trainX$y),
# which dropped a predictor column and left testY NULL. Draw them from
# `testing` so the predictions below are evaluated on unseen data.
testX <- testing[, -ncol(testing)]
testY <- testing$y
# Add a factor column to exercise dummy-variable handling in the formula path.
training$fact <- factor(sample(letters[1:3], size = nrow(training), replace = TRUE))
testing$fact <- factor(sample(letters[1:3], size = nrow(testing), replace = TRUE))
#########################################################################
# Recursive feature elimination (RFE) over a knn model: regression case.
# NOTE(review): the seed is reset to 849 before *every* rfe() call so each
# fit consumes an identical RNG stream; do not reorder these statements.
rctrl1 <- rfeControl(method = "cv", number = 3, returnResamp = "all", functions = caretFuncs)
rctrl2 <- rfeControl(method = "LOOCV", functions = caretFuncs)
set.seed(849)
# x/y interface, 3-fold CV for the outer RFE loop.
test_cv_model <- rfe(x = trainX, y = trainY,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl1)
set.seed(849)
# x/y interface, leave-one-out CV for the outer RFE loop.
test_loo_model <- rfe(x = trainX, y = trainY,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl2)
set.seed(849)
# Formula interface (includes the extra `fact` column via dummy variables).
test_cv_model_form <- rfe(y ~ ., data = training,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl1)
set.seed(849)
test_loo_model_form <- rfe(y ~ ., data = training,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl2)
#########################################################################
# Held-out predictions from the x/y-interface fits.
test_cv_pred <- predict(test_cv_model, testX)
test_loo_pred <- predict(test_loo_model, testX)
# Formula-interface predictions are intentionally disabled here.
# test_cv_pred_form <- predict(test_cv_model_form, testing[, colnames(testing) != "y"])
# test_loo_pred_form <- predict(test_loo_model_form, testing[, colnames(testing) != "y"])
#########################################################################
# Classification case: simulated two-class data.
set.seed(2)
training_class <- twoClassSim(50)
testing_class <- twoClassSim(500)
trainX_class <- training_class[, -ncol(training_class)]
trainY_class <- training_class$Class
testX_class <- testing_class[, -ncol(testing_class)]
testY_class <- testing_class$Class
training_class$fact <- factor(sample(letters[1:3], size = nrow(training_class), replace = TRUE))
testing_class$fact <- factor(sample(letters[1:3], size = nrow(testing_class), replace = TRUE))
#########################################################################
# Same four RFE configurations as above, for the classification data.
cctrl1 <- rfeControl(method = "cv", number = 3, returnResamp = "all", functions = caretFuncs)
cctrl2 <- rfeControl(method = "LOOCV", functions = caretFuncs)
set.seed(849)
test_cv_model_class <- rfe(x = trainX_class, y = trainY_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl1)
set.seed(849)
test_loo_model_class <- rfe(x = trainX_class, y = trainY_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl2)
set.seed(849)
test_cv_model_form_class <- rfe(Class ~ ., data = training_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl1)
set.seed(849)
test_loo_model_form_class <- rfe(Class ~ ., data = training_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl2)
#########################################################################
# Held-out class predictions from all four classification fits.
test_cv_pred_class <- predict(test_cv_model_class, testX_class)
test_loo_pred_class <- predict(test_loo_model_class, testX_class)
test_cv_pred_form_class <- predict(test_cv_model_form_class,
testing_class[, colnames(testing_class) != "Class"])
test_loo_pred_form_class <- predict(test_loo_model_form_class,
testing_class[, colnames(testing_class) != "Class"])
#########################################################################
# Persist every object named test_* plus session info for later comparison
# against reference results, then quit without saving the workspace.
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
| /RegressionTests/Code/rfe_train.R | no_license | ilarischeinin/caret | R | false | false | 5,923 | r | library(caret)
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
model <- "rfe_train"
#########################################################################
SLC14_1 <- function(n = 100) {
dat <- matrix(rnorm(n*20, sd = 3), ncol = 20)
foo <- function(x) x[1] + sin(x[2]) + log(abs(x[3])) + x[4]^2 + x[5]*x[6] +
ifelse(x[7]*x[8]*x[9] < 0, 1, 0) +
ifelse(x[10] > 0, 1, 0) + x[11]*ifelse(x[11] > 0, 1, 0) +
sqrt(abs(x[12])) + cos(x[13]) + 2*x[14] + abs(x[15]) +
ifelse(x[16] < -1, 1, 0) + x[17]*ifelse(x[17] < -1, 1, 0) -
2 * x[18] - x[19]*x[20]
dat <- as.data.frame(dat)
colnames(dat) <- paste0("Var", 1:ncol(dat))
dat$y <- apply(dat[, 1:20], 1, foo) + rnorm(n, sd = 3)
dat
}
set.seed(2)
training <- SLC14_1(275)
testing <- SLC14_1(500)
trainX <- training[, -ncol(training)]
trainY <- training$y
testX <- trainX[, -ncol(training)]
testY <- trainX$y
training$fact <- factor(sample(letters[1:3], size = nrow(training), replace = TRUE))
testing$fact <- factor(sample(letters[1:3], size = nrow(testing), replace = TRUE))
#########################################################################
rctrl1 <- rfeControl(method = "cv", number = 3, returnResamp = "all", functions = caretFuncs)
rctrl2 <- rfeControl(method = "LOOCV", functions = caretFuncs)
set.seed(849)
test_cv_model <- rfe(x = trainX, y = trainY,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl1)
set.seed(849)
test_loo_model <- rfe(x = trainX, y = trainY,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl2)
set.seed(849)
test_cv_model_form <- rfe(y ~ ., data = training,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl1)
set.seed(849)
test_loo_model_form <- rfe(y ~ ., data = training,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = rctrl2)
#########################################################################
test_cv_pred <- predict(test_cv_model, testX)
test_loo_pred <- predict(test_loo_model, testX)
# test_cv_pred_form <- predict(test_cv_model_form, testing[, colnames(testing) != "y"])
# test_loo_pred_form <- predict(test_loo_model_form, testing[, colnames(testing) != "y"])
#########################################################################
set.seed(2)
training_class <- twoClassSim(50)
testing_class <- twoClassSim(500)
trainX_class <- training_class[, -ncol(training_class)]
trainY_class <- training_class$Class
testX_class <- testing_class[, -ncol(testing_class)]
testY_class <- testing_class$Class
training_class$fact <- factor(sample(letters[1:3], size = nrow(training_class), replace = TRUE))
testing_class$fact <- factor(sample(letters[1:3], size = nrow(testing_class), replace = TRUE))
#########################################################################
cctrl1 <- rfeControl(method = "cv", number = 3, returnResamp = "all", functions = caretFuncs)
cctrl2 <- rfeControl(method = "LOOCV", functions = caretFuncs)
set.seed(849)
test_cv_model_class <- rfe(x = trainX_class, y = trainY_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl1)
set.seed(849)
test_loo_model_class <- rfe(x = trainX_class, y = trainY_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl2)
set.seed(849)
test_cv_model_form_class <- rfe(Class ~ ., data = training_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl1)
set.seed(849)
test_loo_model_form_class <- rfe(Class ~ ., data = training_class,
sizes = c(1, 5, 10, 15),
method = "knn",
preProc = c("center", "scale"),
trControl = trainControl(method = "cv"),
rfeControl = cctrl2)
#########################################################################
test_cv_pred_class <- predict(test_cv_model_class, testX_class)
test_loo_pred_class <- predict(test_loo_model_class, testX_class)
test_cv_pred_form_class <- predict(test_cv_model_form_class,
testing_class[, colnames(testing_class) != "Class"])
test_loo_pred_form_class <- predict(test_loo_model_form_class,
testing_class[, colnames(testing_class) != "Class"])
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dfm}
\alias{dfm}
\title{A concatenation of 3 mini "bci-stem_YEAR" datasets.}
\format{An object of class \code{data.table} (inherits from \code{data.frame}) with 645 rows and 37 columns.}
\usage{
dfm
}
\description{
\code{dfm} is the concatenation of the 3 mini "bci-stem_YEAR" datasets,
provided ready-made so that users do not have to read and combine the
individual datasets themselves.
}
\examples{
str(dfm)
}
\keyword{datasets}
| /man/dfm.Rd | no_license | ErvanCH/AGBflux_pack | R | false | true | 474 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dfm}
\alias{dfm}
\title{A concatenation of 3 mini "bci-stem_YEAR" datasets.}
\format{An object of class \code{data.table} (inherits from \code{data.frame}) with 645 rows and 37 columns.}
\usage{
dfm
}
\description{
\code{dfm} concatenates the 3 mini "bci-stem_YEAR" datasets. It avoids going
through lines.
}
\examples{
str(dfm)
}
\keyword{datasets}
|
#Assignment: Caching the Inverse of a Matrix
# Matrix inversion is usually a costly computation and there may be some benefit to caching
# the inverse of a matrix rather than computing it repeatedly (there are also alternatives
# to matrix inversion that we will not discuss here). Your assignment is to write a pair of
# functions that cache the inverse of a matrix.
#Write the following functions:
# * makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# * cacheSolve: This function computes the inverse of the special "matrix" returned
# by makeCacheMatrix above. If the inverse has already been calculated
# (and the matrix has not changed), then cacheSolve should retrieve the inverse
# from the cache.
# makeCacheMatrix: return a list of functions to:
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse
# 4. get the value of the inverse
# Construct a "cache-aware matrix": a list of closures sharing the matrix `x`
# and a memo slot for its inverse, so the inverse need only be computed once
# per matrix (the computation itself is done by cacheSolve).
#
# @param x the matrix to wrap (defaults to an empty matrix).
# @return a list with elements set, get, setinverse, getinverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoised inverse; NULL means "not computed yet"
  list(
    # Replace the stored matrix and invalidate any stale cached inverse.
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    # Retrieve the stored matrix.
    get = function() x,
    # Store a freshly computed inverse in the cache.
    setinverse = function(inverse) cached <<- inverse,
    # Retrieve the cached inverse (NULL if not yet computed).
    getinverse = function() cached
  )
}
# cacheSolve: compute the inverse of the matrix. If the inverse is already
# calculated before, it returns the cached inverse.
# Return the inverse of the special "matrix" `x` built by makeCacheMatrix.
# The inverse is computed with solve() at most once; later calls announce
# "getting cached data" and return the memoised result. Extra arguments are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  result <- x$getinverse()
  if (is.null(result)) {
    # Cache miss: invert the wrapped matrix and remember the answer.
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached data")
  }
  result
}
| /cachematrix.R | no_license | jp4711/ProgrammingAssignment2 | R | false | false | 2,127 | r | #Assignment: Caching the Inverse of a Matrix
# Matrix inversion is usually a costly computation and there may be some benefit to caching
# the inverse of a matrix rather than computing it repeatedly (there are also alternatives
# to matrix inversion that we will not discuss here). Your assignment is to write a pair of
# functions that cache the inverse of a matrix.
#Write the following functions:
# * makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# * cacheSolve: This function computes the inverse of the special "matrix" returned
# by makeCacheMatrix above. If the inverse has already been calculated
# (and the matrix has not changed), then cacheSolve should retrieve the inverse
# from the cache.
# makeCacheMatrix: return a list of functions to:
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse
# 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
# inverseM will store the cached inverse matrix
inverseM <- NULL
# setter for the matrix
set <- function(y) {
x <<- y
# clear out current inverse
inverseM <<- NULL
}
# getter for the matrix
get <- function() x
# setter for the inverse
setinverse <- function(inverse) inverseM <<- inverse
# getter for the inverse
getinverse <- function() inverseM
# return the matrix with our defined functions
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
# cacheSolve: compute the inverse of the matrix. If the inverse is already
# calculated before, it returns the cached inverse.
cacheSolve <- function(x, ...) {
## return a matrix that is the inverse of 'x'
inverseM <- x$getinverse()
# If the inverse is already calculated, return it
if (!is.null(inverseM)) {
message("getting cached data")
return(inverseM)
}
# calculate the inverse matrix using solve
data <- x$get()
inverseM <- solve(data, ...)
# and cache it
x$setinverse(inverseM)
inverseM
}
|
## The functions below cache the inverse of a matrix
makecachematrix <- function(x = matrix()){ ## Wrap a matrix with an inverse cache
  ## inv holds the memoised inverse; NULL until cachesolve fills it in.
  inv <- NULL
  list(
    ## Replace the matrix; a new matrix invalidates the cached inverse.
    set = function(y){
      x <<- y
      inv <<- NULL
    },
    ## Return the wrapped matrix.
    get = function() {x},
    ## Record a computed inverse in the cache.
    setinverse = function(inverse) {inv <<- inverse},
    ## Return the cached inverse (NULL if not yet computed).
    getinverse = function(){inv}
  )
}
## Compute the inverse of a special matrix returned by "makeCacheMatrix"
## above. If the inverse is already calculated (and the matrix has not changed)
## Then the "cachesolve" should retrieve the inverse from the cache
cachesolve <- function(x, ...){
  ## Return the inverse of the wrapped matrix, computing it at most once.
  inv <- x$getinverse()
  if(is.null(inv)){
    ## Cache miss: invert via solve() and store for future calls.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    ## Cache hit: announce and reuse the stored inverse.
    message('getting cached data')
  }
  inv
}
| /cachematrix.R | no_license | Rahul-Tclb/ProgrammingAssignment2 | R | false | false | 1,685 | r | ## The functions below cache the inverse of a matrix
makecachematrix <- function(x = matrix()){ ## Cache Matrix function
inv <- NULL ## Initialize the inverse property
set <- function(y){ ## Method to set the matrix
x <<-y
inv <<-NULL
}
get <- function() {x} ## Method the get the matrix
setinverse <- function(inverse) {inv <<- inverse} ## Method to set the inverse of the matrix
getinverse <- function(){inv} ## Method to get the inverse of the matriix
list(set = set , ## Return a list of the methods
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Compute the inverse of a special matrix returned by "makeCacheMatrix"
## above. If the inverse is already calculated (and the matrix has not changed)
## Then the "cachesolve" should retrieve the inverse from the cache
cachesolve <- function(x, ...){
inv <- x$getinverse() ## Return a matrix that is the inverse of 'x'
if(!is.null(inv)){ ## Just return the inverse if its already set
message('getting cached data')
return(inv)
}
mat <- x$get() ## Get the matrix from our object
inv <- solve(mat, ...) ## Calculate the inverse using matrix multiplication
x$setinverse(inv) ## Set the inverse to the object
inv ## Return the matrix
}
|
#### Setup #####
source('R_Pipeline/initialize.R')
load_package('rbig', version = rbig.version)
################ Inputs ################
config_filename = 'em_config_01.yml'
args = commandArgs(trailingOnly = T)
if(!is.empty(args)){
config_filename = args[1]
}
################ Read & Validate config ################
yaml::read_yaml(mc$path_configs %>% paste('modules', 'event_mapper', config_filename, sep = '/')) -> em_config
# emap_config %<>% verify_emap_config
################ Read Data ################
for(step in em_config$steps){
# Read input files:
input_data = list()
for (fn in step$input){
if(!file.exists(fn)){
fnp = mc$path_preprocessing %>% paste(em_config$preprocessing_id, paste(fn, 'rds', sep = '.'), sep = '/')
if(!file.exists(fnp)){stop(sprintf("file %s not found", fnp))}
} else {fnp = fn}
readRDS(fnp) -> input_data[[fn]]
}
if(!file.exists(mc$path_eventmapper)){dir.create(mc$path_eventmapper)}
outpath = paste(mc$path_eventmapper, em_config$output_id, sep = '/')
if(!file.exists(outpath)){dir.create(outpath)}
outpath %<>% paste(step$output %++% '.rds', sep = '/')
input_data[[step$input[[1]]]] %>% operate(step$operation) %>% saveRDS(outpath)
}
| /event_prediction_pipeline/modules/event_mapper.R | no_license | genpack/tutorials | R | false | false | 1,222 | r | #### Setup #####
source('R_Pipeline/initialize.R')
load_package('rbig', version = rbig.version)
################ Inputs ################
config_filename = 'em_config_01.yml'
args = commandArgs(trailingOnly = T)
if(!is.empty(args)){
config_filename = args[1]
}
################ Read & Validate config ################
yaml::read_yaml(mc$path_configs %>% paste('modules', 'event_mapper', config_filename, sep = '/')) -> em_config
# emap_config %<>% verify_emap_config
################ Read Data ################
for(step in em_config$steps){
# Read input files:
input_data = list()
for (fn in step$input){
if(!file.exists(fn)){
fnp = mc$path_preprocessing %>% paste(em_config$preprocessing_id, paste(fn, 'rds', sep = '.'), sep = '/')
if(!file.exists(fnp)){stop(sprintf("file %s not found", fnp))}
} else {fnp = fn}
readRDS(fnp) -> input_data[[fn]]
}
if(!file.exists(mc$path_eventmapper)){dir.create(mc$path_eventmapper)}
outpath = paste(mc$path_eventmapper, em_config$output_id, sep = '/')
if(!file.exists(outpath)){dir.create(outpath)}
outpath %<>% paste(step$output %++% '.rds', sep = '/')
input_data[[step$input[[1]]]] %>% operate(step$operation) %>% saveRDS(outpath)
}
|
# Right-hand side of the homopolymerization reactor model, in the form used by
# deSolve-style integrators: func(t, y, pars, ...) -> list(dy/dt, extras).
#
# Arguments:
#   t             current time (min); the model is autonomous so t is unused.
#   y             named state vector (M, X, Vl, HA, I, S, P, Z, CTA, CCTA,
#                 Na, T, moment states ...) -- names are brought into scope
#                 via with(as.list(c(y, pars)), ...).
#   pars          named parameters (feed rates, initial amounts, flags such
#                 as ISOT/LIN/DCT/ST/RT, physical constants).
#   Kpf..Ktc      temperature-dependent kinetic rate-constant functions.
#   roM, roS, roP density functions of monomer, solvent and polymer (g/L).
#
# Returns list(yp, der): yp = state derivatives; der = named vector of derived
# quantities (radical conc., effective rate constants, Mn, Mw, pH, Kpp, ...).
# Equation tags (A1.x / A3.x) refer to the model's appendix -- left as-is.
func_homo <-function(t,y,pars,Kpf,Kdf,Kfz,Kfm,Kfs,Kfp,Kps,Kpss,KfCTA,KfCCTA,Ktf,Ktd,Ktc,roM,roS,roP){
  with(as.list(c(y,pars)),{
    #browser()
    # Arrhenius decomposition constants for the acid/base monomer forms and an
    # empirical association constant Kc as a function of conversion X.
    Kd1<-function(T){return(1.178855e16*exp(-28925.227/1.987/T))}
    Kd2<-function(T){return(1.3e15*exp(-27756.9/1.987/T))}
    Kc<-function(X){return(3*X^2-3.17*X+2.9)}
    FWin<-FinM+FinI+FinZ+FinS+FinCTA+FinCCTA+FinNa
    Fin<-FinM+FinI+FinZ #mol/min
    #composition of in flow
    if(FinS!=0)Fin<-Fin+FinS
    if(FinCTA!=0)Fin<-Fin+FinCTA
    if(FinCCTA!=0)Fin<-Fin+FinCCTA
    if(FinNa!=0)Fin<-Fin+FinNa
    uM<-0
    uI<-0
    uZ<-0
    uS<-0
    uCTA<-0
    uCCTA<-0
    uNa<-0
    Cpin<-0
    roin<-0
    if(Fin!=0){
      uM<-FinM/Fin #molar fraction
      uI<-FinI/Fin #molar fraction
      uZ<-FinZ/Fin #molar fraction
      if(FinS!=0)uS<-FinS/Fin #molar fraction
      if(FinCTA!=0)uCTA<-FinCTA/Fin #molar fraction
      if(FinCCTA!=0)uCCTA<-FinCCTA/Fin #molar fraction
      if(FinNa!=0)uNa<-FinNa/Fin #molar fraction
      Cpin<-FinM/FWin*MWM/1000*CpM+FinS/FWin*MWS/1000*CpS #cal/mol/k
      roin<-uM*roM(Tin)*1000/MWM #mol/L
      if(uS!=0)roin<-roin+uS*roS(Tin)*1000/MWS #mol/L
    }
    Hin<-H0+Cpin*(Tin-T0) #cal/mol
    #composition of out flow
    Mtm<-M+I+S+Z+P+Na+CTA+CCTA #mol/L
    zM<-M/Mtm #molar fraction
    zI<-I/Mtm #molar fraction
    zS<-S/Mtm #molar fraction
    zZ<-Z/Mtm #molar fraction
    zP<-P/Mtm #molar fraction
    zNa<-Na/Mtm #molar fraction
    zCTA<-CTA/Mtm #molar fraction
    zCCTA<-CCTA/Mtm #molar fraction
    MWm<-(zM+zP)*MWM+zI*MWI+zS*MWS+zZ*MWZ+zNa*MWNa+zCTA*MWCTA+zCCTA*MWCCTA #g/mol
    Fout<-FWout*1000/MWm #mol/min
    #gel effect parameter determination (gel() is defined elsewhere in this file)
    Cgel<-gel(y,pars,roM,roS,roP)
    Ktb<-Ktf(T)
    Ktt<-Ktb
    Ktr<-0
    Kts<-0
    if(DCT)Ktt<-Ktb*Cgel$Ft
    if(ST)Kts<-Ktb*Cgel$Fs
    if(RT){
      Ktrmin<-Kpf(T)*M*Cgel$Ktrmin
      Ktrmax<-Kpf(T)*M*Cgel$Ktrmax
      Ktr<-Ktrmin+(Ktrmax-Ktrmin)*X
    }
    # Combine the active termination resistances in series (harmonic form).
    if((Ktr!=0)&(Kts!=0))Ktb<-1/(1/Ktt+1/Kts+1/Ktr)
    if((Ktr==0)&(Kts!=0))Ktb<-1/(1/Ktt+1/Kts)
    if((Ktr!=0)&(Kts==0))Ktb<-1/(1/Ktt+1/Ktr)
    if((Ktr==0)&(Kts==0))Ktb<-Ktt
    Ktd<-Ktd(T)/Ktf(T)*Ktb
    Ktc<-Ktc(T)/Ktf(T)*Ktb
    Kp<-Kpf(T)
    if(DCP)Kp<-Kp*Cgel$Fp
    Kd<-Kdf(T)
    if(DCI)Kd<-Kd*Cgel$Fd
    Kd1<-Kd1(T)
    if(DCI)Kd1<-Kd1*Cgel$Fd
    Kd2<-Kd2(T)
    if(DCI)Kd2<-Kd2*Cgel$Fd
    Cp<-zM*CpM*MWM/1000+zS*MWS/1000*CpS+zP*CpP*MWM/1000 #cal/mol/k
    roout<-zM*roM(T)*1000/MWM+zP*roP(T)*1000/MWM #mol/L
    if(MWS!=0)roout<-roout+zS*roS(T)*1000/MWS #mol/L
    #(A3.14)
    if(pH0!=0){
      # case of water soluble monomers
      AM<-M-HA
      if(DPH){
        pH<--log10(HA/AM)+pH0+log10(HA0/(M0-HA0))
        if(pH>14)pH<-14
      }else{
        pH<-pH0
      }
      #(A3.8)
      RI<-2*Kd*I+2*Kd1*HA+2*Kd2*AM #mol/L/min
      #(A1.25) Radical Balance under SSA
      R<-0.5*(sqrt((Kfz(T)*Z/Ktb)^2+4*RI/Ktb)-Kfz(T)*Z/Ktb) #mol/L
      #(12.4)
      alfa2<-1/(1+10^(KdisP-pH))
      alfa3<-1/(1+10^(KdisP-pH))
      HP<-P/(1+alfa2)
      PM<-alfa2*P/(1+alfa2)
      HR<-R/(1+alfa3)
      RM<-alfa3*R/(1+alfa3)
      #(A3.22)
      B<-Kc(X)*(HP-Na-P)-1
      C<-Kc(X)*Na*(P-HP)
      Pc<-(-B-sqrt(B^2-4*Kc(X)*C))/(2*Kc(X))
      #(A3.23)
      Rc<-Kc(X)*(Na-Pc)*RM
      #(A3.16)
      Kpp<-Kp*(HA/M+Kratio*HR*AM/(R*M)+Kratio*Rc*AM/(R*M))
    }else{
      # case of oil soluble monomers
      AM<-0
      HA<-M
      pH<-0
      KdisM<-0
      KdisP<-0
      RI<-2*Kd*I #mol/L/min
      R<-0.5*(sqrt((Kfz(T)*Z/Ktb)^2+4*RI/Ktb)-Kfz(T)*Z/Ktb) #mol/L
      alfa2<-1
      alfa3<-1
      HP<-P
      PM<-0
      HR<-R
      RM<-0
      Pc<-0
      Rc<-0
      Kpp<-Kp
    }
    #(A1.29) Enthalpy
    H<-H0+Cp*(T-T0) #cal/mol
    #(A1.32)
    Tau<-Ktd*(Kpp*M*R)/(Kpp*M)^2+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)
    #(A1.33)
    Beta<-Ktc*(Kpp*M*R)/(Kpp*M)^2
    #(A1.42)
    Gam<-1+RI/(Kpp*M*R)+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)
    #(A1.43)
    Lam<-Ktb*R/(Kpp*M)+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)+Kfp(T)*Mu1/(Kpp*M)
    #(A1.44)
    # BUG FIX: the final "(Kfp/Kpp+Kpss/Kpp)*Mu2/M" term was on its own line
    # *starting* with "+", so R parsed it as a separate, discarded expression
    # and Eta silently lost the term. The "+" now ends the previous line so
    # the expression continues as intended.
    Eta<-1+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)+Kps(T)*Mu1/(Kpp*M)+RI/(Kpp*M*R)+
      (Kfp(T)/Kpp+Kpss(T)/Kpp)*Mu2/M
    if(LIN){
      #(A1.30)
      Mn<-MWM/(Tau+Beta/2)
      #(A1.31)
      Mw<-MWM*(2*Tau+3*Beta)/(Tau+Beta)^2
    }else{
      #(A1.45)
      Mn<-MWM*Mu1/Mu0
      if(is.na(Mn))Mn<-MWM
      #(A1.46)
      Mw<-MWM*Mu2/Mu1
      if(is.na(Mw))Mw<-MWM
    }
    # Number/weight average molecular weights cannot fall below one monomer unit.
    if(Mn<MWM)Mn<-MWM
    if(Mw<MWM)Mw<-MWM
    #Derivative output vector
    yp<-rep(0,19)
    #(A1.18) Monomer
    dM<-Kpp*M*R #mol/L/min
    yp[1]<-Fin*uM-dM*Vl-Fout*zM #mol/min
    #molar conversion
    yp[2]<--yp[1]/M0 #1/min
    #(A3.12) undissociated monomer
    if(pH0!=0){
      yp[4]<--Kp*R*HA*Vl #mol/min
    }else{
      yp[4]<-yp[1]
    }
    #(A1.19) Initiator
    yp[5]<-Fin*uI-2*Kd*I*Vl-Fout*zI #mol/min
    #(A1.20) Solvent
    if(S0!=0)yp[6]<-Fin*uS-Kfs(T)*S*R*Vl-Fout*zS #mol/min
    #(A1.21) Polymer
    yp[7]<-dM*Vl-Fout*zP #mol/min
    #(A1.22) Inhibitor
    if(Z0!=0)yp[8]<-Fin*uZ-Kfz(T)*Z*R*Vl-Fout*zZ #mol/min
    #(A1.23) CTA
    if(CTA0!=0)yp[9]<-Fin*uCTA-KfCTA(T)*CTA*R*Vl-Fout*zCTA #mol/min
    #(A1.24) CCTA
    if(CCTA0!=0)yp[10]<-Fin*uCCTA-KfCCTA(T)*CCTA*R*Vl-Fout*zCCTA #mol/min
    # Counter ions balance
    # BUG FIX: this previously assigned to y[11] (the local copy of the state)
    # instead of yp[11], so the Na derivative was always 0 and the state copy
    # was clobbered.
    if(Na0!=0)yp[11]<-Fin*uNa-Fout*zNa #mol/min
    #(A1.27) Energy balance (skipped entirely under isothermal operation)
    if(!ISOT){
      yp[12]<-Fin*Hin+dM*Vl*DHp-UA*(T-Tj)-Fout*H
      yp[12]<-yp[12]/(Vl*roout*Cp) #k/min
    }else{
      yp[12]<-0
    }
    #total volume
    if(Fin!=0)yp[3]<-yp[3]+Fin/roin
    if(Fout!=0)yp[3]<-yp[3]-Fout/roout
    yp[3]<-yp[3]+MWM/1000*(1/roM(T)-1/roP(T))*yp[1]
    yp[3]<-yp[3]+MWM/1000*M*(roP2/roP(T)^2-roM2/roM(T)^2)*yp[12] #L/min
    #(A1.45)
    yp[13]<-dM*Vl/Mn #mol^2/min/g
    #(A1.46)
    yp[14]<-dM*Vl*Mw #g/min
    if(LIN){
      # Linear chains: moment equations are not integrated.
      yp[15]<-0
      yp[16]<-0
      yp[17]<-0
      yp[18]<-0
      yp[19]<-0
    }else{
      #(A1.39) 0th moment
      yp[15]<-(Tau+Beta/2-Kpss(T)*Mu1/(Kpp*M)-Kps(T)*Mu0/(Kpp*M))*Kpp*M*R*Vl-Mu0*Fout*Vl
      #(A1.40) 1st moment
      yp[16]<-(Tau-Ktd*R/(Kpp*M))*Kpp*M*R*Vl-Mu1*Fout*Vl
      #(A1.41) 2nd moment
      yp[17]<-(Gam+2*(1+(Kps(T)*Mu1+Kpss(T)*Mu2)/(Kpp*M))*Eta/Gam+Ktc*R/(Kpp*M)*(Eta/Lam)^2)*Kpp*M*R*Vl-Mu2*Fout*Vl
      #(A1.47)
      yp[18]<-(Kfp(T)*Mu1*R+Kps(T)*R*Mu0-Mu0*BN3*Fout)*Vl
      #(A1.48)
      yp[19]<-(Kpss(T)*Mu2*R-Mu0*BN4*Fout)*Vl
    }
    # Derived diagnostics returned alongside the derivatives.
    der<-c(AM,R,Kp,Ktb,Kd,Mn,Mw,Cp,roin,roout,pH,Kpp)
    attributes(der)<-NULL
    names(der)<-c("AM","R","Kp","Ktb","Kd","Mn","Mw","Cp","roin","roout","pH","Kpp")
    return(list(yp,der))
  })
}
| /R/func_homo.R | no_license | timhockswender/HomoPolymer | R | false | false | 6,703 | r | func_homo <-function(t,y,pars,Kpf,Kdf,Kfz,Kfm,Kfs,Kfp,Kps,Kpss,KfCTA,KfCCTA,Ktf,Ktd,Ktc,roM,roS,roP){
with(as.list(c(y,pars)),{
#browser()
Kd1<-function(T){return(1.178855e16*exp(-28925.227/1.987/T))}
Kd2<-function(T){return(1.3e15*exp(-27756.9/1.987/T))}
Kc<-function(X){return(3*X^2-3.17*X+2.9)}
FWin<-FinM+FinI+FinZ+FinS+FinCTA+FinCCTA+FinNa
Fin<-FinM+FinI+FinZ #mol/min
#composition of in flow
if(FinS!=0)Fin<-Fin+FinS
if(FinCTA!=0)Fin<-Fin+FinCTA
if(FinCCTA!=0)Fin<-Fin+FinCCTA
if(FinNa!=0)Fin<-Fin+FinNa
uM<-0
uI<-0
uZ<-0
uS<-0
uCTA<-0
uCCTA<-0
uNa<-0
Cpin<-0
roin<-0
if(Fin!=0){
uM<-FinM/Fin #molar fraction
uI<-FinI/Fin #molar fraction
uZ<-FinZ/Fin #molar fraction
if(FinS!=0)uS<-FinS/Fin #molar fraction
if(FinCTA!=0)uCTA<-FinCTA/Fin #molar fraction
if(FinCCTA!=0)uCCTA<-FinCCTA/Fin #molar fraction
if(FinNa!=0)uNa<-FinNa/Fin #molar fraction
Cpin<-FinM/FWin*MWM/1000*CpM+FinS/FWin*MWS/1000*CpS #cal/mol/k
roin<-uM*roM(Tin)*1000/MWM #mol/L
if(uS!=0)roin<-roin+uS*roS(Tin)*1000/MWS #mol/L
}
Hin<-H0+Cpin*(Tin-T0) #cal/mol
#composition of out flow
Mtm<-M+I+S+Z+P+Na+CTA+CCTA #mol/L
zM<-M/Mtm #molar fraction
zI<-I/Mtm #molar fraction
zS<-S/Mtm #molar fraction
zZ<-Z/Mtm #molar fraction
zP<-P/Mtm #molar fraction
zNa<-Na/Mtm #molar fraction
zCTA<-CTA/Mtm #molar fraction
zCCTA<-CCTA/Mtm #molar fraction
MWm<-(zM+zP)*MWM+zI*MWI+zS*MWS+zZ*MWZ+zNa*MWNa+zCTA*MWCTA+zCCTA*MWCCTA #g/mol
Fout<-FWout*1000/MWm #mol/min
#gel effect parameter determination
Cgel<-gel(y,pars,roM,roS,roP)
Ktb<-Ktf(T)
Ktt<-Ktb
Ktr<-0
Kts<-0
if(DCT)Ktt<-Ktb*Cgel$Ft
if(ST)Kts<-Ktb*Cgel$Fs
if(RT){
Ktrmin<-Kpf(T)*M*Cgel$Ktrmin
Ktrmax<-Kpf(T)*M*Cgel$Ktrmax
Ktr<-Ktrmin+(Ktrmax-Ktrmin)*X
}
if((Ktr!=0)&(Kts!=0))Ktb<-1/(1/Ktt+1/Kts+1/Ktr)
if((Ktr==0)&(Kts!=0))Ktb<-1/(1/Ktt+1/Kts)
if((Ktr!=0)&(Kts==0))Ktb<-1/(1/Ktt+1/Ktr)
if((Ktr==0)&(Kts==0))Ktb<-Ktt
Ktd<-Ktd(T)/Ktf(T)*Ktb
Ktc<-Ktc(T)/Ktf(T)*Ktb
Kp<-Kpf(T)
if(DCP)Kp<-Kp*Cgel$Fp
Kd<-Kdf(T)
if(DCI)Kd<-Kd*Cgel$Fd
Kd1<-Kd1(T)
if(DCI)Kd1<-Kd1*Cgel$Fd
Kd2<-Kd2(T)
if(DCI)Kd2<-Kd2*Cgel$Fd
Cp<-zM*CpM*MWM/1000+zS*MWS/1000*CpS+zP*CpP*MWM/1000 #cal/mol/k
roout<-zM*roM(T)*1000/MWM+zP*roP(T)*1000/MWM #mol/L
if(MWS!=0)roout<-roout+zS*roS(T)*1000/MWS #mol/L
#(A3.14)
if(pH0!=0){
# case of water soluble monomers
AM<-M-HA
if(DPH){
pH<--log10(HA/AM)+pH0+log10(HA0/(M0-HA0))
if(pH>14)pH<-14
}else{
pH<-pH0
}
#(A3.8)
RI<-2*Kd*I+2*Kd1*HA+2*Kd2*AM #mol/L/min
#(A1.25) Radical Balance under SSA
R<-0.5*(sqrt((Kfz(T)*Z/Ktb)^2+4*RI/Ktb)-Kfz(T)*Z/Ktb) #mol/L
#(12.4)
alfa2<-1/(1+10^(KdisP-pH))
alfa3<-1/(1+10^(KdisP-pH))
HP<-P/(1+alfa2)
PM<-alfa2*P/(1+alfa2)
HR<-R/(1+alfa3)
RM<-alfa3*R/(1+alfa3)
#(A3.22)
B<-Kc(X)*(HP-Na-P)-1
C<-Kc(X)*Na*(P-HP)
Pc<-(-B-sqrt(B^2-4*Kc(X)*C))/(2*Kc(X))
#(A3.23)
Rc<-Kc(X)*(Na-Pc)*RM
#(A3.16)
Kpp<-Kp*(HA/M+Kratio*HR*AM/(R*M)+Kratio*Rc*AM/(R*M))
}else{
# case of oil soluble monomers
AM<-0
HA<-M
pH<-0
KdisM<-0
KdisP<-0
RI<-2*Kd*I #mol/L/min
R<-0.5*(sqrt((Kfz(T)*Z/Ktb)^2+4*RI/Ktb)-Kfz(T)*Z/Ktb) #mol/L
alfa2<-1
alfa3<-1
HP<-P
PM<-0
HR<-R
RM<-0
Pc<-0
Rc<-0
Kpp<-Kp
}
#(A1.29) Enthalpy
H<-H0+Cp*(T-T0) #cal/mol
#(A1.32)
Tau<-Ktd*(Kpp*M*R)/(Kpp*M)^2+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)
#(A1.33)
Beta<-Ktc*(Kpp*M*R)/(Kpp*M)^2
#(A1.42)
Gam<-1+RI/(Kpp*M*R)+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)
#(A1.43)
Lam<-Ktb*R/(Kpp*M)+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)+Kfp(T)*Mu1/(Kpp*M)
#(A1.44)
Eta<-1+Kfm(T)/Kpp+Kfs(T)*S/(Kpp*M)+KfCTA(T)*CTA/(Kpp*M)+KfCCTA(T)*CCTA/(Kpp*M)+Kfz(T)*Z/(Kpp*M)+Kps(T)*Mu1/(Kpp*M)+RI/(Kpp*M*R)
+(Kfp(T)/Kpp+Kpss(T)/Kpp)*Mu2/M
if(LIN){
#(A1.30)
Mn<-MWM/(Tau+Beta/2)
#(A1.31)
Mw<-MWM*(2*Tau+3*Beta)/(Tau+Beta)^2
}else{
#(A1.45)
Mn<-MWM*Mu1/Mu0
if(is.na(Mn))Mn<-MWM
#(A1.46)
Mw<-MWM*Mu2/Mu1
if(is.na(Mw))Mw<-MWM
}
if(Mn<MWM)Mn<-MWM
if(Mw<MWM)Mw<-MWM
#Derivative output vector
yp<-rep(0,19)
#(A1.18) Monomer
dM<-Kpp*M*R #mol/L/min
yp[1]<-Fin*uM-dM*Vl-Fout*zM #mol/min
#molar conversion
yp[2]<--yp[1]/M0 #1/min
#(A3.12) undissociated monomer
if(pH0!=0){
yp[4]<--Kp*R*HA*Vl #mol/min
}else{
yp[4]<-yp[1]
}
#(A1.19) Initiator
yp[5]<-Fin*uI-2*Kd*I*Vl-Fout*zI #mol/min
#(A1.20) Solvent
if(S0!=0)yp[6]<-Fin*uS-Kfs(T)*S*R*Vl-Fout*zS #mol/min
#(A1.21) Polymer
yp[7]<-dM*Vl-Fout*zP #mol/min
#(A1.22) Inhibitor
if(Z0!=0)yp[8]<-Fin*uZ-Kfz(T)*Z*R*Vl-Fout*zZ #mol/min
#(A1.23) CTA
if(CTA0!=0)yp[9]<-Fin*uCTA-KfCTA(T)*CTA*R*Vl-Fout*zCTA #mol/min
#(A1.24) CCTA
if(CCTA0!=0)yp[10]<-Fin*uCCTA-KfCCTA(T)*CCTA*R*Vl-Fout*zCCTA #mol/min
# Counter ions balance
if(Na0!=0)y[11]<-Fin*uNa-Fout*zNa #mol/min
#(A1.27) Energy balance
if(!ISOT){
yp[12]<-Fin*Hin+dM*Vl*DHp-UA*(T-Tj)-Fout*H
yp[12]<-yp[12]/(Vl*roout*Cp) #k/min
}else{
yp[12]<-0
}
#total volume
if(Fin!=0)yp[3]<-yp[3]+Fin/roin
if(Fout!=0)yp[3]<-yp[3]-Fout/roout
yp[3]<-yp[3]+MWM/1000*(1/roM(T)-1/roP(T))*yp[1]
yp[3]<-yp[3]+MWM/1000*M*(roP2/roP(T)^2-roM2/roM(T)^2)*yp[12] #L/min
#(A1.45)
yp[13]<-dM*Vl/Mn #mol^2/min/g
#(A1.46)
yp[14]<-dM*Vl*Mw #g/min
if(LIN){
yp[15]<-0
yp[16]<-0
yp[17]<-0
yp[18]<-0
yp[19]<-0
}else{
#(A1.39) 0th moment
yp[15]<-(Tau+Beta/2-Kpss(T)*Mu1/(Kpp*M)-Kps(T)*Mu0/(Kpp*M))*Kpp*M*R*Vl-Mu0*Fout*Vl
#(A1.40) 1st moment
yp[16]<-(Tau-Ktd*R/(Kpp*M))*Kpp*M*R*Vl-Mu1*Fout*Vl
#(A1.41) 2nd moment
yp[17]<-(Gam+2*(1+(Kps(T)*Mu1+Kpss(T)*Mu2)/(Kpp*M))*Eta/Gam+Ktc*R/(Kpp*M)*(Eta/Lam)^2)*Kpp*M*R*Vl-Mu2*Fout*Vl
#(A1.47)
yp[18]<-(Kfp(T)*Mu1*R+Kps(T)*R*Mu0-Mu0*BN3*Fout)*Vl
#(A1.48)
yp[19]<-(Kpss(T)*Mu2*R-Mu0*BN4*Fout)*Vl
}
der<-c(AM,R,Kp,Ktb,Kd,Mn,Mw,Cp,roin,roout,pH,Kpp)
attributes(der)<-NULL
names(der)<-c("AM","R","Kp","Ktb","Kd","Mn","Mw","Cp","roin","roout","pH","Kpp")
return(list(yp,der))
})
}
|
library(SDMTools)
library(raster)
library(rgdal) # loads sp package
library(rgeos)
library(maptools)
library(abind)
library(pixmap)
library(offsetsim)
# Downsample a raster layer by `agg_factor`, first zero-filling NA cells.
# Layers listed in `shape_data_files` are treated as categorical and
# aggregated with the modal value; everything else is averaged.
bin_raster <- function(current_feature_raster, current_filename, shape_data_files, agg_factor){
  # Replace missing cells with zero so aggregation statistics are defined.
  current_feature_raster[is.na(current_feature_raster)] <- 0
  # Choose the aggregation statistic by data kind (categorical vs continuous).
  agg_stat <- if (current_filename %in% shape_data_files) modal else mean
  aggregate(current_feature_raster, fact = agg_factor, fun = agg_stat)
}
# Convert each .asc grid listed in `asc_data_filenames` (found under
# `data_folder`) into a GeoTIFF written to `output_data_folder`, optionally
# binning (aggregating) each layer first when agg_factor > 1.
#
# output_data_folder: destination directory; created (recursively) if missing.
# data_folder:        directory holding the input .asc files.
# asc_data_filenames: character vector of .asc file names to convert.
# shape_data_files:   names of categorical layers (forwarded to bin_raster).
# agg_factor:         integer aggregation factor; 1 means no binning.
convert_asc_to_raster <- function(output_data_folder, data_folder, asc_data_filenames, shape_data_files, agg_factor){
  # BUG FIX: use dir.exists() for a directory test (file.exists() is also TRUE
  # for a plain file of the same name) and create parents if needed.
  if (!dir.exists(output_data_folder)){
    dir.create(output_data_folder, recursive = TRUE)
  }
  for (file_ind in seq_along(asc_data_filenames)){
    current_filename = asc_data_filenames[file_ind]
    current_feature_raster = raster(paste0(data_folder, current_filename))
    if (agg_factor > 1){
      current_feature_raster <- bin_raster(current_feature_raster, current_filename, shape_data_files, agg_factor)
    }
    # BUG FIX: the previous gsub('.asc', '', ...) used an unanchored regex in
    # which '.' matches any character, so it could strip a spurious 4-char run
    # mid-name; anchor and escape the pattern so only the extension is removed.
    current_filename = paste0(output_data_folder, sub('\\.asc$', '', current_filename), '.tif')
    writeRaster(current_feature_raster, current_filename, overwrite = TRUE)
    # Drop raster's temporary files immediately to bound disk usage.
    removeTmpFiles(h = 0)
    print(file_ind)
  }
}
# ---- Configuration: input/output paths ----
simulation_inputs_folder = paste0(path.expand('~'), '/offset_data/uruguay/simulation_inputs/')
data_folder = '~/offset_data/uruguay/uruguay_data/'
output_data_folder = '~/offset_data/uruguay/uruguay_data/raster_tiff/species_features/'
# NOTE(review): pattern = '.asc' is an unanchored regex ('.' matches any
# character), so files merely containing e.g. "Xasc" would also match;
# '\\.asc$' would be the precise pattern.
asc_data_filenames <- list.files(path = paste0(data_folder, 'uruguay_raw_data/'), pattern = '.asc', all.files = FALSE,
full.names = FALSE, recursive = FALSE, ignore.case = FALSE,
include.dirs = FALSE, no.. = FALSE)
# Empty by default: no layers are treated as categorical (shape-derived).
shape_data_files = vector()
# Per-layer grouping metadata: group_index / group / filename columns.
data_characteristics <- as.list(read.csv(file=paste0(data_folder, 'group_defs.csv'), header=TRUE, sep=","))
names(data_characteristics) = c('group_index', 'group', 'filename')
# Counts of layers per group, as a named list.
group_characteristics = as.list(table(data_characteristics$group))
total_group_names = c(unique(as.vector(data_characteristics$group)), 'misc')
# ---- Run flags ----
feature_type = 'species' # suitable_for_crops_&_forestry,
group_names_to_use = feature_type #c('amphibians', 'birds', 'plants', 'mammals', 'fish', 'ecoregions', 'landscape_units', 'Spp_VU_CC')
convert_asc_layers = TRUE
# NOTE(review): this flag shares its name with the build_site_characteristics()
# function called below; R resolves the call to the function, but the shadowing
# is confusing and worth renaming.
build_site_characteristics = FALSE
overwrite_simulation_inputs = FALSE
save_current_layers = FALSE
agg_factor = 1
if (build_site_characteristics == TRUE){
# Derive parcel/site structure from the LGA parcel raster (offsetsim helpers).
LGA_raster = load_rasters("~/offset_data/uruguay/uruguay_data/uruguay_raw_data/parcelas_uy.asc", features_to_use = 'all')
LGA_array = raster_to_array(LGA_raster)
site_characteristics <- build_site_characteristics(LGA_array)
# NOTE(review): objects_to_save is never initialized before this element
# assignment, so this branch errors at runtime; initialize it with
# objects_to_save <- list() first.
objects_to_save$site_characteristics <- site_characteristics
} else {
# Reuse the previously computed site structure.
site_characteristics = readRDS(paste0(simulation_inputs_folder, 'site_characteristics.rds'))
}
if (feature_type == 'ecosystem_services'){
# Sum all layers belonging to each ecosystem-service group into one raster.
ecoservice_group_types = c('agua', 'amort', 'calidad', 'clima', 'enferm', 'genet') #(drinking water, , climactic regulation, , genetic resources)
features_to_use = which(!is.na(match(data_characteristics$group, feature_type)))
datalist_filenames = data_characteristics$filename[features_to_use]
feature_rasters = load_rasters(paste0(data_folder, 'uruguay_raw_data/', datalist_filenames), 'all')
feature_layers = lapply(seq_along(features_to_use), function(i) raster_to_array(subset(feature_rasters, i)))
for (group_ind in seq_along(ecoservice_group_types)){
# Element-wise sum of every layer whose filename matches the group token.
current_reduced_group = Reduce('+', feature_layers[grep(pattern = ecoservice_group_types[group_ind], x = datalist_filenames)])
current_feature_raster = raster(current_reduced_group)
current_file_name = paste0(simulation_inputs_folder, 'ecosystem_service_feature_', ecoservice_group_types[group_ind], '.tif')
writeRaster(current_feature_raster, current_file_name, overwrite = TRUE)
print(paste(ecoservice_group_types[group_ind], 'done'))
}
} else {
# Default path: convert every listed .asc layer to GeoTIFF.
convert_asc_to_raster(output_data_folder, paste0(data_folder, 'uruguay_raw_data/'), asc_data_filenames, shape_data_files, agg_factor)
}
# Euclidean (L2) distance between two equal-length numeric vectors.
euclidean_distance <- function(p,q){
  squared_diffs <- (p - q)^2
  sqrt(sum(squared_diffs))
}
# NOTE(review): mat1 and mat2 are not defined anywhere in this script, so this
# line errors when the file is run as-is; confirm what they were meant to hold
# (presumably coordinate vectors/matrices) before enabling it.
outer(mat1,mat2, Vectorize(euclidean_distance))
# group_inds_to_use = match(group_names_to_use, names(group_characteristics))
#
# current_group_characteristics = group_characteristics[group_inds_to_use]
#
# data_length = sum(unlist(current_group_characteristics))
# data_list <- vector('list', length(data_length))
# data_list_names = vector()
#
# for (file_ind in seq_along(asc_data_filenames)){
#
# match_ind = which(data_characteristics$filename == asc_data_filenames[file_ind])
#
# if (length(match_ind) > 0){
# current_group = data_characteristics$group[match_ind]
# } else {
# current_group = 'misc'
# }
#
# if (current_group %in% group_names_to_use){
#
# #current_data = readRDS(paste0(rds_data_folder, current_file))
# data_list_names = append(data_list_names, asc_data_filenames[file_ind])
# #data_list[[data_ind]] = current_data
# #data_ind = data_ind + 1
# }
#
# }
# NOTE(review): data_list and data_list_names are only created inside the
# commented-out block above, so this line errors when run as-is.
names(data_list) = data_list_names
# NOTE(review): rds_data_folder is never defined in this script; it must be
# set before the readRDS calls below will work.
dev_weight_layer = readRDS(paste0(rds_data_folder, 'agut_clase1.rds'))
# Total mass of the development-weight layer, used to normalize per parcel.
scale_fac = sum(as.vector(dev_weight_layer))
# Per-parcel development weights: fraction of the layer's total weight that
# falls inside each land parcel.
objects_to_save$dev_weights = lapply(seq_along(site_characteristics$land_parcels),
function(i) sum(dev_weight_layer[site_characteristics$land_parcels[[i]] ])/scale_fac)
offset_weight_layer = readRDS(paste0(rds_data_folder, 'agut_clase3.rds'))
scale_fac = sum(as.vector(offset_weight_layer))
# Same normalization for the offset-weight layer.
objects_to_save$offset_weights = lapply(seq_along(site_characteristics$land_parcels),
function(i) sum(offset_weight_layer[site_characteristics$land_parcels[[i]] ])/scale_fac)
if (overwrite_simulation_inputs == TRUE){
save_simulation_inputs(objects_to_save, paste0(simulation_inputs_folder))
saveRDS(site_characteristics, paste0(simulation_inputs_folder, 'site_characteristics.rds'))
}
if (save_current_layers){
data_dir <- paste0(simulation_inputs_folder, '/agg_factor_', agg_factor, '/', feature_type, '/')
save_simulation_inputs(objects_to_save, data_dir)
}
print('all simulation objects saved')
# data_dir = paste0(simulation_inputs_folder, 'agg_factor_', agg_factor, '/', feature_type, '/')
# object_to_image = objects_to_save$landscape_ecology
# folder_to_output = paste0(simulation_inputs_folder, '/pdf_layers/')
#
# if (write_layers_to_pdf){
# graphics.off()
# # output_pdf_filename = paste0(folder_to_output, 'agg_factor_', agg_factor, '_', feature_type, '.pdf')
# # pdf(output_pdf_filename, width = 8.3, height = 11.7)
# jpeg(paste0(folder_to_output, 'test.jpg'))
# setup_sub_plots(nx = 3, ny = 3, x_space = 0.5, y_space = 0)
# for (data_ind in seq_along(data_list)){
#
# image(data_list[[data_ind]], main = names(data_list)[data_ind], axes = FALSE)
#
# print(paste(data_ind, 'done'))
# }
# graphics.off()
# }
#
| /load_uruguay.R | no_license | isaacpeterson/uruguay | R | false | false | 7,178 | r | library(SDMTools)
library(raster)
library(rgdal) # loads sp package
library(rgeos)
library(maptools)
library(abind)
library(pixmap)
library(offsetsim)
# Downsample a raster layer by `agg_factor`, first zero-filling NA cells.
# Layers listed in `shape_data_files` are treated as categorical and
# aggregated with the modal value; everything else is averaged.
bin_raster <- function(current_feature_raster, current_filename, shape_data_files, agg_factor){
# Replace missing cells with zero so aggregation statistics are defined.
current_feature_raster[is.na(current_feature_raster)] = 0
if (current_filename %in% shape_data_files){
# Categorical layer: keep the most frequent value per aggregated cell.
current_feature_raster = aggregate(current_feature_raster, fact = agg_factor, fun = modal)
} else {
# Continuous layer: average values per aggregated cell.
current_feature_raster = aggregate(current_feature_raster, fact = agg_factor, fun = mean)
}
return(current_feature_raster)
}
# Convert each .asc grid listed in `asc_data_filenames` (found under
# `data_folder`) into a GeoTIFF in `output_data_folder`, optionally binning
# (aggregating) each layer first when agg_factor > 1.
convert_asc_to_raster <- function(output_data_folder, data_folder, asc_data_filenames, shape_data_files, agg_factor){
if (!file.exists(output_data_folder)){
dir.create(output_data_folder)
}
for (file_ind in seq_along(asc_data_filenames)){
current_filename = asc_data_filenames[file_ind]
current_feature_raster = raster(paste0(data_folder, current_filename))
if (agg_factor > 1){
current_feature_raster <- bin_raster(current_feature_raster, current_filename, shape_data_files, agg_factor)
}
# NOTE(review): '.asc' here is an unanchored regex where '.' matches any
# character, so it can strip a spurious 4-character run mid-name;
# sub('\\.asc$', '', ...) would be the precise form.
current_filename = paste0(output_data_folder, gsub('.asc', '', current_filename), '.tif')
writeRaster(current_feature_raster, current_filename, overwrite = TRUE)
# Drop raster's temporary files immediately to bound disk usage.
removeTmpFiles(h = 0)
print(file_ind)
}
}
simulation_inputs_folder = paste0(path.expand('~'), '/offset_data/uruguay/simulation_inputs/')
data_folder = '~/offset_data/uruguay/uruguay_data/'
output_data_folder = '~/offset_data/uruguay/uruguay_data/raster_tiff/species_features/'
asc_data_filenames <- list.files(path = paste0(data_folder, 'uruguay_raw_data/'), pattern = '.asc', all.files = FALSE,
full.names = FALSE, recursive = FALSE, ignore.case = FALSE,
include.dirs = FALSE, no.. = FALSE)
shape_data_files = vector()
data_characteristics <- as.list(read.csv(file=paste0(data_folder, 'group_defs.csv'), header=TRUE, sep=","))
names(data_characteristics) = c('group_index', 'group', 'filename')
group_characteristics = as.list(table(data_characteristics$group))
total_group_names = c(unique(as.vector(data_characteristics$group)), 'misc')
feature_type = 'species' # suitable_for_crops_&_forestry,
group_names_to_use = feature_type #c('amphibians', 'birds', 'plants', 'mammals', 'fish', 'ecoregions', 'landscape_units', 'Spp_VU_CC')
convert_asc_layers = TRUE
build_site_characteristics = FALSE
overwrite_simulation_inputs = FALSE
save_current_layers = FALSE
agg_factor = 1
if (build_site_characteristics == TRUE){
LGA_raster = load_rasters("~/offset_data/uruguay/uruguay_data/uruguay_raw_data/parcelas_uy.asc", features_to_use = 'all')
LGA_array = raster_to_array(LGA_raster)
site_characteristics <- build_site_characteristics(LGA_array)
objects_to_save$site_characteristics <- site_characteristics
} else {
site_characteristics = readRDS(paste0(simulation_inputs_folder, 'site_characteristics.rds'))
}
if (feature_type == 'ecosystem_services'){
ecoservice_group_types = c('agua', 'amort', 'calidad', 'clima', 'enferm', 'genet') #(drinking water, , climactic regulation, , genetic resources)
features_to_use = which(!is.na(match(data_characteristics$group, feature_type)))
datalist_filenames = data_characteristics$filename[features_to_use]
feature_rasters = load_rasters(paste0(data_folder, 'uruguay_raw_data/', datalist_filenames), 'all')
feature_layers = lapply(seq_along(features_to_use), function(i) raster_to_array(subset(feature_rasters, i)))
for (group_ind in seq_along(ecoservice_group_types)){
current_reduced_group = Reduce('+', feature_layers[grep(pattern = ecoservice_group_types[group_ind], x = datalist_filenames)])
current_feature_raster = raster(current_reduced_group)
current_file_name = paste0(simulation_inputs_folder, 'ecosystem_service_feature_', ecoservice_group_types[group_ind], '.tif')
writeRaster(current_feature_raster, current_file_name, overwrite = TRUE)
print(paste(ecoservice_group_types[group_ind], 'done'))
}
} else {
convert_asc_to_raster(output_data_folder, paste0(data_folder, 'uruguay_raw_data/'), asc_data_filenames, shape_data_files, agg_factor)
}
# Euclidean (L2) distance between two equal-length numeric vectors.
euclidean_distance <- function(p,q){
  sqrt(sum((p - q)^2))
}
# NOTE(review): mat1 and mat2 are not defined anywhere in this script, so this
# line errors when the file is run as-is; confirm their intended contents.
outer(mat1,mat2, Vectorize(euclidean_distance))
# group_inds_to_use = match(group_names_to_use, names(group_characteristics))
#
# current_group_characteristics = group_characteristics[group_inds_to_use]
#
# data_length = sum(unlist(current_group_characteristics))
# data_list <- vector('list', length(data_length))
# data_list_names = vector()
#
# for (file_ind in seq_along(asc_data_filenames)){
#
# match_ind = which(data_characteristics$filename == asc_data_filenames[file_ind])
#
# if (length(match_ind) > 0){
# current_group = data_characteristics$group[match_ind]
# } else {
# current_group = 'misc'
# }
#
# if (current_group %in% group_names_to_use){
#
# #current_data = readRDS(paste0(rds_data_folder, current_file))
# data_list_names = append(data_list_names, asc_data_filenames[file_ind])
# #data_list[[data_ind]] = current_data
# #data_ind = data_ind + 1
# }
#
# }
names(data_list) = data_list_names
dev_weight_layer = readRDS(paste0(rds_data_folder, 'agut_clase1.rds'))
scale_fac = sum(as.vector(dev_weight_layer))
objects_to_save$dev_weights = lapply(seq_along(site_characteristics$land_parcels),
function(i) sum(dev_weight_layer[site_characteristics$land_parcels[[i]] ])/scale_fac)
offset_weight_layer = readRDS(paste0(rds_data_folder, 'agut_clase3.rds'))
scale_fac = sum(as.vector(offset_weight_layer))
objects_to_save$offset_weights = lapply(seq_along(site_characteristics$land_parcels),
function(i) sum(offset_weight_layer[site_characteristics$land_parcels[[i]] ])/scale_fac)
if (overwrite_simulation_inputs == TRUE){
save_simulation_inputs(objects_to_save, paste0(simulation_inputs_folder))
saveRDS(site_characteristics, paste0(simulation_inputs_folder, 'site_characteristics.rds'))
}
if (save_current_layers){
data_dir <- paste0(simulation_inputs_folder, '/agg_factor_', agg_factor, '/', feature_type, '/')
save_simulation_inputs(objects_to_save, data_dir)
}
print('all simulation objects saved')
# data_dir = paste0(simulation_inputs_folder, 'agg_factor_', agg_factor, '/', feature_type, '/')
# object_to_image = objects_to_save$landscape_ecology
# folder_to_output = paste0(simulation_inputs_folder, '/pdf_layers/')
#
# if (write_layers_to_pdf){
# graphics.off()
# # output_pdf_filename = paste0(folder_to_output, 'agg_factor_', agg_factor, '_', feature_type, '.pdf')
# # pdf(output_pdf_filename, width = 8.3, height = 11.7)
# jpeg(paste0(folder_to_output, 'test.jpg'))
# setup_sub_plots(nx = 3, ny = 3, x_space = 0.5, y_space = 0)
# for (data_ind in seq_along(data_list)){
#
# image(data_list[[data_ind]], main = names(data_list)[data_ind], axes = FALSE)
#
# print(paste(data_ind, 'done'))
# }
# graphics.off()
# }
#
|
\name{run.shinysky.example}
\alias{run.shinysky.example}
\title{run.shinysky.example}
\usage{
run.shinysky.example(example = c("0_all", "1_buttons&alerts", "2_select2",
"3_typeahead", "4_busyIndicator", "5_hotable", "6_jstree", "7_jscolor"))
}
\description{
Runs the shinysky example Shiny application named by \code{example}; the
first choice, \code{"0_all"}, is used by default.
}
| /man/run.shinysky.example.Rd | permissive | dcurrier/ShinySky | R | false | false | 284 | rd | \name{run.shinysky.example}
\alias{run.shinysky.example}
\title{run.shinysky.example}
\usage{
run.shinysky.example(example = c("0_all", "1_buttons&alerts", "2_select2",
"3_typeahead", "4_busyIndicator", "5_hotable", "6_jstree", "7_jscolor"))
}
\description{
run.shinysky.example
}
|
# Auto-generated fuzzer reproducer: builds an argument list of mostly
# zero-length numeric vectors -- plus a 'temp' vector seeded with NaN and
# subnormal doubles -- and feeds it to the unexported meteor:::E_Penman to
# check that it handles degenerate input without crashing.
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(NaN, 4.56899406001072e-308, 0, 1.96568260790928e-236, 0, 7.29111854287849e-304, NaN, 3.74829336516343e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), u = numeric(0))
# Invoke the target with the fuzzed arguments; str() on the next line prints
# the structure of whatever comes back.
result <- do.call(meteor:::E_Penman,testlist)
str(result) | /meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738748-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 439 | r | testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(NaN, 4.56899406001072e-308, 0, 1.96568260790928e-236, 0, 7.29111854287849e-304, NaN, 3.74829336516343e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/btools_utilities.r
\name{safe.ifelse}
\alias{safe.ifelse}
\title{ifelse that can be used safely with dates}
\usage{
safe.ifelse(cond, yes, no)
}
\arguments{
\item{cond}{Logical expression.}
\item{yes}{Resulting value if cond is TRUE.}
\item{no}{Resulting value if cond is FALSE.}
}
\description{
ifelse that can be used safely with dates
}
\examples{
# snippet: mutate(date=safe.ifelse(freq=='A', as.Date(paste0(olddate, '-01-01')), date))
}
| /man/safe.ifelse.Rd | no_license | donboyd5/btools | R | false | true | 522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/btools_utilities.r
\name{safe.ifelse}
\alias{safe.ifelse}
\title{ifelse that can be used safely with dates}
\usage{
safe.ifelse(cond, yes, no)
}
\arguments{
\item{cond}{Logical expression.}
\item{yes}{Resulting value if cond is TRUE.}
\item{no}{Resulting value if cond is FALSE.}
}
\description{
ifelse that can be used safely with dates
}
\examples{
# snippet: mutate(date=safe.ifelse(freq=='A', as.Date(paste0(olddate, '-01-01')), date))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/components.R
\name{components}
\alias{components}
\title{Extract STR components}
\usage{
components(object)
}
\arguments{
\item{object}{Result of STR decomposition.}
}
\description{
\code{components} extracts components as time series from the result of an STR decomposition.
}
\examples{
\dontrun{
fit <- AutoSTR(log(grocery))
comp <- components(fit)
plot(comp)
}
}
\author{
Alexander Dokumentov
}
\seealso{
\code{\link{STRmodel}}, \code{\link{RSTRmodel}}, \code{\link{STR}}, \code{\link{AutoSTR}}
}
| /man/components.Rd | no_license | Laurae2/stR | R | false | true | 581 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/components.R
\name{components}
\alias{components}
\title{Extract STR components}
\usage{
components(object)
}
\arguments{
\item{object}{Result of STR decomposition.}
}
\description{
\code{components} extracts components as time series from the result of an STR decomposition.
}
\examples{
\dontrun{
fit <- AutoSTR(log(grocery))
comp <- components(fit)
plot(comp)
}
}
\author{
Alexander Dokumentov
}
\seealso{
\code{\link{STRmodel}}, \code{\link{RSTRmodel}}, \code{\link{STR}}, \code{\link{AutoSTR}}
}
|
#' @title Hierarchical cluster analysis
#' @description Hierarchical cluster analysis using several methods such as
#' ward.D", "ward.D2", "single", "complete", "average" (= UPGMA),
#' "mcquitty" (= WPGMA), "median" (= WPGMC) or "centroid" (= UPGMC).
#' @param tabDF is a dataframe or numeric matrix, each row represents a gene,
#' each column represents a sample come from TCGAPrepare.
#' @param method is method to be used for generic cluster such as 'hclust'
#' or 'consensus'
#' @param methodHC is method to be used for Hierarchical cluster.
#' @import stats
#' @importFrom ConsensusClusterPlus ConsensusClusterPlus
#' @export
#' @return object of class hclust if method selected is 'hclust'.
#' If method selected is 'Consensus' returns a list of length maxK
#' (maximum cluster number to evaluate.). Each element is a list containing
#' consensusMatrix (numerical matrix), consensusTree (hclust), consensusClass
#' (consensus class asssignments). ConsensusClusterPlus also produces images.
TCGAanalyze_Clustering <- function(tabDF, method, methodHC = "ward.D2"){
    if (method == "hclust") {
        # Plain agglomerative hierarchical clustering on the row-wise
        # (gene-wise) Euclidean distance matrix.
        ans <- hclust(dist(tabDF), method = methodHC)
    } else if (method == "consensus") {
        # BUG FIX: the previous version also ran hclust() here and discarded
        # the result (sHc was never used); that redundant O(n^2) work has been
        # removed. ConsensusClusterPlus resamples 90% of items 1000 times and
        # writes its diagnostic plots/tables to the "mc_consensus_k7_1000"
        # directory as a side effect.
        ans <- ConsensusClusterPlus(dist(tabDF),
                                    maxK = 7,
                                    pItem = 0.9,
                                    reps = 1000,
                                    title = "mc_consensus_k7_1000",
                                    clusterAlg = "hc",
                                    innerLinkage = "ward.D2",
                                    finalLinkage = "complete",
                                    plot = 'pdf',
                                    writeTable = TRUE)
    } else {
        # Previously an unknown method fell through to an obscure
        # "object 'ans' not found" error; fail with a clear message instead.
        stop("'method' must be either 'hclust' or 'consensus'", call. = FALSE)
    }
    return(ans)
}
#' @title Array Array Intensity correlation (AAIC) and correlation boxplot to define outlier
#' @description TCGAanalyze_Preprocessing perform Array Array Intensity correlation (AAIC).
#' It defines a square symmetric matrix of pearson correlation among samples.
#' According this matrix and boxplot of correlation samples by samples it is possible
#' to find samples with low correlation that can be identified as possible outliers.
#' @param object of gene expression of class RangedSummarizedExperiment from TCGAprepare
#' @param cor.cut is a threshold to filter samples according their spearman correlation in
#' samples by samples. default cor.cut is 0
#' @param filename Filename of the image file
#' @param width Image width
#' @param height Image height
#' @param datatype is a string from RangedSummarizedExperiment assay
#' @importFrom grDevices dev.list
#' @importFrom SummarizedExperiment assays
#' @export
#' @return Plot with array array intensity correlation and boxplot of correlation samples by samples
TCGAanalyze_Preprocessing <- function(object,
                                      cor.cut = 0,
                                      filename = NULL,
                                      width = 1000,
                                      height = 1000,
                                      datatype = names(assays(object))[1]){
    # This is a work around for raw_counts and raw_count: if the requested
    # datatype mentions "raw_count", resolve it to whichever assay name in the
    # object actually matches.
    if(grepl("raw_count",datatype) & any(grepl("raw_count",names(assays(object)))))
        datatype <- names(assays(object))[grepl("raw_count",names(assays(object)))]
    # Fail early, listing the available assays, if the datatype is not found.
    if(!any(grepl(datatype, names(assays(object)))))
        stop(paste0(datatype, " not found in the assay list: ",
                    paste(names(assays(object)),collapse = ", "),
                    "\n Please set the correct datatype argument."))
    # NOTE(review): dev.list()["RStudioGD"] yields NA (not NULL) whenever any
    # device is open but RStudio's is not, so this can close an unrelated
    # device; "RStudioGD" %in% names(dev.list()) would be the safe test.
    if (!(is.null(dev.list()["RStudioGD"]))){dev.off()}
    if(is.null(filename)) filename <- "PreprocessingOutput.png"
    # All diagnostic plots below are drawn into a single PNG file.
    png(filename, width = width, height = height)
    par(oma=c(10,10,10,10))
    ArrayIndex <- as.character(1:length( colData(object)$barcode))
    # Build a minimal phenotype table (one row per sample) from colData.
    pmat_new <- matrix(0, length(ArrayIndex),4)
    colnames(pmat_new) <- c("Disease","platform","SampleID","Study")
    rownames(pmat_new) <- as.character(colData(object)$barcode)
    pmat_new <- as.data.frame(pmat_new)
    pmat_new$Disease <- as.character(colData(object)$definition)
    pmat_new$platform <-"platform"
    pmat_new$SampleID <- as.character(colData(object)$barcode)
    pmat_new$Study <- "study"
    # Assign one rainbow color per disease/definition group.
    tabGroupCol <-cbind(pmat_new, Color = matrix(0,nrow(pmat_new),1))
    # NOTE(review): tabGroupCol$Disease[i] indexes the i-th SAMPLE while i
    # ranges over the unique diseases; unique(tabGroupCol$Disease)[i] appears
    # to be the intent. With unordered samples, colors may be misassigned.
    for(i in seq_along(unique(tabGroupCol$Disease))){
    tabGroupCol[which(tabGroupCol$Disease == tabGroupCol$Disease[i]),"Color"] <- rainbow(length(unique(tabGroupCol$Disease)))[i]
    }
    # pmat <- as.matrix(pData(phenoData(object)))
    pmat <- pmat_new
    # Order samples by up to the first 3 phenotype columns for the heatmap.
    # NOTE(review): `order` shadows base::order within this scope.
    phenodepth <- min(ncol(pmat), 3)
    order <- switch(phenodepth + 1, ArrayIndex, order(pmat[, 1]),
                    order(pmat[, 1], pmat[, 2]), order(pmat[, 1],
                                                       pmat[, 2], pmat[, 3]))
    # Normalized axis positions in [0, 1] for each sample label.
    arraypos <- (1:length(ArrayIndex)) * (1/(length(ArrayIndex) - 1)) - (1/(length(ArrayIndex) - 1))
    # NOTE(review): arraypos2 is filled below but never used afterwards.
    arraypos2 = seq(1:length(ArrayIndex) - 1)
    for (i in 2:length(ArrayIndex)) { arraypos2[i - 1] <- (arraypos[i] + arraypos[i - 1])/2 }
    # 4x4 layout: large correlation heatmap, color-scale strip, boxplot panel.
    layout(matrix(c(1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 3, 3, 3, 4), 4, 4, byrow = TRUE))
    # Sample-by-sample Spearman correlation of the chosen assay.
    # NOTE(review): `c` shadows base::c within this scope.
    c <- cor(assay(object,datatype)[, order], method = "spearman")
    image(c, xaxt = "n", yaxt = "n",
          #xlab = "Array Samples",
          #ylab = "Array Samples",
          main = "Array-Array Intensity Correlation after RMA")
    # Draw sample labels on the y axis, colored by disease group.
    for (i in 1:length(names(table(tabGroupCol$Color)) )){
        currentCol <- names(table(tabGroupCol$Color))[i]
        pos.col <- arraypos[which(tabGroupCol$Color == currentCol)]
        lab.col <- colnames(c)[which(tabGroupCol$Color == currentCol)]
        #axis(1, labels = lab.col , at = pos.col, col = currentCol,lwd = 6,las = 2)
        axis(2, labels = lab.col , at = pos.col, col = currentCol,lwd = 6,las = 2)
    }
    # Color-scale legend for the correlation heatmap.
    m <- matrix(pretty(c, 10), nrow = 1, ncol = length(pretty(c, 10)))
    image(m, xaxt = "n", yaxt = "n", ylab = "Correlation Coefficient")
    axis(2,
         labels = as.list(pretty(c, 10)),
         at = seq(0, 1, by = (1/(length(pretty(c, 10)) - 1))))
    abline(h = seq((1/(length(pretty(c, 10)) - 1))/2,
                   1 - (1/(length(pretty(c, 10)) - 1)),
                   by = (1/(length(pretty(c, 10)) - 1))))
    box()
    # Per-sample distribution of correlations, colored by disease group.
    boxplot(c,
            outline = FALSE,
            las =2,
            lwd = 6,
            # names = NULL,
            col = tabGroupCol$Color,
            main ="Boxplot of correlation samples by samples after normalization")
    dev.off()
    # Keep only samples whose mean correlation with all others exceeds cor.cut;
    # the rest are treated as outliers and dropped.
    samplesCor <- rowMeans(c)
    objectWO <- assay(object,datatype)[, samplesCor > cor.cut]
    colnames(objectWO) <- colnames(object)[samplesCor > cor.cut]
    return(objectWO)
}
#' @title survival analysis (SA) univariate with Kaplan-Meier (KM) method.
#' @description TCGAanalyze_SurvivalKM perform an univariate Kaplan-Meier (KM) survival analysis (SA).
#' It performed Kaplan-Meier survival univariate using complete follow up with all days
#' taking one gene a time from Genelist of gene symbols.
#' For each gene according its level of mean expression in cancer samples,
#' defining two thresholds for quantile
#' expression of that gene in all samples (default ThreshTop=0.67,ThreshDown=0.33) it is possible
#' to define a threshold of intensity of gene expression to divide the samples in 3 groups
#' (High, intermediate, low).
#' TCGAanalyze_SurvivalKM performs SA between High and low groups using following functions
#' from survival package
#' \enumerate{
#' \item survival::Surv
#' \item survival::survdiff
#' \item survival::survfit
#' }
#' @param clinical_patient is a data.frame using function 'clinic' with information
#' related to barcode / samples such as bcr_patient_barcode, days_to_death ,
#' days_to_last_follow_up , vital_status, etc
#' @param dataGE is a matrix of Gene expression (genes in rows, samples in cols) from TCGAprepare
#' @param Genelist is a list of gene symbols where perform survival KM.
#' @param Survresult is a parameter (default = FALSE) if is TRUE will show KM plot and results.
#' @param ThreshTop is a quantile threshold to identify samples with high expression of a gene
#' @param ThreshDown is a quantile threshold to identify samples with low expression of a gene
#' @param p.cut p.values threshold. Default: 0.05
#' @param group1 a string containing the barcode list of the samples in in control group
#' @param group2 a string containing the barcode list of the samples in in disease group
#' @importFrom survival Surv survdiff survfit
#' @export
#' @return table with survival genes pvalues from KM.
#' @examples
#' clinical_patient_Cancer <- GDCquery_clinic("TCGA-BRCA","clinical")
#' # Selecting only 20 genes for example
#' dataBRCAcomplete <- log2(dataBRCA[1:20,] + 1)
#' group1 <- TCGAquery_SampleTypes(colnames(dataBRCAcomplete), typesample = c("NT"))
#' group2 <- TCGAquery_SampleTypes(colnames(dataBRCAcomplete), typesample = c("TP"))
#'
#' tabSurvKM <- TCGAanalyze_SurvivalKM(clinical_patient_Cancer,
#' dataBRCAcomplete,
#' Genelist = rownames(dataBRCAcomplete),
#' Survresult = FALSE,
#' p.cut = 0.4,
#' ThreshTop = 0.67,
#' ThreshDown = 0.33,
#' group1 = group1, # Control group
#' group2 = group2) # Disease group
#'
#' # If the groups are not specified group1 == group2 and all samples are used
#' tabSurvKM <- TCGAanalyze_SurvivalKM(clinical_patient_Cancer,
#' dataBRCAcomplete,
#' Genelist = rownames(dataBRCAcomplete),
#' Survresult = TRUE,
#' p.cut = 0.2,
#' ThreshTop = 0.67,
#' ThreshDown = 0.33)
TCGAanalyze_SurvivalKM <- function(clinical_patient,
dataGE,
Genelist,
Survresult = FALSE,
ThreshTop = 0.67,
ThreshDown = 0.33,
p.cut = 0.05,
group1,
group2){
# Per-gene Kaplan-Meier survival analysis: for each gene in Genelist, the
# group2 samples are split into a "high" group (expression above the
# ThreshTop quantile) and a "low" group (at or below the ThreshDown
# quantile), and the two survival curves are compared with a log-rank test
# (survdiff). Genes with p-value < p.cut are returned.
# NOTE(review): per the examples above, omitting group1/group2 is supposed
# to use all samples for both groups (missing args as empty matrix
# indices) -- confirm before relying on it.
# Check which genes we really have in the matrix
Genelist <- intersect(rownames(dataGE),Genelist)
# Split gene expression matrix btw the groups
# group2 expression drives the KM grouping; group1 is only used for the
# "Mean Normal" summary column.
dataCancer <- dataGE[Genelist,group2, drop = FALSE]
dataNormal <- dataGE[Genelist,group1, drop = FALSE]
# Truncate barcodes to the 12-character patient id to match clinical rows
colnames(dataCancer) <- substr(colnames(dataCancer),1,12)
cfu <- clinical_patient[clinical_patient[,"bcr_patient_barcode"] %in% substr(colnames(dataCancer),1,12),]
# Harmonize the follow-up column name used by older clinical data versions
if("days_to_last_followup" %in% colnames(cfu)) colnames(cfu)[grep("days_to_last_followup",colnames(cfu))] <- "days_to_last_follow_up"
cfu <- as.data.frame(subset(cfu, select=c("bcr_patient_barcode","days_to_death","days_to_last_follow_up","vital_status")) )
# Set alive death to inf
if(length(grep("alive",cfu$vital_status,ignore.case = TRUE)) > 0) cfu[grep("alive",cfu$vital_status,ignore.case = TRUE),"days_to_death"]<-"-Inf"
# Set dead follow up to inf
if(length(grep("dead",cfu$vital_status,ignore.case = TRUE)) > 0) cfu[grep("dead",cfu$vital_status,ignore.case = TRUE),"days_to_last_follow_up"]<-"-Inf"
# Drop patients with missing survival times
cfu <- cfu[ !(is.na(cfu[,"days_to_last_follow_up"])),]
cfu <- cfu[ !(is.na(cfu[,"days_to_death"])),]
followUpLevel <- FALSE
#FC_FDR_table_mRNA
# Result container: one row per gene, filled in inside the loop below
tabSurv_Matrix<-matrix(0,nrow(as.matrix(rownames(dataNormal))),8)
colnames(tabSurv_Matrix)<-c("mRNA",
"pvalue",
"Cancer Deaths",
"Cancer Deaths with Top",
"Cancer Deaths with Down",
"Mean Tumor Top",
"Mean Tumor Down",
"Mean Normal")
tabSurv_Matrix<-as.data.frame(tabSurv_Matrix)
# Survival columns arrive as character; the "-Inf" sentinels set above
# become numeric -Inf here
cfu$days_to_death<-as.numeric(as.character(cfu$days_to_death))
cfu$days_to_last_follow_up<-as.numeric(as.character(cfu$days_to_last_follow_up))
rownames(cfu) <- cfu[, "bcr_patient_barcode" ] #mod1
cfu <- cfu[ !(is.na(cfu[,"days_to_last_follow_up"])),]
cfu <- cfu[ !(is.na(cfu[,"days_to_death"])),]
# Keep an untouched copy: `cfu` itself is overwritten per gene in the loop
cfu_complete<-cfu
ngenes<-nrow(as.matrix(rownames(dataNormal)))
# Evaluate each gene
for(i in 1:nrow(as.matrix(rownames(dataNormal)))) {
# Progress countdown of genes left to process
cat(paste0( (ngenes-i),"."))
mRNAselected <- as.matrix(rownames(dataNormal))[i]
mRNAselected_values <- dataCancer[rownames(dataCancer) == mRNAselected,]
mRNAselected_values_normal <- dataNormal[rownames(dataNormal) == mRNAselected,]
if(all(mRNAselected_values == 0)) next # All genes are 0
tabSurv_Matrix[i,"mRNA"] <- mRNAselected
# Get Thresh values for cancer expression
mRNAselected_values_ordered <- sort(mRNAselected_values,decreasing=TRUE)
mRNAselected_values_ordered_top <- as.numeric(quantile(as.numeric(mRNAselected_values_ordered),ThreshTop)[1])
mRNAselected_values_ordered_down <- as.numeric(quantile(as.numeric(mRNAselected_values_ordered),ThreshDown)[1])
mRNAselected_values_newvector <- mRNAselected_values
# quantile() can yield NA on degenerate input; skip the gene in that case
if (!is.na(mRNAselected_values_ordered_top)){
# How many samples do we have
numberOfSamples <- length(mRNAselected_values_ordered)
# High group (above ThreshTop)
lastelementTOP <- max(which(mRNAselected_values_ordered>mRNAselected_values_ordered_top))
# Low group (below ThreshDown)
firstelementDOWN <- min(which(mRNAselected_values_ordered<=mRNAselected_values_ordered_down))
# Values are sorted decreasingly: the head is the high group and the
# tail is the low group
samples_top_mRNA_selected <- names(mRNAselected_values_ordered[1:lastelementTOP])
samples_down_mRNA_selected <- names(mRNAselected_values_ordered[firstelementDOWN:numberOfSamples])
# Which samples are in the intermediate group (above ThreshLow and below ThreshTop)
samples_UNCHANGED_mRNA_selected <- names(mRNAselected_values_newvector[which((mRNAselected_values_newvector) > mRNAselected_values_ordered_down &
mRNAselected_values_newvector < mRNAselected_values_ordered_top )])
# Clinical rows for each expression group
cfu_onlyTOP<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_top_mRNA_selected,]
cfu_onlyDOWN<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_down_mRNA_selected,]
cfu_onlyUNCHANGED<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_UNCHANGED_mRNA_selected,]
cfu_ordered <- NULL
# Stack high-group rows first, then low-group rows; the group labels
# built for survdiff() below rely on this exact ordering
cfu_ordered <- rbind(cfu_onlyTOP,cfu_onlyDOWN)
cfu <- cfu_ordered
# days_to_death > 0 marks a death event (alive patients were set to -Inf)
ttime <- as.numeric(cfu[, "days_to_death"])
sum(status <- ttime > 0) # deaths
deads_complete <- sum(status <- ttime > 0)
ttime_only_top <- cfu_onlyTOP[, "days_to_death"]
deads_top<- sum(ttime_only_top > 0)
if(dim(cfu_onlyDOWN)[1] >= 1) {
ttime_only_down <- cfu_onlyDOWN[, "days_to_death"]
deads_down<- sum(ttime_only_down > 0)
} else {
deads_down <- 0
}
tabSurv_Matrix[i,"Cancer Deaths"] <- deads_complete
tabSurv_Matrix[i,"Cancer Deaths with Top"] <- deads_top
tabSurv_Matrix[i,"Cancer Deaths with Down"] <- deads_down
tabSurv_Matrix[i,"Mean Normal"] <- mean(as.numeric(mRNAselected_values_normal))
# Mean expression of the selected gene within each group
dataCancer_onlyTop_sample <- dataCancer[,samples_top_mRNA_selected,drop = FALSE]
dataCancer_onlyTop_sample_mRNASelected <- dataCancer_onlyTop_sample[rownames(dataCancer_onlyTop_sample) == mRNAselected,]
dataCancer_onlyDown_sample <- dataCancer[,samples_down_mRNA_selected,drop = FALSE]
dataCancer_onlyDown_sample_mRNASelected <- dataCancer_onlyDown_sample[rownames(dataCancer_onlyDown_sample) == mRNAselected,]
tabSurv_Matrix[i,"Mean Tumor Top"] <- mean(as.numeric(dataCancer_onlyTop_sample_mRNASelected))
tabSurv_Matrix[i,"Mean Tumor Down"] <- mean(as.numeric(dataCancer_onlyDown_sample_mRNASelected))
# Censored patients (no death event) use their follow-up time instead
ttime[!status] <- as.numeric(cfu[!status, "days_to_last_follow_up"])
ttime[which(ttime== -Inf)] <- 0
# Build the survival object (time + event status) for this gene
ttime <- Surv(ttime, status)
rownames(ttime) <- rownames(cfu)
legendHigh <- paste(mRNAselected,"High")
legendLow <- paste(mRNAselected,"Low")
# Log-rank test between the high and low groups; on failure the p-value
# is set to Inf so the gene is dropped by the p.cut filter below
tabSurv_pvalue <- tryCatch({
tabSurv <- survdiff(ttime ~ c(rep("top", nrow(cfu_onlyTOP)), rep("down", nrow(cfu_onlyDOWN)) ))
tabSurv_chis<-unlist(tabSurv)$chisq
tabSurv_pvalue <- as.numeric(1 - pchisq(abs(tabSurv$chisq), df = 1))
}, error = function(e){
return(Inf)
})
tabSurv_Matrix[i,"pvalue"] <- tabSurv_pvalue
if (Survresult ==TRUE) {
# Optional Kaplan-Meier plot for this gene
titlePlot<- paste("Kaplan-Meier Survival analysis, pvalue=",tabSurv_pvalue )
plot(survfit(ttime ~ c(rep("low", nrow(cfu_onlyTOP)), rep("high", nrow(cfu_onlyDOWN)))), col = c("green", "red"),main= titlePlot,xlab="Days",ylab="Survival")
legend(100, 1, legend = c(legendLow,legendHigh), col = c("green", "red"), text.col = c("green", "red"), pch = 15)
print(tabSurv)
}
} #end if
} #end for
# Post-processing: clear -Inf sentinels, filter by p.cut, de-duplicate,
# sort by p-value
tabSurv_Matrix[tabSurv_Matrix=="-Inf"]<-0
tabSurvKM <- tabSurv_Matrix
# Filtering by selected pvalue < 0.01
tabSurvKM <- tabSurvKM[tabSurvKM$mRNA != 0,]
tabSurvKM <- tabSurvKM[tabSurvKM$pvalue < p.cut,]
tabSurvKM <- tabSurvKM[!duplicated(tabSurvKM$mRNA),]
rownames(tabSurvKM) <-tabSurvKM$mRNA
tabSurvKM <- tabSurvKM[,-1]
tabSurvKM <- tabSurvKM[order(tabSurvKM$pvalue, decreasing=FALSE),]
# Rename columns to the group1/group2 nomenclature used in the docs
colnames(tabSurvKM) <- gsub("Cancer","Group2",colnames(tabSurvKM))
colnames(tabSurvKM) <- gsub("Tumor","Group2",colnames(tabSurvKM))
colnames(tabSurvKM) <- gsub("Normal","Group1",colnames(tabSurvKM))
return(tabSurvKM)
}
#' @title Filtering mRNA transcripts and miRNA selecting a threshold.
#' @description
#' TCGAanalyze_Filtering allows user to filter mRNA transcripts and miRNA,
#' selecting a threshold. For instance, it returns all mRNA or miRNA with mean across all
#' samples higher than the threshold defined quantile mean across all samples.
#' @param tabDF is a dataframe or numeric matrix, each row represents a gene,
#' each column represents a sample come from TCGAPrepare
#' @param method is method of filtering such as 'quantile', 'varFilter', 'filter1', 'filter2'
#' @param qnt.cut is threshold selected as mean for filtering
#' @param var.func is function used as the per-feature filtering statistic.
#' See genefilter documentation
#' @param var.cutoff is a numeric value. See genefilter documentation
#' @param eta is a parameter for filter1. default eta = 0.05.
#' @param foldChange is a parameter for filter2. default foldChange = 1.
#' @importFrom genefilter varFilter
#' @export
#' @return A filtered dataframe or numeric matrix where each row represents a gene,
#' each column represents a sample
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataNorm <- TCGAanalyze_Normalization(tabDF = dataBRCA,
#' geneInfo = geneInfo,
#' method = "geneLength")
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataNorm, method = "quantile", qnt.cut = 0.25)
TCGAanalyze_Filtering <- function(tabDF,
                                  method,
                                  qnt.cut = 0.25,
                                  var.func = IQR,
                                  var.cutoff = 0.75,
                                  eta = 0.05,
                                  foldChange = 1) {
  # Filter mRNA/miRNA features from an expression matrix (genes in rows,
  # samples in columns) according to the selected method:
  #   "quantile"  - keep genes whose mean expression across samples exceeds
  #                 the qnt.cut quantile of all row means
  #   "varFilter" - delegate to genefilter::varFilter using var.func and
  #                 var.cutoff (see genefilter documentation)
  #   "filter1"   - log2-transform, then keep genes whose inter-quantile
  #                 spread (1-eta vs eta quantile) exceeds foldChange;
  #                 NOTE: returns the transposed (samples x genes) matrix
  #   "filter2"   - keep genes whose (1-eta, eta) quantile interval
  #                 straddles the value 10; returns samples x genes
  # Returns the filtered matrix/data.frame; errors on an unknown method.
  if (method == "quantile") {
    GeneThresh <- as.numeric(quantile(rowMeans(tabDF), qnt.cut))
    geneFiltered <- names(which(rowMeans(tabDF) > GeneThresh))
    tabDF_Filt <- tabDF[geneFiltered, ]
  } else if (method == "varFilter") {
    # Bug fix: this branch previously hard-coded var.func = IQR and
    # var.cutoff = 0.75, silently ignoring the user-supplied arguments.
    tabDF_Filt <- genefilter::varFilter(tabDF,
                                        var.func = var.func,
                                        var.cutoff = var.cutoff,
                                        filterByQuantile = TRUE)
  } else if (method == "filter1") {
    normCounts <- tabDF
    geData <- t(log(1 + normCounts, 2))
    # Spread between the upper and lower eta-quantiles of each gene
    filter <- apply(geData, 2, function(x) sum(quantile(x, probs = c(1 - eta, eta)) * c(1, -1)))
    tabDF_Filt <- geData[, which(filter > foldChange)]
  } else if (method == "filter2") {
    geData <- tabDF
    # Keep genes whose eta-quantile interval contains the value 10
    filter <- apply(geData, 2, function(x) prod(quantile(x, probs = c(1 - eta, eta)) - 10) < 0)
    tabDF_Filt <- geData[, which(filter)]
  } else {
    # Previously an unknown method failed with an obscure
    # "object 'tabDF_Filt' not found" error
    stop(paste0(method, " is not a valid filtering method. ",
                "Choose 'quantile', 'varFilter', 'filter1' or 'filter2'"))
  }
  return(tabDF_Filt)
}
#' @title normalization mRNA transcripts and miRNA using EDASeq package.
#' @description
#' TCGAanalyze_Normalization allows user to normalize mRNA transcripts and miRNA,
#' using EDASeq package.
#'
#' Normalization for RNA-Seq Numerical and graphical
#' summaries of RNA-Seq read data. Within-lane normalization procedures
#' to adjust for GC-content effect (or other gene-level effects) on read counts:
#' loess robust local regression, global-scaling, and full-quantile normalization
#' (Risso et al., 2011). Between-lane normalization procedures to adjust for
#' distributional differences between lanes (e.g., sequencing depth):
#' global-scaling and full-quantile normalization (Bullard et al., 2010).
#'
#' For instance, it returns all mRNA or miRNA with mean across all
#' samples higher than the threshold defined quantile mean across all samples.
#'
#' TCGAanalyze_Normalization performs normalization using following functions
#' from EDASeq
#' \enumerate{
#' \item EDASeq::newSeqExpressionSet
#' \item EDASeq::withinLaneNormalization
#' \item EDASeq::betweenLaneNormalization
#' \item EDASeq::counts
#' }
#' @param tabDF Rnaseq numeric matrix, each row represents a gene,
#' each column represents a sample
#' @param geneInfo Information matrix of 20531 genes about geneLength and gcContent.
#' Two objects are provided: TCGAbiolinks::geneInfoHT,TCGAbiolinks::geneInfo
#' @param method is method of normalization such as 'gcContent' or 'geneLength'
#' @importFrom EDASeq newSeqExpressionSet withinLaneNormalization
#' betweenLaneNormalization exprs counts offst
#' @export
#' @return Rnaseq matrix normalized with counts slot holds the count data as a matrix
#' of non-negative integer count values, one row for each observational unit (gene or the like),
#' and one column for each sample.
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
TCGAanalyze_Normalization <- function(tabDF,geneInfo,method = "geneLength"){
# Normalize an RNA-seq count matrix (genes in rows, samples in columns)
# with EDASeq, correcting within lanes for either GC content or gene
# length and between lanes with upper-quartile normalization.
# Check if we have a SE, we need a gene expression matrix
if(is(tabDF,"SummarizedExperiment")) tabDF <- assay(tabDF)
# Drop genes without annotation and coerce annotation columns to numeric
geneInfo <- geneInfo[!is.na(geneInfo[,1]),]
geneInfo <- as.data.frame(geneInfo)
geneInfo$geneLength <- as.numeric(as.character(geneInfo$geneLength))
geneInfo$gcContent <- as.numeric(as.character(geneInfo$gcContent))
if(method == "gcContent"){
# Row names look like "SYMBOL|ID": split them into a 2-column matrix
tmp <- as.character(rownames(tabDF))
tmp <- strsplit(tmp, "\\|")
geneNames <- matrix("", ncol = 2, nrow = length(tmp))
j <- 1
while(j <= length(tmp)) {
geneNames[j, 1] <- tmp[[j]][1]
geneNames[j, 2] <- tmp[[j]][2]
j <- j + 1
}
# Unknown symbols ("?") fall back to the second identifier
tmp <- which(geneNames[, 1] == "?")
geneNames[tmp, 1] <- geneNames[tmp, 2]
# Disambiguate duplicated symbols by appending the second identifier
tmp <- table(geneNames[,1])
tmp <- which(geneNames[,1] %in% names(tmp[which(tmp > 1)]))
geneNames[tmp, 1] <- paste(geneNames[tmp, 1], geneNames[tmp, 2], sep = ".")
tmp <- table(geneNames[,1])
rownames(tabDF) <- geneNames[,1]
rawCounts<- tabDF
# Restrict counts and annotation to their common genes
commonGenes <- intersect(rownames(geneInfo), rownames(rawCounts))
geneInfo <- geneInfo[commonGenes,]
rawCounts <- rawCounts[commonGenes,]
# Rough runtime estimate assuming ~80k matrix cells processed per second
timeEstimated <- format(ncol(tabDF)*nrow(tabDF)/80000,digits = 2)
message(messageEstimation <- paste("I Need about ", timeEstimated,
"seconds for this Complete Normalization Upper Quantile",
" [Processing 80k elements /s] "))
ffData <- as.data.frame(geneInfo)
rawCounts <- floor(rawCounts)
message("Step 1 of 4: newSeqExpressionSet ...")
tmp <- newSeqExpressionSet(as.matrix(rawCounts), featureData = ffData)
#fData(tmp)[, "gcContent"] <- as.numeric(geneInfo[, "gcContent"])
message("Step 2 of 4: withinLaneNormalization ...")
# GC-content correction within each lane, returned as offsets
tmp <- withinLaneNormalization(tmp, "gcContent", which = "upper", offset = TRUE)
message("Step 3 of 4: betweenLaneNormalization ...")
tmp <- betweenLaneNormalization(tmp, which = "upper", offset = TRUE)
# Apply the accumulated offsets on the log scale, then map back to counts
normCounts <- log(rawCounts + .1) + offst(tmp)
normCounts <- floor(exp(normCounts) - .1)
message("Step 4 of 4: .quantileNormalization ...")
# .quantileNormalization is a package-internal helper defined elsewhere;
# applied sample-wise via the transposes
tmp <- t(.quantileNormalization(t(normCounts)))
tabDF_norm <- floor(tmp)
}
if(method == "geneLength"){
# GenesCutID is a package helper (defined elsewhere) -- presumably it
# extracts the gene symbol from the row identifier; TODO confirm.
# Drop unknown ("?") symbols and the ambiguous SLC35E2 entry.
tabDF <- tabDF[ !(GenesCutID(as.matrix(rownames(tabDF))) == "?"),]
tabDF <- tabDF[ !(GenesCutID(as.matrix(rownames(tabDF))) == "SLC35E2"),]
rownames(tabDF) <- GenesCutID(as.matrix(rownames(tabDF)))
tabDF <- tabDF[rownames(tabDF) != "?", ]
tabDF <- tabDF[!duplicated(rownames(tabDF)), !duplicated(colnames(tabDF))]
tabDF <- tabDF[rownames(tabDF) %in% rownames(geneInfo),]
tabDF <- as.matrix(tabDF)
# Align annotation with the filtered count matrix
geneInfo <- geneInfo[rownames(geneInfo) %in% rownames(tabDF), ]
geneInfo <- geneInfo[!duplicated(rownames(geneInfo)), ]
# Genes of length 0 cannot be length-normalized; remove them.
# NOTE(review): toKeep is computed on geneInfo rows but also applied to
# tabDF rows, which assumes both are in the same row order here -- verify.
toKeep <- which(geneInfo[, "geneLength"] != 0)
geneInfo <- geneInfo[toKeep, ]
tabDF <- tabDF[toKeep, ]
geneInfo <- as.data.frame(geneInfo)
tabDF <- round(tabDF)
commonGenes <- intersect(rownames(tabDF),rownames(geneInfo))
tabDF <- tabDF[commonGenes,]
geneInfo <- geneInfo[commonGenes,]
# Rough runtime estimate assuming ~80k matrix cells processed per second
timeEstimated <- format(ncol(tabDF)*nrow(tabDF)/80000,digits = 2)
message(messageEstimation <- paste("I Need about ", timeEstimated,
"seconds for this Complete Normalization Upper Quantile",
" [Processing 80k elements /s] "))
message("Step 1 of 4: newSeqExpressionSet ...")
system.time(tabDF_norm <- EDASeq::newSeqExpressionSet(tabDF, featureData = geneInfo))
message("Step 2 of 4: withinLaneNormalization ...")
# Gene-length correction within each lane (counts rescaled, no offsets)
system.time(tabDF_norm <- EDASeq::withinLaneNormalization(tabDF_norm, "geneLength", which = "upper", offset = FALSE))
message("Step 3 of 4: betweenLaneNormalization ...")
system.time(tabDF_norm <- EDASeq::betweenLaneNormalization(tabDF_norm, which = "upper", offset = FALSE))
message("Step 4 of 4: exprs ...")
#system.time(tabDF_norm <- EDASeq::exprs(tabDF_norm))
system.time(tabDF_norm <- EDASeq::counts(tabDF_norm))
}
return(tabDF_norm)
}
#' @title Differential expression analysis (DEA) using edgeR or limma package.
#' @description
#' TCGAanalyze_DEA allows user to perform Differentially expression analysis (DEA),
#' using edgeR package or limma to identify differentially expressed genes (DEGs).
#' It is possible to do a two-class analysis.
#'
#' TCGAanalyze_DEA performs DEA using following functions from edgeR:
#' \enumerate{
#' \item edgeR::DGEList converts the count matrix into an edgeR object.
#' \item edgeR::estimateCommonDisp each gene gets assigned the same dispersion estimate.
#' \item edgeR::exactTest performs pair-wise tests for differential expression between two groups.
#' \item edgeR::topTags takes the output from exactTest(), adjusts the raw p-values using the
#' False Discovery Rate (FDR) correction, and returns the top differentially expressed genes.
#' }
#' TCGAanalyze_DEA performs DEA using following functions from limma:
#' \enumerate{
#' \item limma::makeContrasts construct matrix of custom contrasts.
#' \item limma::lmFit Fit linear model for each gene given a series of arrays.
#' \item limma::contrasts.fit Given a linear model fit to microarray data, compute estimated coefficients and standard errors for a given set of contrasts.
#' \item limma::eBayes Given a microarray linear model fit, compute moderated t-statistics, moderated F-statistic, and log-odds of differential expression by empirical Bayes moderation of the standard errors towards a common value.
#' \item limma::toptable Extract a table of the top-ranked genes from a linear model fit.
#' }
#' @param mat1 numeric matrix, each row represents a gene,
#' each column represents a sample with Cond1type
#' @param mat2 numeric matrix, each row represents a gene,
#' each column represents a sample with Cond2type
#' @param metadata Add metadata
#' @param Cond1type a string containing the class label of the samples in mat1
#' (e.g., control group)
#' @param Cond2type a string containing the class label of the samples in mat2
#' (e.g., case group)
#' @param pipeline a string to specify which package to use ("limma" or "edgeR")
#' @param method is 'glmLRT' (1) or 'exactTest' (2) used for edgeR
#' (1) Fit a negative binomial generalized log-linear model to
#' the read counts for each gene
#' (2) Compute genewise exact tests for differences in the means between
#' two groups of negative-binomially distributed counts.
#' @param fdr.cut is a threshold to filter DEGs according their p-value corrected
#' @param logFC.cut is a threshold to filter DEGs according their logFC
#' @param elementsRatio is number of elements processed for second for time consumation estimation
#' @param batch.factors a vector containing strings to specify options for batch correction. Options are "Plate", "TSS", "Year", "Portion", "Center", and "Patients"
#' @param ClinicalDF a dataframe returned by GDCquery_clinic() to be used to extract year data
#' @param paired boolean to account for paired or non-paired samples. Set to TRUE for paired case
#' @param log.trans boolean to perform log cpm transformation. Set to TRUE for log transformation
#' @param trend boolean to perform limma-trend pipeline. Set to TRUE to go through limma-trend
#' @param MAT matrix containing expression set as all samples in columns and genes as rows. Do not provide if mat1 and mat2 are used
#' @param contrast.formula string input to determine coefficients and to design contrasts in a customized way
#' @param Condtypes vector of grouping for samples in MAT
#' @param voom boolean to perform voom transformation for limma-voom pipeline. Set to TRUE for voom transformation
#' @importFrom edgeR DGEList estimateCommonDisp exactTest topTags estimateGLMCommonDisp
#' estimateGLMTagwiseDisp glmFit glmLRT
#' @importFrom limma makeContrasts lmFit contrasts.fit eBayes toptable
#' @export
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataBRCA, method = "quantile", qnt.cut = 0.25)
#' samplesNT <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("NT"))
#' samplesTP <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("TP"))
#' dataDEGs <- TCGAanalyze_DEA(mat1 = dataFilt[,samplesNT],
#' mat2 = dataFilt[,samplesTP],
#' Cond1type = "Normal",
#' Cond2type = "Tumor")
#'
#' @return table with DEGs containing for each gene logFC, logCPM, pValue,and FDR, also for each contrast
TCGAanalyze_DEA <- function(mat1,
                            mat2,
                            metadata = TRUE,
                            Cond1type,
                            Cond2type,
                            pipeline = "edgeR",
                            method = "exactTest",
                            fdr.cut = 1,
                            logFC.cut = 0,
                            elementsRatio = 30000,
                            batch.factors = NULL,
                            ClinicalDF = data.frame(),
                            paired = FALSE,
                            log.trans = FALSE,
                            voom = FALSE,
                            trend = FALSE,
                            MAT = data.frame(),
                            contrast.formula = "",
                            Condtypes = c()) {
  # Differential expression analysis via edgeR (exactTest / glmLRT) or
  # limma (optionally voom / trend), with optional batch-factor covariates.
  # Returns a DEG table (two-group designs) or a named list of DEG tables,
  # one per contrast (multi-group designs).
  #
  # Map TCGA sample-type codes to their two-digit barcode identifiers
  table.code <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
                  "TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
                  "CELL","XP","XCL")
  names(table.code) <- c('01','02','03','04','05','06','07','08','09','10',
                         '11','12','13','14','20','40','50','60','61')
  # Build the table of counts (TOC): either the two condition matrices
  # bound together, or the user-supplied multi-group matrix MAT
  if (nrow(MAT) == 0) {
    TOC <- cbind(mat1, mat2)
    Cond1num <- ncol(mat1)
    Cond2num <- ncol(mat2)
  } else {
    TOC <- MAT
  }
  if (metadata == TRUE) {
    # Parse TCGA barcodes into batch-related factors; these names are
    # referenced by the model formula built from batch.factors below
    my_IDs <- get_IDs(TOC)
    Plate <- factor(my_IDs$plate)
    Condition <- factor(my_IDs$condition)
    TSS <- factor(my_IDs$tss)
    Portion <- factor(my_IDs$portion)
    Center <- factor(my_IDs$center)
    Patients <- factor(my_IDs$patient)
  }
  if (paired == TRUE) {
    # Keep only samples that have a matched pair across the sample types
    matched.query <- TCGAquery_MatchedCoupledSampleTypes(my_IDs$barcode,
                                                         table.code[unique(my_IDs$sample)])
    my_IDs <- subset(my_IDs, barcode == matched.query)
    TOC <- TOC[, (names(TOC) %in% matched.query)]
  }
  # Extract diagnosis-year data from the clinical information
  if (nrow(ClinicalDF) > 0) {
    names(ClinicalDF)[names(ClinicalDF) == "bcr_patient_barcode"] <- "patient"
    # Bug fix: these two lines previously read from an undefined global
    # `clinical` object instead of the ClinicalDF argument
    ClinicalDF$age_at_diag_year <- floor(ClinicalDF$age_at_diagnosis / 365)
    ClinicalDF$diag_year <- ClinicalDF$age_at_diag_year + ClinicalDF$year_of_birth
    my_IDs <- merge(my_IDs, ClinicalDF, by = "patient")
    Year <- as.factor(my_IDs$diag_year)
  }
  # Validate the requested batch-correction factors
  options <- c("Plate", "TSS", "Year", "Portion", "Center", "Patients")
  if (length(batch.factors) == 0) {
    message("Batch correction skipped since no factors provided")
  } else {
    for (o in batch.factors) {
      if (o %in% options == FALSE)
        stop(paste0(o, " is not a valid batch correction factor"))
      if (o == "Year" & nrow(ClinicalDF) == 0)
        stop("batch correction using diagnosis year needs clinical info. Provide Clinical Data in arguments")
    }
  }
  # Additive covariate part of the model formula, e.g. "Plate+TSS"
  additiveformula <- paste(batch.factors, collapse = "+")
  message("----------------------- DEA -------------------------------")
  if (nrow(MAT) == 0) {
    message(message1 <- paste("there are Cond1 type", Cond1type, "in ",
                              Cond1num, "samples"))
    message(message2 <- paste("there are Cond2 type", Cond2type, "in ",
                              Cond2num, "samples"))
    message(message3 <- paste("there are ", nrow(TOC),
                              "features as miRNA or genes "))
  } else {
    message(message3 <- paste("there are ", nrow(TOC),
                              "features as miRNA or genes "))
  }
  timeEstimated <- format(ncol(TOC) * nrow(TOC) / elementsRatio, digits = 2)
  message(messageEstimation <- paste("I Need about ", timeEstimated,
                                     "seconds for this DEA. [Processing 30k elements /s] "))
  # Replace barcodes with synthetic sample names (duplicates-safe)
  colnames(TOC) <- paste0('s', 1:ncol(TOC))
  # Grouping factor: either user-supplied multi-group labels, or the
  # two-condition design inferred from mat1/mat2
  if (length(Condtypes) > 0) {
    tumorType <- factor(x = Condtypes, levels = unique(Condtypes))
  } else {
    tumorType <- factor(x = rep(c(Cond1type, Cond2type),
                                c(Cond1num, Cond2num)),
                        levels = c(Cond1type, Cond2type))
  }
  # Design matrix: edgeR uses an intercept model, limma a no-intercept model
  # (so contrasts can be written directly between group names).
  # The original four branches collapsed to two: the Condtypes length did
  # not change the design in either case.
  if (length(batch.factors) == 0) {
    if (pipeline == "edgeR")
      design <- model.matrix(~tumorType)
    else
      design <- model.matrix(~0 + tumorType)
  } else {
    if (pipeline == "edgeR")
      formula <- paste0("~tumorType+", additiveformula)
    else
      formula <- paste0("~0+tumorType+", additiveformula)
    design <- model.matrix(eval(parse(text = formula)))
  }
  if (pipeline == "edgeR") {
    if (method == "exactTest") {
      # Classic edgeR exact test between the two conditions
      DGE <- edgeR::DGEList(TOC, group = rep(c(Cond1type, Cond2type),
                                             c(Cond1num, Cond2num)))
      disp <- edgeR::estimateCommonDisp(DGE)
      tested <- edgeR::exactTest(disp, pair = c(Cond1type, Cond2type))
      tableDEA <- edgeR::topTags(tested, n = nrow(tested$table))$table
      tableDEA <- tableDEA[tableDEA$FDR <= fdr.cut, ]
      tableDEA <- tableDEA[abs(tableDEA$logFC) >= logFC.cut, ]
    } else if (method == "glmLRT") {
      if (length(unique(tumorType)) == 2) {
        # Negative-binomial GLM with a likelihood-ratio test on the
        # condition coefficient
        aDGEList <- edgeR::DGEList(counts = TOC, group = tumorType)
        aDGEList <- edgeR::estimateGLMCommonDisp(aDGEList, design)
        aDGEList <- edgeR::estimateGLMTagwiseDisp(aDGEList, design)
        aGlmFit <- edgeR::glmFit(aDGEList, design,
                                 dispersion = aDGEList$tagwise.dispersion,
                                 prior.count.total = 0)
        aGlmLRT <- edgeR::glmLRT(aGlmFit, coef = 2)
        tableDEA <- cbind(aGlmLRT$table, FDR = p.adjust(aGlmLRT$table$PValue, "fdr"))
        tableDEA <- tableDEA[tableDEA$FDR < fdr.cut, ]
        tableDEA <- tableDEA[abs(tableDEA$logFC) > logFC.cut, ]
        # Attach gene symbols when the rownames are Ensembl gene ids
        if (all(grepl("ENSG", rownames(tableDEA))))
          tableDEA <- cbind(tableDEA, map.ensg(genes = rownames(tableDEA))[, 2:3])
      } else if (length(unique(tumorType)) > 2) {
        # Multi-group design: one LRT per user-defined contrast
        aDGEList <- edgeR::DGEList(counts = TOC, group = tumorType)
        colnames(design)[1:length(levels(tumorType))] <- levels(tumorType)
        # Build the contrast matrix from the user-supplied contrast formula
        prestr <- "makeContrasts("
        poststr <- ",levels=colnames(design))"
        commandstr <- paste(prestr, contrast.formula, poststr, sep = "")
        commandstr <- paste0("limma::", commandstr)
        cont.matrix <- eval(parse(text = commandstr))
        aDGEList <- edgeR::estimateGLMCommonDisp(aDGEList, design)
        aDGEList <- edgeR::estimateGLMTagwiseDisp(aDGEList, design)
        aGlmFit <- edgeR::glmFit(aDGEList, design,
                                 dispersion = aDGEList$tagwise.dispersion,
                                 prior.count.total = 0)
        print(cont.matrix)
        tableDEA <- list()
        for (mycoef in colnames(cont.matrix)) {
          message(paste0("DEA for", " :", mycoef))
          aGlmLRT <- edgeR::glmLRT(aGlmFit, contrast = cont.matrix[, mycoef])
          print("---toptags---")
          print(edgeR::topTags(aGlmLRT, adjust.method = "fdr", sort.by = "PValue"))
          tt <- aGlmLRT$table
          tt <- cbind(tt, FDR = p.adjust(aGlmLRT$table$PValue, "fdr"))
          tt <- tt[(tt$FDR < fdr.cut & abs(as.numeric(tt$logFC)) > logFC.cut), ]
          tableDEA[[as.character(mycoef)]] <- tt
          if (all(grepl("ENSG", rownames(tableDEA[[as.character(mycoef)]]))))
            tableDEA[[as.character(mycoef)]] <- cbind(tableDEA[[as.character(mycoef)]],
                                                      map.ensg(genes = rownames(tableDEA[[as.character(mycoef)]]))[, 2:3])
        }
      }
    } else stop(paste0(method, " is not a valid DEA method option. Choose 'exactTest' or 'glmLRT' "))
  } else if (pipeline == "limma") {
    # Optional log-CPM transformation for the limma-trend pipeline
    if (log.trans == TRUE)
      logCPM <- edgeR::cpm(TOC, log = TRUE, prior.count = 3)
    else
      logCPM <- TOC
    if (voom == TRUE) {
      message("Voom Transformation...")
      logCPM <- limma::voom(logCPM, design)
    }
    if (length(unique(tumorType)) == 2) {
      # Single contrast Cond2 - Cond1
      colnames(design)[1:2] <- c(Cond1type, Cond2type)
      contr <- paste0(Cond2type, "-", Cond1type)
      cont.matrix <- limma::makeContrasts(contrasts = contr, levels = design)
      fit <- limma::lmFit(logCPM, design)
      fit <- limma::contrasts.fit(fit, cont.matrix)
      if (trend == TRUE) {
        fit <- limma::eBayes(fit, trend = TRUE)
      } else {
        fit <- limma::eBayes(fit, trend = FALSE)
      }
      tableDEA <- limma::topTable(fit, coef = 1, adjust.method = 'fdr', number = nrow(TOC))
      limma::volcanoplot(fit, highlight = 10)
      # Column 4 of topTable output is the adjusted p-value
      index <- which(tableDEA[, 4] < fdr.cut)
      tableDEA <- tableDEA[index, ]
      index <- which(abs(as.numeric(tableDEA$logFC)) > logFC.cut)
      tableDEA <- tableDEA[index, ]
    } else if (length(unique(tumorType)) > 2) {
      # Multi-group design: one moderated t-test per user-defined contrast
      colnames(design)[1:length(levels(tumorType))] <- levels(tumorType)
      prestr <- "makeContrasts("
      poststr <- ",levels=colnames(design))"
      commandstr <- paste(prestr, contrast.formula, poststr, sep = "")
      commandstr <- paste0("limma::", commandstr)
      cont.matrix <- eval(parse(text = commandstr))
      fit <- limma::lmFit(logCPM, design)
      fit <- limma::contrasts.fit(fit, cont.matrix)
      if (trend == TRUE)
        fit <- limma::eBayes(fit, trend = TRUE)
      else
        fit <- limma::eBayes(fit, trend = FALSE)
      tableDEA <- list()
      for (mycoef in colnames(cont.matrix)) {
        # Bug fix: number was nrow(MAT), which is 0 (an empty table) when
        # the multi-group design comes from mat1/mat2 + Condtypes; use the
        # actual feature count instead (identical when MAT is supplied)
        tableDEA[[as.character(mycoef)]] <- limma::topTable(fit, coef = mycoef,
                                                            adjust.method = "fdr", number = nrow(TOC))
        message(paste0("DEA for", " :", mycoef))
        tempDEA <- tableDEA[[as.character(mycoef)]]
        index.up <- which(tempDEA$adj.P.Val < fdr.cut & abs(as.numeric(tempDEA$logFC)) > logFC.cut)
        tableDEA[[as.character(mycoef)]] <- tempDEA[index.up, ]
        if (all(grepl("ENSG", rownames(tableDEA[[as.character(mycoef)]]))))
          tableDEA[[as.character(mycoef)]] <- cbind(tableDEA[[as.character(mycoef)]],
                                                    map.ensg(genes = rownames(tableDEA[[as.character(mycoef)]]))[, 2:3])
      }
    }
  } else stop(paste0(pipeline, " is not a valid pipeline option. Choose 'edgeR' or 'limma'"))
  message("----------------------- END DEA -------------------------------")
  return(tableDEA)
}
#' @title Batch correction using ComBat and Voom transformation using limma package.
#' @description
#' TCGAbatch_correction allows user to perform a Voom correction on gene expression data and have it ready for DEA.
#' One can also use ComBat for batch correction for exploratory analysis. If batch.factor or adjustment argument is "Year"
#' please provide clinical data. If no batch factor is provided, the data will be voom corrected only
#'
#' TCGAanalyze_DEA performs DEA using following functions from sva and limma:
#' \enumerate{
#' \item limma::voom Transform RNA-Seq Data Ready for Linear Modelling.
#' \item sva::ComBat Adjust for batch effects using an empirical Bayes framework.
#' }
#' @param tabDF numeric matrix, each row represents a gene,
#' each column represents a sample
#' @param batch.factor a string containing the batch factor to use for correction. Options are "Plate", "TSS", "Year", "Portion", "Center"
#' @param adjustment vector containing strings for factors to adjust for using ComBat. Options are "Plate", "TSS", "Year", "Portion", "Center"
#' @param ClinicalDF a dataframe returned by GDCquery_clinic() to be used to extract year data
#' @importFrom limma voom
#' @importFrom sva ComBat
#' @export
#' @return data frame with ComBat batch correction applied
TCGAbatch_Correction <- function(tabDF, batch.factor = NULL, adjustment = NULL,
                                 ClinicalDF = data.frame()) {
    # Nothing requested: there is nothing to correct or adjust for.
    if (length(batch.factor) == 0 && length(adjustment) == 0) {
        message("batch correction will be skipped")
    } else if (length(batch.factor) > 0 && batch.factor %in% adjustment) {
        # A factor cannot be both the ComBat batch and a model covariate.
        stop("Cannot adjust and correct for the same factor")
    }
    # Decompose the TCGA barcodes into plate / TSS / portion / center / condition.
    my_IDs <- get_IDs(tabDF)

    # "Year" is not encoded in the barcode: derive the diagnosis year from the
    # clinical data (age at diagnosis in years + year of birth).
    Year <- NULL
    if (length(batch.factor) > 0 || length(adjustment) > 0) {
        year.needed <- (length(batch.factor) > 0 && batch.factor == "Year") ||
            ("Year" %in% adjustment)
        if (year.needed && nrow(ClinicalDF) > 0) {
            names(ClinicalDF)[names(ClinicalDF) == "bcr_patient_barcode"] <- "patient"
            ClinicalDF$age_at_diag_year <- floor(ClinicalDF$age_at_diagnosis / 365)
            ClinicalDF$diag_year <- ClinicalDF$age_at_diag_year + ClinicalDF$year_of_birth
            diag_yearDF <- ClinicalDF[, c("patient", "diag_year")]
            Year <- merge(my_IDs, diag_yearDF, by = "patient")
            Year <- as.factor(Year$diag_year)
        } else if (year.needed) {
            stop("Cannot extract Year data. Clinical data was not provided")
        }
    }

    # Candidate batch/adjustment factors, one level vector per option.
    Plate <- as.factor(my_IDs$plate)
    Condition <- as.factor(my_IDs$condition)
    TSS <- as.factor(my_IDs$tss)
    Portion <- as.factor(my_IDs$portion)
    Sequencing.Center <- as.factor(my_IDs$center)
    design.mod.combat <- model.matrix(~Condition)

    # NOTE: renamed from `options` to avoid shadowing base::options().
    valid.factors <- c("Plate", "TSS", "Year", "Portion", "Sequencing Center")
    if (length(batch.factor) > 1)
        stop("Combat can only correct for one batch variable. Provide one batch factor")
    # BUGFIX: the original referenced `o` here before it was defined.
    if (length(batch.factor) > 0 && !(batch.factor %in% valid.factors))
        stop(paste0(batch.factor, " is not a valid batch correction factor"))
    for (o in adjustment) {
        if (!(o %in% valid.factors))
            stop(paste0(o, " is not a valid adjustment factor"))
    }

    # Map a user-facing factor name onto the factor vector built above.
    # BUGFIX: replaces eval(parse(text = ...)), which broke for
    # "Sequencing Center" (the factor itself was parsed as code).
    pick.factor <- function(name) {
        switch(name,
               "Plate" = Plate,
               "TSS" = TSS,
               "Year" = Year,
               "Portion" = Portion,
               "Sequencing Center" = Sequencing.Center)
    }

    batchCombat <- if (length(batch.factor) > 0) pick.factor(batch.factor) else NULL
    if (length(adjustment) > 0) {
        # Build "~Condition+<adj1>+<adj2>..."; "Sequencing Center" must be
        # rewritten to the syntactic variable name used in this scope.
        adjustment.names <- gsub("Sequencing Center", "Sequencing.Center", adjustment, fixed = TRUE)
        adjustment.formula <- paste0("~Condition+", paste(adjustment.names, collapse = "+"))
        print(adjustment.formula)
        model <- data.frame(batchCombat, row.names = colnames(tabDF))
        # env = environment() so Condition/Plate/... are resolved in this function.
        design.mod.combat <- model.matrix(as.formula(adjustment.formula, env = environment()),
                                          data = model)
    }
    print(unique(batchCombat))
    # Empirical-Bayes batch correction; the adjustment covariates enter ComBat
    # through the model matrix.
    batch_corr <- sva::ComBat(dat = tabDF, batch = batchCombat,
                              mod = design.mod.combat, par.prior = TRUE,
                              prior.plots = TRUE)
    return(batch_corr)
}
##Function to take raw counts by removing rows filtered after norm and filter process###
#' @title Use raw count from the DataPrep object which genes are removed by normalization and filtering steps.
#' @description function to keep raw counts after filtering and/or normalizing.
#' @param DataPrep DataPrep object returned by TCGAanalyze_Preprocessing()
#' @param DataFilt Filtered data frame containing samples in columns and genes in rows after normalization and/or filtering steps
#' @examples
#' \dontrun{
#' dataPrep_raw <- UseRaw_afterFilter(dataPrep, dataFilt)
#' }
#' @export
#' @return Filtered return object similar to DataPrep with genes removed after normalization and filtering process.
UseRaw_afterFilter <- function(DataPrep, DataFilt) {
    # Strip version suffixes (punctuation + digits, e.g. "GENE.1" -> "GENE")
    # from the raw-count rownames so they are comparable with DataFilt.
    # gsub() is already vectorized, so no lapply() is needed.
    rownames(DataPrep) <- gsub("[[:punct:]]\\d*", "", rownames(DataPrep))
    # Keep only the genes that survived filtering/normalization.
    keep <- rownames(DataPrep) %in% rownames(DataFilt)
    # BUGFIX: drop = FALSE keeps the matrix shape when a single gene remains
    # (the original silently collapsed to a vector in that case).
    Res <- DataPrep[keep, , drop = FALSE]
    return(Res)
}
#' @importFrom biomaRt getBM useMart listDatasets
map.ensg <- function(genome = "hg38", genes) {
    # Pick the Ensembl mart matching the requested genome build: hg19 lives in
    # the frozen Feb 2014 archive, hg38 is served by the current mart.
    if (genome == "hg19") {
        mart <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                        host = "feb2014.archive.ensembl.org",
                        path = "/biomart/martservice" ,
                        dataset = "hsapiens_gene_ensembl")
        # The archived mart uses the older attribute name "external_gene_id".
        fields <- c("ensembl_gene_id", "entrezgene","external_gene_id")
    } else {
        mart <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
        fields <- c("ensembl_gene_id", "entrezgene","external_gene_name")
    }
    # Fetch the Entrez id and gene name for each requested Ensembl gene id.
    gene.location <- getBM(attributes = fields,
                           filters = c("ensembl_gene_id"),
                           values = list(genes), mart = mart)
    # Normalise column names across the two builds and restore input order.
    colnames(gene.location) <- c("ensembl_gene_id", "entrezgene","external_gene_name")
    gene.location[match(genes, gene.location$ensembl_gene_id), ]
}
#' @title Adding information related to DEGs genes from DEA as mean values in two conditions.
#' @description
#' TCGAanalyze_LevelTab allows user to add information related to DEGs genes from
#' Differentially expression analysis (DEA) such as mean values and in two conditions.
#' @param FC_FDR_table_mRNA Output of dataDEGs filter by abs(LogFC) >=1
#' @param typeCond1 a string containing the class label of the samples
#' in TableCond1 (e.g., control group)
#' @param typeCond2 a string containing the class label of the samples
#' in TableCond2 (e.g., case group)
#' @param TableCond1 numeric matrix, each row represents a gene, each column
#' represents a sample with Cond1type
#' @param TableCond2 numeric matrix, each row represents a gene, each column
#' represents a sample with Cond2type
#' @param typeOrder typeOrder
#' @importFrom edgeR DGEList estimateCommonDisp exactTest topTags
#' @export
#' @return table with DEGs, log Fold Change (FC), false discovery rate (FDR),
#' the gene expression level
#' for samples in Cond1type, and Cond2type, and Delta value (the difference
#' of gene expression between the two
#' conditions multiplied logFC)
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataBRCA, method = "quantile", qnt.cut = 0.25)
#' samplesNT <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("NT"))
#' samplesTP <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("TP"))
#' dataDEGs <- TCGAanalyze_DEA(dataFilt[,samplesNT],
#' dataFilt[,samplesTP],
#' Cond1type = "Normal",
#' Cond2type = "Tumor")
#' dataDEGsFilt <- dataDEGs[abs(dataDEGs$logFC) >= 1,]
#' dataTP <- dataFilt[,samplesTP]
#' dataTN <- dataFilt[,samplesNT]
#' dataDEGsFiltLevel <- TCGAanalyze_LevelTab(dataDEGsFilt,"Tumor","Normal",
#' dataTP,dataTN)
TCGAanalyze_LevelTab <- function(FC_FDR_table_mRNA,
                                 typeCond1,
                                 typeCond2,
                                 TableCond1,
                                 TableCond2,
                                 typeOrder = TRUE) {
    # Genes to annotate: one per rowname of the (already filtered) DEA table.
    TF_enriched <- as.matrix(rownames(FC_FDR_table_mRNA))
    TableLevel <- as.data.frame(matrix(0, nrow(TF_enriched), 6))
    colnames(TableLevel) <- c("mRNA", "logFC", "FDR", typeCond1, typeCond2, "Delta")
    TableLevel[, "mRNA"] <- TF_enriched
    # Carry over logFC and FDR for the selected genes.
    Tabfilt <- FC_FDR_table_mRNA[which(rownames(FC_FDR_table_mRNA) %in%
                                           TF_enriched), ]
    TableLevel[, "logFC"] <- as.numeric(Tabfilt[TF_enriched, ][, "logFC"])
    TableLevel[, "FDR"] <- as.numeric(Tabfilt[TF_enriched, ][, "FDR"])
    # Mean expression of each gene in the two conditions.
    # (Removed two unused preallocations, MeanTumor / MeanDiffTumorNormal,
    # present in the original; seq_len is safe for an empty gene list.)
    for (i in seq_len(nrow(TF_enriched))) {
        TableLevel[i, typeCond1] <- mean(as.numeric(TableCond1[rownames(TableCond1) %in%
                                                                   TF_enriched[i], ]))
        TableLevel[i, typeCond2] <- mean(as.numeric(TableCond2[rownames(TableCond2) %in%
                                                                   TF_enriched[i], ]))
    }
    # Delta = |logFC| * mean expression in condition 1, used to rank genes.
    TableLevel[, "Delta"] <- as.numeric(abs(TableLevel[, "logFC"]) *
                                            TableLevel[, typeCond1])
    TableLevel <- TableLevel[order(as.numeric(TableLevel[, "Delta"]),
                                   decreasing = typeOrder), ]
    rownames(TableLevel) <- TableLevel[, "mRNA"]
    # For Ensembl ids, append Entrez id and gene symbol via biomaRt.
    if (all(grepl("ENSG", rownames(TableLevel))))
        TableLevel <- cbind(TableLevel, map.ensg(genes = rownames(TableLevel))[, 2:3])
    return(TableLevel)
}
#' @title Enrichment analysis for Gene Ontology (GO) [BP,MF,CC] and Pathways
#' @description
#' Researchers, in order to better understand the underlying biological
#' processes, often want to retrieve a functional profile of a set of genes
#' that might have an important role. This can be done by performing an
#' enrichment analysis.
#'
#'We will perform an enrichment analysis on gene sets using the TCGAanalyze_EAcomplete
#'function. Given a set of genes that are
#'up-regulated under certain conditions, an enrichment analysis will find
#'identify classes of genes or proteins that are #'over-represented using
#'annotations for that gene set.
#' @param TFname is the name of the list of genes or TF's regulon.
#' @param RegulonList List of genes such as TF's regulon or DEGs where to find enrichment.
#' @export
#' @return Enrichment analysis GO[BP,MF,CC] and Pathways complete table enriched by genelist.
#' @examples
#' Genelist <- c("FN1","COL1A1")
#' ansEA <- TCGAanalyze_EAcomplete(TFname="DEA genes Normal Vs Tumor",Genelist)
#' \dontrun{
#' Genelist <- rownames(dataDEGsFiltLevel)
#' system.time(ansEA <- TCGAanalyze_EAcomplete(TFname="DEA genes Normal Vs Tumor",Genelist))
#' }
TCGAanalyze_EAcomplete <- function(TFname, RegulonList) {
    # Input sanitation: entries of the form "Gene|ID" are reduced to the gene
    # symbol part before the enrichment is computed.
    if (all(grepl("\\|", RegulonList))) {
        RegulonList <- vapply(strsplit(RegulonList, "\\|"),
                              function(parts) parts[1],
                              character(1))
    }
    print(paste("I need about ", "1 minute to finish complete ",
                "Enrichment analysis GO[BP,MF,CC] and Pathways... "))
    # Run the four enrichment analyses against the DAVID GO tables (biological
    # process, molecular function, cellular component) and the pathway table.
    ResBP <- TCGAanalyze_EA(TFname, RegulonList, DAVID_BP_matrix,
                            EAGenes, GOtype = "DavidBP")
    print("GO Enrichment Analysis BP completed....done")
    ResMF <- TCGAanalyze_EA(TFname, RegulonList, DAVID_MF_matrix,
                            EAGenes, GOtype = "DavidMF")
    print("GO Enrichment Analysis MF completed....done")
    ResCC <- TCGAanalyze_EA(TFname, RegulonList, DAVID_CC_matrix,
                            EAGenes, GOtype = "DavidCC")
    print("GO Enrichment Analysis CC completed....done")
    ResPat <- TCGAanalyze_EA(TFname, RegulonList, listEA_pathways,
                             EAGenes, GOtype = "Pathway")
    print("Pathway Enrichment Analysis completed....done")
    # Bundle the four result tables into one named list.
    list(ResBP = ResBP, ResMF = ResMF, ResCC = ResCC, ResPat = ResPat)
}
#' @title Enrichment analysis of a gene-set with GO [BP,MF,CC] and pathways.
#' @description
#' The rational behind a enrichment analysis ( gene-set, pathway etc) is to compute
#' statistics of whether the overlap between the focus list (signature) and the gene-set
#' is significant. ie the confidence that overlap between the list is not due to chance.
#' The Gene Ontology project describes genes (gene products) using terms from
#' three structured vocabularies: biological process, cellular component and molecular function.
#' The Gene Ontology Enrichment component, also referred to as the GO Terms" component, allows
#' the genes in any such "changed-gene" list to be characterized using the Gene Ontology terms
#' annotated to them. It asks, whether for any particular GO term, the fraction of genes
#' assigned to it in the "changed-gene" list is higher than expected by chance
#' (is over-represented), relative to the fraction of genes assigned to that term in the
#' reference set.
#' In statistical terms it performs a test of the null hypothesis that,
#' for any particular ontology term, there is no difference in the proportion of genes
#' annotated to it in the reference list and the proportion annotated to it in the test list.
#' We adopted a Fisher Exact Test to perform the EA.
#' @param GeneName is the name of gene signatures list
#' @param TableEnrichment is a table related to annotations of gene symbols such as
#' GO[BP,MF,CC] and Pathways. It was created from DAVID gene ontology on-line.
#' @param RegulonList is a gene signature (list of genes) on which to perform EA.
#' @param GOtype is type of gene ontology Biological process (BP), Molecular Function (MF),
#' Cellular component (CC)
#' @param FDRThresh pvalue corrected (FDR) as threshold to selected significant
#' BP, MF,CC, or pathways. (default FDR < 0.01)
#' @param EAGenes is a table with informations about genes
#' such as ID, Gene, Description, Location and Family.
# @export
#' @import stats
#' @return Table with enriched GO or pathways by selected gene signature.
#' @examples
#' \dontrun{
#' EAGenes <- get("EAGenes")
#' RegulonList <- rownames(dataDEGsFiltLevel)
#' ResBP <- TCGAanalyze_EA(GeneName="DEA genes Normal Vs Tumor",
#' RegulonList,DAVID_BP_matrix,
#' EAGenes,GOtype = "DavidBP")
#'}
TCGAanalyze_EA <- function(GeneName,RegulonList,TableEnrichment,
EAGenes,GOtype,FDRThresh=0.01) {
# Fisher exact test of the overlap between RegulonList (the gene signature)
# and every term/pathway of TableEnrichment. Returns a 1-row data.frame
# (row named after GeneName) with one formatted summary string per enriched
# term, or NA when nothing passes the FDR threshold.
topPathways <- nrow(TableEnrichment)
topPathways_tab <- matrix(0,1,topPathways)
topPathways_tab <- as.matrix(topPathways_tab)
rownames(topPathways_tab) <- GeneName
rownames(EAGenes) <- toupper(rownames(EAGenes) )
# Deduplicate the gene universe by ID; allgene is the Fisher-test background.
EAGenes <- EAGenes[!duplicated(EAGenes[,"ID"]),]
rownames(EAGenes) <- EAGenes[,"ID"]
allgene <- EAGenes[,"ID"]
current_pathway_from_EA <- as.matrix(TableEnrichment[,GOtype]) # genes from EA pathways
TableNames <- gsub("David","",paste("Top ", GOtype, " n. ", 1:topPathways,
" of ", topPathways, sep = ""))
colnames(topPathways_tab) <- TableNames
topPathways_tab <- as.data.frame(topPathways_tab)
# One row per term, initialised to 1; the columns below are only filled in
# for terms with a significant overlap.
table_pathway_enriched <- matrix(1, nrow(current_pathway_from_EA),7)
colnames(table_pathway_enriched) <- c("Pathway","GenesInPathway","Pvalue",
"FDR","CommonGenesPathway",
"PercentPathway","PercentRegulon")
table_pathway_enriched <- as.data.frame(table_pathway_enriched)
for (i in 1:nrow(current_pathway_from_EA)) {
table_pathway_enriched[i,"Pathway"] <- as.character(current_pathway_from_EA[i,])
# NOTE(review): 589 rows appears to identify the pathway table, whose
# "Molecules" entries are comma-separated without a space, while the DAVID
# GO tables use ", " -- confirm this magic number against the shipped data.
if (nrow(TableEnrichment) == 589) {
genes_from_current_pathway_from_EA <- GeneSplitRegulon(TableEnrichment[ TableEnrichment[GOtype] == as.character(current_pathway_from_EA[i,]) ,][,"Molecules"], ",")
}
else {
genes_from_current_pathway_from_EA <- GeneSplitRegulon(TableEnrichment[ TableEnrichment[GOtype] == as.character(current_pathway_from_EA[i,]) ,][,"Molecules"], ", ")
}
# Case-insensitive overlap between the signature and the term's gene set.
genes_common_pathway_TFregulon <- as.matrix(intersect(toupper(RegulonList),toupper(genes_from_current_pathway_from_EA)))
if (length(genes_common_pathway_TFregulon) != 0) {
current_pathway_commongenes_num <- length(genes_common_pathway_TFregulon)
# 2x2 Fisher test over the whole gene universe: membership in the signature
# vs membership in the current term.
seta <- allgene %in% RegulonList
setb <- allgene %in% genes_from_current_pathway_from_EA
ft <- fisher.test(seta,setb)
FisherpvalueTF <- ft$p.value
table_pathway_enriched[i,"Pvalue"] <- as.numeric(FisherpvalueTF)
# NOTE(review): this inner cutoff is hard-coded at 0.01 and is independent
# of the FDRThresh argument (FDRThresh is applied later on the FDR column).
if (FisherpvalueTF < 0.01) {
current_pathway_commongenes_percent <- paste("(",format( (current_pathway_commongenes_num/length(genes_from_current_pathway_from_EA)) * 100,digits = 2),"%)")
current_pathway_commongenes_num_with_percent <- gsub(" ","",paste(current_pathway_commongenes_num, current_pathway_commongenes_percent,"pv=",format(FisherpvalueTF,digits=2)))
table_pathway_enriched[i,"CommonGenesPathway"] <- length(genes_common_pathway_TFregulon)
table_pathway_enriched[i,"GenesInPathway"] <- length(genes_from_current_pathway_from_EA)
table_pathway_enriched[i,"PercentPathway"] <- as.numeric(table_pathway_enriched[i,"CommonGenesPathway"]) / as.numeric(table_pathway_enriched[i,"GenesInPathway"]) *100
table_pathway_enriched[i,"PercentRegulon"] <- as.numeric(table_pathway_enriched[i,"CommonGenesPathway"]) / length(RegulonList) *100
} }
}
# Keep terms with raw p < 0.01, adjust by FDR, then apply FDRThresh.
table_pathway_enriched <- table_pathway_enriched[order(table_pathway_enriched[,"Pvalue"],decreasing = FALSE),]
table_pathway_enriched <- table_pathway_enriched[table_pathway_enriched[,"Pvalue"] < 0.01 ,]
table_pathway_enriched[,"FDR"] <- p.adjust(table_pathway_enriched[,"Pvalue"],method = "fdr")
table_pathway_enriched <- table_pathway_enriched[table_pathway_enriched[,"FDR"] < FDRThresh ,]
table_pathway_enriched <- table_pathway_enriched[order(table_pathway_enriched[,"FDR"],decreasing = FALSE),]
if(nrow(table_pathway_enriched) > 0) {
# Collapse each enriched term into one human-readable summary string.
tmp <- table_pathway_enriched
tmp <- paste(tmp[,"Pathway"],"; FDR= ", format(tmp[,"FDR"],digits = 3),"; (ng=" ,round(tmp[,"GenesInPathway"]),"); (ncommon=", format(tmp[,"CommonGenesPathway"],digits = 2), ")" ,sep = "")
tmp <- as.matrix(tmp)
topPathways_tab <- topPathways_tab[,1:nrow(table_pathway_enriched),drop=FALSE]
topPathways_tab[1,] <- tmp
} else {
topPathways_tab <- NA
}
return(topPathways_tab)
}
#' @title Differentially expression analysis (DEA) using limma package.
#' @description Differentially expression analysis (DEA) using limma package.
#' @param FC.cut write
#' @param AffySet A matrix-like data object containing log-ratios or log-expression values
#' for a series of arrays, with rows corresponding to genes and columns to samples
#' @examples
#' \dontrun{
#' to add example
#' }
#' @export
#' @return List of list with tables in 2 by 2 comparison
#' of the top-ranked genes from a linear model fitted by DEA's limma
TCGAanalyze_DEA_Affy <- function(AffySet, FC.cut = 0.01) {
    # Fail early with accurate messages (the original said "affy" for both).
    if (!requireNamespace("Biobase", quietly = TRUE)) {
        stop("Biobase package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("limma", quietly = TRUE)) {
        stop("limma package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    Pdatatable <- Biobase::phenoData(AffySet)
    f <- factor(Pdatatable$Disease)
    groupColors <- names(table(f))
    # Upper-triangular indicator matrix: one entry per unordered pair of
    # groups, so every 2-by-2 comparison is run exactly once.
    tmp <- matrix(0, length(groupColors), length(groupColors))
    colnames(tmp) <- groupColors
    rownames(tmp) <- groupColors
    tmp[upper.tri(tmp)] <- 1
    # Design with one column per disease group (no intercept).
    design <- model.matrix(~0 + f)
    colnames(design) <- levels(f)
    fit <- limma::lmFit(AffySet, design)  # fit is an object of class MArrayLM.
    CompleteList <- vector("list", sum(tmp))
    k <- 1
    for (i in seq_along(groupColors)) {
        col1 <- colnames(tmp)[i]
        for (j in seq_along(groupColors)) {
            col2 <- rownames(tmp)[j]
            if (i != j && tmp[i, j] != 0) {
                Comparison <- paste(col2, "-", col1, sep = "")
                # Historical special cases kept from the original: these two
                # comparisons are emitted in the reversed direction.
                if (i == 4 && j == 6) Comparison <- paste(col1, "-", col2, sep = "")
                if (i == 5 && j == 6) Comparison <- paste(col1, "-", col2, sep = "")
                print(paste(i, j, Comparison, "to do..."))
                # BUGFIX: the original called limmamakeContrasts()/
                # limmacontrasts.fit(), which do not exist (missing "::").
                cont.matrix <- limma::makeContrasts(contrasts = Comparison,
                                                    levels = design)
                fit2 <- limma::contrasts.fit(fit, cont.matrix)
                fit2 <- limma::eBayes(fit2)
                # Significant genes for this contrast (BH-adjusted p < 0.05,
                # |logFC| >= FC.cut), ranked by absolute log fold change.
                sigI <- limma::topTable(fit2, coef = 1, adjust.method = "BH",
                                        sort.by = "B", p.value = 0.05,
                                        lfc = FC.cut, number = 50000)
                sigIbis <- sigI[order(abs(as.numeric(sigI$logFC)), decreasing = TRUE), ]
                names(CompleteList)[k] <- gsub("-", "_", Comparison)
                CompleteList[[k]] <- sigIbis
                k <- k + 1
            }
        }
    }
    return(CompleteList)
}
#' @title Generate network
#' @description TCGAanalyze_analyseGRN perform gene regulatory network.
#' @param TFs a vector of genes.
#' @param normCounts is a matrix of gene expression with genes in rows and samples in columns.
#' @param kNum the number of nearest neighbors to consider to estimate the mutual information.
#' Must be less than the number of columns of normCounts.
#' @export
#' @return an adjacent matrix
TCGAanalyze_analyseGRN <- function(TFs, normCounts, kNum) {
    if (!requireNamespace("parmigene", quietly = TRUE)) {
        stop("parmigene package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    # Master-regulator candidates: TFs actually present in the count matrix.
    MRcandidates <- intersect(rownames(normCounts), TFs)
    # Rough runtime estimate (same formula as the original:
    # candidates * genes * samples / 17e6, in seconds).
    timeEstimated <- format(length(MRcandidates) * nrow(normCounts) *
                                ncol(normCounts) / 17000000, digits = 2)
    print(paste("I Need about ", timeEstimated,
                "seconds for this MI estimation. [Processing 17000k elements /s] "))
    # BUGFIX: knnmi.cross was called unqualified although parmigene is only
    # checked via requireNamespace() and never attached; qualify the call.
    miTFGenes <- parmigene::knnmi.cross(normCounts[MRcandidates, ], normCounts, k = kNum)
    return(miTFGenes)
}
#' @title Generate pathview graph
#' @description TCGAanalyze_Pathview pathway based data integration and visualization.
#' @param dataDEGs dataDEGs
#' @param pathwayKEGG pathwayKEGG
#' @export
#' @return an adjacent matrix
#' @examples
#' \dontrun{
#' dataDEGs <- data.frame(mRNA = c("TP53","TP63","TP73"), logFC = c(1,2,3))
#' TCGAanalyze_Pathview(dataDEGs)
#' }
TCGAanalyze_Pathview <- function(dataDEGs, pathwayKEGG = "hsa05200") {
    if (!requireNamespace("clusterProfiler", quietly = TRUE)) {
        stop("clusterProfiler needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("pathview", quietly = TRUE)) {
        stop("pathview needed for this function to work. Please install it.",
             call. = FALSE)
    }
    # Map gene symbols to Entrez gene ids.
    eg <- as.data.frame(clusterProfiler::bitr(dataDEGs$mRNA,
                                              fromType = "SYMBOL",
                                              toType = "ENTREZID",
                                              OrgDb = "org.Hs.eg.db"))
    eg <- eg[!duplicated(eg$SYMBOL), ]
    # Keep only mapped genes, then sort both tables by symbol so that the
    # i-th Entrez id lines up with the i-th logFC.
    dataDEGs <- dataDEGs[dataDEGs$mRNA %in% eg$SYMBOL, ]
    dataDEGs <- dataDEGs[order(dataDEGs$mRNA, decreasing = FALSE), ]
    eg <- eg[order(eg$SYMBOL, decreasing = FALSE), ]
    dataDEGs$GeneID <- eg$ENTREZID
    # pathview input: a numeric logFC vector named by Entrez id.
    genelistDEGs <- as.numeric(dataDEGs$logFC)
    names(genelistDEGs) <- dataDEGs$GeneID
    # Render the KEGG pathway coloured by logFC (pathview also writes image
    # files to the working directory). BUGFIX: the result was bound to a
    # misleading hard-coded name ("hsa05200") and returned only implicitly;
    # it is now returned invisibly under a neutral name.
    pathway.plot <- pathview::pathview(gene.data = genelistDEGs,
                                       pathway.id = pathwayKEGG,
                                       species = "hsa",
                                       limit = list(gene = as.integer(max(abs(genelistDEGs)))))
    invisible(pathway.plot)
}
#' @title infer gene regulatory networks
#' @description TCGAanalyze_networkInference taking expression data as input, this will return an adjacency matrix of interactions
#' @param data expression data, genes in columns, samples in rows
#' @param optionMethod inference method, chose from aracne, c3net, clr and mrnet
#' @export
#' @return an adjacent matrix
TCGAanalyze_networkInference <- function(data, optionMethod = "clr") {
    # Infer a gene regulatory network from an expression matrix
    # (samples in rows, genes in columns) and return the adjacency matrix.
    if (optionMethod == "c3net") {
        if (!requireNamespace("c3net", quietly = TRUE)) {
            stop("c3net package is needed for this function to work. Please install it.",
                 call. = FALSE)
        }
        # BUGFIX: c3net() was called unqualified although the package is only
        # checked via requireNamespace() and never attached.
        # c3net expects genes in rows, hence the transpose.
        net <- c3net::c3net(t(data))
    } else {
        if (!requireNamespace("minet", quietly = TRUE)) {
            stop("minet package is needed for this function to work. Please install it.",
                 call. = FALSE)
        }
        # BUGFIX: same namespacing issue for minet(); optionMethod selects one
        # of "aracne", "clr" or "mrnet".
        net <- minet::minet(data, method = optionMethod)
    }
    return(net)
}
#' Creates a plot for GAIA output (all significant aberrant regions)
#' @description
#' This function is an auxiliary function to visualize GAIA output
#' (all significant aberrant regions).
#' @param calls A matrix with the following columns: Chromossome, Aberration Kind
#' Region Start, Region End, Region Size and score
#' @param threshold Score threshold (orange horizontal line in the plot)
#' @export
#' @importFrom graphics abline axis legend plot points
#' @return A plot with all significant aberrant regions.
#' @examples
#' call <- data.frame("Chromossome" = rep(9,100),
#' "Aberration Kind" = rep(c(-2,-1,0,1,2),20),
#' "Region Start [bp]" = 18259823:18259922,
#' "Region End [bp]" = 18259823:18259922,
#' "score" = rep(c(1,2,3,4),25))
#' gaiaCNVplot(call,threshold = 0.01)
#' call <- data.frame("Chromossome" = rep(c(1,9),50),
#' "Aberration Kind" = rep(c(-2,-1,0,1,2),20),
#' "Region Start [bp]" = 18259823:18259922,
#' "Region End [bp]" = 18259823:18259922,
#' "score" = rep(c(1,2,3,4),25))
#' gaiaCNVplot(call,threshold = 0.01)
gaiaCNVplot <- function(calls, threshold = 0.01) {
    # Order calls by start position, then by chromosome (column names are
    # matched loosely so both "Chromossome" and "Chromosome" work).
    Calls <- calls[order(calls[, grep("start", colnames(calls), ignore.case = TRUE)]), ]
    Calls <- Calls[order(Calls[, grep("chr", colnames(calls), ignore.case = TRUE)]), ]
    rownames(Calls) <- NULL
    Chromo <- Calls[, grep("chr", colnames(calls), ignore.case = TRUE)]
    # Split scores into gains (aberration kind == 1) and losses (== 0).
    Gains <- apply(Calls, 1, function(x) ifelse(x[grep("aberration", colnames(calls), ignore.case = TRUE)] == 1, x["score"], 0))
    Losses <- apply(Calls, 1, function(x) ifelse(x[grep("aberration", colnames(calls), ignore.case = TRUE)] == 0, x["score"], 0))
    # Gains above the axis (red), losses mirrored below it (blue).
    plot(Gains,
         ylim = c(-max(Calls[, "score"] + 2), max(Calls[, "score"] + 2)),
         type = "h",
         col = "red",
         xlab = "Chromosome",
         ylab = "Score",
         xaxt = "n")
    points(-(Losses), type = "h", col = "blue")
    # Draw origin line
    abline(h = 0, cex = 4)
    # Draw threshold lines, symmetric around zero.
    # BUGFIX: dropped the stray main = "test" arguments, which are not valid
    # for abline().
    abline(h = -log10(threshold), col = "orange", cex = 4)
    abline(h = log10(threshold), col = "orange", cex = 4)
    # Dashed vertical separator at the last index of each chromosome.
    uni.chr <- unique(Chromo)
    temp <- rep(0, length(uni.chr))
    for (i in seq_along(uni.chr)) {
        temp[i] <- max(which(uni.chr[i] == Chromo))
    }
    for (i in seq_along(temp)) {
        abline(v = temp[i], col = "black", lty = "dashed")
    }
    # Centre an axis label inside each chromosome segment.
    nChroms <- length(uni.chr)
    begin <- c()
    for (d in seq_len(nChroms)) {
        chrom <- sum(Chromo == uni.chr[d])
        begin <- append(begin, chrom)
    }
    temp2 <- rep(0, nChroms)
    for (i in seq_len(nChroms)) {
        if (i == 1) {
            temp2[1] <- (begin[1] * 0.5)
        } else if (i > 1) {
            temp2[i] <- temp[i - 1] + (begin[i] * 0.5)
        }
    }
    # Chromosomes 23/24 are conventionally displayed as X/Y.
    uni.chr[uni.chr == 23] <- "X"
    uni.chr[uni.chr == 24] <- "Y"
    for (i in seq_along(temp)) {
        axis(1, at = temp2[i], labels = uni.chr[i], cex.axis = 1)
    }
    legend(x = 1, y = max(Calls[, "score"] + 2), y.intersp = 0.8, c("Amp"), pch = 15, col = c("red"), text.font = 3)
    legend(x = 1, y = -max(Calls[, "score"] + 0.5), y.intersp = 0.8, c("Del"), pch = 15, col = c("blue"), text.font = 3)
}
#' Get a matrix of interactions of genes from biogrid
#' @description
#' Using the biogrid database, it will create a matrix of gene interactions.
#' If column A and row B has value 1, it means that gene A and gene B interact.
#' @param tmp.biogrid Biogrid table
#' @export
#' @param names.genes List of genes to filter from output. Default: consider all genes
#' @return A matrix with 1 for genes that interacts, 0 for no interaction.
#' @examples
#' names.genes.de <- c("PLCB1","MCL1","PRDX4","TTF2","TACC3", "PARP4","LSM1")
#' tmp.biogrid <- data.frame("Official.Symbol.Interactor.A" = names.genes.de,
#' "Official.Symbol.Interactor.B" = rev(names.genes.de))
#' net.biogrid.de <- getAdjacencyBiogrid(tmp.biogrid, names.genes.de)
#' \dontrun{
#' file <- paste0("http://thebiogrid.org/downloads/archives/",
#' "Release%20Archive/BIOGRID-3.4.133/BIOGRID-ALL-3.4.133.tab2.zip")
#' downloader::download(file,basename(file))
#' unzip(basename(file),junkpaths =TRUE)
#' tmp.biogrid <- read.csv(gsub("zip","txt",basename(file)),
#' header=TRUE, sep="\t", stringsAsFactors=FALSE)
#' names.genes.de <- c("PLCB1","MCL1","PRDX4","TTF2","TACC3", "PARP4","LSM1")
#' net.biogrid.de <- getAdjacencyBiogrid(tmp.biogrid, names.genes.de)
#' }
getAdjacencyBiogrid <- function(tmp.biogrid, names.genes = NULL) {
    # Locate the two interactor-symbol columns by pattern, since their exact
    # names differ between biogrid releases.
    it.a <- grep("Symbol", colnames(tmp.biogrid), value = TRUE)[1]
    it.b <- grep("Symbol", colnames(tmp.biogrid), value = TRUE)[2]
    if (is.null(names.genes)) {
        # No filter: use every gene mentioned in either column.
        names.genes <- sort(union(unique(tmp.biogrid[, it.a]),
                                  unique(tmp.biogrid[, it.b])))
        # BUGFIX: seq_len() instead of seq(1, nrow(...)), which yields c(1, 0)
        # for an empty interaction table.
        ind <- seq_len(nrow(tmp.biogrid))
    } else {
        # Keep only rows where BOTH interactors are in the requested gene list.
        ind.A <- which(tmp.biogrid[, it.a] %in% names.genes)
        ind.B <- which(tmp.biogrid[, it.b] %in% names.genes)
        ind <- intersect(ind.A, ind.B)
    }
    mat.biogrid <- matrix(0, nrow = length(names.genes),
                          ncol = length(names.genes),
                          dimnames = list(names.genes, names.genes))
    # Mark each interacting pair symmetrically (undirected graph).
    for (i in ind) {
        mat.biogrid[tmp.biogrid[i, it.a], tmp.biogrid[i, it.b]] <-
            mat.biogrid[tmp.biogrid[i, it.b], tmp.biogrid[i, it.a]] <- 1
    }
    # Self-interactions are not reported.
    diag(mat.biogrid) <- 0
    return(mat.biogrid)
}
#' Get GDC samples with both DNA methylation (HM450K) and Gene expression data from
#' the GDC database
#' @description
#' For a given TCGA project it gets the samples (barcode) with both DNA methylation and Gene expression data
#' from GDC database
#' @param project A GDC project
#' @param n Number of samples to return. If NULL return all (default)
#' @param legacy Access legacy (hg19) or harmonized database (hg38).
#' @return A vector of barcodes
#' @export
#' @examples
#' # Get ACC samples with both DNA methylation (HM450K) and gene expression aligned to hg19
#' samples <- matchedMetExp("TCGA-ACC", legacy = TRUE)
matchedMetExp <- function(project, legacy = FALSE, n = NULL){
# Returns barcodes of primary solid tumor samples that have BOTH HM450K DNA
# methylation and gene expression data for `project`, by intersecting two
# GDCquery() results on the patient portion (first 12 chars) of the barcode.
if(legacy) {
# get primary solid tumor samples: DNA methylation
message("Download DNA methylation information")
met450k <- GDCquery(project = project,
data.category = "DNA methylation",
platform = "Illumina Human Methylation 450",
legacy = TRUE,
sample.type = c("Primary solid Tumor"))
# get primary solid tumor samples: RNAseq
message("Download gene expression information")
exp <- GDCquery(project = project,
data.category = "Gene expression",
data.type = "Gene expression quantification",
platform = "Illumina HiSeq",
file.type = "results",
sample.type = c("Primary solid Tumor"),
legacy = TRUE)
} else {
# get primary solid tumor samples: DNA methylation
message("Download DNA methylation information")
met450k <- GDCquery(project = project,
data.category = "DNA Methylation",
platform = "Illumina Human Methylation 450",
sample.type = c("Primary solid Tumor"))
# get primary solid tumor samples: RNAseq
message("Download gene expression information")
# NOTE(review): unlike the legacy branch, this query does not restrict
# sample.type -- confirm whether that is intentional.
exp <- GDCquery(project = project,
data.category = "Transcriptome Profiling",
data.type = "Gene Expression Quantification",
workflow.type = "HTSeq - Counts")
}
met450k.tp <- met450k$results[[1]]$cases
# Get patients with samples in both platforms
exp.tp <- exp$results[[1]]$cases
# Match on the 12-char patient id; return the 15-char sample-level barcode.
patients <- unique(substr(exp.tp,1,15)[substr(exp.tp,1,12) %in% substr(met450k.tp,1,12)] )
if(!is.null(n)) patients <- patients[1:n] # get only n samples
return(patients)
}
#' Create a Summary table for each sample in a project saying if it contains
#' or not files for a certain data category
#' @description
#' Create a Summary table for each sample in a project saying if it contains
#' or not files for a certain data category
#' @param project A GDC project
#' @param legacy Access legacy (hg19) or harmonized database (hg38).
#' @return A data frame
#' @export
#' @importFrom stats xtabs
#' @examples
#' summary <- getDataCategorySummary("TCGA-ACC", legacy = TRUE)
getDataCategorySummary <- function(project, legacy = FALSE){
# Builds a patient x data-category presence table for a GDC project by
# querying the GDC "files" endpoint directly.
baseURL <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/files/?","https://api.gdc.cancer.gov/files/?")
# Filter: every file of the project; request only each file's case submitter
# id and data category (up to 100000 files per request).
url <- paste0(baseURL,"&expand=cases&size=100000&fields=cases.submitter_id,data_category&filters=",
URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.project.project_id","value":["'),
URLencode(project),
URLencode('"]}}]}'))
# Try the direct JSON fetch first; on error, fall back to a plain GET and
# parse the response body. (getURL/fromJSON/timeout/content/GET are resolved
# from the package imports -- presumably httr/jsonlite; verify in NAMESPACE.)
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
json <- json$data$hits
# One row per file: collapse the case list to a comma-separated submitter id.
json$submitter_id <- unlist(lapply(json$cases, function(x) paste0(x$submitter_id,collapse = ",")))
json$cases <- NULL
json <- json[!duplicated(json),]
# Keep only rows whose submitter id is a single 12-char patient barcode
# (files attached to multiple cases produce longer, comma-joined ids).
json <- json[stringr::str_length(json$submitter_id) == 12,]
# Cross-tabulate patients against data categories (counts of files).
ret <- as.data.frame.matrix(xtabs(~ submitter_id + data_category , json))
return(ret)
}
| /R/analyze.R | no_license | QiStark/TCGAbiolinks | R | false | false | 80,962 | r | #' @title Hierarchical cluster analysis
#' @description Hierarchical cluster analysis using several methods such as
#' "ward.D", "ward.D2", "single", "complete", "average" (= UPGMA),
#' "mcquitty" (= WPGMA), "median" (= WPGMC) or "centroid" (= UPGMC).
#' @param tabDF is a dataframe or numeric matrix, each row represents a gene,
#' each column represents a sample come from TCGAPrepare.
#' @param method is method to be used for generic cluster such as 'hclust'
#' or 'consensus'
#' @param methodHC is method to be used for Hierarchical cluster.
#' @import stats
#' @importFrom ConsensusClusterPlus ConsensusClusterPlus
#' @export
#' @return object of class hclust if method selected is 'hclust'.
#' If method selected is 'Consensus' returns a list of length maxK
#' (maximum cluster number to evaluate.). Each element is a list containing
#' consensusMatrix (numerical matrix), consensusTree (hclust), consensusClass
#' (consensus class asssignments). ConsensusClusterPlus also produces images.
TCGAanalyze_Clustering <- function(tabDF, method, methodHC = "ward.D2"){
    # Validate up front: the previous version silently fell through for any
    # other value and failed later with "object 'ans' not found".
    if (!method %in% c("hclust", "consensus"))
        stop("method should be either 'hclust' or 'consensus'")
    # Euclidean distance between rows (genes); needed by both branches.
    ddist <- dist(tabDF)
    if (method == "hclust") {
        # Plain hierarchical clustering with the requested linkage.
        ans <- hclust(ddist, method = methodHC)
    } else {
        # Consensus clustering: resample 90% of the items 1000 times,
        # evaluate 2..7 clusters; writes plots and tables to disk.
        ans <- ConsensusClusterPlus(ddist, maxK = 7, pItem = 0.9, reps = 1000,
                                    title = "mc_consensus_k7_1000",
                                    clusterAlg = "hc",
                                    innerLinkage = "ward.D2",
                                    finalLinkage = "complete",
                                    plot = 'pdf', writeTable = TRUE)
    }
    return(ans)
}
#' @title Array Array Intensity correlation (AAIC) and correlation boxplot to define outlier
#' @description TCGAanalyze_Preprocessing perform Array Array Intensity correlation (AAIC).
#' It defines a square symmetric matrix of spearman correlation among samples.
#' According this matrix and boxplot of correlation samples by samples it is possible
#' to find samples with low correlation that can be identified as possible outliers.
#' @param object of gene expression of class RangedSummarizedExperiment from TCGAprepare
#' @param cor.cut is a threshold to filter samples according their spearman correlation in
#' samples by samples. default cor.cut is 0
#' @param filename Filename of the image file (default "PreprocessingOutput.png")
#' @param width Image width
#' @param height Image height
#' @param datatype is a string from RangedSummarizedExperiment assay
#' @importFrom grDevices dev.list
#' @importFrom SummarizedExperiment assays
#' @export
#' @return Plot with array array intensity correlation and boxplot of correlation samples by samples
TCGAanalyze_Preprocessing <- function(object,
cor.cut = 0,
filename = NULL,
width = 1000,
height = 1000,
datatype = names(assays(object))[1]){
# This is a work around for raw_counts and raw_count: if either spelling is
# requested/present, resolve to the assay name actually in the object.
if(grepl("raw_count",datatype) & any(grepl("raw_count",names(assays(object)))))
datatype <- names(assays(object))[grepl("raw_count",names(assays(object)))]
# Fail early if the requested assay is not in the SummarizedExperiment.
if(!any(grepl(datatype, names(assays(object)))))
stop(paste0(datatype, " not found in the assay list: ",
paste(names(assays(object)),collapse = ", "),
"\n Please set the correct datatype argument."))
# Close the RStudio graphics device, if open, so png() is the active device.
if (!(is.null(dev.list()["RStudioGD"]))){dev.off()}
if(is.null(filename)) filename <- "PreprocessingOutput.png"
png(filename, width = width, height = height)
par(oma=c(10,10,10,10))
# Build a minimal per-sample annotation table (Disease/platform/SampleID/Study)
# from the colData barcodes; platform and Study are placeholder constants here.
ArrayIndex <- as.character(1:length( colData(object)$barcode))
pmat_new <- matrix(0, length(ArrayIndex),4)
colnames(pmat_new) <- c("Disease","platform","SampleID","Study")
rownames(pmat_new) <- as.character(colData(object)$barcode)
pmat_new <- as.data.frame(pmat_new)
pmat_new$Disease <- as.character(colData(object)$definition)
pmat_new$platform <-"platform"
pmat_new$SampleID <- as.character(colData(object)$barcode)
pmat_new$Study <- "study"
# Assign one rainbow color per disease/definition group.
# NOTE(review): the loop compares against tabGroupCol$Disease[i] (the i-th
# sample) rather than unique(tabGroupCol$Disease)[i] (the i-th group); this
# looks unintended when sample order does not match group order -- confirm.
tabGroupCol <-cbind(pmat_new, Color = matrix(0,nrow(pmat_new),1))
for(i in seq_along(unique(tabGroupCol$Disease))){
tabGroupCol[which(tabGroupCol$Disease == tabGroupCol$Disease[i]),"Color"] <- rainbow(length(unique(tabGroupCol$Disease)))[i]
}
# pmat <- as.matrix(pData(phenoData(object)))
pmat <- pmat_new
# Order samples by up to the first 3 annotation columns so that groups are
# contiguous in the correlation heatmap.
phenodepth <- min(ncol(pmat), 3)
order <- switch(phenodepth + 1, ArrayIndex, order(pmat[, 1]),
order(pmat[, 1], pmat[, 2]), order(pmat[, 1],
pmat[, 2], pmat[, 3]))
# Normalized axis positions in [0, 1] for each sample of the heatmap.
arraypos <- (1:length(ArrayIndex)) * (1/(length(ArrayIndex) - 1)) - (1/(length(ArrayIndex) - 1))
arraypos2 = seq(1:length(ArrayIndex) - 1)
for (i in 2:length(ArrayIndex)) { arraypos2[i - 1] <- (arraypos[i] + arraypos[i - 1])/2 }
# Panel layout: big heatmap (1) + color-key strip (2) on top, boxplot (3) below.
layout(matrix(c(1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 3, 3, 3, 4), 4, 4, byrow = TRUE))
# Sample-by-sample spearman correlation matrix (square, symmetric).
c <- cor(assay(object,datatype)[, order], method = "spearman")
image(c, xaxt = "n", yaxt = "n",
#xlab = "Array Samples",
#ylab = "Array Samples",
main = "Array-Array Intensity Correlation after RMA")
# Label the y axis per color group, colored by disease group.
for (i in 1:length(names(table(tabGroupCol$Color)) )){
currentCol <- names(table(tabGroupCol$Color))[i]
pos.col <- arraypos[which(tabGroupCol$Color == currentCol)]
lab.col <- colnames(c)[which(tabGroupCol$Color == currentCol)]
#axis(1, labels = lab.col , at = pos.col, col = currentCol,lwd = 6,las = 2)
axis(2, labels = lab.col , at = pos.col, col = currentCol,lwd = 6,las = 2)
}
# Color-key strip: pretty() breakpoints of the correlation range.
m <- matrix(pretty(c, 10), nrow = 1, ncol = length(pretty(c, 10)))
image(m, xaxt = "n", yaxt = "n", ylab = "Correlation Coefficient")
axis(2,
labels = as.list(pretty(c, 10)),
at = seq(0, 1, by = (1/(length(pretty(c, 10)) - 1))))
abline(h = seq((1/(length(pretty(c, 10)) - 1))/2,
1 - (1/(length(pretty(c, 10)) - 1)),
by = (1/(length(pretty(c, 10)) - 1))))
box()
# One box per sample: distribution of its correlations with all other samples.
boxplot(c,
outline = FALSE,
las =2,
lwd = 6,
# names = NULL,
col = tabGroupCol$Color,
main ="Boxplot of correlation samples by samples after normalization")
dev.off()
# Keep only samples whose mean correlation with all samples exceeds cor.cut;
# return the filtered expression matrix (not a SummarizedExperiment).
samplesCor <- rowMeans(c)
objectWO <- assay(object,datatype)[, samplesCor > cor.cut]
colnames(objectWO) <- colnames(object)[samplesCor > cor.cut]
return(objectWO)
}
#' @title survival analysis (SA) univariate with Kaplan-Meier (KM) method.
#' @description TCGAanalyze_SurvivalKM perform an univariate Kaplan-Meier (KM) survival analysis (SA).
#' It performed Kaplan-Meier survival univariate using complete follow up with all days
#' taking one gene a time from Genelist of gene symbols.
#' For each gene according its level of mean expression in cancer samples,
#' defining two thresholds for quantile
#' expression of that gene in all samples (default ThreshTop=0.67,ThreshDown=0.33) it is possible
#' to define a threshold of intensity of gene expression to divide the samples in 3 groups
#' (High, intermediate, low).
#' TCGAanalyze_SurvivalKM performs SA between High and low groups using following functions
#' from survival package
#' \enumerate{
#' \item survival::Surv
#' \item survival::survdiff
#' \item survival::survfit
#' }
#' @param clinical_patient is a data.frame using function 'clinic' with information
#' related to barcode / samples such as bcr_patient_barcode, days_to_death ,
#' days_to_last_follow_up , vital_status, etc
#' @param dataGE is a matrix of Gene expression (genes in rows, samples in cols) from TCGAprepare
#' @param Genelist is a list of gene symbols where perform survival KM.
#' @param Survresult is a parameter (default = FALSE) if is TRUE will show KM plot and results.
#' @param ThreshTop is a quantile threshold to identify samples with high expression of a gene
#' @param ThreshDown is a quantile threshold to identify samples with low expression of a gene
#' @param p.cut p.values threshold. Default: 0.05
#' @param group1 a string containing the barcode list of the samples in in control group
#' @param group2 a string containing the barcode list of the samples in in disease group
#' @importFrom survival Surv survdiff survfit
#' @export
#' @return table with survival genes pvalues from KM.
#' @examples
#' clinical_patient_Cancer <- GDCquery_clinic("TCGA-BRCA","clinical")
#' # Selecting only 20 genes for example
#' dataBRCAcomplete <- log2(dataBRCA[1:20,] + 1)
#' group1 <- TCGAquery_SampleTypes(colnames(dataBRCAcomplete), typesample = c("NT"))
#' group2 <- TCGAquery_SampleTypes(colnames(dataBRCAcomplete), typesample = c("TP"))
#'
#' tabSurvKM <- TCGAanalyze_SurvivalKM(clinical_patient_Cancer,
#'                                     dataBRCAcomplete,
#'                                     Genelist = rownames(dataBRCAcomplete),
#'                                     Survresult = FALSE,
#'                                     p.cut = 0.4,
#'                                     ThreshTop = 0.67,
#'                                     ThreshDown = 0.33,
#'                                     group1 = group1, # Control group
#'                                     group2 = group2) # Disease group
#'
#' # If the groups are not specified group1 == group2 and all samples are used
#' tabSurvKM <- TCGAanalyze_SurvivalKM(clinical_patient_Cancer,
#'                                     dataBRCAcomplete,
#'                                     Genelist = rownames(dataBRCAcomplete),
#'                                     Survresult = TRUE,
#'                                     p.cut = 0.2,
#'                                     ThreshTop = 0.67,
#'                                     ThreshDown = 0.33)
TCGAanalyze_SurvivalKM <- function(clinical_patient,
dataGE,
Genelist,
Survresult = FALSE,
ThreshTop = 0.67,
ThreshDown = 0.33,
p.cut = 0.05,
group1,
group2){
# Check which genes we really have in the matrix
Genelist <- intersect(rownames(dataGE),Genelist)
# Split gene expression matrix btw the groups.
# NOTE(review): group1/group2 have no defaults; when a caller omits them the
# missing argument propagates into `[` and selects ALL columns (this is how
# the "group1 == group2" example works). Fragile -- confirm before changing.
dataCancer <- dataGE[Genelist,group2, drop = FALSE]
dataNormal <- dataGE[Genelist,group1, drop = FALSE]
# Truncate barcodes to the 12-char patient id to match clinical data.
colnames(dataCancer) <- substr(colnames(dataCancer),1,12)
cfu <- clinical_patient[clinical_patient[,"bcr_patient_barcode"] %in% substr(colnames(dataCancer),1,12),]
# Harmonize the follow-up column name across clinical data versions.
if("days_to_last_followup" %in% colnames(cfu)) colnames(cfu)[grep("days_to_last_followup",colnames(cfu))] <- "days_to_last_follow_up"
cfu <- as.data.frame(subset(cfu, select=c("bcr_patient_barcode","days_to_death","days_to_last_follow_up","vital_status")) )
# Use the string "-Inf" as a sentinel: alive patients get no death day,
# dead patients get no follow-up day. Converted to numeric -Inf below,
# then zeroed before building the Surv object.
# Set alive death to inf
if(length(grep("alive",cfu$vital_status,ignore.case = TRUE)) > 0) cfu[grep("alive",cfu$vital_status,ignore.case = TRUE),"days_to_death"]<-"-Inf"
# Set dead follow up to inf
if(length(grep("dead",cfu$vital_status,ignore.case = TRUE)) > 0) cfu[grep("dead",cfu$vital_status,ignore.case = TRUE),"days_to_last_follow_up"]<-"-Inf"
# Drop patients with no usable time information.
cfu <- cfu[ !(is.na(cfu[,"days_to_last_follow_up"])),]
cfu <- cfu[ !(is.na(cfu[,"days_to_death"])),]
followUpLevel <- FALSE
# Result table: one row per gene, filled in the loop below.
#FC_FDR_table_mRNA
tabSurv_Matrix<-matrix(0,nrow(as.matrix(rownames(dataNormal))),8)
colnames(tabSurv_Matrix)<-c("mRNA",
"pvalue",
"Cancer Deaths",
"Cancer Deaths with Top",
"Cancer Deaths with Down",
"Mean Tumor Top",
"Mean Tumor Down",
"Mean Normal")
tabSurv_Matrix<-as.data.frame(tabSurv_Matrix)
cfu$days_to_death<-as.numeric(as.character(cfu$days_to_death))
cfu$days_to_last_follow_up<-as.numeric(as.character(cfu$days_to_last_follow_up))
rownames(cfu) <- cfu[, "bcr_patient_barcode" ] #mod1
cfu <- cfu[ !(is.na(cfu[,"days_to_last_follow_up"])),]
cfu <- cfu[ !(is.na(cfu[,"days_to_death"])),]
# Keep a pristine copy: `cfu` is overwritten inside the loop per gene.
cfu_complete<-cfu
ngenes<-nrow(as.matrix(rownames(dataNormal)))
# Evaluate each gene
for(i in 1:nrow(as.matrix(rownames(dataNormal)))) {
# Progress countdown printed to the console.
cat(paste0( (ngenes-i),"."))
mRNAselected <- as.matrix(rownames(dataNormal))[i]
mRNAselected_values <- dataCancer[rownames(dataCancer) == mRNAselected,]
mRNAselected_values_normal <- dataNormal[rownames(dataNormal) == mRNAselected,]
if(all(mRNAselected_values == 0)) next # All genes are 0
tabSurv_Matrix[i,"mRNA"] <- mRNAselected
# Get Thresh values for cancer expression
mRNAselected_values_ordered <- sort(mRNAselected_values,decreasing=TRUE)
mRNAselected_values_ordered_top <- as.numeric(quantile(as.numeric(mRNAselected_values_ordered),ThreshTop)[1])
mRNAselected_values_ordered_down <- as.numeric(quantile(as.numeric(mRNAselected_values_ordered),ThreshDown)[1])
mRNAselected_values_newvector <- mRNAselected_values
if (!is.na(mRNAselected_values_ordered_top)){
# How many samples do we have
numberOfSamples <- length(mRNAselected_values_ordered)
# High group (above ThreshTop)
lastelementTOP <- max(which(mRNAselected_values_ordered>mRNAselected_values_ordered_top))
# Low group (below ThreshDown)
firstelementDOWN <- min(which(mRNAselected_values_ordered<=mRNAselected_values_ordered_down))
samples_top_mRNA_selected <- names(mRNAselected_values_ordered[1:lastelementTOP])
samples_down_mRNA_selected <- names(mRNAselected_values_ordered[firstelementDOWN:numberOfSamples])
# Which samples are in the intermediate group (above ThreshLow and below ThreshTop)
samples_UNCHANGED_mRNA_selected <- names(mRNAselected_values_newvector[which((mRNAselected_values_newvector) > mRNAselected_values_ordered_down &
mRNAselected_values_newvector < mRNAselected_values_ordered_top )])
# Subset the clinical data to each expression group for this gene.
cfu_onlyTOP<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_top_mRNA_selected,]
cfu_onlyDOWN<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_down_mRNA_selected,]
cfu_onlyUNCHANGED<-cfu_complete[cfu_complete[,"bcr_patient_barcode"] %in% samples_UNCHANGED_mRNA_selected,]
# The KM test only uses TOP + DOWN patients, stacked in this order (the
# rep("top"/"down", ...) group labels below depend on this ordering).
cfu_ordered <- NULL
cfu_ordered <- rbind(cfu_onlyTOP,cfu_onlyDOWN)
cfu <- cfu_ordered
# status: event indicator; days_to_death > 0 means a death was observed
# (alive patients carry -Inf here from the sentinel above).
ttime <- as.numeric(cfu[, "days_to_death"])
sum(status <- ttime > 0) # deaths (event count)
deads_complete <- sum(status <- ttime > 0)
ttime_only_top <- cfu_onlyTOP[, "days_to_death"]
deads_top<- sum(ttime_only_top > 0)
if(dim(cfu_onlyDOWN)[1] >= 1) {
ttime_only_down <- cfu_onlyDOWN[, "days_to_death"]
deads_down<- sum(ttime_only_down > 0)
} else {
deads_down <- 0
}
tabSurv_Matrix[i,"Cancer Deaths"] <- deads_complete
tabSurv_Matrix[i,"Cancer Deaths with Top"] <- deads_top
tabSurv_Matrix[i,"Cancer Deaths with Down"] <- deads_down
tabSurv_Matrix[i,"Mean Normal"] <- mean(as.numeric(mRNAselected_values_normal))
# Mean expression of the selected gene within each group.
dataCancer_onlyTop_sample <- dataCancer[,samples_top_mRNA_selected,drop = FALSE]
dataCancer_onlyTop_sample_mRNASelected <- dataCancer_onlyTop_sample[rownames(dataCancer_onlyTop_sample) == mRNAselected,]
dataCancer_onlyDown_sample <- dataCancer[,samples_down_mRNA_selected,drop = FALSE]
dataCancer_onlyDown_sample_mRNASelected <- dataCancer_onlyDown_sample[rownames(dataCancer_onlyDown_sample) == mRNAselected,]
tabSurv_Matrix[i,"Mean Tumor Top"] <- mean(as.numeric(dataCancer_onlyTop_sample_mRNASelected))
tabSurv_Matrix[i,"Mean Tumor Down"] <- mean(as.numeric(dataCancer_onlyDown_sample_mRNASelected))
# Censored patients use follow-up time instead of death time; remaining
# -Inf sentinels are clamped to day 0.
ttime[!status] <- as.numeric(cfu[!status, "days_to_last_follow_up"])
ttime[which(ttime== -Inf)] <- 0
ttime <- Surv(ttime, status)
rownames(ttime) <- rownames(cfu)
legendHigh <- paste(mRNAselected,"High")
legendLow <- paste(mRNAselected,"Low")
# Log-rank test (top vs down); any failure yields p-value Inf, which is
# filtered out by the p.cut step at the end.
tabSurv_pvalue <- tryCatch({
tabSurv <- survdiff(ttime  ~ c(rep("top", nrow(cfu_onlyTOP)), rep("down", nrow(cfu_onlyDOWN)) ))
tabSurv_chis<-unlist(tabSurv)$chisq
tabSurv_pvalue <- as.numeric(1 - pchisq(abs(tabSurv$chisq), df = 1))
}, error = function(e){
return(Inf)
})
tabSurv_Matrix[i,"pvalue"] <- tabSurv_pvalue
if (Survresult ==TRUE) {
titlePlot<- paste("Kaplan-Meier Survival analysis, pvalue=",tabSurv_pvalue )
plot(survfit(ttime ~ c(rep("low", nrow(cfu_onlyTOP)), rep("high", nrow(cfu_onlyDOWN)))), col = c("green", "red"),main= titlePlot,xlab="Days",ylab="Survival")
legend(100, 1, legend = c(legendLow,legendHigh), col = c("green", "red"), text.col = c("green", "red"), pch = 15)
print(tabSurv)
}
} #end if
} #end for
# Replace leftover "-Inf" sentinels in the result table with 0.
tabSurv_Matrix[tabSurv_Matrix=="-Inf"]<-0
tabSurvKM <- tabSurv_Matrix
# Keep only evaluated genes and those passing the user-supplied p.cut.
tabSurvKM <- tabSurvKM[tabSurvKM$mRNA != 0,]
tabSurvKM <- tabSurvKM[tabSurvKM$pvalue < p.cut,]
tabSurvKM <- tabSurvKM[!duplicated(tabSurvKM$mRNA),]
rownames(tabSurvKM) <-tabSurvKM$mRNA
tabSurvKM <- tabSurvKM[,-1]
tabSurvKM <- tabSurvKM[order(tabSurvKM$pvalue, decreasing=FALSE),]
# Rename columns to group-neutral names (group1 = "normal", group2 = "cancer").
colnames(tabSurvKM) <- gsub("Cancer","Group2",colnames(tabSurvKM))
colnames(tabSurvKM) <- gsub("Tumor","Group2",colnames(tabSurvKM))
colnames(tabSurvKM) <- gsub("Normal","Group1",colnames(tabSurvKM))
return(tabSurvKM)
}
#' @title Filtering mRNA transcripts and miRNA selecting a threshold.
#' @description
#' TCGAanalyze_Filtering allows user to filter mRNA transcripts and miRNA,
#' selecting a threshold. For istance returns all mRNA or miRNA with mean across all
#' samples, higher than the threshold defined quantile mean across all samples.
#' @param tabDF is a dataframe or numeric matrix, each row represents a gene,
#' each column represents a sample come from TCGAPrepare
#' @param method is method of filtering such as 'quantile', 'varFilter', 'filter1', 'filter2'
#' @param qnt.cut is threshold selected as mean for filtering
#' @param var.func is function used as the per-feature filtering statistic.
#' See genefilter documentation
#' @param var.cutoff is a numeric value. See genefilter documentation
#' @param eta is a paramter for filter1. default eta = 0.05.
#' @param foldChange is a paramter for filter2. default foldChange = 1.
#' @importFrom genefilter varFilter
#' @export
#' @return A filtered dataframe or numeric matrix where each row represents a gene,
#' each column represents a sample
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataNorm <- TCGAanalyze_Normalization(tabDF = dataBRCA,
#'                                       geneInfo = geneInfo,
#'                                       method = "geneLength")
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataNorm, method = "quantile", qnt.cut = 0.25)
TCGAanalyze_Filtering <- function(tabDF, method,
                                  qnt.cut = 0.25,
                                  var.func = IQR,
                                  var.cutoff = 0.75,
                                  eta = 0.05,
                                  foldChange = 1){
    # Validate method up front: previously an unknown method failed later
    # with "object 'tabDF_Filt' not found".
    if (!method %in% c("quantile", "varFilter", "filter1", "filter2"))
        stop("method should be one of: 'quantile', 'varFilter', 'filter1', 'filter2'")
    if (method == "quantile") {
        # Keep genes whose mean across samples exceeds the qnt.cut quantile
        # of all per-gene means.
        GeneThresh <- as.numeric(quantile(rowMeans(tabDF), qnt.cut))
        geneFiltered <- names(which(rowMeans(tabDF) > GeneThresh))
        tabDF_Filt <- tabDF[geneFiltered, ]
    }
    if (method == "varFilter") {
        # BUG FIX: forward the user-supplied var.func / var.cutoff; the
        # previous version ignored them and hard-coded IQR / 0.75.
        tabDF_Filt <- genefilter::varFilter(tabDF, var.func = var.func,
                                            var.cutoff = var.cutoff,
                                            filterByQuantile = TRUE)
    }
    if (method == "filter1") {
        # log2-transform, then keep genes whose (1-eta) vs eta quantile
        # spread across samples exceeds foldChange. Note: returns the
        # transposed (samples x genes) log2 matrix, as before.
        normCounts <- tabDF
        geData <- t(log(1 + normCounts, 2))
        filter <- apply(geData, 2, function(x) sum(quantile(x, probs = c(1 - eta, eta)) * c(1, -1)))
        tabDF_Filt <- geData[, which(filter > foldChange)]
    }
    if (method == "filter2") {
        # Keep columns whose eta/(1-eta) quantiles straddle the value 10
        # (i.e. (q_hi - 10) and (q_lo - 10) have opposite signs).
        geData <- tabDF
        filter <- apply(geData, 2, function(x) prod(quantile(x, probs = c(1 - eta, eta)) - 10) < 0)
        tabDF_Filt <- geData[, which(filter)]
    }
    return(tabDF_Filt)
}
#' @title normalization mRNA transcripts and miRNA using EDASeq package.
#' @description
#' TCGAanalyze_Normalization allows user to normalize mRNA transcripts and miRNA,
#' using EDASeq package.
#'
#' Normalization for RNA-Seq Numerical and graphical
#' summaries of RNA-Seq read data. Within-lane normalization procedures
#' to adjust for GC-content effect (or other gene-level effects) on read counts:
#' loess robust local regression, global-scaling, and full-quantile normalization
#' (Risso et al., 2011). Between-lane normalization procedures to adjust for
#' distributional differences between lanes (e.g., sequencing depth):
#' global-scaling and full-quantile normalization (Bullard et al., 2010).
#'
#' For istance returns all mRNA or miRNA with mean across all
#' samples, higher than the threshold defined quantile mean across all samples.
#'
#' TCGAanalyze_Normalization performs normalization using following functions
#' from EDASeq
#' \enumerate{
#' \item EDASeq::newSeqExpressionSet
#' \item EDASeq::withinLaneNormalization
#' \item EDASeq::betweenLaneNormalization
#' \item EDASeq::counts
#' }
#' @param tabDF Rnaseq numeric matrix, each row represents a gene,
#' each column represents a sample
#' @param geneInfo Information matrix of 20531 genes about geneLength and gcContent.
#' Two objects are provided: TCGAbiolinks::geneInfoHT,TCGAbiolinks::geneInfo
#' @param method is method of normalization such as 'gcContent' or 'geneLength'
#' @importFrom EDASeq newSeqExpressionSet withinLaneNormalization
#' betweenLaneNormalization exprs counts offst
#' @export
#' @return Rnaseq matrix normalized with counts slot holds the count data as a matrix
#' of non-negative integer count values, one row for each observational unit (gene or the like),
#' and one column for each sample.
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
TCGAanalyze_Normalization <- function(tabDF,geneInfo,method = "geneLength"){
# Check if we have a SE, we need a gene expression matrix
if(is(tabDF,"SummarizedExperiment")) tabDF <- assay(tabDF)
# Clean the gene annotation: drop rows with NA length and coerce the
# geneLength / gcContent columns to numeric.
geneInfo <- geneInfo[!is.na(geneInfo[,1]),]
geneInfo <- as.data.frame(geneInfo)
geneInfo$geneLength <- as.numeric(as.character(geneInfo$geneLength))
geneInfo$gcContent <- as.numeric(as.character(geneInfo$gcContent))
if(method == "gcContent"){
# Row names are expected as "SYMBOL|ENTREZID"; split them into two columns.
tmp <- as.character(rownames(tabDF))
tmp <- strsplit(tmp, "\\|")
geneNames <- matrix("", ncol = 2, nrow = length(tmp))
j <- 1
while(j <= length(tmp)) {
geneNames[j, 1] <- tmp[[j]][1]
geneNames[j, 2] <- tmp[[j]][2]
j <- j + 1
}
# Unknown symbols ("?") fall back to the numeric id; duplicated symbols
# are disambiguated as "SYMBOL.ID" so row names stay unique.
tmp <- which(geneNames[, 1] == "?")
geneNames[tmp, 1] <- geneNames[tmp, 2]
tmp <- table(geneNames[,1])
tmp <- which(geneNames[,1] %in% names(tmp[which(tmp > 1)]))
geneNames[tmp, 1] <- paste(geneNames[tmp, 1], geneNames[tmp, 2], sep = ".")
tmp <- table(geneNames[,1])
rownames(tabDF) <- geneNames[,1]
rawCounts<- tabDF
# Keep only genes present in both the counts and the annotation.
commonGenes <- intersect(rownames(geneInfo), rownames(rawCounts))
geneInfo <- geneInfo[commonGenes,]
rawCounts <- rawCounts[commonGenes,]
# Rough runtime estimate assuming ~80k matrix elements processed per second.
timeEstimated <- format(ncol(tabDF)*nrow(tabDF)/80000,digits = 2)
message(messageEstimation <- paste("I Need about ", timeEstimated,
"seconds for this Complete Normalization Upper Quantile",
" [Processing 80k elements /s] "))
ffData <- as.data.frame(geneInfo)
rawCounts <- floor(rawCounts)
message("Step 1 of 4: newSeqExpressionSet ...")
tmp <- newSeqExpressionSet(as.matrix(rawCounts), featureData = ffData)
#fData(tmp)[, "gcContent"] <- as.numeric(geneInfo[, "gcContent"])
# GC-content (within-lane) then depth (between-lane) upper-quartile
# normalization, both returned as offsets (offset = TRUE).
message("Step 2 of 4: withinLaneNormalization ...")
tmp <- withinLaneNormalization(tmp, "gcContent", which = "upper", offset = TRUE)
message("Step 3 of 4: betweenLaneNormalization ...")
tmp <- betweenLaneNormalization(tmp, which = "upper", offset = TRUE)
# Apply the log-scale offsets to the raw counts, then back-transform to
# integer counts (+/- 0.1 pseudocount to avoid log(0)).
normCounts <- log(rawCounts + .1) + offst(tmp)
normCounts <- floor(exp(normCounts) - .1)
message("Step 4 of 4: .quantileNormalization ...")
# .quantileNormalization is a package-internal helper (defined elsewhere
# in this package); applied over genes via the double transpose.
tmp <- t(.quantileNormalization(t(normCounts)))
tabDF_norm <- floor(tmp)
}
if(method == "geneLength"){
# GenesCutID (package helper) strips row names down to the gene symbol;
# drop unknown ("?") and the known-duplicated SLC35E2 symbol first.
tabDF <- tabDF[ !(GenesCutID(as.matrix(rownames(tabDF))) == "?"),]
tabDF <- tabDF[ !(GenesCutID(as.matrix(rownames(tabDF))) == "SLC35E2"),]
rownames(tabDF) <- GenesCutID(as.matrix(rownames(tabDF)))
tabDF <- tabDF[rownames(tabDF) != "?", ]
# De-duplicate genes and samples, then restrict to annotated genes.
tabDF <- tabDF[!duplicated(rownames(tabDF)), !duplicated(colnames(tabDF))]
tabDF <- tabDF[rownames(tabDF) %in% rownames(geneInfo),]
tabDF <- as.matrix(tabDF)
geneInfo <- geneInfo[rownames(geneInfo) %in% rownames(tabDF), ]
geneInfo <- geneInfo[!duplicated(rownames(geneInfo)), ]
# Genes with length 0 cannot be length-normalized; drop them.
toKeep <- which(geneInfo[, "geneLength"] != 0)
geneInfo <- geneInfo[toKeep, ]
tabDF <- tabDF[toKeep, ]
geneInfo <- as.data.frame(geneInfo)
tabDF <- round(tabDF)
# Align counts and annotation on the same gene set / order.
commonGenes <- intersect(rownames(tabDF),rownames(geneInfo))
tabDF <- tabDF[commonGenes,]
geneInfo <- geneInfo[commonGenes,]
# Rough runtime estimate assuming ~80k matrix elements processed per second.
timeEstimated <- format(ncol(tabDF)*nrow(tabDF)/80000,digits = 2)
message(messageEstimation <- paste("I Need about ", timeEstimated,
"seconds for this Complete Normalization Upper Quantile",
" [Processing 80k elements /s] "))
# Gene-length (within-lane) then depth (between-lane) upper-quartile
# normalization; here counts are replaced directly (offset = FALSE).
message("Step 1 of 4: newSeqExpressionSet ...")
system.time(tabDF_norm <- EDASeq::newSeqExpressionSet(tabDF, featureData = geneInfo))
message("Step 2 of 4: withinLaneNormalization ...")
system.time(tabDF_norm <- EDASeq::withinLaneNormalization(tabDF_norm, "geneLength", which = "upper", offset = FALSE))
message("Step 3 of 4: betweenLaneNormalization ...")
system.time(tabDF_norm <- EDASeq::betweenLaneNormalization(tabDF_norm, which = "upper", offset = FALSE))
message("Step 4 of 4: exprs ...")
#system.time(tabDF_norm <- EDASeq::exprs(tabDF_norm))
# Extract the normalized count matrix from the SeqExpressionSet.
system.time(tabDF_norm <- EDASeq::counts(tabDF_norm))
}
return(tabDF_norm)
}
#' @title Differential expression analysis (DEA) using edgeR or limma package.
#' @description
#' TCGAanalyze_DEA allows user to perform Differentially expression analysis (DEA),
#' using edgeR package or limma to identify differentially expressed genes (DEGs).
#' It is possible to do a two-class analysis.
#'
#' TCGAanalyze_DEA performs DEA using following functions from edgeR:
#' \enumerate{
#' \item edgeR::DGEList converts the count matrix into an edgeR object.
#' \item edgeR::estimateCommonDisp each gene gets assigned the same dispersion estimate.
#' \item edgeR::exactTest performs pair-wise tests for differential expression between two groups.
#' \item edgeR::topTags takes the output from exactTest(), adjusts the raw p-values using the
#' False Discovery Rate (FDR) correction, and returns the top differentially expressed genes.
#' }
#' TCGAanalyze_DEA performs DEA using following functions from limma:
#' \enumerate{
#' \item limma::makeContrasts construct matrix of custom contrasts.
#' \item limma::lmFit Fit linear model for each gene given a series of arrays.
#' \item limma::contrasts.fit Given a linear model fit to microarray data, compute estimated coefficients and standard errors for a given set of contrasts.
#' \item limma::eBayes Given a microarray linear model fit, compute moderated t-statistics, moderated F-statistic, and log-odds of differential expression by empirical Bayes moderation of the standard errors towards a common value.
#' \item limma::toptable Extract a table of the top-ranked genes from a linear model fit.
#' }
#' @param mat1 numeric matrix, each row represents a gene,
#' each column represents a sample with Cond1type
#' @param mat2 numeric matrix, each row represents a gene,
#' each column represents a sample with Cond2type
#' @param metadata Add metadata
#' @param Cond1type a string containing the class label of the samples in mat1
#' (e.g., control group)
#' @param Cond2type a string containing the class label of the samples in mat2
#' (e.g., case group)
#' @param pipeline a string to specify which package to use ("limma" or "edgeR")
#' @param method is 'glmLRT' (1) or 'exactTest' (2) used for edgeR
#' (1) Fit a negative binomial generalized log-linear model to
#' the read counts for each gene
#' (2) Compute genewise exact tests for differences in the means between
#' two groups of negative-binomially distributed counts.
#' @param fdr.cut is a threshold to filter DEGs according their p-value corrected
#' @param logFC.cut is a threshold to filter DEGs according their logFC
#' @param elementsRatio is number of elements processed for second for time consumation estimation
#' @param batch.factors a vector containing strings to specify options for batch correction. Options are "Plate", "TSS", "Year", "Portion", "Center", and "Patients"
#' @param ClinicalDF a dataframe returned by GDCquery_clinic() to be used to extract year data
#' @param paired boolean to account for paired or non-paired samples. Set to TRUE for paired case
#' @param log.trans boolean to perform log cpm transformation. Set to TRUE for log transformation
#' @param trend boolean to perform limma-trend pipeline. Set to TRUE to go through limma-trend
#' @param MAT matrix containing expression set as all samples in columns and genes as rows. Do not provide if mat1 and mat2 are used
#' @param contrast.formula string input to determine coefficients and to design contrasts in a customized way
#' @param Condtypes vector of grouping for samples in MAT
#' @param voom boolean to perform voom transformation for limma-voom pipeline. Set to TRUE for voom transformation
#' @importFrom edgeR DGEList estimateCommonDisp exactTest topTags estimateGLMCommonDisp
#' estimateGLMTagwiseDisp glmFit glmLRT
#' @importFrom limma makeContrasts lmFit contrasts.fit eBayes toptable
#' @export
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataBRCA, method = "quantile", qnt.cut = 0.25)
#' samplesNT <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("NT"))
#' samplesTP <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("TP"))
#' dataDEGs <- TCGAanalyze_DEA(mat1 = dataFilt[,samplesNT],
#' mat2 = dataFilt[,samplesTP],
#' Cond1type = "Normal",
#' Cond2type = "Tumor")
#'
#' @return table with DEGs containing for each gene logFC, logCPM, pValue,and FDR, also for each contrast
TCGAanalyze_DEA <- function(mat1,
mat2,
metadata=TRUE,
Cond1type,
Cond2type,
pipeline="edgeR",
method = "exactTest",
fdr.cut = 1,
logFC.cut = 0,
elementsRatio = 30000,
batch.factors=NULL,
ClinicalDF=data.frame(),
paired=FALSE,
log.trans=FALSE,
voom=FALSE,
trend=FALSE,
MAT=data.frame(),
contrast.formula="",
Condtypes=c()
) {
table.code <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
"TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
"CELL","XP","XCL")
names(table.code)<- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
if(nrow(MAT)==0){
TOC <- cbind(mat1,mat2)
Cond1num <- ncol(mat1)
Cond2num <- ncol(mat2)
#print(map.ensg(genes = rownames(TOC))[,2:3])
}
else {
TOC<-MAT
}
if(metadata==TRUE){
#####
my_IDs <- get_IDs(TOC)
Plate<-factor(my_IDs$plate)
Condition<-factor(my_IDs$condition)
TSS<-factor(my_IDs$tss)
Portion<-factor(my_IDs$portion)
Center<-factor(my_IDs$center)
Patients<-factor(my_IDs$patient)
}
if(paired==TRUE){
matched.query<-TCGAquery_MatchedCoupledSampleTypes(my_IDs$barcode, table.code[unique(my_IDs$sample)])
my_IDs<-subset(my_IDs, barcode==matched.query)
TOC<-TOC[,(names(TOC) %in% matched.query)]
}
###Extract year data from clinical info:
if(nrow(ClinicalDF)>0){
names(ClinicalDF)[names(ClinicalDF)=="bcr_patient_barcode"] <- "patient"
ClinicalDF$age_at_diag_year <- floor(clinical$age_at_diagnosis/365)
ClinicalDF$diag_year<-ClinicalDF$age_at_diag_year+clinical$year_of_birth
diag_yearDF<-ClinicalDF[,c("patient", "diag_year")]
my_IDs<-merge(my_IDs, ClinicalDF, by="patient")
Year<-as.factor(my_IDs$diag_year)
}
####ADD PATIENT AS OPTION
options <- c("Plate", "TSS", "Year", "Portion", "Center", "Patients")
if(length(batch.factors)==0){
message("Batch correction skipped since no factors provided")
}
else
for(o in batch.factors){
if(o %in% options == FALSE)
stop(paste0(o, " is not a valid batch correction factor"))
if(o == "Year" & nrow(ClinicalDF)==0)
stop("batch correction using diagnosis year needs clinical info. Provide Clinical Data in arguments")
}
###Additive Formula#######
additiveformula <-paste(batch.factors, collapse="+")
###########################
message("----------------------- DEA -------------------------------")
if(nrow(MAT)==0){
message(message1 <- paste( "there are Cond1 type", Cond1type ,"in ",
Cond1num, "samples"))
message(message2 <- paste( "there are Cond2 type", Cond2type ,"in ",
Cond2num, "samples"))
message(message3 <- paste( "there are ", nrow(TOC) ,
"features as miRNA or genes "))
}
else{
message(message3 <- paste( "there are ", nrow(TOC) ,
"features as miRNA or genes "))
}
timeEstimated <- format(ncol(TOC)*nrow(TOC)/elementsRatio,digits = 2)
message(messageEstimation <- paste("I Need about ", timeEstimated,
"seconds for this DEA. [Processing 30k elements /s] "))
# Reading in the data and creating a DGEList object
colnames(TOC) <- paste0('s',1:ncol(TOC))
#DGE <- DGEList(TOC,group=rep(c("Normal","Tumor"),c(NormalSample,
#TumorSample)))
if(length(Condtypes)>0){
tumorType <- factor(x=Condtypes, levels=unique(Condtypes))
}
else {
tumorType <- factor(x = rep(c(Cond1type,Cond2type),
c(Cond1num,Cond2num)),
levels = c(Cond1type,Cond2type))
}
# DGE.mat<-edgeR::DGEList(TOC,group = tumorType)
if(length(batch.factors)== 0 & length(Condtypes)>0){
if(pipeline=="edgeR")
design <- model.matrix(~tumorType)
else
design <- model.matrix(~0+tumorType)
}
else if(length(batch.factors)== 0 & length(Condtypes)==0){
if(pipeline=="edgeR")
design <- model.matrix(~tumorType)
else
design <- model.matrix(~0+tumorType)
}
else if(length(batch.factors)> 0 & length(Condtypes)==0){
if(pipeline=="edgeR")
formula<-paste0("~tumorType+", additiveformula)
else
formula<-paste0("~0+tumorType+", additiveformula)
design <- model.matrix(eval(parse(text=formula)))
}
else if(length(batch.factors)> 0 & length(Condtypes)>0){
if(pipeline=="edgeR")
formula<-paste0("~tumorType+", additiveformula)
else
formula<-paste0("~0+tumorType+", additiveformula)
design <- model.matrix(eval(parse(text=formula)))
}
if(pipeline=="edgeR"){
if (method == "exactTest"){
DGE <- edgeR::DGEList(TOC,group = rep(c(Cond1type,Cond2type),
c(Cond1num,Cond2num)))
# Analysis using common dispersion
disp <- edgeR::estimateCommonDisp(DGE) # Estimating the common dispersion
#tested <- exactTest(disp,pair=c("Normal","Tumor")) # Testing
tested <- edgeR::exactTest(disp,pair = c(Cond1type,Cond2type)) # Testing
# Results visualization
logFC_table <- tested$table
tableDEA <- edgeR::topTags(tested,n = nrow(tested$table))$table
tableDEA <- tableDEA[tableDEA$FDR <= fdr.cut,]
tableDEA <- tableDEA[abs(tableDEA$logFC) >= logFC.cut,]
}
else if (method == "glmLRT"){
if(length(unique(tumorType))==2){
aDGEList <- edgeR::DGEList(counts = TOC, group = tumorType)
aDGEList <- edgeR::estimateGLMCommonDisp(aDGEList, design)
aDGEList <- edgeR::estimateGLMTagwiseDisp(aDGEList, design)
aGlmFit <- edgeR::glmFit(aDGEList, design, dispersion = aDGEList$tagwise.dispersion,
prior.count.total=0)
aGlmLRT <- edgeR::glmLRT(aGlmFit, coef = 2)
tableDEA <- cbind(aGlmLRT$table, FDR = p.adjust(aGlmLRT$table$PValue, "fdr"))
tableDEA <- tableDEA[tableDEA$FDR < fdr.cut,]
tableDEA <- tableDEA[abs(tableDEA$logFC) > logFC.cut,]
if(all(grepl("ENSG",rownames(tableDEA)))) tableDEA <- cbind(tableDEA,map.ensg(genes = rownames(tableDEA))[,2:3])
}
else if(length(unique(tumorType))>2) {
aDGEList <- edgeR::DGEList(counts = TOC, group = tumorType)
colnames(design)[1:length(levels(tumorType))]<-levels(tumorType)
prestr="makeContrasts("
poststr=",levels=colnames(design))"
commandstr=paste(prestr,contrast.formula,poststr,sep="")
commandstr=paste0("limma::", commandstr)
cont.matrix<-eval(parse(text=commandstr))
aDGEList <- edgeR::estimateGLMCommonDisp(aDGEList, design)
aDGEList <- edgeR::estimateGLMTagwiseDisp(aDGEList, design)
aGlmFit <- edgeR::glmFit(aDGEList, design, dispersion = aDGEList$tagwise.dispersion,
prior.count.total=0)
print(cont.matrix)
tableDEA<-list()
#[2:length(colnames(cont.matrix))]
for(mycoef in colnames(cont.matrix)){
message(paste0("DEA for", " :", mycoef))
aGlmLRT <- edgeR::glmLRT(aGlmFit, contrast=cont.matrix[,mycoef])
print("---toptags---")
print(topTags(aGlmLRT, adjust.method="fdr", sort.by="PValue"))
tt<-aGlmLRT$table
tt <- cbind(tt, FDR = p.adjust(aGlmLRT$table$PValue, "fdr"))
tt <- tt[(tt$FDR < fdr.cut & abs(as.numeric(tt$logFC)) > logFC.cut),]
#tt <- tt[abs(as.numeric(tt$logFC)) > logFC.cut,]
tableDEA[[as.character(mycoef)]]<-tt
#print(rownames(tableDEA[[as.character(mycoef)]]))
if(all(grepl("ENSG",rownames(tableDEA[[as.character(mycoef)]])))) tableDEA[[as.character(mycoef)]] <- cbind(tableDEA[[as.character(mycoef)]],map.ensg(genes = rownames(tableDEA[[as.character(mycoef)]]))[,2:3])
}
#sapply(colnames(dataFilt), FUN= function(x) subtypedata[which(subtypedata$samples==substr(x,1,12)),]$subtype)
}
#design <- model.matrix(~tumorType)
}
else stop(paste0(method, " is not a valid DEA method option. Choose 'exactTest' or 'glmLRT' "))
}
else if(pipeline=="limma"){
###logcpm transformation for limma-trend method using edgeR cpm method
if(log.trans==TRUE)
logCPM<- edgeR::cpm(TOC, log=TRUE, prior.count=3)
else
logCPM<-TOC
if(voom==TRUE){
message("Voom Transformation...")
logCPM<-limma::voom(logCPM, design)
}
if(length(unique(tumorType))==2){
#DGE <- edgeR::DGEList(TOC,group = rep(c(Cond1type,Cond2type),
#c(Cond1num,Cond2num)))
colnames(design)[1:2]<-c(Cond1type,Cond2type)
contr<-paste0(Cond2type,"-",Cond1type)
cont.matrix <- limma::makeContrasts(contrasts=contr, levels=design)
fit <- limma::lmFit(logCPM, design)
fit<-contrasts.fit(fit, cont.matrix)
if(trend==TRUE){
fit <- limma::eBayes(fit, trend=TRUE)
}
else{
fit <- limma::eBayes(fit, trend=FALSE)
}
tableDEA<-limma::topTable(fit, coef=1, adjust.method='fdr', number=nrow(TOC))
limma::volcanoplot(fit, highlight=10)
index <- which( tableDEA[,4] < fdr.cut)
tableDEA<-tableDEA[index,]
neg_logFC.cut<- -1*logFC.cut
index<-which(abs(as.numeric(tableDEA$logFC))>logFC.cut )
tableDEA<-tableDEA[index,]
#if(all(grepl("ENSG",rownames(tableDEA)))) tableDEA <- cbind(tableDEA,map.ensg(genes = rownames(tableDEA))[,2:3])
}
else if(length(unique(tumorType))>2){
DGE <- edgeR::DGEList(TOC,group = tumorType)
#colnames(design)[1:2]<-c(Cond1type,Cond2type)
colnames(design)[1:length(levels(tumorType))]<-levels(tumorType)
prestr="makeContrasts("
poststr=",levels=colnames(design))"
commandstr=paste(prestr,contrast.formula,poststr,sep="")
commandstr=paste0("limma::", commandstr)
cont.matrix<-eval(parse(text=commandstr))
fit <- limma::lmFit(logCPM, design)
fit<-limma::contrasts.fit(fit, cont.matrix)
if(trend==TRUE) ##limma-trend option
fit <- limma::eBayes(fit, trend=TRUE)
else
fit <- limma::eBayes(fit, trend=FALSE)
tableDEA<-list()
for(mycoef in colnames(cont.matrix)){
tableDEA[[as.character(mycoef)]]<-limma::topTable(fit, coef=mycoef, adjust.method="fdr", number=nrow(MAT))
message(paste0("DEA for", " :", mycoef))
tempDEA<-tableDEA[[as.character(mycoef)]]
index.up <- which(tempDEA$adj.P.Val < fdr.cut & abs(as.numeric(tempDEA$logFC))>logFC.cut)
tableDEA[[as.character(mycoef)]]<-tempDEA[index.up,]
if(all(grepl("ENSG",rownames(tableDEA[[as.character(mycoef)]])))) tableDEA[[as.character(mycoef)]] <- cbind(tableDEA[[as.character(mycoef)]],map.ensg(genes = rownames(tableDEA[[as.character(mycoef)]]))[,2:3])
#i<-i+1
}
#sapply(colnames(dataFilt), FUN= function(x) subtypedata[which(subtypedata$samples==substr(x,1,12)),]$subtype)
}
}
else stop(paste0(pipeline, " is not a valid pipeline option. Choose 'edgeR' or 'limma'"))
#if(all(grepl("ENSG",rownames(tableDEA)))) tableDEA <- cbind(tableDEA,map.ensg(genes = rownames(tableDEA))[,2:3])
message("----------------------- END DEA -------------------------------")
return(tableDEA)
}
#' @title Batch correction using ComBat and Voom transformation using limma package.
#' @description
#' TCGAbatch_correction allows user to perform a Voom correction on gene expression data and have it ready for DEA.
#' One can also use ComBat for batch correction for exploratory analysis. If batch.factor or adjustment argument is "Year"
#' please provide clinical data. If no batch factor is provided, the data will be voom corrected only
#'
#' TCGAbatch_Correction uses the following functions from sva and limma:
#' \enumerate{
#' \item limma::voom Transform RNA-Seq Data Ready for Linear Modelling.
#' \item sva::ComBat Adjust for batch effects using an empirical Bayes framework.
#' }
#' @param tabDF numeric matrix, each row represents a gene,
#' each column represents a sample
#' @param batch.factor a string containing the batch factor to use for correction. Options are "Plate", "TSS", "Year", "Portion", "Center"
#' @param adjustment vector containing strings for factors to adjust for using ComBat. Options are "Plate", "TSS", "Year", "Portion", "Center"
#' @param ClinicalDF a dataframe returned by GDCquery_clinic() to be used to extract year data
#' @importFrom limma voom
#' @importFrom sva ComBat
#' @export
#' @return data frame with ComBat batch correction applied
TCGAbatch_Correction <- function(tabDF, batch.factor = NULL, adjustment = NULL,
                                 ClinicalDF = data.frame()) {
  # Batch-correct an expression matrix with sva::ComBat.
  #
  # tabDF        : numeric matrix, genes in rows, TCGA samples in columns.
  # batch.factor : single batch variable to correct for
  #                ("Plate", "TSS", "Year", "Portion", "Sequencing Center").
  # adjustment   : additional covariates to adjust for in the ComBat model.
  # ClinicalDF   : clinical data frame, required only when "Year" is used.
  #
  # Returns the ComBat-corrected matrix (or the input unchanged when no
  # factor is supplied).

  # Nothing to correct or adjust for: return the input untouched instead of
  # falling through to a guaranteed error as the original code did.
  if (length(batch.factor) == 0 && length(adjustment) == 0) {
    message("batch correction will be skipped")
    return(tabDF)
  }
  # A factor cannot serve both as the ComBat batch and as an adjustment
  # covariate. (length() guard: `batch.factor` may be NULL here.)
  if (length(batch.factor) > 0 && batch.factor %in% adjustment) {
    stop("Cannot adjust and correct for the same factor")
  }
  my_IDs <- get_IDs(tabDF)
  # "Year" is not encoded in the TCGA barcode, so derive it from clinical data.
  if ((nrow(ClinicalDF) > 0 && length(batch.factor) > 0 && batch.factor == "Year") ||
      ("Year" %in% adjustment && nrow(ClinicalDF) > 0)) {
    names(ClinicalDF)[names(ClinicalDF) == "bcr_patient_barcode"] <- "patient"
    # Diagnosis year = birth year + age at diagnosis (age stored in days).
    ClinicalDF$age_at_diag_year <- floor(ClinicalDF$age_at_diagnosis / 365)
    ClinicalDF$diag_year <- ClinicalDF$age_at_diag_year + ClinicalDF$year_of_birth
    diag_yearDF <- ClinicalDF[, c("patient", "diag_year")]
    Year <- merge(my_IDs, diag_yearDF, by = "patient")
    Year <- as.factor(Year$diag_year)
  } else if (nrow(ClinicalDF) == 0 && length(batch.factor) > 0 && batch.factor == "Year") {
    stop("Cannot extract Year data. Clinical data was not provided")
  }
  # Candidate batch/adjustment variables derived from the sample barcodes.
  Plate <- as.factor(my_IDs$plate)
  Condition <- as.factor(my_IDs$condition)
  TSS <- as.factor(my_IDs$tss)
  Portion <- as.factor(my_IDs$portion)
  Sequencing.Center <- as.factor(my_IDs$center)
  design.mod.combat <- model.matrix(~Condition)
  options <- c("Plate", "TSS", "Year", "Portion", "Sequencing Center")
  if (length(batch.factor) > 1) {
    stop("Combat can only correct for one batch variable. Provide one batch factor")
  }
  # BUG FIX: the original message referenced `o`, which is undefined at this
  # point (it is the loop variable of the adjustment loop below).
  if (length(batch.factor) == 1 && !(batch.factor %in% options)) {
    stop(paste0(batch.factor, " is not a valid batch correction factor"))
  }
  for (o in adjustment) {
    if (!(o %in% options)) {
      stop(paste0(o, " is not a valid adjustment factor"))
    }
  }
  # Collect the adjustment variables column-wise. "Sequencing Center" is not a
  # syntactic variable name, so it is resolved explicitly instead of being
  # pushed through parse()/eval() (which mis-evaluated the factor's codes).
  adjustment.data <- c()
  for (a in adjustment) {
    adj.values <- if (a == "Sequencing Center") Sequencing.Center else eval(parse(text = a))
    adjustment.data <- cbind(adj.values, adjustment.data)
  }
  # Resolve the batch variable itself the same way.
  if (length(batch.factor) == 1 && batch.factor == "Sequencing Center") {
    batchCombat <- Sequencing.Center
  } else {
    batchCombat <- eval(parse(text = batch.factor))
  }
  if (length(adjustment) > 0) {
    # Model: ~Condition + <adjustment covariates>. The covariate names are
    # looked up in this function's environment (Plate, TSS, ... above), not
    # in `model`, which only carries the batch column and the row names.
    adjustment.formula <- paste(adjustment, collapse = "+")
    adjustment.formula <- paste0("~Condition+", adjustment.formula)
    print(adjustment.formula)
    model <- data.frame(batchCombat, row.names = colnames(tabDF))
    design.mod.combat <- model.matrix(eval(parse(text = adjustment.formula)),
                                      data = model)
  }
  print(unique(batchCombat))
  batch_corr <- sva::ComBat(dat = tabDF, batch = batchCombat,
                            mod = design.mod.combat, par.prior = TRUE,
                            prior.plots = TRUE)
  return(batch_corr)
}
##Function to take raw counts by removing rows filtered after norm and filter process###
#' @title Use raw count from the DataPrep object which genes are removed by normalization and filtering steps.
#' @description function to keep raw counts after filtering and/or normalizing.
#' @param DataPrep DataPrep object returned by TCGAanalyze_Preprocessing()
#' @param DataFilt Filtered data frame containing samples in columns and genes in rows after normalization and/or filtering steps
#' @examples
#' \dontrun{
#' dataPrep_raw <- UseRaw_afterFilter(dataPrep, dataFilt)
#' }
#' @export
#' @return Filtered return object similar to DataPrep with genes removed after normalization and filtering process.
UseRaw_afterFilter <- function(DataPrep, DataFilt) {
  # Keep the raw counts only for the features that survived the
  # normalization/filtering step.
  #
  # DataPrep : raw matrix (rownames may carry a punctuation-delimited numeric
  #            suffix, e.g. "TP53|7157").
  # DataFilt : filtered matrix whose rownames are the bare feature names.
  #
  # Returns DataPrep restricted to the rows present in DataFilt.

  # Strip the suffix; gsub() is vectorized, so no per-element lapply() is
  # needed (the original also assigned a list as rownames).
  rownames(DataPrep) <- gsub("[[:punct:]]\\d*", "", rownames(DataPrep))
  # Equivalent to the original setdiff()-then-exclude double negative.
  Res <- DataPrep[rownames(DataPrep) %in% rownames(DataFilt), ]
  return(Res)
}
#' @importFrom biomaRt getBM useMart listDatasets
map.ensg <- function(genome = "hg38", genes) {
  # Map ENSEMBL gene ids to Entrez ids and gene symbols via biomaRt,
  # preserving the order of `genes` in the returned data frame.
  if (genome == "hg19") {
    # hg19 annotation lives on the Feb 2014 archive mart.
    mart <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                    host = "feb2014.archive.ensembl.org",
                    path = "/biomart/martservice",
                    dataset = "hsapiens_gene_ensembl")
    attrs <- c("ensembl_gene_id", "entrezgene", "external_gene_id")
  } else {
    # Default: current Ensembl release (hg38).
    mart <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
    attrs <- c("ensembl_gene_id", "entrezgene", "external_gene_name")
  }
  mapping <- getBM(attributes = attrs,
                   filters = c("ensembl_gene_id"),
                   values = list(genes),
                   mart = mart)
  # Unify the symbol column name across the two genomes.
  colnames(mapping) <- c("ensembl_gene_id", "entrezgene", "external_gene_name")
  # Re-align rows to the input order (one row per requested gene).
  mapping <- mapping[match(genes, mapping$ensembl_gene_id), ]
  return(mapping)
}
#' @title Adding information related to DEGs genes from DEA as mean values in two conditions.
#' @description
#' TCGAanalyze_LevelTab allows user to add information related to DEGs genes from
#' Differentially expression analysis (DEA) such as mean values and in two conditions.
#' @param FC_FDR_table_mRNA Output of dataDEGs filter by abs(LogFC) >=1
#' @param typeCond1 a string containing the class label of the samples
#' in TableCond1 (e.g., control group)
#' @param typeCond2 a string containing the class label of the samples
#' in TableCond2 (e.g., case group)
#' @param TableCond1 numeric matrix, each row represents a gene, each column
#' represents a sample with Cond1type
#' @param TableCond2 numeric matrix, each row represents a gene, each column
#' represents a sample with Cond2type
#' @param typeOrder typeOrder
#' @importFrom edgeR DGEList estimateCommonDisp exactTest topTags
#' @export
#' @return table with DEGs, log Fold Change (FC), false discovery rate (FDR),
#' the gene expression level
#' for samples in Cond1type, and Cond2type, and Delta value (the difference
#' of gene expression between the two
#' conditions multiplied logFC)
#' @examples
#' dataNorm <- TCGAbiolinks::TCGAanalyze_Normalization(dataBRCA, geneInfo)
#' dataFilt <- TCGAanalyze_Filtering(tabDF = dataBRCA, method = "quantile", qnt.cut = 0.25)
#' samplesNT <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("NT"))
#' samplesTP <- TCGAquery_SampleTypes(colnames(dataFilt), typesample = c("TP"))
#' dataDEGs <- TCGAanalyze_DEA(dataFilt[,samplesNT],
#' dataFilt[,samplesTP],
#' Cond1type = "Normal",
#' Cond2type = "Tumor")
#' dataDEGsFilt <- dataDEGs[abs(dataDEGs$logFC) >= 1,]
#' dataTP <- dataFilt[,samplesTP]
#' dataTN <- dataFilt[,samplesNT]
#' dataDEGsFiltLevel <- TCGAanalyze_LevelTab(dataDEGsFilt,"Tumor","Normal",
#' dataTP,dataTN)
TCGAanalyze_LevelTab <- function(FC_FDR_table_mRNA,
                                 typeCond1,
                                 typeCond2,
                                 TableCond1,
                                 TableCond2,
                                 typeOrder = TRUE) {
  # Annotate a DEA result table with the mean expression of each gene in the
  # two conditions and a 'Delta' score (|logFC| * mean expression in cond 1),
  # then sort by Delta.
  #
  # FC_FDR_table_mRNA : DEA table with 'logFC' and 'FDR' columns, genes as rownames.
  # typeCond1/2       : labels used as column names for the per-condition means.
  # TableCond1/2      : expression matrices (genes x samples) for each condition.
  # typeOrder         : passed to order(decreasing=) for the Delta sort.
  #
  # Returns a data.frame with columns mRNA, logFC, FDR, <typeCond1>,
  # <typeCond2>, Delta (plus Entrez id / symbol for all-ENSG input).
  TF_enriched <- as.matrix(rownames(FC_FDR_table_mRNA))
  TableLevel <- as.data.frame(matrix(0, nrow(TF_enriched), 6))
  colnames(TableLevel) <- c("mRNA", "logFC", "FDR", typeCond1, typeCond2, "Delta")
  TableLevel[, "mRNA"] <- TF_enriched
  Tabfilt <- FC_FDR_table_mRNA[which(rownames(FC_FDR_table_mRNA) %in% TF_enriched), ]
  TableLevel[, "logFC"] <- as.numeric(Tabfilt[TF_enriched, ][, "logFC"])
  TableLevel[, "FDR"] <- as.numeric(Tabfilt[TF_enriched, ][, "FDR"])
  # Mean expression of each DEG across the samples of each condition.
  # (MeanTumor / MeanDiffTumorNormal of the original were never used; removed.)
  for (i in seq_len(nrow(TF_enriched))) {
    TableLevel[i, typeCond1] <- mean(as.numeric(
      TableCond1[rownames(TableCond1) %in% TF_enriched[i], ]))
    TableLevel[i, typeCond2] <- mean(as.numeric(
      TableCond2[rownames(TableCond2) %in% TF_enriched[i], ]))
  }
  TableLevel[, "Delta"] <- as.numeric(abs(TableLevel[, "logFC"]) *
                                        TableLevel[, typeCond1])
  TableLevel <- TableLevel[order(as.numeric(TableLevel[, "Delta"]),
                                 decreasing = typeOrder), ]
  rownames(TableLevel) <- TableLevel[, "mRNA"]
  # For pure ENSEMBL-id input, append Entrez id and gene symbol columns.
  if (all(grepl("ENSG", rownames(TableLevel)))) {
    TableLevel <- cbind(TableLevel,
                        map.ensg(genes = rownames(TableLevel))[, 2:3])
  }
  return(TableLevel)
}
#' @title Enrichment analysis for Gene Ontology (GO) [BP,MF,CC] and Pathways
#' @description
#' Researchers, in order to better understand the underlying biological
#' processes, often want to retrieve a functional profile of a set of genes
#' that might have an important role. This can be done by performing an
#' enrichment analysis.
#'
#'We will perform an enrichment analysis on gene sets using the TCGAanalyze_EAcomplete
#'function. Given a set of genes that are
#'up-regulated under certain conditions, an enrichment analysis will
#'identify classes of genes or proteins that are over-represented using
#'annotations for that gene set.
#' @param TFname is the name of the list of genes or TF's regulon.
#' @param RegulonList List of genes such as TF's regulon or DEGs where to find enrichment.
#' @export
#' @return Enrichment analysis GO[BP,MF,CC] and Pathways complete table enriched by genelist.
#' @examples
#' Genelist <- c("FN1","COL1A1")
#' ansEA <- TCGAanalyze_EAcomplete(TFname="DEA genes Normal Vs Tumor",Genelist)
#' \dontrun{
#' Genelist <- rownames(dataDEGsFiltLevel)
#' system.time(ansEA <- TCGAanalyze_EAcomplete(TFname="DEA genes Normal Vs Tumor",Genelist))
#' }
TCGAanalyze_EAcomplete <- function(TFname, RegulonList) {
  # Run the full enrichment analysis (GO BP/MF/CC and pathways) for a gene
  # list and return the four result tables in a named list.
  #
  # Input sanitisation: entries of the form "Gene|ID" are reduced to "Gene".
  if (all(grepl("\\|", RegulonList))) {
    RegulonList <- unlist(lapply(strsplit(RegulonList, "\\|"),
                                 function(x) x[1]))
  }
  print(paste("I need about ", "1 minute to finish complete ",
              "Enrichment analysis GO[BP,MF,CC] and Pathways... "))
  # One TCGAanalyze_EA call per ontology / annotation table.
  ResBP <- TCGAanalyze_EA(TFname, RegulonList, DAVID_BP_matrix,
                          EAGenes, GOtype = "DavidBP")
  print("GO Enrichment Analysis BP completed....done")
  ResMF <- TCGAanalyze_EA(TFname, RegulonList, DAVID_MF_matrix,
                          EAGenes, GOtype = "DavidMF")
  print("GO Enrichment Analysis MF completed....done")
  ResCC <- TCGAanalyze_EA(TFname, RegulonList, DAVID_CC_matrix,
                          EAGenes, GOtype = "DavidCC")
  print("GO Enrichment Analysis CC completed....done")
  ResPat <- TCGAanalyze_EA(TFname, RegulonList, listEA_pathways,
                           EAGenes, GOtype = "Pathway")
  print("Pathway Enrichment Analysis completed....done")
  return(list(ResBP = ResBP, ResMF = ResMF, ResCC = ResCC, ResPat = ResPat))
}
#' @title Enrichment analysis of a gene-set with GO [BP,MF,CC] and pathways.
#' @description
#' The rational behind a enrichment analysis ( gene-set, pathway etc) is to compute
#' statistics of whether the overlap between the focus list (signature) and the gene-set
#' is significant. ie the confidence that overlap between the list is not due to chance.
#' The Gene Ontology project describes genes (gene products) using terms from
#' three structured vocabularies: biological process, cellular component and molecular function.
#' The Gene Ontology Enrichment component, also referred to as the GO Terms" component, allows
#' the genes in any such "changed-gene" list to be characterized using the Gene Ontology terms
#' annotated to them. It asks, whether for any particular GO term, the fraction of genes
#' assigned to it in the "changed-gene" list is higher than expected by chance
#' (is over-represented), relative to the fraction of genes assigned to that term in the
#' reference set.
#' In statistical terms the analysis tests the null hypothesis that,
#' for any particular ontology term, there is no difference in the proportion of genes
#' annotated to it in the reference list and the proportion annotated to it in the test list.
#' We adopted a Fisher Exact Test to perform the EA.
#' @param GeneName is the name of gene signatures list
#' @param TableEnrichment is a table related to annotations of gene symbols such as
#' GO[BP,MF,CC] and Pathways. It was created from DAVID gene ontology on-line.
#' @param RegulonList is a gene signature (list of genes) on which to perform EA.
#' @param GOtype is type of gene ontology Biological process (BP), Molecular Function (MF),
#' Cellular component (CC)
#' @param FDRThresh pvalue corrected (FDR) as threshold to selected significant
#' BP, MF,CC, or pathways. (default FDR < 0.01)
#' @param EAGenes is a table with informations about genes
#' such as ID, Gene, Description, Location and Family.
# @export
#' @import stats
#' @return Table with enriched GO or pathways by selected gene signature.
#' @examples
#' \dontrun{
#' EAGenes <- get("EAGenes")
#' RegulonList <- rownames(dataDEGsFiltLevel)
#' ResBP <- TCGAanalyze_EA(GeneName="DEA genes Normal Vs Tumor",
#' RegulonList,DAVID_BP_matrix,
#' EAGenes,GOtype = "DavidBP")
#'}
# Fisher-exact-test enrichment of a gene signature against one annotation
# table (GO BP/MF/CC or pathways). Returns a one-row data.frame whose cells
# summarise each significantly enriched term, or NA when nothing passes the
# FDR threshold.
TCGAanalyze_EA <- function(GeneName,RegulonList,TableEnrichment,
EAGenes,GOtype,FDRThresh=0.01) {
# One potential output column per annotation term; row is named after the
# signature.
topPathways <- nrow(TableEnrichment)
topPathways_tab <- matrix(0,1,topPathways)
topPathways_tab <- as.matrix(topPathways_tab)
rownames(topPathways_tab) <- GeneName
# Deduplicate the gene universe by ID; `allgene` is the background set for
# the Fisher test below.
rownames(EAGenes) <- toupper(rownames(EAGenes) )
EAGenes <- EAGenes[!duplicated(EAGenes[,"ID"]),]
rownames(EAGenes) <- EAGenes[,"ID"]
allgene <- EAGenes[,"ID"]
current_pathway_from_EA <- as.matrix(TableEnrichment[,GOtype]) # genes from EA pathways
# Column headers like "Top <GOtype> n. i of N" ("David" prefix stripped).
TableNames <- gsub("David","",paste("Top ", GOtype, " n. ", 1:topPathways,
" of ", topPathways, sep = ""))
colnames(topPathways_tab) <- TableNames
topPathways_tab <- as.data.frame(topPathways_tab)
# Per-term statistics, initialised to 1 so unfilled Pvalues sort last and
# are dropped by the < 0.01 filter below.
table_pathway_enriched <- matrix(1, nrow(current_pathway_from_EA),7)
colnames(table_pathway_enriched) <- c("Pathway","GenesInPathway","Pvalue",
"FDR","CommonGenesPathway",
"PercentPathway","PercentRegulon")
table_pathway_enriched <- as.data.frame(table_pathway_enriched)
for (i in 1:nrow(current_pathway_from_EA)) {
table_pathway_enriched[i,"Pathway"] <- as.character(current_pathway_from_EA[i,])
# NOTE(review): 589 appears to identify a specific annotation table whose
# "Molecules" field is comma-separated without spaces — confirm; all other
# tables use ", " as separator. GeneSplitRegulon is defined elsewhere in
# this package.
if (nrow(TableEnrichment) == 589) {
genes_from_current_pathway_from_EA <- GeneSplitRegulon(TableEnrichment[ TableEnrichment[GOtype] == as.character(current_pathway_from_EA[i,]) ,][,"Molecules"], ",")
}
else {
genes_from_current_pathway_from_EA <- GeneSplitRegulon(TableEnrichment[ TableEnrichment[GOtype] == as.character(current_pathway_from_EA[i,]) ,][,"Molecules"], ", ")
}
# Overlap between the signature and the current term (case-insensitive).
genes_common_pathway_TFregulon <- as.matrix(intersect(toupper(RegulonList),toupper(genes_from_current_pathway_from_EA)))
if (length(genes_common_pathway_TFregulon) != 0) {
current_pathway_commongenes_num <- length(genes_common_pathway_TFregulon)
# 2x2 Fisher test: membership in the signature vs membership in the term,
# over the whole EAGenes background.
seta <- allgene %in% RegulonList
setb <- allgene %in% genes_from_current_pathway_from_EA
ft <- fisher.test(seta,setb)
FisherpvalueTF <- ft$p.value
table_pathway_enriched[i,"Pvalue"] <- as.numeric(FisherpvalueTF)
# Only fill in the descriptive columns for nominally significant terms.
if (FisherpvalueTF < 0.01) {
current_pathway_commongenes_percent <- paste("(",format( (current_pathway_commongenes_num/length(genes_from_current_pathway_from_EA)) * 100,digits = 2),"%)")
current_pathway_commongenes_num_with_percent <- gsub(" ","",paste(current_pathway_commongenes_num, current_pathway_commongenes_percent,"pv=",format(FisherpvalueTF,digits=2)))
table_pathway_enriched[i,"CommonGenesPathway"] <- length(genes_common_pathway_TFregulon)
table_pathway_enriched[i,"GenesInPathway"] <- length(genes_from_current_pathway_from_EA)
table_pathway_enriched[i,"PercentPathway"] <- as.numeric(table_pathway_enriched[i,"CommonGenesPathway"]) / as.numeric(table_pathway_enriched[i,"GenesInPathway"]) *100
table_pathway_enriched[i,"PercentRegulon"] <- as.numeric(table_pathway_enriched[i,"CommonGenesPathway"]) / length(RegulonList) *100
} }
}
# Keep nominally significant terms, adjust with BH/FDR, re-filter and sort.
table_pathway_enriched <- table_pathway_enriched[order(table_pathway_enriched[,"Pvalue"],decreasing = FALSE),]
table_pathway_enriched <- table_pathway_enriched[table_pathway_enriched[,"Pvalue"] < 0.01 ,]
table_pathway_enriched[,"FDR"] <- p.adjust(table_pathway_enriched[,"Pvalue"],method = "fdr")
table_pathway_enriched <- table_pathway_enriched[table_pathway_enriched[,"FDR"] < FDRThresh ,]
table_pathway_enriched <- table_pathway_enriched[order(table_pathway_enriched[,"FDR"],decreasing = FALSE),]
if(nrow(table_pathway_enriched) > 0) {
# Collapse each surviving term into a single descriptive string and place
# them in the one-row output table (one column per enriched term).
tmp <- table_pathway_enriched
tmp <- paste(tmp[,"Pathway"],"; FDR= ", format(tmp[,"FDR"],digits = 3),"; (ng=" ,round(tmp[,"GenesInPathway"]),"); (ncommon=", format(tmp[,"CommonGenesPathway"],digits = 2), ")" ,sep = "")
tmp <- as.matrix(tmp)
topPathways_tab <- topPathways_tab[,1:nrow(table_pathway_enriched),drop=FALSE]
topPathways_tab[1,] <- tmp
} else {
# Nothing enriched at the requested FDR.
topPathways_tab <- NA
}
return(topPathways_tab)
}
#' @title Differentially expression analysis (DEA) using limma package.
#' @description Differentially expression analysis (DEA) using limma package.
#' @param FC.cut write
#' @param AffySet A matrix-like data object containing log-ratios or log-expression values
#' for a series of arrays, with rows corresponding to genes and columns to samples
#' @examples
#' \dontrun{
#' to add example
#' }
#' @export
#' @return List of list with tables in 2 by 2 comparison
#' of the top-ranked genes from a linear model fitted by DEA's limma
TCGAanalyze_DEA_Affy <- function(AffySet, FC.cut = 0.01) {
  # Limma DEA on an affy ExpressionSet: fits a no-intercept group model and
  # tests every unordered pair of disease groups, returning one topTable per
  # contrast (named "<groupA>_<groupB>").
  #
  # AffySet : ExpressionSet-like object with a 'Disease' phenoData column.
  # FC.cut  : minimum absolute log-fold-change passed to topTable(lfc=).
  #
  # BUG FIX: the original called `limmamakeContrasts` / `limmacontrasts.fit`
  # (missing `::`), which are undefined names; the requireNamespace error
  # messages also named the wrong packages.
  if (!requireNamespace("Biobase", quietly = TRUE)) {
    stop("Biobase package is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("limma", quietly = TRUE)) {
    stop("limma package is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  Pdatatable <- Biobase::phenoData(AffySet)
  f <- factor(Pdatatable$Disease)
  groupColors <- names(table(f))
  # Upper-triangular 0/1 mask: one entry per unordered pair of groups.
  tmp <- matrix(0, length(groupColors), length(groupColors))
  colnames(tmp) <- groupColors
  rownames(tmp) <- groupColors
  tmp[upper.tri(tmp)] <- 1
  # No-intercept design: one column per group.
  design <- model.matrix(~0 + f)
  colnames(design) <- levels(f)
  fit <- limma::lmFit(AffySet, design) # fit is an object of class MArrayLM.
  CompleteList <- vector("list", sum(tmp))
  k <- 1
  for (i in seq_along(groupColors)) {
    col1 <- colnames(tmp)[i]
    for (j in seq_along(groupColors)) {
      col2 <- rownames(tmp)[j]
      if (i != j && tmp[i, j] != 0) {
        Comparison <- paste(col2, "-", col1, sep = "")
        # NOTE(review): these two index pairs reverse the contrast direction
        # for specific group combinations — confirm this is intentional.
        if (i == 4 && j == 6) { Comparison <- paste(col1, "-", col2, sep = "") }
        if (i == 5 && j == 6) { Comparison <- paste(col1, "-", col2, sep = "") }
        print(paste(i, j, Comparison, "to do..."))
        cont.matrix <- limma::makeContrasts(I = Comparison, levels = design)
        fit2 <- limma::contrasts.fit(fit, cont.matrix)
        fit2 <- limma::eBayes(fit2)
        sigI <- limma::topTable(fit2, coef = 1, adjust.method = "BH",
                                sort.by = "B", p.value = 0.05,
                                lfc = FC.cut, number = 50000)
        # Re-rank by absolute fold change.
        sigIbis <- sigI[order(abs(as.numeric(sigI$logFC)), decreasing = TRUE), ]
        names(CompleteList)[k] <- gsub("-", "_", Comparison)
        CompleteList[[k]] <- sigIbis
        k <- k + 1
      }
    }
  }
  return(CompleteList)
}
#' @title Generate network
#' @description TCGAanalyze_analyseGRN perform gene regulatory network.
#' @param TFs a vector of genes.
#' @param normCounts is a matrix of gene expression with genes in rows and samples in columns.
#' @param kNum the number of nearest neighbors to consider to estimate the mutual information.
#' Must be less than the number of columns of normCounts.
#' @export
#' @return an adjacent matrix
TCGAanalyze_analyseGRN <- function(TFs, normCounts, kNum) {
  # Estimate mutual information between candidate regulators (TFs present in
  # the expression matrix) and all genes, via parmigene's kNN estimator.
  #
  # TFs        : character vector of regulator gene names.
  # normCounts : expression matrix, genes in rows, samples in columns.
  # kNum       : number of nearest neighbors for the MI estimator
  #              (must be less than ncol(normCounts)).
  #
  # Returns the MI matrix (regulators x genes).
  if (!requireNamespace("parmigene", quietly = TRUE)) {
    stop("parmigene package is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  MRcandidates <- intersect(rownames(normCounts), TFs)
  # Rough runtime estimate, printed for the user.
  timeEstimatedMI_TFgenes1 <- length(MRcandidates) * nrow(normCounts) / 1000
  timeEstimatedMI_TFgenes <- format(timeEstimatedMI_TFgenes1 * ncol(normCounts) / 17000, digits = 2)
  messageEstimation <- print(paste("I Need about ", timeEstimatedMI_TFgenes,
                                   "seconds for this MI estimation. [Processing 17000k elements /s] "))
  # BUG FIX: knnmi.cross must be namespace-qualified — requireNamespace()
  # loads parmigene but does not attach it to the search path.
  system.time(miTFGenes <- parmigene::knnmi.cross(normCounts[MRcandidates, ],
                                                  normCounts, k = kNum))
  return(miTFGenes)
}
#' @title Generate pathview graph
#' @description TCGAanalyze_Pathview pathway based data integration and visualization.
#' @param dataDEGs dataDEGs
#' @param pathwayKEGG pathwayKEGG
#' @export
#' @return an adjacent matrix
#' @examples
#' \dontrun{
#' dataDEGs <- data.frame(mRNA = c("TP53","TP63","TP73"), logFC = c(1,2,3))
#' TCGAanalyze_Pathview(dataDEGs)
#' }
TCGAanalyze_Pathview <- function(dataDEGs, pathwayKEGG = "hsa05200") {
  # Render a KEGG pathway map colored by the logFC of the supplied DEGs.
  #
  # dataDEGs    : data.frame with at least 'mRNA' (gene symbols) and 'logFC'.
  # pathwayKEGG : KEGG pathway id to draw.
  if (!requireNamespace("clusterProfiler", quietly = TRUE)) {
    stop("clusterProfiler needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("pathview", quietly = TRUE)) {
    stop("pathview needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # Map gene symbols to Entrez ids, keeping the first hit per symbol.
  symbol.map <- as.data.frame(clusterProfiler::bitr(dataDEGs$mRNA,
                                                    fromType = "SYMBOL",
                                                    toType = "ENTREZID",
                                                    OrgDb = "org.Hs.eg.db"))
  symbol.map <- symbol.map[!duplicated(symbol.map$SYMBOL), ]
  # Restrict to mapped genes and align both tables by sorted symbol so the
  # Entrez ids can be attached positionally.
  dataDEGs <- dataDEGs[dataDEGs$mRNA %in% symbol.map$SYMBOL, ]
  dataDEGs <- dataDEGs[order(dataDEGs$mRNA, decreasing = FALSE), ]
  symbol.map <- symbol.map[order(symbol.map$SYMBOL, decreasing = FALSE), ]
  dataDEGs$GeneID <- symbol.map$ENTREZID
  # pathview wants a named numeric vector: names = Entrez ids, values = logFC.
  deg.subset <- subset(dataDEGs, select = c("GeneID", "logFC"))
  genelistDEGs <- as.numeric(deg.subset$logFC)
  names(genelistDEGs) <- deg.subset$GeneID
  hsa05200 <- pathview::pathview(gene.data = genelistDEGs,
                                 pathway.id = pathwayKEGG,
                                 species = "hsa",
                                 limit = list(gene = as.integer(max(abs(genelistDEGs)))))
}
#' @title infer gene regulatory networks
#' @description TCGAanalyze_networkInference taking expression data as input, this will return an adjacency matrix of interactions
#' @param data expression data, genes in columns, samples in rows
#' @param optionMethod inference method, chose from aracne, c3net, clr and mrnet
#' @export
#' @return an adjacent matrix
TCGAanalyze_networkInference <- function(data, optionMethod = "clr") {
  # Infer a gene regulatory network from expression data.
  #
  # data         : expression matrix, genes in columns, samples in rows.
  # optionMethod : "c3net", or a minet method ("aracne", "clr", "mrnet").
  #
  # Returns the inferred adjacency matrix.
  #
  # BUG FIX: both calls must be namespace-qualified — requireNamespace()
  # loads the package but does not attach it, so the bare `c3net()` /
  # `minet()` calls of the original failed unless the package happened to be
  # on the search path already.
  if (optionMethod == "c3net") {
    if (!requireNamespace("c3net", quietly = TRUE)) {
      stop("c3net package is needed for this function to work. Please install it.",
           call. = FALSE)
    }
    # c3net expects genes in rows, hence the transpose.
    net <- c3net::c3net(t(data))
  } else {
    if (!requireNamespace("minet", quietly = TRUE)) {
      stop("minet package is needed for this function to work. Please install it.",
           call. = FALSE)
    }
    net <- minet::minet(data, method = optionMethod)
  }
  return(net)
}
#' Creates a plot for GAIA output (all significant aberrant regions).
#' @description
#' This function is an auxiliary function to visualize GAIA output
#' (all significant aberrant regions.)
#' @param calls A matrix with the following columns: Chromossome, Aberration Kind
#' Region Start, Region End, Region Size and score
#' @param threshold Score threshold (orange horizontal line in the plot)
#' @export
#' @importFrom graphics abline axis legend plot points
#' @return A plot with all significant aberrant regions.
#' @examples
#' call <- data.frame("Chromossome" = rep(9,100),
#' "Aberration Kind" = rep(c(-2,-1,0,1,2),20),
#' "Region Start [bp]" = 18259823:18259922,
#' "Region End [bp]" = 18259823:18259922,
#' "score" = rep(c(1,2,3,4),25))
#' gaiaCNVplot(call,threshold = 0.01)
#' call <- data.frame("Chromossome" = rep(c(1,9),50),
#' "Aberration Kind" = rep(c(-2,-1,0,1,2),20),
#' "Region Start [bp]" = 18259823:18259922,
#' "Region End [bp]" = 18259823:18259922,
#' "score" = rep(c(1,2,3,4),25))
#' gaiaCNVplot(call,threshold = 0.01)
gaiaCNVplot <- function (calls, threshold = 0.01) {
  # Plot GAIA copy-number calls: gains as red bars above zero, losses as blue
  # bars below zero, with chromosome boundaries, axis labels at chromosome
  # centers, and orange score-threshold lines at +/- log10(threshold).
  #
  # calls     : matrix/data.frame with chromosome, aberration kind, region
  #             start/end and 'score' columns (matched by name, case-insensitive).
  # threshold : score threshold drawn as the orange lines.

  # Sort by start position, then by chromosome, so the x axis follows genomic order.
  Calls <- calls[order(calls[, grep("start", colnames(calls), ignore.case = TRUE)]), ]
  Calls <- Calls[order(Calls[, grep("chr", colnames(calls), ignore.case = TRUE)]), ]
  rownames(Calls) <- NULL
  Chromo <- Calls[, grep("chr", colnames(calls), ignore.case = TRUE)]
  # Aberration kind 1 = gain, 0 = loss: keep the score only for the matching kind.
  Gains <- apply(Calls, 1, function(x) ifelse(x[grep("aberration", colnames(calls), ignore.case = TRUE)] == 1, x["score"], 0))
  Losses <- apply(Calls, 1, function(x) ifelse(x[grep("aberration", colnames(calls), ignore.case = TRUE)] == 0, x["score"], 0))
  plot(Gains,
       ylim = c(-max(Calls[, "score"] + 2), max(Calls[, "score"] + 2)),
       type = "h",
       col = "red",
       xlab = "Chromosome",
       ylab = "Score",
       xaxt = "n")
  points(-(Losses), type = "h", col = "blue")
  # Origin line.
  abline(h = 0, cex = 4)
  # Threshold lines (stray debug argument main = "test" removed: abline()
  # has no 'main' parameter and it only produced warnings).
  abline(h = -log10(threshold), col = "orange", cex = 4)
  abline(h = log10(threshold), col = "orange", cex = 4)
  uni.chr <- unique(Chromo)
  # temp[i] = index of the last call on chromosome i (boundary position).
  temp <- rep(0, length(uni.chr))
  for (i in seq_along(uni.chr)) {
    temp[i] <- max(which(uni.chr[i] == Chromo))
  }
  for (i in seq_along(temp)) {
    abline(v = temp[i], col = "black", lty = "dashed")
  }
  nChroms <- length(uni.chr)
  # begin[d] = number of calls on chromosome d, used to center the labels.
  begin <- c()
  for (d in seq_len(nChroms)) {
    chrom <- sum(Chromo == uni.chr[d])
    begin <- append(begin, chrom)
  }
  temp2 <- rep(0, nChroms)
  for (i in seq_len(nChroms)) {
    if (i == 1) {
      temp2[1] <- (begin[1] * 0.5)
    }
    else if (i > 1) {
      temp2[i] <- temp[i - 1] + (begin[i] * 0.5)
    }
  }
  # Conventional names for the sex chromosomes.
  uni.chr[uni.chr == 23] <- "X"
  uni.chr[uni.chr == 24] <- "Y"
  for (i in seq_along(temp)) {
    axis(1, at = temp2[i], labels = uni.chr[i], cex.axis = 1)
  }
  legend(x = 1, y = max(Calls[, "score"] + 2), y.intersp = 0.8, c("Amp"), pch = 15, col = c("red"), text.font = 3)
  legend(x = 1, y = -max(Calls[, "score"] + 0.5), y.intersp = 0.8, c("Del"), pch = 15, col = c("blue"), text.font = 3)
}
#' Get a matrix of interactions of genes from biogrid
#' @description
#' Using the biogrid database, it will create a matrix of gene interactions.
#' If column A and row B have the value 1, it means that gene A and gene B interact.
#' @param tmp.biogrid Biogrid table
#' @export
#' @param names.genes List of genes to filter from output. Default: consider all genes
#' @return A matrix with 1 for genes that interacts, 0 for no interaction.
#' @examples
#' names.genes.de <- c("PLCB1","MCL1","PRDX4","TTF2","TACC3", "PARP4","LSM1")
#' tmp.biogrid <- data.frame("Official.Symbol.Interactor.A" = names.genes.de,
#' "Official.Symbol.Interactor.B" = rev(names.genes.de))
#' net.biogrid.de <- getAdjacencyBiogrid(tmp.biogrid, names.genes.de)
#' \dontrun{
#' file <- paste0("http://thebiogrid.org/downloads/archives/",
#' "Release%20Archive/BIOGRID-3.4.133/BIOGRID-ALL-3.4.133.tab2.zip")
#' downloader::download(file,basename(file))
#' unzip(basename(file),junkpaths =TRUE)
#' tmp.biogrid <- read.csv(gsub("zip","txt",basename(file)),
#' header=TRUE, sep="\t", stringsAsFactors=FALSE)
#' names.genes.de <- c("PLCB1","MCL1","PRDX4","TTF2","TACC3", "PARP4","LSM1")
#' net.biogrid.de <- getAdjacencyBiogrid(tmp.biogrid, names.genes.de)
#' }
getAdjacencyBiogrid <- function(tmp.biogrid, names.genes = NULL){
  # The two interactor columns are located by name
  # ("Official.Symbol.Interactor.A" / "...B" both contain "Symbol").
  symbol.cols <- grep("Symbol", colnames(tmp.biogrid), value = TRUE)
  it.a <- symbol.cols[1]
  it.b <- symbol.cols[2]
  if(is.null(names.genes)){
    # No filter requested: consider every gene seen in either column.
    names.genes <- sort(union(unique(tmp.biogrid[,it.a]), unique(tmp.biogrid[,it.b])))
    # seq_len() is safe for a 0-row table; seq(1, 0) would yield c(1, 0)
    # and crash the loop below with an out-of-range subscript.
    ind <- seq_len(nrow(tmp.biogrid))
  } else {
    # Keep only interactions where both partners are in the requested list.
    ind.A <- which(tmp.biogrid[,it.a] %in% names.genes)
    ind.B <- which(tmp.biogrid[,it.b] %in% names.genes)
    ind <- intersect(ind.A, ind.B)
  }
  mat.biogrid <- matrix(0, nrow=length(names.genes),
                        ncol=length(names.genes),
                        dimnames=list(names.genes, names.genes))
  # Mark each interaction symmetrically (undirected network).
  for(i in ind){
    mat.biogrid[tmp.biogrid[i,it.a], tmp.biogrid[i,it.b]] <- mat.biogrid[tmp.biogrid[i,it.b], tmp.biogrid[i,it.a]] <- 1
  }
  # No self-interactions in the output.
  diag(mat.biogrid) <- 0
  return(mat.biogrid)
}
#' Get GDC samples with both DNA methylation (HM450K) and Gene expression data from
#' the GDC database
#' @description
#' For a given TCGA project it gets the samples (barcode) with both DNA methylation and Gene expression data
#' from GDC database
#' @param project A GDC project
#' @param n Number of samples to return. If NULL return all (default)
#' @param legacy Access legacy (hg19) or harmonized database (hg38).
#' @return A vector of barcodes
#' @export
#' @examples
#' # Get ACC samples with both DNA methylation (HM450K) and gene expression aligned to hg19
#' samples <- matchedMetExp("TCGA-ACC", legacy = TRUE)
matchedMetExp <- function(project, legacy = FALSE, n = NULL){
  # Query the GDC API (via GDCquery) for primary solid tumor samples with
  # HM450K DNA methylation data and with gene expression data, then return
  # the expression sample barcodes whose patient also has methylation data.
  if(legacy) {
    # get primary solid tumor samples: DNA methylation (hg19 legacy archive)
    message("Download DNA methylation information")
    met450k <- GDCquery(project = project,
                        data.category = "DNA methylation",
                        platform = "Illumina Human Methylation 450",
                        legacy = TRUE,
                        sample.type = c("Primary solid Tumor"))
    # get primary solid tumor samples: RNAseq (hg19 legacy archive)
    message("Download gene expression information")
    exp <- GDCquery(project = project,
                    data.category = "Gene expression",
                    data.type = "Gene expression quantification",
                    platform = "Illumina HiSeq",
                    file.type = "results",
                    sample.type = c("Primary solid Tumor"),
                    legacy = TRUE)
  } else {
    # get primary solid tumor samples: DNA methylation (hg38 harmonized)
    message("Download DNA methylation information")
    met450k <- GDCquery(project = project,
                        data.category = "DNA Methylation",
                        platform = "Illumina Human Methylation 450",
                        sample.type = c("Primary solid Tumor"))
    # get primary solid tumor samples: RNAseq (hg38 harmonized)
    message("Download gene expression information")
    exp <- GDCquery(project = project,
                    data.category = "Transcriptome Profiling",
                    data.type = "Gene Expression Quantification",
                    workflow.type = "HTSeq - Counts")
  }
  # Barcodes of the samples found by each query
  met450k.tp <- met450k$results[[1]]$cases
  exp.tp <- exp$results[[1]]$cases
  # Patients (12-char barcode prefix) present in both platforms; the 15-char
  # sample-level barcode from the expression query is what gets returned.
  patients <- unique(substr(exp.tp,1,15)[substr(exp.tp,1,12) %in% substr(met450k.tp,1,12)] )
  # head() avoids padding the result with NA when fewer than n matched
  # samples exist (patients[1:n] introduced NAs in that case).
  if(!is.null(n)) patients <- head(patients, n)
  return(patients)
}
#' Create a Summary table for each sample in a project saying if it contains
#' or not files for a certain data category
#' @description
#' Create a Summary table for each sample in a project saying if it contains
#' or not files for a certain data category
#' @param project A GDC project
#' @param legacy Access legacy (hg19) or harmonized database (hg38).
#' @return A data frame
#' @export
#' @importFrom stats xtabs
#' @examples
#' summary <- getDataCategorySummary("TCGA-ACC", legacy = TRUE)
getDataCategorySummary <- function(project, legacy = FALSE){
# Pick the legacy (hg19) or harmonized (hg38) GDC "files" endpoint.
baseURL <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/files/?","https://api.gdc.cancer.gov/files/?")
# Build a query returning, for every file of the project, its data_category
# and the submitter ids of its associated cases (filter on project_id).
url <- paste0(baseURL,"&expand=cases&size=100000&fields=cases.submitter_id,data_category&filters=",
URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.project.project_id","value":["'),
URLencode(project),
URLencode('"]}}]}'))
# NOTE(review): getURL looks like a package-internal download helper -- on
# failure, fall back to a plain httr GET and parse the JSON body manually.
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
# One row per file; "cases" is a list column with the file's case record(s).
json <- json$data$hits
# Collapse possibly multiple cases per file into one comma-separated string.
json$submitter_id <- unlist(lapply(json$cases, function(x) paste0(x$submitter_id,collapse = ",")))
json$cases <- NULL
# Drop rows duplicated across all remaining columns.
json <- json[!duplicated(json),]
# Keep only patient-level barcodes (12 characters); multi-case entries
# (comma-separated strings) are dropped here as well.
json <- json[stringr::str_length(json$submitter_id) == 12,]
# Cross-tabulate: one row per patient, one column per data category,
# each cell holding the number of files for that combination.
ret <- as.data.frame.matrix(xtabs(~ submitter_id + data_category , json))
return(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stratEst.R
\name{stratEst}
\alias{stratEst}
\title{Strategy Estimation Function}
\usage{
stratEst(
data,
strategies,
shares,
coefficients,
covariates,
sample.id,
response = "mixed",
sample.specific = c("shares", "probs", "trembles"),
r.probs = "no",
r.trembles = "global",
select = NULL,
min.strategies = 1,
crit = "bic",
se = "analytic",
outer.runs = 1,
outer.tol = 1e-10,
outer.max = 1000,
inner.runs = 10,
inner.tol = 1e-05,
inner.max = 10,
lcr.runs = 100,
lcr.tol = 1e-10,
lcr.max = 1000,
bs.samples = 1000,
quantiles = c(0.01, 0.05, 0.5, 0.95, 0.99),
stepsize = 1,
penalty = FALSE,
verbose = TRUE
)
}
\arguments{
\item{data}{A \code{stratEst.data} object or \code{data.frame}. Must contain the variables \code{choice}, \code{input}, \code{id}, \code{game}, \code{period}. The variable \code{id} identifies observations of the same individual across games and periods. The factor \code{input} indicates the discrete information observed by the individual before making a choice. The factor \code{choice} indicates the choice of the individual.}
\item{strategies}{A list of strategies. Each strategy is a data.frame of class \code{stratEst.strategy}. Each row of the data.frame represents one state of the strategy. The first row defines the initial state which is entered if the variable input is NA. Column names which start with the string 'output.' indicate the columns which contain the multinomial choice probabilities of the strategy. For example, a column labeled 'output.x' contains the probability to observe the output 'x'. The column 'tremble' contains a tremble probability for pure strategies. Column names which start with the string 'input.' indicate the columns which contain the deterministic state transition of the strategy. For example, a column with name 'input.x' indicates the state transition after observing input 'x'.}
\item{shares}{A vector of strategy shares. The elements correspond to the order of strategies in the list \code{strategies}. Shares which are \code{NA} are estimated from the data. With more than one sample and sample specific shares, a list of column vectors is required.}
\item{coefficients}{Column vector which contains the latent class regression coefficients. The elements correspond to the vector of estimates.}
\item{covariates}{A character vector indicating the names of the variables in data that are the covariates of the latent class regression model. Rows with the same id must have the same values of the covariates. Missing values are not allowed.}
\item{sample.id}{A character indicating the name of the variable which identifies the samples. Individual observations must be nested in samples. The same must be true for clusters if specified. If more than one sample exists, shares are estimated for each sample. All other parameters are estimated for the data of all samples. If the object is not supplied, it is assumed that the data contains only one sample.}
\item{response}{A string which can be set to \code{"pure"} or \code{"mixed"}. If set to \code{"pure"} all estimated choice probabilities are pure, i.e. either zero or one. If set to \code{"mixed"} all estimated choice probabilities are mixed. The default is \code{"mixed"}.}
\item{sample.specific}{A character vector defining which model parameters are sample specific. If the vector contains the character \code{"shares"} (\code{"probs"}, \code{"trembles"}), the estimation function estimates a set of shares (choice probabilities, trembles) for each sample in the data. If the vector does not contains the character \code{"shares"} (\code{"probs"}, \code{"trembles"}) one set of shares (choice probabilities, trembles) is estimated for the pooled data of all samples. Default is \code{c("shares","probs","trembles")}.}
\item{r.probs}{A string which can be set to \code{"no"}, \code{"strategies"}, \code{"states"} or \code{"global"}. If set to \code{"strategies"}, the estimation function estimates strategies with one strategy specific vector of choice probabilities in every state of the strategy. If set to \code{"states"}, one state specific vector of choice probabilities is estimated for each state. If set to \code{"global"}, a single vector of probabilities is estimated which applies in every state of each strategy. Default is \code{"no"}.}
\item{r.trembles}{A string which can be set to \code{"no"}, \code{"strategies"}, \code{"states"} or \code{"global"}. If set to \code{"strategies"}, the estimation function estimates strategies with one strategy specific tremble probability. If set to \code{"states"}, one state specific tremble probability is estimated for each state. If set to \code{"global"}, a single tremble probability is estimated which applies globally. Default is \code{"global"}.}
\item{select}{A character vector indicating which model parameters are selected. If the vector contains the character \code{"strategies"} (\code{"probs"}, \code{"trembles"}), the number of strategies (choice probabilities, trembles) is selected based on the selection criterion specified in \code{"crit"}. The selection of choice probabilities and trembles occurs obeying the restriction specified in \code{r.probs} and \code{r.trembles}. (E.g. if \code{r.probs} is set to \code{"strategies"}, \code{select = "probs"} will select the sets of choice probabilities within each strategy). Default is \code{NULL}.}
\item{min.strategies}{An integer which specifies the minimum number of strategies in case of strategy selection. The strategy selection procedure stops if the minimum is reached.}
\item{crit}{A string which can be set to \code{"bic"}, \code{"aic"} or \code{"icl"}. If set to \code{"bic"}, model selection based on the Bayesian Information criterion is performed. If set to \code{"aic"}, the Akaike Information criterion is used. If set to \code{"icl"} the Integrated Classification Likelihood criterion is used. Default is \code{"bic"}.}
\item{se}{A string which can be set to \code{"analytic"} or \code{"bootstrap"}. If set to \code{"bootstrap"}, bootstrapped standard errors are reported. Default is \code{"analytic"}.}
\item{outer.runs}{A positive integer which sets the number of outer runs of the solver. Default is 1.}
\item{outer.tol}{A positive number which sets the tolerance of the continuation condition of the outer runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{outer.tol}. Default is 1e-10.}
\item{outer.max}{A positive integer which sets the maximum number of iterations of the outer runs of the solver. The iterative algorithm stops if it did not converge after \code{outer.max} iterations. Default is 1000.}
\item{inner.runs}{A positive integer which sets the number of inner runs of the solver. Default is 10.}
\item{inner.tol}{A positive number which sets the tolerance of the continuation condition of the inner EM runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{inner.tol}. Default is 1e-05.}
\item{inner.max}{A positive integer which sets the maximum number of iterations of the inner EM runs. The iterative algorithm stops if it did not converge after \code{inner.max} iterations. Default is 10.}
\item{lcr.runs}{A positive integer which sets the number of estimation runs for latent class regression. Default is 100.}
\item{lcr.tol}{A positive number which sets the tolerance of the continuation condition of the latent class regression runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{lcr.tol}. Default is 1e-10.}
\item{lcr.max}{A positive integer which sets the maximum number of iterations of the latent class regression EM runs. The iterative algorithm stops if it did not converge after \code{lcr.max} iterations. Default is 1000.}
\item{bs.samples}{A positive integer which sets the number of bootstrap samples drawn with replacement.}
\item{quantiles}{A numeric vector indicating the quantiles of the sampling distribution of the estimated parameters. The quantiles are identified based on the standard error or based on bootstrapping the sampling distribution of the parameter.}
\item{stepsize}{A positive number which sets the stepsize of the Fisher scoring algorithm used to estimate the coefficients of the latent class regression model. Default is one. Values smaller than one slow down the convergence of the algorithm.}
\item{penalty}{A logical indicating if the Firth penalty is used to estimate the coefficients of the latent class regression model. Default is \code{FALSE}. Irrespective of the value specified here, the penalty is used in the case of a bootstrap of the standard errors of latent class regression coefficients.}
\item{verbose}{A logical, if \code{TRUE} messages of the estimation process and a summary of the estimated model is printed to the console. Default is \code{TRUE}.}
}
\value{
An object of class \code{stratEst}. A list with the following elements.
\item{strategies}{A list of fitted strategies.}
\item{shares}{Matrix of strategy shares. The order of rows corresponds to the order of strategies defined in the input object \code{strategies}.}
\item{probs}{Matrix of choice probabilities. The value \code{NA} indicates that the probability could not be estimated since data does not contain observations the model assigns to the corresponding state.}
\item{trembles}{Matrix of tremble probabilities of the strategies. The value \code{NA} indicates that the corresponding probability could not be estimated since data does not contain observations the model assigns to the corresponding state.}
\item{coefficients}{Matrix of latent class regression coefficients for strategies.}
\item{shares.par}{Estimated strategy shares.}
\item{probs.par}{Estimated choice probabilities.}
\item{trembles.par}{Estimated tremble probabilities.}
\item{coefficients.par}{Estimated latent class regression coefficients.}
\item{shares.indices}{Indices of strategy shares.}
\item{probs.indices}{Indices of choice probabilities.}
\item{trembles.indices}{Indices of tremble probabilities.}
\item{coefficients.indices}{Indices of latent class regression coefficients.}
\item{loglike}{The log-likelihood of the model. Larger values indicate a better fit of the model to the data.}
\item{crit.val}{The value of the selection criterion defined under \code{crit}. Larger values indicate a better fit of the model.}
\item{eval}{Number of iterations of the solver. The reported number is the sum of iterations performed in the inner and the outer run which produced the reported estimates.}
\item{tol.val}{The relative decrease of the log-likelihood in the last iteration of the algorithm. }
\item{convergence}{Maximum absolute score of the model parameters. Small values indicate convergence of the algorithm to a (local) maximum of the negative log likelihood.}
\item{entropy}{Entropy of the posterior probability assignments of individuals to strategies.}
\item{state.obs}{A column vector with the number of weighted observations for each strategy state corresponding to the rows of \code{strategies}.}
\item{posterior.assignments}{Posterior probability of each individual to use a strategy.}
\item{prior.assignments}{Prior probability of each individual to use a strategy as predicted by the individual covariates.}
\item{shares.se}{Standard errors of the estimated shares.}
\item{probs.se}{Standard errors of the estimated choice probabilities.}
\item{trembles.se}{Standard errors of the estimated trembles.}
\item{coefficients.se}{Standard errors of the estimated coefficients.}
\item{shares.score}{Score of the estimated shares.}
\item{probs.score}{Score of the reported choice probabilities.}
\item{trembles.score}{Score of the reported trembles.}
\item{coefficients.score}{Score of the reported coefficients.}
\item{shares.fisher}{Fisher information of the estimated shares.}
\item{probs.fisher}{Fisher information of the reported choice probabilities.}
\item{trembles.fisher}{Fisher information of the reported trembles.}
\item{coefficients.fisher}{Fisher information of the reported coefficients.}
\item{num.obs}{Number of observations.}
\item{num.ids}{Number of individuals.}
\item{num.par}{Total number of model parameters.}
\item{free.par}{Total number of free model parameters.}
\item{res.degrees}{Residual degrees of freedom (num.ids - free.par).}
\item{shares.quantiles}{Quantiles of the estimated shares.}
\item{probs.quantiles}{Quantiles of the estimated choice probabilities.}
\item{trembles.quantiles}{Quantiles of the estimated tremble probabilities.}
\item{coefficients.quantiles}{Quantiles of the estimated latent class regression coefficients.}
\item{gammas}{Gamma parameter of the model.}
\item{gammas.par}{Estimated gamma parameters.}
\item{gammas.se}{Standard errors of the gamma parameters.}
\item{aic}{Akaike information criterion.}
\item{bic}{Bayesian information criterion.}
\item{icl}{Integrated classification likelihood information criteria.}
}
\description{
Performs variants of the strategy estimation method.
}
\details{
The estimation function \code{stratEst()} returns maximum-likelihood estimates for the population shares and choice probabilities of a set of candidate strategies given some data from an economic experiment. Candidate strategies can be supplied by the user in the form of deterministic finite-state automata. The number and the complexity of strategies can be restricted by the user or selected based on information criteria. stratEst also features latent class regression to assess the influence of covariates on strategy choice.
}
\note{
The strategy estimation method was introduced by (Dal Bo & Frechette 2011) to estimate the relative frequency of a fixed set of pure strategies in the indefinitely repeated prisoner's dilemma. Breitmoser (2015) extended the method to the estimation of behavior strategies. The \pkg{stratEst} package uses the EM algorithm (Dempster, Laird & Rubin 1977) and the Newton-Raphson method to obtain maximum-likelihood estimates for the population shares and choice probabilities of a set of candidate strategies. The package builds on other software contributions of the R community. To increase the speed of the estimation procedures, the package uses integration of C++ and R achieved by the Rcpp package (Eddelbuettel & Francois 2011) and the open source linear algebra library for the C++ language RcppArmadillo (Sanderson & Curtin 2016).
}
\references{
Breitmoser, Y. (2015): Cooperation, but no reciprocity: Individual strategies in the repeated prisoner's dilemma, \emph{American Economic Review}, 105, 2882-2910.
Dal Bo, P. and G. R. Frechette (2011): The evolution of cooperation in infinitely repeated games: Experimental evidence, \emph{American Economic Review}, 101, 411-429.
Dempster, A., N. Laird, and D. B. Rubin (1977): Maximum likelihood from incomplete data via the EM algorithm, \emph{Journal of the Royal Statistical Society Series B}, 39, 1-38.
Eddelbuettel, D. and R. Francois (2011): Rcpp: Seamless R and C++ Integration, \emph{Journal of Statistical Software}, 40, 1-18.
Sanderson, C. and R. Curtin (2016): Armadillo: a template-based C++ library for linear algebra. \emph{Journal of Open Source Software}, 1-26.
}
| /man/stratEst.Rd | no_license | fdvorak/stratEst | R | false | true | 15,317 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stratEst.R
\name{stratEst}
\alias{stratEst}
\title{Strategy Estimation Function}
\usage{
stratEst(
data,
strategies,
shares,
coefficients,
covariates,
sample.id,
response = "mixed",
sample.specific = c("shares", "probs", "trembles"),
r.probs = "no",
r.trembles = "global",
select = NULL,
min.strategies = 1,
crit = "bic",
se = "analytic",
outer.runs = 1,
outer.tol = 1e-10,
outer.max = 1000,
inner.runs = 10,
inner.tol = 1e-05,
inner.max = 10,
lcr.runs = 100,
lcr.tol = 1e-10,
lcr.max = 1000,
bs.samples = 1000,
quantiles = c(0.01, 0.05, 0.5, 0.95, 0.99),
stepsize = 1,
penalty = FALSE,
verbose = TRUE
)
}
\arguments{
\item{data}{A \code{stratEst.data} object or \code{data.frame}. Must contain the variables \code{choice}, \code{input}, \code{id}, \code{game}, \code{period}. The variable \code{id} identifies observations of the same individual across games and periods. The factor \code{input} indicates the discrete information observed by the individual before making a choice. The factor \code{choice} indicates the choice of the individual.}
\item{strategies}{A list of strategies. Each strategy is a data.frame of class \code{stratEst.strategy}. Each row of the data.frame represents one state of the strategy. The first row defines the initial state which is entered if the variable input is NA. Column names which start with the string 'output.' indicate the columns which contain the multinomial choice probabilities of the strategy. For example, a column labeled 'output.x' contains the probability to observe the output 'x'. The column 'tremble' contains a tremble probability for pure strategies. Column names which start with the string 'input.' indicate the columns which contain the deterministic state transition of the strategy. For example, a column with name 'input.x' indicates the state transition after observing input 'x'.}
\item{shares}{A vector of strategy shares. The elements correspond to the order of strategies in the list \code{strategies}. Shares which are \code{NA} are estimated from the data. With more than one sample and sample specific shares, a list of column vectors is required.}
\item{coefficients}{Column vector which contains the latent class regression coefficients. The elements correspond to the vector of estimates.}
\item{covariates}{A character vector indicating the names of the variables in data that are the covariates of the latent class regression model. Rows with the same id must have the same values of the covariates. Missing values are not allowed.}
\item{sample.id}{A character indicating the name of the variable which identifies the samples. Individual observations must be nested in samples. The same must be true for clusters if specified. If more than one sample exists, shares are estimated for each sample. All other parameters are estimated for the data of all samples. If the object is not supplied, it is assumed that the data contains only one sample.}
\item{response}{A string which can be set to \code{"pure"} or \code{"mixed"}. If set to \code{"pure"} all estimated choice probabilities are pure, i.e. either zero or one. If set to \code{"mixed"} all estimated choice probabilities are mixed. The default is \code{"mixed"}.}
\item{sample.specific}{A character vector defining which model parameters are sample specific. If the vector contains the character \code{"shares"} (\code{"probs"}, \code{"trembles"}), the estimation function estimates a set of shares (choice probabilities, trembles) for each sample in the data. If the vector does not contains the character \code{"shares"} (\code{"probs"}, \code{"trembles"}) one set of shares (choice probabilities, trembles) is estimated for the pooled data of all samples. Default is \code{c("shares","probs","trembles")}.}
\item{r.probs}{A string which can be set to \code{"no"}, \code{"strategies"}, \code{"states"} or \code{"global"}. If set to \code{"strategies"}, the estimation function estimates strategies with one strategy specific vector of choice probabilities in every state of the strategy. If set to \code{"states"}, one state specific vector of choice probabilities is estimated for each state. If set to \code{"global"}, a single vector of probabilities is estimated which applies in every state of each strategy. Default is \code{"no"}.}
\item{r.trembles}{A string which can be set to \code{"no"}, \code{"strategies"}, \code{"states"} or \code{"global"}. If set to \code{"strategies"}, the estimation function estimates strategies with one strategy specific tremble probability. If set to \code{"states"}, one state specific tremble probability is estimated for each state. If set to \code{"global"}, a single tremble probability is estimated which applies globally. Default is \code{"global"}.}
\item{select}{A character vector indicating which model parameters are selected. If the vector contains the character \code{"strategies"} (\code{"probs"}, \code{"trembles"}), the number of strategies (choice probabilities, trembles) is selected based on the selection criterion specified in \code{"crit"}. The selection of choice probabilities and trembles occurs obeying the restriction specified in \code{r.probs} and \code{r.trembles}. (E.g. if \code{r.probs} is set to \code{"strategies"}, \code{select = "probs"} will select the sets of choice probabilities within each strategy). Default is \code{NULL}.}
\item{min.strategies}{An integer which specifies the minimum number of strategies in case of strategy selection. The strategy selection procedure stops if the minimum is reached.}
\item{crit}{A string which can be set to \code{"bic"}, \code{"aic"} or \code{"icl"}. If set to \code{"bic"}, model selection based on the Bayesian Information criterion is performed. If set to \code{"aic"}, the Akaike Information criterion is used. If set to \code{"icl"} the Integrated Classification Likelihood criterion is used. Default is \code{"bic"}.}
\item{se}{A string which can be set to \code{"analytic"} or \code{"bootstrap"}. If set to \code{"bootstrap"}, bootstrapped standard errors are reported. Default is \code{"analytic"}.}
\item{outer.runs}{A positive integer which sets the number of outer runs of the solver. Default is 1.}
\item{outer.tol}{A positive number which sets the tolerance of the continuation condition of the outer runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{outer.tol}. Default is 1e-10.}
\item{outer.max}{A positive integer which sets the maximum number of iterations of the outer runs of the solver. The iterative algorithm stops if it did not converge after \code{outer.max} iterations. Default is 1000.}
\item{inner.runs}{A positive integer which sets the number of inner runs of the solver. Default is 10.}
\item{inner.tol}{A positive number which sets the tolerance of the continuation condition of the inner EM runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{inner.tol}. Default is 1e-05.}
\item{inner.max}{A positive integer which sets the maximum number of iterations of the inner EM runs. The iterative algorithm stops if it did not converge after \code{inner.max} iterations. Default is 10.}
\item{lcr.runs}{A positive integer which sets the number of estimation runs for latent class regression. Default is 100.}
\item{lcr.tol}{A positive number which sets the tolerance of the continuation condition of the latent class regression runs. The iterative algorithm stops if the relative decrease of the log-likelihood is smaller than \code{lcr.tol}. Default is 1e-10.}
\item{lcr.max}{A positive integer which sets the maximum number of iterations of the latent class regression EM runs. The iterative algorithm stops if it did not converge after \code{lcr.max} iterations. Default is 1000.}
\item{bs.samples}{A positive integer which sets the number of bootstrap samples drawn with replacement.}
\item{quantiles}{A numeric vector indicating the quantiles of the sampling distribution of the estimated parameters. The quantiles are identified based on the standard error or based on bootstrapping the sampling distribution of the parameter.}
\item{stepsize}{A positive number which sets the stepsize of the Fisher scoring algorithm used to estimate the coefficients of the latent class regression model. Default is one. Values smaller than one slow down the convergence of the algorithm.}
\item{penalty}{A logical indicating if the Firth penalty is used to estimate the coefficients of the latent class regression model. Default is \code{FALSE}. Irrespective of the value specified here, the penalty is used in the case of a bootstrap of the standard errors of latent class regression coefficients.}
\item{verbose}{A logical, if \code{TRUE} messages of the estimation process and a summary of the estimated model is printed to the console. Default is \code{TRUE}.}
}
\value{
An object of class \code{stratEst}. A list with the following elements.
\item{strategies}{A list of fitted strategies.}
\item{shares}{Matrix of strategy shares. The order of rows corresponds to the order of strategies defined in the input object \code{strategies}.}
\item{probs}{Matrix of choice probabilities. The value \code{NA} indicates that the probability could not be estimated since data does not contain observations the model assigns to the corresponding state.}
\item{trembles}{Matrix of tremble probabilities of the strategies. The value \code{NA} indicates that the corresponding probability could not be estimated since data does not contain observations the model assigns to the corresponding state.}
\item{coefficients}{Matrix of latent class regression coefficients for strategies.}
\item{shares.par}{Estimated strategy shares.}
\item{probs.par}{Estimated choice probabilities.}
\item{trembles.par}{Estimated tremble probabilities.}
\item{coefficients.par}{Estimated latent class regression coefficients.}
\item{shares.indices}{Indices of strategy shares.}
\item{probs.indices}{Indices of choice probabilities.}
\item{trembles.indices}{Indices of tremble probabilities.}
\item{coefficients.indices}{Indices of latent class regression coefficients.}
\item{loglike}{The log-likelihood of the model. Larger values indicate a better fit of the model to the data.}
\item{crit.val}{The value of the selection criterion defined under \code{crit}. Larger values indicate a better fit of the model.}
\item{eval}{Number of iterations of the solver. The reported number is the sum of iterations performed in the inner and the outer run which produced the reported estimates.}
\item{tol.val}{The relative decrease of the log-likelihood in the last iteration of the algorithm. }
\item{convergence}{Maximum absolute score of the model parameters. Small values indicate convergence of the algorithm to a (local) maximum of the negative log likelihood.}
\item{entropy}{Entropy of the posterior probability assignments of individuals to strategies.}
\item{state.obs}{A column vector with the number of weighted observations for each strategy state corresponding to the rows of \code{strategies}.}
\item{posterior.assignments}{Posterior probability of each individual to use a strategy.}
\item{prior.assignments}{Prior probability of each individual to use a strategy as predicted by the individual covariates.}
\item{shares.se}{Standard errors of the estimated shares.}
\item{probs.se}{Standard errors of the estimated choice probabilities.}
\item{trembles.se}{Standard errors of the estimated trembles.}
\item{coefficients.se}{Standard errors of the estimated coefficients.}
\item{shares.score}{Score of the estimated shares.}
\item{probs.score}{Score of the reported choice probabilities.}
\item{trembles.score}{Score of the reported trembles.}
\item{coefficients.score}{Score of the reported coefficients.}
\item{shares.fisher}{Fisher information of the estimated shares.}
\item{probs.fisher}{Fisher information of the reported choice probabilities.}
\item{trembles.fisher}{Fisher information of the reported trembles.}
\item{coefficients.fisher}{Fisher information of the reported coefficients.}
\item{num.obs}{Number of observations.}
\item{num.ids}{Number of individuals.}
\item{num.par}{Total number of model parameters.}
\item{free.par}{Total number of free model parameters.}
\item{res.degrees}{Residual degrees of freedom (num.ids - free.par).}
\item{shares.quantiles}{Quantiles of the estimated shares.}
\item{probs.quantiles}{Quantiles of the estimated choice probabilities.}
\item{trembles.quantiles}{Quantiles of the estimated tremble probabilities.}
\item{coefficients.quantiles}{Quantiles of the estimated latent class regression coefficients.}
\item{gammas}{Gamma parameter of the model.}
\item{gammas.par}{Estimated gamma parameters.}
\item{gammas.se}{Standard errors of the gamma parameters.}
\item{aic}{Akaike information criterion.}
\item{bic}{Bayesian information criterion.}
\item{icl}{Integrated classification likelihood information criteria.}
}
\description{
Performs variants of the strategy estimation method.
}
\details{
The estimation function \code{stratEst()} returns maximum-likelihood estimates for the population shares and choice probabilities of a set of candidate strategies given some data from an economic experiment. Candidate strategies can be supplied by the user in the form of deterministic finite-state automata. The number and the complexity of strategies can be restricted by the user or selected based on information criteria. stratEst also features latent class regression to assess the influence of covariates on strategy choice.
}
\note{
The strategy estimation method was introduced by Dal Bo & Frechette (2011) to estimate the relative frequency of a fixed set of pure strategies in the indefinitely repeated prisoner's dilemma. Breitmoser (2015) extended the method to the estimation of behavior strategies. The \pkg{stratEst} package uses the EM algorithm (Dempster, Laird & Rubin 1977) and the Newton-Raphson method to obtain maximum-likelihood estimates for the population shares and choice probabilities of a set of candidate strategies. The package builds on other software contributions of the R community. To increase the speed of the estimation procedures, the package uses the integration of C++ and R achieved by the Rcpp package (Eddelbuettel & Francois 2011) and the open-source linear algebra library for the C++ language, RcppArmadillo (Sanderson & Curtin 2016).
}
\references{
Breitmoser, Y. (2015): Cooperation, but no reciprocity: Individual strategies in the repeated prisoner's dilemma, \emph{American Economic Review}, 105, 2882-2910.
Dal Bo, P. and G. R. Frechette (2011): The evolution of cooperation in infinitely repeated games: Experimental evidence, \emph{American Economic Review}, 101, 411-429.
Dempster, A., N. Laird, and D. B. Rubin (1977): Maximum likelihood from incomplete data via the EM algorithm, \emph{Journal of the Royal Statistical Society Series B}, 39, 1-38.
Eddelbuettel, D. and R. Francois (2011): Rcpp: Seamless R and C++ Integration, \emph{Journal of Statistical Software}, 40, 1-18.
Sanderson, C. and R. Curtin (2016): Armadillo: a template-based C++ library for linear algebra. \emph{Journal of Open Source Software}, 1-26.
}
|
# Market-segmentation analysis of the Flip data: hierarchical clustering to
# seed k-means, then profiling of the segments (tableone) and a linear
# discriminant analysis of the descriptors.
# Read the Flip encoded data (first column holds the row names)
flip <- read.delim("R-FlipEncodedData.txt", row.names = 1)
# Show the first 6 rows and the variable names of the data file
head(flip)
names(flip)
# Split the file: columns 13-19 are the segmentation bases,
# columns 1-12 are the descriptor variables
bases <- data.frame(flip[13:19])
descrip <- data.frame(flip[1:12])
# Show the first 6 rows of each part
head(bases)
head(descrip)
# Hierarchical clustering of the customers.
# NOTE: method = "ward" is a deprecated alias in hclust(); "ward.D" is the
# equivalent current name and avoids the deprecation warning.
bases.hclust <- hclust(dist(bases, method = "euclidean"), method = "ward.D")
# Plot the resulting dendrogram
plot(bases.hclust)
# Compute the centroids of the groups formed by the hierarchical clustering
source("marketing-models.R")
# centros.hclust() (from marketing-models.R) computes the segment means of
# an hclust solution for the requested number of clusters
centros.bases <- centros.hclust(bases.hclust, bases, 4)
centros.bases
# Partition the sample with k-means, seeded with the hierarchical centroids
bases.kmeans4 <- kmeans(bases, centros.bases)
names(bases.kmeans4)
t(bases.kmeans4$centers)
bases.kmeans4$size
# Descriptors
names(descrip)
# Install tableone only when it is missing instead of unconditionally
if (!requireNamespace("tableone", quietly = TRUE)) install.packages("tableone")
library(tableone)
listVars <- c("BS.Reputation1", "BS.Reputation2", "TWorkExperience", "RWorkExperience", "MaSpecialization1", "MaSpecialization2", "MiSpecialization1", "MiSpecialization2", "UGDegree1", "UGDegree2", "PercentileClass1", "PercentileClass2")
# BUG FIX: the script creates bases.kmeans4 above but referenced the
# undefined objects bases.kmeans3/bases.kmeans2 below; use the 4-cluster
# solution consistently throughout.
flip$segmento4 <- bases.kmeans4$cluster
flip.descriptores <- CreateTableOne(vars = listVars, data = flip, strata = "segmento4")
flip.descriptores
########
# The discriminant function: the descriptors must be prepared (they are collinear)
names(bases)
names(descrip)
# Load MASS, which provides the discriminant analysis function lda()
library(MASS)
# Inspect the descriptor correlations and drop column 7 to remove collinearity
names(descrip)
cor(descrip)
descrip2 <- descrip[, -7]
head(descrip2)
# Fit the discriminant functions on the descriptors and the k-means segments
flip.des.lda <- lda(descrip2, bases.kmeans4$cluster)
summary(flip.des.lda)
flip.des.lda
plot(flip.des.lda, dimen = 2)
# lda.arrows() is defined in marketing-models.R (sourced above)
lda.arrows(flip.des.lda)
# Check the quality of the prediction made by the discriminant functions
options(digits = 4)
flip.des.predict <- predict(flip.des.lda, descrip2)$class
table(segmento = bases.kmeans4$cluster, lda = flip.des.predict)
# To interpret the meaning of the discriminant functions,
# estimate their correlation with the descriptors
flip.des.puntos <- predict(flip.des.lda, descrip2)$x
cor(descrip, flip.des.puntos)
# Mean values of the variables within the groups: this table alone would be
# enough to characterise the segments; projecting it onto the discriminant
# functions lets us visualise it and see its association with the groups.
t(flip.des.lda$means)
# To visualise the segments, project the group centroids into the space of
# the discriminant functions with predict() on the table of group means.
predict(flip.des.lda, flip.des.lda$means)$x
# Finally, draw a biplot combining the centroid scores on the discriminant
# functions with the correlations of the descriptors with those functions.
flip.des.cor <- cor(descrip2, flip.des.puntos)
biplot(predict(flip.des.lda, flip.des.lda$means)$x, flip.des.cor)
################
################ | /flip/flip.R | no_license | jlopezsi/mktg | R | false | false | 3,751 | r | #leer hatco.completo.csv
# Market-segmentation analysis of the Flip data: hierarchical clustering to
# seed k-means, then profiling of the segments (tableone) and a linear
# discriminant analysis of the descriptors.
# Read the Flip encoded data (first column holds the row names)
flip <- read.delim("R-FlipEncodedData.txt", row.names = 1)
# Show the first 6 rows and the variable names of the data file
head(flip)
names(flip)
# Split the file: columns 13-19 are the segmentation bases,
# columns 1-12 are the descriptor variables
bases <- data.frame(flip[13:19])
descrip <- data.frame(flip[1:12])
# Show the first 6 rows of each part
head(bases)
head(descrip)
# Hierarchical clustering of the customers.
# NOTE: method = "ward" is a deprecated alias in hclust(); "ward.D" is the
# equivalent current name and avoids the deprecation warning.
bases.hclust <- hclust(dist(bases, method = "euclidean"), method = "ward.D")
# Plot the resulting dendrogram
plot(bases.hclust)
# Compute the centroids of the groups formed by the hierarchical clustering
source("marketing-models.R")
# centros.hclust() (from marketing-models.R) computes the segment means of
# an hclust solution for the requested number of clusters
centros.bases <- centros.hclust(bases.hclust, bases, 4)
centros.bases
# Partition the sample with k-means, seeded with the hierarchical centroids
bases.kmeans4 <- kmeans(bases, centros.bases)
names(bases.kmeans4)
t(bases.kmeans4$centers)
bases.kmeans4$size
# Descriptors
names(descrip)
# Install tableone only when it is missing instead of unconditionally
if (!requireNamespace("tableone", quietly = TRUE)) install.packages("tableone")
library(tableone)
listVars <- c("BS.Reputation1", "BS.Reputation2", "TWorkExperience", "RWorkExperience", "MaSpecialization1", "MaSpecialization2", "MiSpecialization1", "MiSpecialization2", "UGDegree1", "UGDegree2", "PercentileClass1", "PercentileClass2")
# BUG FIX: the script creates bases.kmeans4 above but referenced the
# undefined objects bases.kmeans3/bases.kmeans2 below; use the 4-cluster
# solution consistently throughout.
flip$segmento4 <- bases.kmeans4$cluster
flip.descriptores <- CreateTableOne(vars = listVars, data = flip, strata = "segmento4")
flip.descriptores
########
# The discriminant function: the descriptors must be prepared (they are collinear)
names(bases)
names(descrip)
# Load MASS, which provides the discriminant analysis function lda()
library(MASS)
# Inspect the descriptor correlations and drop column 7 to remove collinearity
names(descrip)
cor(descrip)
descrip2 <- descrip[, -7]
head(descrip2)
# Fit the discriminant functions on the descriptors and the k-means segments
flip.des.lda <- lda(descrip2, bases.kmeans4$cluster)
summary(flip.des.lda)
flip.des.lda
plot(flip.des.lda, dimen = 2)
# lda.arrows() is defined in marketing-models.R (sourced above)
lda.arrows(flip.des.lda)
# Check the quality of the prediction made by the discriminant functions
options(digits = 4)
flip.des.predict <- predict(flip.des.lda, descrip2)$class
table(segmento = bases.kmeans4$cluster, lda = flip.des.predict)
# To interpret the meaning of the discriminant functions,
# estimate their correlation with the descriptors
flip.des.puntos <- predict(flip.des.lda, descrip2)$x
cor(descrip, flip.des.puntos)
# Mean values of the variables within the groups: this table alone would be
# enough to characterise the segments; projecting it onto the discriminant
# functions lets us visualise it and see its association with the groups.
t(flip.des.lda$means)
# To visualise the segments, project the group centroids into the space of
# the discriminant functions with predict() on the table of group means.
predict(flip.des.lda, flip.des.lda$means)$x
# Finally, draw a biplot combining the centroid scores on the discriminant
# functions with the correlations of the descriptors with those functions.
flip.des.cor <- cor(descrip2, flip.des.puntos)
biplot(predict(flip.des.lda, flip.des.lda$means)$x, flip.des.cor)
################
## https://spark.rstudio.com/guides/pipelines/
## Walk-through of Spark ML pipelines from R via sparklyr on the
## nycflights13 data: build a pipeline, fit it, predict, save and reload.
library(sparklyr)
library(dplyr)
library(tidyr)
library(DBI)
library(nycflights13)
# Connect to a local Spark cluster (Spark 2.0.0) and load data.
# Titanic references below come from the original tutorial sources:
# https://github.com/rstudio/sparkDemos/blob/master/dev/cloudera/spark_ml_classification_titanic.Rmd
sc <- spark_connect(master = "local", version = "2.0.0")
# First, a plain R "pipeline": a magrittr functional sequence (note the
# leading dot) that fits a linear model to whatever data frame it receives.
r_pipeline <- . %>%lm(am ~ cyl + mpg, data = .)
r_pipeline
# Apply the functional sequence to a data frame to obtain a fitted model.
r_model <- r_pipeline(mtcars)
r_model
# To prevent errors further down, reformat sched_dep_time as double
# before copying the data to Spark.
flights$sched_dep_time <- as.double(flights$sched_dep_time)
spark_flights <- sdf_copy_to(sc, flights,overwrite = TRUE)
# Ad-hoc SQL against the Spark table: flights that left on time or early.
flightsSQL <- dbGetQuery(sc, 'SELECT *
FROM flights
WHERE DEP_DELAY <= 0')
flightsSQL %>% collect()
# Average departure delay per carrier, worst first.
flightsSQL <- dbGetQuery(sc, 'SELECT carrier,avg(dep_delay) AS AvgDelay
FROM flights
GROUP BY carrier
ORDER BY AvgDelay DESC')
flightsSQL %>% collect()
# Maximum departure delay per carrier, divided by 60, worst first.
flightsSQL <- dbGetQuery(sc, 'SELECT carrier,max(dep_delay)/60 AS MaxDelay
FROM flights
GROUP BY carrier
ORDER BY MaxDelay DESC')
flightsSQL %>% collect()
# Lazy dplyr transformations: only instructions so far, nothing is
# executed on the Spark cluster yet.
df <- spark_flights %>%
filter(!is.na(dep_delay)) %>%
mutate(
month = paste0("m", month),
day = paste0("d", day)
) %>%
select(dep_delay, sched_dep_time, month, day, distance)
# Turn the dplyr instructions into a Spark ML SQL transformer and
# inspect the generated SQL statement.
ft_dplyr_transformer(sc, df)
ft_dplyr_transformer(sc, df) %>%
ml_param("statement")
# Full ML pipeline: the dplyr step, a binarized label ("delayed" when
# dep_delay exceeds 15), scheduled departure time bucketed into blocks,
# an R formula, and a logistic regression estimator.
flights_pipeline <- ml_pipeline(sc) %>%
ft_dplyr_transformer(
tbl = df
) %>%
ft_binarizer(
input_col = "dep_delay",
output_col = "delayed",
threshold = 15
) %>%
ft_bucketizer(
input_col = "sched_dep_time",
output_col = "hours",
splits = c(400, 800, 1200, 1600, 2000, 2400)
) %>%
ft_r_formula(delayed ~ month + day + hours + distance) %>%
ml_logistic_regression()
flights_pipeline
# Split the data into training and testing partitions (1% each;
# the remaining 98% is left unused).
partitioned_flights <- sdf_random_split(
spark_flights,
training = 0.01,
testing = 0.01,
rest = 0.98
)
# Fit the pipeline on Spark using the training partition.
fitted_pipeline <- ml_fit(
flights_pipeline,
partitioned_flights$training
)
fitted_pipeline
# Score the held-out testing partition.
predictions <- ml_transform(
fitted_pipeline,
partitioned_flights$testing
)
# Confusion matrix: actual delayed flag vs. model prediction.
predictions %>%
group_by(delayed, prediction) %>%
tally()
# Persist the (unfitted) pipeline definition to disk.
ml_save(
flights_pipeline,
"flights_pipeline",
overwrite = TRUE
)
# Persist the fitted model to disk.
ml_save(
fitted_pipeline,
"flights_model",
overwrite = TRUE
)
# Reload the fitted model from disk.
reloaded_model <- ml_load(sc, "flights_model")
# Score fresh data (month-7 flights) with the reloaded model.
new_df <- spark_flights %>%
filter(
month == 7 #,
#day == 5
)
newPredictions <- ml_transform(reloaded_model, new_df)
newPredictions %>%
group_by(delayed, prediction) %>%
tally()
| /example_r_spark/04 SPARK_PipeLines.R | no_license | winterleitner/bi_assignment_3 | R | false | false | 3,148 | r | ## https://spark.rstudio.com/guides/pipelines/
## Based on https://spark.rstudio.com/guides/pipelines/
## Walk-through of Spark ML pipelines from R via sparklyr on the
## nycflights13 data: build a pipeline, fit it, predict, save and reload.
library(sparklyr)
library(dplyr)
library(tidyr)
library(DBI)
library(nycflights13)
# Connect to a local Spark cluster (Spark 2.0.0) and load data.
# Titanic references below come from the original tutorial sources:
# https://github.com/rstudio/sparkDemos/blob/master/dev/cloudera/spark_ml_classification_titanic.Rmd
sc <- spark_connect(master = "local", version = "2.0.0")
# First, a plain R "pipeline": a magrittr functional sequence (note the
# leading dot) that fits a linear model to whatever data frame it receives.
r_pipeline <- . %>%lm(am ~ cyl + mpg, data = .)
r_pipeline
# Apply the functional sequence to a data frame to obtain a fitted model.
r_model <- r_pipeline(mtcars)
r_model
# To prevent errors further down, reformat sched_dep_time as double
# before copying the data to Spark.
flights$sched_dep_time <- as.double(flights$sched_dep_time)
spark_flights <- sdf_copy_to(sc, flights,overwrite = TRUE)
# Ad-hoc SQL against the Spark table: flights that left on time or early.
flightsSQL <- dbGetQuery(sc, 'SELECT *
FROM flights
WHERE DEP_DELAY <= 0')
flightsSQL %>% collect()
# Average departure delay per carrier, worst first.
flightsSQL <- dbGetQuery(sc, 'SELECT carrier,avg(dep_delay) AS AvgDelay
FROM flights
GROUP BY carrier
ORDER BY AvgDelay DESC')
flightsSQL %>% collect()
# Maximum departure delay per carrier, divided by 60, worst first.
flightsSQL <- dbGetQuery(sc, 'SELECT carrier,max(dep_delay)/60 AS MaxDelay
FROM flights
GROUP BY carrier
ORDER BY MaxDelay DESC')
flightsSQL %>% collect()
# Lazy dplyr transformations: only instructions so far, nothing is
# executed on the Spark cluster yet.
df <- spark_flights %>%
filter(!is.na(dep_delay)) %>%
mutate(
month = paste0("m", month),
day = paste0("d", day)
) %>%
select(dep_delay, sched_dep_time, month, day, distance)
# Turn the dplyr instructions into a Spark ML SQL transformer and
# inspect the generated SQL statement.
ft_dplyr_transformer(sc, df)
ft_dplyr_transformer(sc, df) %>%
ml_param("statement")
# Full ML pipeline: the dplyr step, a binarized label ("delayed" when
# dep_delay exceeds 15), scheduled departure time bucketed into blocks,
# an R formula, and a logistic regression estimator.
flights_pipeline <- ml_pipeline(sc) %>%
ft_dplyr_transformer(
tbl = df
) %>%
ft_binarizer(
input_col = "dep_delay",
output_col = "delayed",
threshold = 15
) %>%
ft_bucketizer(
input_col = "sched_dep_time",
output_col = "hours",
splits = c(400, 800, 1200, 1600, 2000, 2400)
) %>%
ft_r_formula(delayed ~ month + day + hours + distance) %>%
ml_logistic_regression()
flights_pipeline
# Split the data into training and testing partitions (1% each;
# the remaining 98% is left unused).
partitioned_flights <- sdf_random_split(
spark_flights,
training = 0.01,
testing = 0.01,
rest = 0.98
)
# Fit the pipeline on Spark using the training partition.
fitted_pipeline <- ml_fit(
flights_pipeline,
partitioned_flights$training
)
fitted_pipeline
# Score the held-out testing partition.
predictions <- ml_transform(
fitted_pipeline,
partitioned_flights$testing
)
# Confusion matrix: actual delayed flag vs. model prediction.
predictions %>%
group_by(delayed, prediction) %>%
tally()
# Persist the (unfitted) pipeline definition to disk.
ml_save(
flights_pipeline,
"flights_pipeline",
overwrite = TRUE
)
# Persist the fitted model to disk.
ml_save(
fitted_pipeline,
"flights_model",
overwrite = TRUE
)
# Reload the fitted model from disk.
reloaded_model <- ml_load(sc, "flights_model")
# Score fresh data (month-7 flights) with the reloaded model.
new_df <- spark_flights %>%
filter(
month == 7 #,
#day == 5
)
newPredictions <- ml_transform(reloaded_model, new_df)
newPredictions %>%
group_by(delayed, prediction) %>%
tally()
|
# Shiny app for navigating sales routes on a Leaflet map: browse routes by
# salesman, week day, and route sequence number (UI labels are in Turkish).
library(shiny)
library(leaflet)
library(dplyr)
# Project helpers: route/salesman loaders and map-drawing functions.
source("get_routes_ex31b.R")
source("pvrp.R")
# name mappings:
# sequence_no = sqn
# salesman_id = smi
# salesman_no = smn
routes_all = get_routes_verbal()
salesman = get_salesman()
# Initial UI state: sequence 0, salesman number 1 / id 7, week day 0.
sqn_selected = 0
smn_selected = 1
smi_selected = 7
wkd_selected = 0
sqn_init = get_routes_by_smi_wkd(routes_all, smi_selected, wkd_selected)
smi_init = salesman$salesman_id
wkd_init = routes_all$week_day %>% unique %>% sort
# UI: prev/next action buttons plus drop-downs for the route sequence,
# the salesman and the week day; outputs show state, routes and the map.
ui = fluidPage(
title = "Rotalar arasında navigasyon",
sidebarLayout(
sidebarPanel(
actionButton("sqn_prev", "Önceki")
, actionButton("sqn_next", "Sonraki")
, selectInput("sqn_select", "Rota sırası", choices = sqn_init, selected = sqn_selected, selectize = F)
, actionButton("smn_prev", "Önceki Satıcı")
, actionButton("smn_next", "Sonraki Satıcı")
, selectInput("smi_select", "Satıcı", choices = smi_init, selected = smi_selected, selectize = F)
, actionButton("wkd_prev", "Önceki Gün")
, actionButton("wkd_next", "Sonraki Gün")
, selectInput("wkd_select", "Gün", choices = wkd_init, selected = wkd_selected, selectize = F)
)
, mainPanel(
textOutput("sqn_out")
, textOutput("smn_out")
, tableOutput("wkd_out")
, tableOutput("routes")
, leafletOutput("map")
)
)
)
server = function(input, output, session) {
# Mutable app state: current sequence number, the route table of the
# current salesman/week day, and the current salesman number/id and day.
state = reactiveValues(sqn = sqn_selected, routes = get_routes_by_smi_wkd(routes_all, smi_selected, wkd_selected), smn = smn_selected, wkd = wkd_selected, smi = smi_selected)
# Step through / directly select the route sequence number.
observeEvent(input$sqn_next, { state$sqn = state$sqn + 1 })
observeEvent(input$sqn_prev, { state$sqn = state$sqn - 1 })
observeEvent(input$sqn_select, { state$sqn = as.numeric(input$sqn_select) })
# Keep the drop-downs in sync with programmatic state changes.
observe({
updateSelectInput(session, "sqn_select",
choices = state$routes$sequence_no
, selected = state$sqn
)})
observe({
updateSelectInput(session, "smi_select",
selected = state$smi
)})
observe({
updateSelectInput(session, "wkd_select",
selected = state$wkd
)})
output$sqn_out = renderText({ state$sqn })
output$smn_out = renderText({ state$smn })
output$wkd_out = renderText({ state$wkd })
# Resolve the salesman id from the salesman number, then reload routes.
refresh_salesman_no = function() {
state$smi = (dplyr::filter(salesman, salesman_no == state$smn))$salesman_id
refresh_salesman_routes()
}
# Resolve the salesman number from the salesman id, then reload routes.
refresh_salesman_id = function() {
state$smn = (dplyr::filter(salesman, salesman_id == state$smi))$salesman_no
refresh_salesman_routes()
}
# Reload the route list for the current salesman/week day and reset the
# sequence position back to 0.
refresh_salesman_routes = function() {
state$routes = get_routes_by_smi_wkd(routes_all, state$smi, state$wkd)
state$sqn = 0
return(state)
}
observeEvent(input$smn_next, {
state$smn = state$smn + 1
refresh_salesman_no()
})
observeEvent(input$smn_prev, {
state$smn = state$smn - 1
refresh_salesman_no()
})
observeEvent(input$smi_select, {
state$smi = as.numeric(input$smi_select)
refresh_salesman_id()
})
observeEvent(input$wkd_next, {
state$wkd = state$wkd + 1
refresh_salesman_routes()
})
observeEvent(input$wkd_prev, {
state$wkd = state$wkd - 1
refresh_salesman_routes()
})
observeEvent(input$wkd_select, {
state$wkd = as.numeric(input$wkd_select)
refresh_salesman_routes()
})
# Route up to the currently selected sequence number; shown as a table
# and drawn on the Leaflet map via the project's drawing helpers.
routeS = reactive({ get_route_upto_sequence_no(state$routes, state$sqn) })
output$routes = renderTable({ routeS() })
output$map = renderLeaflet({ get_routes_all(routeS()) })
}
runApp(shinyApp(ui, server), host="0.0.0.0",port=5050)
| /doc/study/ex/leaflet_rota_cizimi_20190530/ex31b.R | permissive | mertnuhoglu/pmap | R | false | false | 3,404 | r | library(shiny)
# Shiny app for navigating sales routes on a Leaflet map: browse routes by
# salesman, week day, and route sequence number (UI labels are in Turkish).
# NOTE(review): library(shiny) is not loaded here — presumably attached by
# an earlier chunk; confirm before running this copy standalone.
library(leaflet)
library(dplyr)
# Project helpers: route/salesman loaders and map-drawing functions.
source("get_routes_ex31b.R")
source("pvrp.R")
# name mappings:
# sequence_no = sqn
# salesman_id = smi
# salesman_no = smn
routes_all = get_routes_verbal()
salesman = get_salesman()
# Initial UI state: sequence 0, salesman number 1 / id 7, week day 0.
sqn_selected = 0
smn_selected = 1
smi_selected = 7
wkd_selected = 0
sqn_init = get_routes_by_smi_wkd(routes_all, smi_selected, wkd_selected)
smi_init = salesman$salesman_id
wkd_init = routes_all$week_day %>% unique %>% sort
# UI: prev/next action buttons plus drop-downs for the route sequence,
# the salesman and the week day; outputs show state, routes and the map.
ui = fluidPage(
title = "Rotalar arasında navigasyon",
sidebarLayout(
sidebarPanel(
actionButton("sqn_prev", "Önceki")
, actionButton("sqn_next", "Sonraki")
, selectInput("sqn_select", "Rota sırası", choices = sqn_init, selected = sqn_selected, selectize = F)
, actionButton("smn_prev", "Önceki Satıcı")
, actionButton("smn_next", "Sonraki Satıcı")
, selectInput("smi_select", "Satıcı", choices = smi_init, selected = smi_selected, selectize = F)
, actionButton("wkd_prev", "Önceki Gün")
, actionButton("wkd_next", "Sonraki Gün")
, selectInput("wkd_select", "Gün", choices = wkd_init, selected = wkd_selected, selectize = F)
)
, mainPanel(
textOutput("sqn_out")
, textOutput("smn_out")
, tableOutput("wkd_out")
, tableOutput("routes")
, leafletOutput("map")
)
)
)
server = function(input, output, session) {
# Mutable app state: current sequence number, the route table of the
# current salesman/week day, and the current salesman number/id and day.
state = reactiveValues(sqn = sqn_selected, routes = get_routes_by_smi_wkd(routes_all, smi_selected, wkd_selected), smn = smn_selected, wkd = wkd_selected, smi = smi_selected)
# Step through / directly select the route sequence number.
observeEvent(input$sqn_next, { state$sqn = state$sqn + 1 })
observeEvent(input$sqn_prev, { state$sqn = state$sqn - 1 })
observeEvent(input$sqn_select, { state$sqn = as.numeric(input$sqn_select) })
# Keep the drop-downs in sync with programmatic state changes.
observe({
updateSelectInput(session, "sqn_select",
choices = state$routes$sequence_no
, selected = state$sqn
)})
observe({
updateSelectInput(session, "smi_select",
selected = state$smi
)})
observe({
updateSelectInput(session, "wkd_select",
selected = state$wkd
)})
output$sqn_out = renderText({ state$sqn })
output$smn_out = renderText({ state$smn })
output$wkd_out = renderText({ state$wkd })
# Resolve the salesman id from the salesman number, then reload routes.
refresh_salesman_no = function() {
state$smi = (dplyr::filter(salesman, salesman_no == state$smn))$salesman_id
refresh_salesman_routes()
}
# Resolve the salesman number from the salesman id, then reload routes.
refresh_salesman_id = function() {
state$smn = (dplyr::filter(salesman, salesman_id == state$smi))$salesman_no
refresh_salesman_routes()
}
# Reload the route list for the current salesman/week day and reset the
# sequence position back to 0.
refresh_salesman_routes = function() {
state$routes = get_routes_by_smi_wkd(routes_all, state$smi, state$wkd)
state$sqn = 0
return(state)
}
observeEvent(input$smn_next, {
state$smn = state$smn + 1
refresh_salesman_no()
})
observeEvent(input$smn_prev, {
state$smn = state$smn - 1
refresh_salesman_no()
})
observeEvent(input$smi_select, {
state$smi = as.numeric(input$smi_select)
refresh_salesman_id()
})
observeEvent(input$wkd_next, {
state$wkd = state$wkd + 1
refresh_salesman_routes()
})
observeEvent(input$wkd_prev, {
state$wkd = state$wkd - 1
refresh_salesman_routes()
})
observeEvent(input$wkd_select, {
state$wkd = as.numeric(input$wkd_select)
refresh_salesman_routes()
})
# Route up to the currently selected sequence number; shown as a table
# and drawn on the Leaflet map via the project's drawing helpers.
routeS = reactive({ get_route_upto_sequence_no(state$routes, state$sqn) })
output$routes = renderTable({ routeS() })
output$map = renderLeaflet({ get_routes_all(routeS()) })
}
runApp(shinyApp(ui, server), host="0.0.0.0",port=5050)
|
# Shiny + Leaflet map of GP prescribing rates (prescriptions per 100
# registered patients) for allergies, asthma and diabetes.
library(shiny)
library(leaflet)
library(RColorBrewer)
# Merge prescription counts onto practice sizes and derive rates per
# 100 registered patients for each condition.
gp.no.patients <- read.csv("gpNoPatientsData.csv", header = TRUE)
prescriptions <- read.csv("Data_Practice.csv")
names(prescriptions)[names(prescriptions) == "Practice"] <- "PRACTICE_CODE"
gp.no.patients <- merge(prescriptions, gp.no.patients, by = "PRACTICE_CODE")
gp.no.patients$Asthma <- gp.no.patients$Pres_Count_Asthma / gp.no.patients$totalPatients * 100
gp.no.patients$Allergies <- gp.no.patients$Pres_Count_Allergies / gp.no.patients$totalPatients * 100
gp.no.patients$Diabetes <- gp.no.patients$Pres_Count_Diabetes / gp.no.patients$totalPatients * 100
gp.no.patients$totalPatientsSmall <- gp.no.patients$totalPatients * 0.5
gp.no.patients <- na.omit(gp.no.patients)
# Drop practices with 2 or more prescriptions per 100 patients (outliers).
gp.no.patients <- gp.no.patients[gp.no.patients$Allergies < 2, ]
gp.no.patients <- gp.no.patients[gp.no.patients$Asthma < 2, ]
gp.no.patients <- gp.no.patients[gp.no.patients$Diabetes < 2, ]
# Full-screen map with a draggable control panel on top.
ui <- bootstrapPage(
  tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
  leafletOutput("map", width = "100%", height = "100%"),
  absolutePanel(top = 10, right = 10, width = 300,
                draggable = TRUE,
                style = "opacity: 0.9; padding: 8px; background: #FFFFEE;",
                h3("GP Prescription Map"),
                p("This map shows prescriptions issued by each GP for medications for different medical issues. The shading represents the number of prescriptions per 100 patients registered."),
                sliderInput("range", "Select range for number of patients", min(gp.no.patients$totalPatients),
                            max(gp.no.patients$totalPatients),
                            value = range(gp.no.patients$totalPatients), step = 1
                ),
                selectInput("select",
                            label = "Select medical issue",
                            choices = c("Allergies",
                                        "Asthma",
                                        "Diabetes"),
                            selected = "Diabetes"),
                checkboxInput("legend", "Show legend", TRUE),
                HTML('This map was developed as part of the <a href="https://www.ukdataservice.ac.uk/news-and-events/eventsitem/?id=4760">UKDS Manchester Data Dive</a> using <a href="https://data.gov.uk/dataset/prescribing-by-gp-practice-presentation-level">GP practice prescribing data data from NHS digital</a>. Contains public sector information licensed under the Open Government Licence v3.0.</a>')
  )
)
server <- function(input, output, session) {
  # Practices whose registered-patient count falls in the selected range.
  filteredData <- reactive({
    gp.no.patients[gp.no.patients$totalPatients >= input$range[1] & gp.no.patients$totalPatients <= input$range[2], ]
  })
  # Binned colour palette over the selected condition's rate.
  # IDIOM FIX: fd[[input$select]] replaces eval(call("$", as.symbol("fd"), ...)).
  colorpal <- reactive({
    fd <- filteredData()
    colorBin("Blues", fd[[input$select]], 5, pretty = TRUE)
  })
  # Base map; markers and legend are managed incrementally via leafletProxy.
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      setView(-0.127758, 51.507351, zoom = 5)
  })
  # Redraw the circle markers whenever the filter or the condition changes.
  observe({
    pal <- colorpal()
    fd <- filteredData()
    vals <- fd[[input$select]]
    # BUG FIX: addCircleMarkers() adds *markers*, which clearShapes() never
    # removed, so stale markers accumulated on every update; clearMarkers()
    # clears the previous layer. Also map the rate through the palette for
    # the outline colour instead of passing raw numbers as colours.
    leafletProxy("map", data = fd) %>%
      clearMarkers() %>%
      addCircleMarkers(color = pal(vals),
                       fillColor = pal(vals),
                       fillOpacity = 0.7,
                       popup = paste0("<b>GP:</b> ",
                                      fd$PNAME,
                                      "<br><b>Number of patients registered:</b> ",
                                      fd$totalPatients,
                                      "<br><b>Prescriptions per 100 patients:</b> ",
                                      round(vals, 2)))
  })
  # BUG FIX: the "Show legend" checkbox previously had no effect; toggle a
  # legend for the current palette and values.
  observe({
    proxy <- leafletProxy("map", data = filteredData())
    proxy %>% clearControls()
    if (input$legend) {
      pal <- colorpal()
      proxy %>% addLegend(position = "bottomright",
                          pal = pal,
                          values = filteredData()[[input$select]],
                          title = "Prescriptions per 100 patients")
    }
  })
}
shinyApp(ui, server)
| /app.R | permissive | maczokni/hackathon | R | false | false | 4,077 | r | library(shiny)
library(leaflet)
library(RColorBrewer)
gp.no.patients<- read.csv("gpNoPatientsData.csv", header=T)
prescriptions <- read.csv("Data_Practice.csv")
names(prescriptions)[names(prescriptions)=="Practice"] <- "PRACTICE_CODE"
gp.no.patients <- merge(prescriptions, gp.no.patients, by="PRACTICE_CODE")
gp.no.patients$Asthma <- gp.no.patients$Pres_Count_Asthma/gp.no.patients$totalPatients*100
gp.no.patients$Allergies <- gp.no.patients$Pres_Count_Allergies/gp.no.patients$totalPatients*100
gp.no.patients$Diabetes <- gp.no.patients$Pres_Count_Diabetes/gp.no.patients$totalPatients*100
gp.no.patients$totalPatientsSmall <- gp.no.patients$totalPatients*0.5
gp.no.patients<- na.omit(gp.no.patients)
gp.no.patients <- gp.no.patients[gp.no.patients$Allergies < 2,]
gp.no.patients <- gp.no.patients[gp.no.patients$Asthma < 2,]
gp.no.patients <- gp.no.patients[gp.no.patients$Diabetes < 2,]
# Full-screen leaflet map with a draggable control panel overlaid on top.
ui <- bootstrapPage(
tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
leafletOutput("map", width = "100%", height = "100%"),
absolutePanel(top = 10, right = 10, width = 300,
draggable = TRUE,
style = "opacity: 0.9; padding: 8px; background: #FFFFEE;",
h3("GP Prescription Map"),
p("This map shows prescriptions issued by each GP for medications for different medical issues. The shading represents the number of prescriptions per 100 patients registered."),
# input$range: keep only practices within this patient-count range.
sliderInput("range", "Select range for number of patients", min(gp.no.patients$totalPatients),
max(gp.no.patients$totalPatients),
value = range(gp.no.patients$totalPatients), step = 1
),
#selector in progress
# input$select: name of the rate column (Allergies/Asthma/Diabetes) to map.
selectInput("select",
label = "Select medical issue",
choices = c("Allergies" ,
"Asthma" ,
"Diabetes" ),
selected = "Diabetes"),
# NOTE(review): input$legend has no corresponding logic in server() in
# this file -- confirm whether the legend toggle was ever wired up.
checkboxInput("legend", "Show legend", TRUE),
HTML('This map was developed as part of the <a href="https://www.ukdataservice.ac.uk/news-and-events/eventsitem/?id=4760">UKDS Manchester Data Dive</a> using <a href="https://data.gov.uk/dataset/prescribing-by-gp-practice-presentation-level">GP practice prescribing data data from NHS digital</a>. Contains public sector information licensed under the Open Government Licence v3.0.</a>')
)
)
# Shiny server: filters the GP data by the slider range, colours circle
# markers by the selected prescription-rate column, and redraws via proxy.
server <- function(input, output, session) {
  # Reactive expression for the data subsetted to what the user selected.
  filteredData <- reactive({
    gp.no.patients[gp.no.patients$totalPatients >= input$range[1] & gp.no.patients$totalPatients <= input$range[2],]
  })
  # Reactive palette: bin the selected rate column into 5 shades of blue.
  # fd[[input$select]] replaces the original
  # eval(call("$", as.symbol("fd"), input$select)) -- an eval/call
  # construction that is just a roundabout dynamic column lookup.
  colorpal <- reactive({
    fd <- filteredData()
    colorBin("Blues",
             fd[[input$select]], 5, pretty = TRUE)
  })
  # Static base map; dynamic layers are managed through leafletProxy() below.
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      setView(-0.127758, 51.507351, zoom = 5)
  })
  # Redraw the markers whenever the filter or selected condition changes.
  observe({
    pal <- colorpal()
    fd <- filteredData()
    selected <- fd[[input$select]]
    leafletProxy("map", data = fd) %>%
      # BUG FIX: addCircleMarkers() creates *markers*, which clearShapes()
      # does not remove; the original left stale markers on the map after
      # every input change.  clearMarkers() removes them.
      clearMarkers() %>%
      addCircleMarkers(color = selected,
                       fillColor = ~pal(selected),
                       fillOpacity = 0.7,
                       popup = ~paste0("<b>GP:</b> ",
                                       fd$PNAME,
                                       "<br><b>Number of patients registered:</b> ",
                                       fd$totalPatients,
                                       "<br><b>Prescriptions per 100 patients:</b> ",
                                       round(selected, 2)))
  })
}
shinyApp(ui, server)
|
# Shared fixture: group labels used to test grouped partitioning.
# NOTE(review): `group` has 9 elements while `outcomes` below has 10 --
# confirm the length mismatch is intended by these tests.
group <- c("A", "B", "A", "B", "C", "C", "A", "A", "D")
test_that("get_partition_indices() works", {
# Seed fixed so the random partitions below are reproducible.
set.seed(0)
outcomes <- c(
"normal", "cancer", "normal", "normal", "cancer", "cancer",
"normal", "normal", "normal", "cancer"
)
# Grouped split: all members of a group land on the same side.
expect_equal(
get_partition_indices(outcomes,
training_frac = 0.8,
groups = group
),
c(1L, 3L, 5L, 6L, 7L, 8L, 9L)
)
set.seed(0)
# Ungrouped split: plain stratified sample of half the observations.
expect_equal(
get_partition_indices(outcomes,
training_frac = 0.5,
groups = NULL
),
c(1L, 2L, 3L, 5L, 7L)
)
# training_frac must lie strictly between 0 and 1.
expect_error(
get_partition_indices(outcomes, training_frac = 0),
"`training_frac` must be a numeric between 0 and 1."
)
})
test_that("create_grouped_data_partition() works", {
set.seed(0)
train_ind <- create_grouped_data_partition(group, 0.8)
expect_equal(train_ind, c(1L, 3L, 5L, 6L, 7L, 8L, 9L))
# Train and test sets must not share any group labels.
expect_false(any(group[train_ind] %in% group[-train_ind]))
expect_false(any(group[-train_ind] %in% group[train_ind]))
# The training fraction is an upper bound, not an exact size.
expect_true(length(train_ind) / length(group) <= 0.8)
})
| /tests/testthat/test-partition.R | permissive | minghao2016/mikropml | R | false | false | 1,039 | r |
# Shared fixture: group labels used to test grouped partitioning.
# NOTE(review): `group` has 9 elements while `outcomes` below has 10 --
# confirm the length mismatch is intended by these tests.
group <- c("A", "B", "A", "B", "C", "C", "A", "A", "D")
test_that("get_partition_indices() works", {
# Seed fixed so the random partitions below are reproducible.
set.seed(0)
outcomes <- c(
"normal", "cancer", "normal", "normal", "cancer", "cancer",
"normal", "normal", "normal", "cancer"
)
# Grouped split: all members of a group land on the same side.
expect_equal(
get_partition_indices(outcomes,
training_frac = 0.8,
groups = group
),
c(1L, 3L, 5L, 6L, 7L, 8L, 9L)
)
set.seed(0)
# Ungrouped split: plain stratified sample of half the observations.
expect_equal(
get_partition_indices(outcomes,
training_frac = 0.5,
groups = NULL
),
c(1L, 2L, 3L, 5L, 7L)
)
# training_frac must lie strictly between 0 and 1.
expect_error(
get_partition_indices(outcomes, training_frac = 0),
"`training_frac` must be a numeric between 0 and 1."
)
})
test_that("create_grouped_data_partition() works", {
set.seed(0)
train_ind <- create_grouped_data_partition(group, 0.8)
expect_equal(train_ind, c(1L, 3L, 5L, 6L, 7L, 8L, 9L))
# Train and test sets must not share any group labels.
expect_false(any(group[train_ind] %in% group[-train_ind]))
expect_false(any(group[-train_ind] %in% group[train_ind]))
# The training fraction is an upper bound, not an exact size.
expect_true(length(train_ind) / length(group) <= 0.8)
})
|
# Script to run contemporary species distribution model for Papilio cresphontes & Zanthoxylum americanum
# Jeff Oliver
# jcoliver@email.arizona.edu
# 2018-01-10
rm(list = ls())  # NOTE(review): clearing the global env in a script is discouraged; kept for compatibility with the original workflow.
################################################################################
# SETUP
# Gather path information
# Load dependencies
butterfly.data.file <- "data/Papilio_cresphontes_data.csv"
plant.data.file <- "data/Zanthoxylum_americanum_data.csv"
outprefix <- "Papilio_cresphontes"
outpath <- "img/"
# Make sure the output path ends with "/" (and append one if it doesn't)
if (substring(text = outpath, first = nchar(outpath), last = nchar(outpath)) != "/") {
  outpath <- paste0(outpath, "/")
}
# Make sure directories are writable
required.writables <- c("data", outpath)
write.access <- file.access(names = required.writables)
if (any(write.access != 0)) {
  stop(paste0("You do not have sufficient write access to one or more directories. ",
              "The following directories do not appear writable: \n",
              paste(required.writables[write.access != 0], collapse = "\n")))
}
# Load dependencies, keeping track of any that fail
required.packages <- c("raster", "sp", "dismo", "maptools")
missing.packages <- character(0)
for (one.package in required.packages) {
  if (!suppressMessages(require(package = one.package, character.only = TRUE))) {
    # FIX: grow a character vector with c(); the original used cbind(),
    # which silently turns missing.packages into a 1-row character matrix.
    missing.packages <- c(missing.packages, one.package)
  }
}
if (length(missing.packages) > 0) {
  # FIX: the trailing "." is now part of the message instead of being a
  # stray second argument to stop().
  stop(paste0("Missing one or more required packages. ",
              "The following packages are required for run-sdm: ",
              paste(missing.packages, collapse = ", "), ".\n"))
}
source(file = "functions/sdm-functions.R")
################################################################################
# ANALYSES
# Prepare data
# Run species distribution modeling
# Combine results from butterflies and plants
# Prepare data (PrepareData / SDMRaster / StackTwoRasters come from
# functions/sdm-functions.R, sourced in the setup section above)
butterfly.data <- PrepareData(file = butterfly.data.file)
plant.data <- PrepareData(file = plant.data.file)
# Run species distribution modeling
butterfly.raster <- SDMRaster(data = butterfly.data)
plant.raster <- SDMRaster(data = plant.data)
# Combine results from butterflies and plants
combined.raster <- StackTwoRasters(raster1 = butterfly.raster,
raster2 = plant.raster)
# Calculate the % of plant range occupied by butterfly
# Pixel value 2 = plant only, 3 = both species (inferred from the legend
# below -- confirm against StackTwoRasters()).
# NOTE(review): plant.percent is computed but never printed or saved.
pixel.freqs <- freq(combined.raster)
plants <- pixel.freqs[which(pixel.freqs[, 1] == 2), 2]
both <- pixel.freqs[which(pixel.freqs[, 1] == 3), 2]
plant.percent <- round(100 * (both/(plants + both)), 2)
################################################################################
# PLOT
# Determine size of plot
# Plot to PNG file (comment fixed: the device opened below is png(), not pdf)
# Add small value to all raster pixels so plot is colored correctly
combined.raster <- combined.raster + 0.00001
# Determine the geographic extent of our plot
xmin <- extent(combined.raster)[1]
xmax <- extent(combined.raster)[2]
ymin <- extent(combined.raster)[3]
ymax <- extent(combined.raster)[4]
# Plot the models for butterfly, plant and overlap; save to png
plot.file <- paste0(outpath, outprefix, "-current-pairwise.png")
png(file = plot.file)
breakpoints <- c(0, 1, 2, 3, 4)
plot.colors <- c("white", "purple3","darkolivegreen4", "orangered4", "black")
# Load in data for map borders
data(wrld_simpl)
# Draw the base map
plot(wrld_simpl, xlim = c(xmin, xmax), ylim = c(ymin, ymax), axes = TRUE, col = "gray95")
# Add the model rasters
plot(combined.raster, legend = FALSE, add = TRUE, breaks = breakpoints, col = plot.colors)
# Redraw the borders of the base map
plot(wrld_simpl, xlim = c(xmin, xmax), ylim = c(ymin, ymax), add = TRUE, border = "gray10", col = NA)
# Add the legend
legend("topright", legend = c("Insect", "Plant", "Both"), fill = plot.colors[2:4], bg = "#FFFFFF")
# Add bounding box around map
box()
# Stop re-direction to PNG graphics device
dev.off()
rm(list = ls()) | /biodiversity-sdm-lesson/dev/Papilio_cresphontes-example-current-pairwise.R | permissive | Barnard-Botany/Spring-2020-Course-Development | R | false | false | 3,879 | r | # Script to run contemporary species distribution model for Papilio cresphontes & Zanthoxylum americanum
# Jeff Oliver
# jcoliver@email.arizona.edu
# 2018-01-10
rm(list = ls())  # NOTE(review): clearing the global env in a script is discouraged; kept for compatibility with the original workflow.
################################################################################
# SETUP
# Gather path information
# Load dependencies
butterfly.data.file <- "data/Papilio_cresphontes_data.csv"
plant.data.file <- "data/Zanthoxylum_americanum_data.csv"
outprefix <- "Papilio_cresphontes"
outpath <- "img/"
# Make sure the output path ends with "/" (and append one if it doesn't)
if (substring(text = outpath, first = nchar(outpath), last = nchar(outpath)) != "/") {
  outpath <- paste0(outpath, "/")
}
# Make sure directories are writable
required.writables <- c("data", outpath)
write.access <- file.access(names = required.writables)
if (any(write.access != 0)) {
  stop(paste0("You do not have sufficient write access to one or more directories. ",
              "The following directories do not appear writable: \n",
              paste(required.writables[write.access != 0], collapse = "\n")))
}
# Load dependencies, keeping track of any that fail
required.packages <- c("raster", "sp", "dismo", "maptools")
missing.packages <- character(0)
for (one.package in required.packages) {
  if (!suppressMessages(require(package = one.package, character.only = TRUE))) {
    # FIX: grow a character vector with c(); the original used cbind(),
    # which silently turns missing.packages into a 1-row character matrix.
    missing.packages <- c(missing.packages, one.package)
  }
}
if (length(missing.packages) > 0) {
  # FIX: the trailing "." is now part of the message instead of being a
  # stray second argument to stop().
  stop(paste0("Missing one or more required packages. ",
              "The following packages are required for run-sdm: ",
              paste(missing.packages, collapse = ", "), ".\n"))
}
source(file = "functions/sdm-functions.R")
################################################################################
# ANALYSES
# Prepare data
# Run species distribution modeling
# Combine results from butterflies and plants
# Prepare data (PrepareData / SDMRaster / StackTwoRasters come from
# functions/sdm-functions.R, sourced in the setup section above)
butterfly.data <- PrepareData(file = butterfly.data.file)
plant.data <- PrepareData(file = plant.data.file)
# Run species distribution modeling
butterfly.raster <- SDMRaster(data = butterfly.data)
plant.raster <- SDMRaster(data = plant.data)
# Combine results from butterflies and plants
combined.raster <- StackTwoRasters(raster1 = butterfly.raster,
raster2 = plant.raster)
# Calculate the % of plant range occupied by butterfly
# Pixel value 2 = plant only, 3 = both species (inferred from the legend
# below -- confirm against StackTwoRasters()).
# NOTE(review): plant.percent is computed but never printed or saved.
pixel.freqs <- freq(combined.raster)
plants <- pixel.freqs[which(pixel.freqs[, 1] == 2), 2]
both <- pixel.freqs[which(pixel.freqs[, 1] == 3), 2]
plant.percent <- round(100 * (both/(plants + both)), 2)
################################################################################
# PLOT
# Determine size of plot
# Plot to PNG file (comment fixed: the device opened below is png(), not pdf)
# Add small value to all raster pixels so plot is colored correctly
combined.raster <- combined.raster + 0.00001
# Determine the geographic extent of our plot
xmin <- extent(combined.raster)[1]
xmax <- extent(combined.raster)[2]
ymin <- extent(combined.raster)[3]
ymax <- extent(combined.raster)[4]
# Plot the models for butterfly, plant and overlap; save to png
plot.file <- paste0(outpath, outprefix, "-current-pairwise.png")
png(file = plot.file)
breakpoints <- c(0, 1, 2, 3, 4)
plot.colors <- c("white", "purple3","darkolivegreen4", "orangered4", "black")
# Load in data for map borders
data(wrld_simpl)
# Draw the base map
plot(wrld_simpl, xlim = c(xmin, xmax), ylim = c(ymin, ymax), axes = TRUE, col = "gray95")
# Add the model rasters
plot(combined.raster, legend = FALSE, add = TRUE, breaks = breakpoints, col = plot.colors)
# Redraw the borders of the base map
plot(wrld_simpl, xlim = c(xmin, xmax), ylim = c(ymin, ymax), add = TRUE, border = "gray10", col = NA)
# Add the legend
legend("topright", legend = c("Insect", "Plant", "Both"), fill = plot.colors[2:4], bg = "#FFFFFF")
# Add bounding box around map
box()
# Stop re-direction to PNG graphics device
dev.off()
rm(list = ls()) |
rm(list = ls())
# NOTE(review): setwd() to the sourced file's directory via
# parent.frame(2)$ofile is an RStudio-era hack and only works when this
# file is source()d, not when run with Rscript.
setwd(dirname(parent.frame(2)$ofile))
library(ggplot2)
library(plyr)
library(dplyr)
library(tidyr)
library(readr)
# Shared helpers for naming, summarising and plotting experiment results.
source('./_function_task_expand_name.r')
source('./_compute_summary.r')
source('./_plot_parameter.r')
# Only the last `best.range` training steps are searched for the best model.
best.range = 5000
# Index (into `errors`) of the smallest validation error among the last
# `range` entries.  Generalized: the window size is now a parameter that
# defaults to the global best.range, so existing callers are unchanged.
#
# @param errors numeric vector of per-step validation errors.
# @param range  how many trailing steps to search (default: best.range).
# @return integer index into `errors`; length(errors) when `errors` is empty.
best.model.step.fn = function (errors, range = best.range) {
  offset = max(length(errors) - range, 0)
  best.step = offset + which.min(tail(errors, range))
  if (length(best.step) == 0) {
    # which.min(numeric(0)) yields integer(0): no data, fall back to the end.
    return(length(errors))
  } else {
    return(best.step)
  }
}
# First element of `steps` whose corresponding error is below `threshold`,
# or NA when the threshold is never reached.
#
# Uses base subscripting -- which(...)[1] is NA_integer_ when there is no
# match -- instead of the original dplyr::first(), removing a package
# dependency from this helper while keeping identical behaviour.
first.solved.step = function (steps, errors, threshold) {
  index = which(errors < threshold)[1]
  if (is.na(index)) {
    return(NA)
  } else {
    return(steps[index])
  }
}
# Expected MSE thresholds per operation/parameter setting, used to decide
# whether a run counts as "solved".
eps = read_csv('../results/function_task_static_mse_expectation.csv') %>%
filter(simple == FALSE & parameter == 'subset.ratio') %>%
mutate(
operation = revalue(operation, operation.full.to.short)
) %>%
select(operation, input.size, overlap.ratio, subset.ratio, extrapolation.range, threshold)
# Plot configuration for the subset-ratio experiment.
name.parameter = 'subset.ratio'
plot.label = 'Relative size of subsets compared to input size'
plot.x.breaks = waiver()
name.input = '../results/function_task_static_mul_subset.csv'
name.output = '../paper/results/simple_function_static_mul_subset.pdf'
# Join raw results with their thresholds and expose the swept parameter
# under the generic column name `parameter`.
dat = expand.name(read_csv(name.input)) %>%
merge(eps) %>%
mutate(
parameter = !!as.name(name.parameter)
)
# Per-run summary taken at the best (lowest validation error) model step.
dat.last = dat %>%
group_by(name, parameter) %>%
#filter(n() == 201) %>%
summarise(
threshold = last(threshold),
best.model.step = best.model.step.fn(metric.valid.interpolation),
interpolation.last = metric.valid.interpolation[best.model.step],
extrapolation.last = metric.test.extrapolation[best.model.step],
interpolation.step.solved = first.solved.step(step, metric.valid.interpolation, threshold),
extrapolation.step.solved = first.solved.step(step, metric.test.extrapolation, threshold),
sparse.error.max = sparse.error.max[best.model.step],
solved = replace_na(metric.test.extrapolation[best.model.step] < threshold, FALSE),
model = last(model),
operation = last(operation),
seed = last(seed),
size = n()
)
# Aggregate per model/operation/parameter; success rates etc. are computed
# by compute.summary from _compute_summary.r.
dat.last.rate = dat.last %>%
group_by(model, operation, parameter) %>%
group_modify(compute.summary)
# Render and save the final figure.
p = plot.parameter(dat.last.rate, plot.label, plot.x.breaks)
print(p)
ggsave(name.output, p, device="pdf", width = 13.968, height = 5.7, scale=1.4, units = "cm")
| /export/function_task_static_mul_subset.r | permissive | AndreasMadsen/stable-nalu | R | false | false | 2,378 | r | rm(list = ls())
# NOTE(review): setwd() to the sourced file's directory via
# parent.frame(2)$ofile is an RStudio-era hack and only works when this
# file is source()d, not when run with Rscript.
setwd(dirname(parent.frame(2)$ofile))
library(ggplot2)
library(plyr)
library(dplyr)
library(tidyr)
library(readr)
# Shared helpers for naming, summarising and plotting experiment results.
source('./_function_task_expand_name.r')
source('./_compute_summary.r')
source('./_plot_parameter.r')
# Only the last `best.range` training steps are searched for the best model.
best.range = 5000
# Index (into `errors`) of the smallest validation error among the last
# `range` entries.  Generalized: the window size is now a parameter that
# defaults to the global best.range, so existing callers are unchanged.
#
# @param errors numeric vector of per-step validation errors.
# @param range  how many trailing steps to search (default: best.range).
# @return integer index into `errors`; length(errors) when `errors` is empty.
best.model.step.fn = function (errors, range = best.range) {
  offset = max(length(errors) - range, 0)
  best.step = offset + which.min(tail(errors, range))
  if (length(best.step) == 0) {
    # which.min(numeric(0)) yields integer(0): no data, fall back to the end.
    return(length(errors))
  } else {
    return(best.step)
  }
}
# First element of `steps` whose corresponding error is below `threshold`,
# or NA when the threshold is never reached.
#
# Uses base subscripting -- which(...)[1] is NA_integer_ when there is no
# match -- instead of the original dplyr::first(), removing a package
# dependency from this helper while keeping identical behaviour.
first.solved.step = function (steps, errors, threshold) {
  index = which(errors < threshold)[1]
  if (is.na(index)) {
    return(NA)
  } else {
    return(steps[index])
  }
}
# Expected MSE thresholds per operation/parameter setting, used to decide
# whether a run counts as "solved".
eps = read_csv('../results/function_task_static_mse_expectation.csv') %>%
filter(simple == FALSE & parameter == 'subset.ratio') %>%
mutate(
operation = revalue(operation, operation.full.to.short)
) %>%
select(operation, input.size, overlap.ratio, subset.ratio, extrapolation.range, threshold)
# Plot configuration for the subset-ratio experiment.
name.parameter = 'subset.ratio'
plot.label = 'Relative size of subsets compared to input size'
plot.x.breaks = waiver()
name.input = '../results/function_task_static_mul_subset.csv'
name.output = '../paper/results/simple_function_static_mul_subset.pdf'
# Join raw results with their thresholds and expose the swept parameter
# under the generic column name `parameter`.
dat = expand.name(read_csv(name.input)) %>%
merge(eps) %>%
mutate(
parameter = !!as.name(name.parameter)
)
# Per-run summary taken at the best (lowest validation error) model step.
dat.last = dat %>%
group_by(name, parameter) %>%
#filter(n() == 201) %>%
summarise(
threshold = last(threshold),
best.model.step = best.model.step.fn(metric.valid.interpolation),
interpolation.last = metric.valid.interpolation[best.model.step],
extrapolation.last = metric.test.extrapolation[best.model.step],
interpolation.step.solved = first.solved.step(step, metric.valid.interpolation, threshold),
extrapolation.step.solved = first.solved.step(step, metric.test.extrapolation, threshold),
sparse.error.max = sparse.error.max[best.model.step],
solved = replace_na(metric.test.extrapolation[best.model.step] < threshold, FALSE),
model = last(model),
operation = last(operation),
seed = last(seed),
size = n()
)
# Aggregate per model/operation/parameter; success rates etc. are computed
# by compute.summary from _compute_summary.r.
dat.last.rate = dat.last %>%
group_by(model, operation, parameter) %>%
group_modify(compute.summary)
# Render and save the final figure.
p = plot.parameter(dat.last.rate, plot.label, plot.x.breaks)
print(p)
ggsave(name.output, p, device="pdf", width = 13.968, height = 5.7, scale=1.4, units = "cm")
|
# NOTE(review): hard-coded absolute path + setwd() makes this script
# non-portable; consider relative paths.
setwd("C:/rprogramming/household")
## Getting data ("?" marks missing measurements in this dataset)
dataset <- read.table("household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
## Subsetting data and convert Date-Time
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
# Keep only the two target days.
data <- subset(dataset, Date == "2007-02-01" | Date == "2007-02-02")
dates <- as.character(data$Date)
times <- as.character(data$Time)
# NOTE(review): the "MSK" suffix pasted here is not interpreted -- the
# timestamps are then parsed with tz "GMT" below; confirm the intended zone.
x <- paste(dates, times, "MSK")
data$Time <- as.POSIXlt(x, "GMT")
## Making plot to png-file: the three sub-metering channels over time.
png(file = "plot3.png")
plot(data$Time, data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", lty = "solid", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
## days of the week (Thu, Fri, Sat) on the x-axis automatically displayed in russian language | /plot3.r | no_license | Leanide/ExData_Plotting1 | R | false | false | 972 | r | setwd("C:/rprogramming/household")
## Getting data ("?" marks missing measurements in this dataset)
dataset <- read.table("household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
## Subsetting data and convert Date-Time
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
# Keep only the two target days.
data <- subset(dataset, Date == "2007-02-01" | Date == "2007-02-02")
dates <- as.character(data$Date)
times <- as.character(data$Time)
# NOTE(review): the "MSK" suffix pasted here is not interpreted -- the
# timestamps are then parsed with tz "GMT" below; confirm the intended zone.
x <- paste(dates, times, "MSK")
data$Time <- as.POSIXlt(x, "GMT")
## Making plot to png-file: the three sub-metering channels over time.
png(file = "plot3.png")
plot(data$Time, data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", lty = "solid", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
## days of the week (Thu, Fri, Sat) on the x-axis automatically displayed in russian language |
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{isAvailable}
\alias{isAvailable}
\alias{verifyAvailable}
\title{Check if RStudio is running.}
\usage{
isAvailable(version_needed = NULL)
verifyAvailable(version_needed = NULL)
}
\arguments{
\item{version_needed}{An optional version specification.
If supplied, ensures that RStudio is at least that
version.}
}
\value{
\code{isAvailable} a boolean; \code{verifyAvailable} an error message
if RStudio is not running
}
\description{
Check if RStudio is running.
}
\examples{
rstudioapi::isAvailable()
\dontrun{rstudioapi::verifyAvailable()}
}
| /man/isAvailable.Rd | no_license | Libardo1/rstudioapi | R | false | false | 609 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{isAvailable}
\alias{isAvailable}
\alias{verifyAvailable}
\title{Check if RStudio is running.}
\usage{
isAvailable(version_needed = NULL)
verifyAvailable(version_needed = NULL)
}
\arguments{
\item{version_needed}{An optional version specification.
If supplied, ensures that RStudio is at least that
version.}
}
\value{
\code{isAvailable} a boolean; \code{verifyAvailable} an error message
if RStudio is not running
}
\description{
Check if RStudio is running.
}
\examples{
rstudioapi::isAvailable()
\dontrun{rstudioapi::verifyAvailable()}
}
|
# caret model definition for "rpart2": CART trees tuned over the maximum
# tree depth (rather than the complexity parameter cp used by "rpart").
# Each element below implements one hook of caret's modelInfo protocol.
modelInfo <- list(label = "CART",
                  library = "rpart",
                  type = c("Regression", "Classification"),
                  # Single tuning parameter: the maximum depth of the tree.
                  parameters = data.frame(parameter = c('maxdepth'),
                                          class = c("numeric"),
                                          label = c("Max Tree Depth")),
                  # Build the tuning grid from an unpruned initial fit: the
                  # observed split counts (1..30) become candidate depths.
                  grid = function(x, y, len = NULL){
                    dat <- if(is.data.frame(x)) x else as.data.frame(x)
                    dat$.outcome <- y
                    # cp = 0 so the cptable records every possible split.
                    initialFit <- rpart(.outcome ~ .,
                                        data = dat,
                                        control = rpart.control(cp = 0))$cptable
                    initialFit <- initialFit[order(-initialFit[,"CP"]), "nsplit", drop = FALSE]
                    initialFit <- initialFit[initialFit[,"nsplit"] > 0 & initialFit[,"nsplit"] <= 30, , drop = FALSE]
                    if(dim(initialFit)[1] < len)
                    {
                      cat("note: only", nrow(initialFit),
                          "possible values of the max tree depth from the initial fit.\n",
                          "Truncating the grid to", nrow(initialFit), ".\n\n")
                      tuneSeq <- as.data.frame(initialFit)
                    } else tuneSeq <- as.data.frame(initialFit[1:len,])
                    colnames(tuneSeq) <- "maxdepth"
                    tuneSeq
                  },
                  # Fit only once at the largest depth; shallower depths are
                  # recovered later by pruning (see predict/prob).
                  loop = function(grid) {
                    grid <- grid[order(grid$maxdepth, decreasing = TRUE),, drop = FALSE]
                    loop <- grid[1,,drop = FALSE]
                    submodels <- list(grid[-1,,drop = FALSE])
                    list(loop = loop, submodels = submodels)
                  },
                  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
                    theDots <- list(...)
                    # Respect a user-supplied rpart.control, but pin maxdepth
                    # to the current tuning value and disable internal CV.
                    if(any(names(theDots) == "control"))
                    {
                      theDots$control$maxdepth <- param$maxdepth
                      theDots$control$xval <- 0
                      ctl <- theDots$control
                      theDots$control <- NULL
                    } else ctl <- rpart.control(maxdepth = param$maxdepth, xval = 0)
                    ## check to see if weights were passed in (and available)
                    if(!is.null(wts)) theDots$weights <- wts
                    modelArgs <- c(list(formula = as.formula(".outcome ~ ."),
                                        data = if(is.data.frame(x)) x else as.data.frame(x),
                                        control = ctl),
                                   theDots)
                    modelArgs$data$.outcome <- y
                    out <- do.call("rpart", modelArgs)
                    out
                  },
                  predict = function(modelFit, newdata, submodels = NULL) {
                    ## Models are indexed by Cp so approximate the Cp for
                    ## the value of maxdepth
                    depth2cp <- function(x, depth)
                    {
                      out <- approx(x[,"nsplit"], x[,"CP"], depth)$y
                      # Depths beyond the fitted tree map to just under the
                      # smallest CP (i.e. no pruning at all).
                      out[depth > max(x[,"nsplit"])] <- min(x[,"CP"]) * .99
                      out
                    }
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
                    pType <- if(modelFit$problemType == "Classification") "class" else "vector"
                    out <- predict(modelFit, newdata, type=pType)
                    # Predictions for the shallower submodels come from
                    # pruning the single deep fit at the matching CP values.
                    if(!is.null(submodels))
                    {
                      tmp <- vector(mode = "list", length = nrow(submodels) + 1)
                      tmp[[1]] <- out
                      cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
                      for(j in seq(along = cpValues))
                      {
                        prunedFit <- prune.rpart(modelFit, cp = cpValues[j])
                        tmp[[j+1]] <- predict(prunedFit, newdata, type=pType)
                      }
                      out <- tmp
                    }
                    out
                  },
                  # Class probabilities; mirrors predict() incl. pruning.
                  prob = function(modelFit, newdata, submodels = NULL) {
                    depth2cp <- function(x, depth)
                    {
                      out <- approx(x[,"nsplit"], x[,"CP"], depth)$y
                      out[depth > max(x[,"nsplit"])] <- min(x[,"CP"]) * .99
                      out
                    }
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
                    out <- predict(modelFit, newdata, type = "prob")
                    if(!is.null(submodels))
                    {
                      tmp <- vector(mode = "list", length = nrow(submodels) + 1)
                      tmp[[1]] <- out
                      cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
                      for(j in seq(along = cpValues))
                      {
                        prunedFit <- prune.rpart(modelFit, cp = cpValues[j])
                        tmpProb <- predict(prunedFit, newdata, type = "prob")
                        tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, drop = FALSE])
                      }
                      out <- tmp
                    }
                    out
                  },
                  # Predictors actually used by the tree, optionally
                  # including surrogate-split variables.
                  predictors = function(x, surrogate = TRUE, ...) {
                    out <- as.character(x$frame$var)
                    out <- out[!(out %in% c("<leaf>"))]
                    if(surrogate)
                    {
                      splits <- x$splits
                      splits <- splits[splits[,"adj"] > 0,]
                      out <- c(out, rownames(splits))
                    }
                    unique(out)
                  },
                  # Variable importance: sum of improvement attributed to
                  # each variable over its primary/competing/surrogate splits.
                  varImp = function(object, surrogates = FALSE, competes = TRUE, ...) {
                    tmp <- rownames(object$splits)
                    rownames(object$splits) <- 1:nrow(object$splits)
                    splits <- data.frame(object$splits)
                    splits$var <- tmp
                    splits$type <- ""
                    frame <- as.data.frame(object$frame)
                    index <- 0
                    # Walk the frame to label each row of `splits` as a
                    # primary, competing or surrogate split.
                    for(i in 1:nrow(frame)) {
                      if(frame$var[i] != "<leaf>") {
                        index <- index + 1
                        splits$type[index] <- "primary"
                        if(frame$ncompete[i] > 0) {
                          for(j in 1:frame$ncompete[i]) {
                            index <- index + 1
                            splits$type[index] <- "competing"
                          }
                        }
                        if(frame$nsurrogate[i] > 0) {
                          for(j in 1:frame$nsurrogate[i]) {
                            index <- index + 1
                            splits$type[index] <- "surrogate"
                          }
                        }
                      }
                    }
                    splits$var <- factor(as.character(splits$var))
                    if(!surrogates) splits <- subset(splits, type != "surrogate")
                    if(!competes) splits <- subset(splits, type != "competing")
                    out <- aggregate(splits$improve,
                                     list(Variable = splits$var),
                                     sum,
                                     na.rm = TRUE)
                    # Variables never used by any split get importance zero.
                    allVars <- colnames(attributes(object$terms)$factors)
                    if(!all(allVars %in% out$Variable)) {
                      missingVars <- allVars[!(allVars %in% out$Variable)]
                      zeros <- data.frame(x = rep(0, length(missingVars)),
                                          Variable = missingVars)
                      out <- rbind(out, zeros)
                    }
                    out2 <- data.frame(Overall = out$x)
                    rownames(out2) <- out$Variable
                    out2
                  },
                  levels = function(x) x$obsLevels,
                  # Strip bulky components before the fitted model is saved.
                  trim = function(x) {
                    x$call <- list(na.action = (x$call)$na.action)
                    x$x <- NULL
                    x$y <- NULL
                    x$where <- NULL
                    x
                  },
                  tags = c("Tree-Based Model", "Implicit Feature Selection"),
                  sort = function(x) x[order(x[,1]),])
| /models/files/rpart2.R | no_license | bleutner/caret | R | false | false | 8,742 | r | modelInfo <- list(label = "CART",
library = "rpart",
type = c("Regression", "Classification"),
parameters = data.frame(parameter = c('maxdepth'),
class = c("numeric"),
label = c("Max Tree Depth")),
grid = function(x, y, len = NULL){
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
initialFit <- rpart(.outcome ~ .,
data = dat,
control = rpart.control(cp = 0))$cptable
initialFit <- initialFit[order(-initialFit[,"CP"]), "nsplit", drop = FALSE]
initialFit <- initialFit[initialFit[,"nsplit"] > 0 & initialFit[,"nsplit"] <= 30, , drop = FALSE]
if(dim(initialFit)[1] < len)
{
cat("note: only", nrow(initialFit),
"possible values of the max tree depth from the initial fit.\n",
"Truncating the grid to", nrow(initialFit), ".\n\n")
tuneSeq <- as.data.frame(initialFit)
} else tuneSeq <- as.data.frame(initialFit[1:len,])
colnames(tuneSeq) <- "maxdepth"
tuneSeq
},
loop = function(grid) {
grid <- grid[order(grid$maxdepth, decreasing = TRUE),, drop = FALSE]
loop <- grid[1,,drop = FALSE]
submodels <- list(grid[-1,,drop = FALSE])
list(loop = loop, submodels = submodels)
},
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
theDots <- list(...)
if(any(names(theDots) == "control"))
{
theDots$control$maxdepth <- param$maxdepth
theDots$control$xval <- 0
ctl <- theDots$control
theDots$control <- NULL
} else ctl <- rpart.control(maxdepth = param$maxdepth, xval = 0)
## check to see if weights were passed in (and availible)
if(!is.null(wts)) theDots$weights <- wts
modelArgs <- c(list(formula = as.formula(".outcome ~ ."),
data = if(is.data.frame(x)) x else as.data.frame(x),
control = ctl),
theDots)
modelArgs$data$.outcome <- y
out <- do.call("rpart", modelArgs)
out
},
predict = function(modelFit, newdata, submodels = NULL) {
## Models are indexed by Cp so approximate the Cp for
## the value of maxdepth
depth2cp <- function(x, depth)
{
out <- approx(x[,"nsplit"], x[,"CP"], depth)$y
out[depth > max(x[,"nsplit"])] <- min(x[,"CP"]) * .99
out
}
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
pType <- if(modelFit$problemType == "Classification") "class" else "vector"
out <- predict(modelFit, newdata, type=pType)
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
for(j in seq(along = cpValues))
{
prunedFit <- prune.rpart(modelFit, cp = cpValues[j])
tmp[[j+1]] <- predict(prunedFit, newdata, type=pType)
}
out <- tmp
}
out
},
prob = function(modelFit, newdata, submodels = NULL) {
depth2cp <- function(x, depth)
{
out <- approx(x[,"nsplit"], x[,"CP"], depth)$y
out[depth > max(x[,"nsplit"])] <- min(x[,"CP"]) * .99
out
}
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
out <- predict(modelFit, newdata, type = "prob")
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
cpValues <- depth2cp(modelFit$cptable, submodels$maxdepth)
for(j in seq(along = cpValues))
{
prunedFit <- prune.rpart(modelFit, cp = cpValues[j])
tmpProb <- predict(prunedFit, newdata, type = "prob")
tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, drop = FALSE])
}
out <- tmp
}
out
},
predictors = function(x, surrogate = TRUE, ...) {
out <- as.character(x$frame$var)
out <- out[!(out %in% c("<leaf>"))]
if(surrogate)
{
splits <- x$splits
splits <- splits[splits[,"adj"] > 0,]
out <- c(out, rownames(splits))
}
unique(out)
},
varImp = function(object, surrogates = FALSE, competes = TRUE, ...) {
tmp <- rownames(object$splits)
rownames(object$splits) <- 1:nrow(object$splits)
splits <- data.frame(object$splits)
splits$var <- tmp
splits$type <- ""
frame <- as.data.frame(object$frame)
index <- 0
for(i in 1:nrow(frame)) {
if(frame$var[i] != "<leaf>") {
index <- index + 1
splits$type[index] <- "primary"
if(frame$ncompete[i] > 0) {
for(j in 1:frame$ncompete[i]) {
index <- index + 1
splits$type[index] <- "competing"
}
}
if(frame$nsurrogate[i] > 0) {
for(j in 1:frame$nsurrogate[i]) {
index <- index + 1
splits$type[index] <- "surrogate"
}
}
}
}
splits$var <- factor(as.character(splits$var))
if(!surrogates) splits <- subset(splits, type != "surrogate")
if(!competes) splits <- subset(splits, type != "competing")
out <- aggregate(splits$improve,
list(Variable = splits$var),
sum,
na.rm = TRUE)
allVars <- colnames(attributes(object$terms)$factors)
if(!all(allVars %in% out$Variable)) {
missingVars <- allVars[!(allVars %in% out$Variable)]
zeros <- data.frame(x = rep(0, length(missingVars)),
Variable = missingVars)
out <- rbind(out, zeros)
}
out2 <- data.frame(Overall = out$x)
rownames(out2) <- out$Variable
out2
},
levels = function(x) x$obsLevels,
trim = function(x) {
x$call <- list(na.action = (x$call)$na.action)
x$x <- NULL
x$y <- NULL
x$where <- NULL
x
},
tags = c("Tree-Based Model", "Implicit Feature Selection"),
sort = function(x) x[order(x[,1]),])
|
\name{NISTpoiseTOpascalSec}
\alias{NISTpoiseTOpascalSec}
\title{Convert poise to pascal second }
\usage{NISTpoiseTOpascalSec(poise)}
\description{\code{NISTpoiseTOpascalSec} converts from poise (P) to pascal second (Pa * s) }
\arguments{
\item{poise}{poise (P) }
}
\value{pascal second (Pa * s) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTpoiseTOpascalSec(10)
}
\keyword{programming} | /man/NISTpoiseTOpascalSec.Rd | no_license | cran/NISTunits | R | false | false | 766 | rd | \name{NISTpoiseTOpascalSec}
\alias{NISTpoiseTOpascalSec}
\title{Convert poise to pascal second }
\usage{NISTpoiseTOpascalSec(poise)}
\description{\code{NISTpoiseTOpascalSec} converts from poise (P) to pascal second (Pa * s) }
\arguments{
\item{poise}{poise (P) }
}
\value{pascal second (Pa * s) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTpoiseTOpascalSec(10)
}
\keyword{programming} |
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TRUE when `x` has a row dimension (matrix / data.frame), i.e. nrow() is
# defined for it.
has.rows = function(x) !is.null(nrow(x))
# Predicate over a collection: do all elements have rows?
all.have.rows = Curry(all.predicate, P = has.rows)
# "Length" that counts rows for rectangular data, elements otherwise.
rmr.length = NROW
# C-level vectorised rmr.length over a list of objects.
sapply.rmr.length =
  function(xx)
    .Call("sapply_rmr_length", xx, PACKAGE = "rmr2")
# Like sapply.rmr.length but with data.frame-specific ("lossy") counting.
# NOTE(review): exact semantics live in the C routine
# sapply_rmr_length_lossy_data_frame; confirm there.
sapply.rmr.length.lossy.data.frame =
  function(xx)
    .Call("sapply_rmr_length_lossy_data_frame", xx, PACKAGE = "rmr2")
# Elementwise "equals" between a collection `xx` and a single record `y`:
# - empty `xx`: logical(0)
# - atomic non-matrix: plain vectorised ==
# - matrix/data.frame: full-row comparison of each row of `xx` against the
#   single row `y` (y is replicated to match)
# - anything else (lists): per-element all.equal against the 1-element
#   list `y`, ignoring attributes.
rmr.equal =
  function(xx, y) {
    if(rmr.length(xx) == 0) logical()
    else {
      if(is.atomic(xx) && !is.matrix(xx)) xx == y
      else {
        if(is.matrix(xx) || is.data.frame(xx))
          rowSums(xx == y[rep.int(1, rmr.length(xx)),, drop = FALSE]) == ncol(y)
        else
          sapply(xx, function(x) isTRUE(all.equal(list(x), y, check.attributes = FALSE)))}}}
# S3 length() method for keyval objects: the longer of the two halves
# (one side may be NULL).
length.keyval =
  function(kv)
    max(rmr.length(keys(kv)),
        rmr.length(values(kv)))
# Construct a key-value pair object (a list with elements key and val).
# keyval(x) is shorthand for keyval(NULL, x); when both sides are non-empty
# they are recycled to a common length.  Any other combination with a
# non-NULL key is invalid and raises an error (after dumping the inputs
# via rmr.str for debugging).
keyval =
  function(key, val = NULL) {
    if(missing(val)) keyval(key = NULL, val = key)
    else {
      if(rmr.length(key) > 0 && rmr.length(val) > 0)
        return(recycle.keyval(list(key = key, val = val)))
      else {
        if(rmr.length(key) == 0 && rmr.length(val) == 0)
          return(list(key = NULL, val = NULL))
        else {
          if(is.null(key) && rmr.length(val) > 0)
            return(list(key = NULL, val = val))}}
      rmr.str(list(key, val));
      stop("invalid key val combination")}}
# Accessors for the two halves of a keyval object.
keys = function(kv) kv$key
values = function(kv) kv$val
is.keyval =
function(x) {
is.list(x) &&
length(x) == 2 &&
!is.null(names(x)) &&
all(names(x) == qw(key, val))}
as.keyval =
function(x) {
if(is.keyval(x)) x
else keyval(x)}
# Subset x by index r: row-wise for rectangular data (keeping the
# data.frame/matrix shape via drop = FALSE), element-wise otherwise.
rmr.slice =
function(x, r) {
if(has.rows(x))
x[r, , drop = FALSE]
else
x[r]}
# Recycle every element of args to a common length (the longest element's
# rmr.length), slicing each element with rmr.slice.
rmr.recycle =
function(args) {
index =
suppressWarnings(
# cbind recycles the shorter index columns to the longest one; the
# suppressed warning is the "length is not a multiple" recycling warning
do.call(cbind, lapply(args, function(x) 1:rmr.length(x))))
mapply(
rmr.slice,
args,
split(index, col(index)),
SIMPLIFY = FALSE)}
recycle.keyval = rmr.recycle
# Take the r-th pair(s) out of a keyval, slicing keys and values in parallel.
slice.keyval =
function(kv, r) {
keyval(rmr.slice(keys(kv), r),
rmr.slice(values(kv), r))}
# Drop NULL elements from a list (C implementation).
purge.nulls =
function(x)
.Call("null_purge", x, PACKAGE = "rmr2")
# rbind that falls back to a fill-based rbind when plain rbind fails
# (e.g. data frames with differing column sets).
rbind.anything =
function(...) {
tryCatch(
rbind(...),
error = function(e) rbind.fill.fast(...))}
# C-level equivalent of lapply(xx, as.character).
lapply.as.character =
function(xx)
.Call("lapply_as_character", xx, PACKAGE = "rmr2")
# C-level equivalents of sapply(xx, is.data.frame / is.matrix / is.factor).
are.data.frame =
function(xx)
.Call("are_data_frame", xx, PACKAGE = "rmr2")
are.matrix =
function(xx)
.Call("are_matrix", xx, PACKAGE = "rmr2")
are.factor =
function(xx)
.Call("are_factor", xx, PACKAGE = "rmr2")
# Concatenate a list of homogeneous pieces into a single object, picking
# the combining strategy from the element types:
#  - any data.frame present: rbind.fill.fast over everything coerced to
#    data.frame, with row names preserved and de-duplicated
#  - any matrix present: rbind
#  - all factors: concatenate their character forms, then re-factor
#    (plain c() on factors would drop levels)
#  - otherwise: plain c()
# NULL elements are purged first; an empty or all-NULL input yields NULL.
# Make.single.or.multi.arg (defined elsewhere) lets callers pass either a
# single list or multiple arguments.
c.or.rbind =
Make.single.or.multi.arg(
function(x) {
if(is.null(x))
NULL
else {
x = purge.nulls(x)
if(length(x) == 0)
NULL
else {
if(any(are.data.frame(x))) {
X = do.call(rbind.fill.fast, lapply(x, as.data.frame))
# keep original row names, made unique across the concatenated pieces
rownames(X) = make.unique(unlist(sapply(x, rownames)))
X}
else {
if(any(are.matrix(x)))
do.call(rbind,x)
else {
if(all(are.factor(x)))
as.factor(do.call(c, lapply.as.character(x)))
else
do.call(c,x)}}}}})
# Repeat the elements of x according to n (passed straight to rep(), so n
# may be a scalar repeating the whole sequence or a per-element count),
# concatenating first with c.or.rbind and then slicing out the repeats.
c.or.rbind.rep =
function(x, n) {
# seq_along() instead of the original 1:length(x): for an empty x,
# 1:length(x) evaluates to c(1, 0) and produces bogus indices, while
# seq_along(x) correctly yields integer(0).
ind = rep(seq_along(x), n)
rmr.slice(c.or.rbind(x), ind)}
# C-level per-element helpers over a list of keyval pairs:
# lengths of each pair, whether each pair's key is NULL, whether each
# element is a list, and extraction of all values / all keys.
sapply.length.keyval =
function(kvs)
.Call("sapply_length_keyval", kvs, PACKAGE = "rmr2")
sapply.null.keys =
function(kvs)
.Call("sapply_null_keys", kvs, PACKAGE = "rmr2")
sapply.is.list =
function(l)
.Call("sapply_is_list", l, PACKAGE = "rmr2")
lapply.values =
function(kvs)
.Call("lapply_values", kvs, PACKAGE = "rmr2")
lapply.keys =
function(kvs)
.Call("lapply_keys", kvs, PACKAGE = "rmr2")
# Concatenate a list of keyval pairs into one pair. Non-empty pairs must
# agree on whether keys are NULL; zero-length pairs are ignored by that
# check. Keys and values are each flattened with c.or.rbind.
c.keyval =
Make.single.or.multi.arg(
function(kvs) {
zero.length = as.logical(sapply.length.keyval(kvs) == 0)
null.keys = as.logical(sapply.null.keys(kvs))
# error out when some non-empty pairs have keys and others do not
if(!(all(null.keys | zero.length) || !any(null.keys & !zero.length))) {
stop("can't mix NULL and not NULL key keyval pairs")}
vv = lapply.values(kvs)
kk = lapply.keys(kvs)
keyval(c.or.rbind(kk), c.or.rbind(vv))})
# Split a data.frame x by the grouping ind, faster than base
# split.data.frame: each column is split independently, the per-group
# column fragments are reassembled into data frames with quickdf, and the
# original row names are restored on each group (delevel/quickdf are
# package helpers defined elsewhere).
split.data.frame.fast =
function(x, ind, drop) {
y =
do.call(
Curry(
mapply,
function(...)
quickdf(list(...)),
SIMPLIFY=FALSE),
lapply(
x,
Curry(split, f = ind, drop = drop)))
rn = split(rownames(x), f = ind, drop = drop)
mapply(
function(a, na) {
rownames(a) = na
delevel(a)},
y,
rn,
SIMPLIFY = FALSE)}
# Fastest (lossy) data.frame split: columns are split as plain vectors
# (factors degraded to character) and the result is transposed with t.list
# rather than rebuilt into data frames; row names are optionally preserved
# as an extra column.
split.data.frame.fastest =
function(x, ind, drop, keep.rownames)
t.list(
lapply(
if(keep.rownames) row.names.to.column(x) else x,
function(y)
split(
if(is.factor(y)) as.character(y) else y,
f = ind,
drop = drop)))
# Type-dispatching split: matrices use base split.data.frame, data frames
# use the fast or fastest variant depending on `lossy`, everything else
# uses base split. The final permutation restores first-appearance order
# of the groups (base split orders groups by factor level instead).
rmr.split =
function(x, ind, lossy, keep.rownames) {
spl =
switch(
class(x),
matrix = split.data.frame,
data.frame = {
if(lossy) Curry(split.data.frame.fastest, keep.rownames = keep.rownames)
else split.data.frame.fast},
split)
if(is.factor(x)) x = as.character(x)
y = spl(x,ind, drop = TRUE)
if (is.matrix(ind))
ind = as.data.frame(ind)
perm = NULL
# map split()'s level-ordered groups back to the order in which the
# distinct ind values first appear
perm[unlist(split(1:rmr.length(y), unique(ind), drop = TRUE))] = 1:rmr.length(y)
rmr.slice(y, perm)}
# Canonicalize a key for comparison: keep only its first element/row,
# drop row names, and sort its attributes by name so that equal keys
# compare identical regardless of attribute order.
key.normalize= function(k) {
k = rmr.slice(k, 1)
if (is.data.frame(k) || is.matrix(k))
rownames(k) = NULL
if(!is.null(attributes(k)))
attributes(k) = attributes(k)[sort(names(attributes(k)))]
k}
# Append the row names of df as a new last column and return the
# augmented data frame (the new column gets a default name, e.g. "V2").
row.names.to.column =
function(df) {
new.col = ncol(df) + 1
df[, new.col] = rownames(df)
df
}
# Convert every factor element of the list (or data.frame) x to its
# character form, leaving all other elements untouched; returns a list.
defactor =
function(x)
lapply(x, function(col) {
if (is.factor(col)) as.character(col) else col
})
# Partition a keyval pair into groups.
# - NULL values: empty keyval
# - zero-length pair: keyval of empty lists
# - NULL keys: values are chopped into chunks of roughly `size` bytes
#   (derived from object.size), keys stay NULL
# - otherwise: values are grouped by key; keys are reduced to their unique
#   representatives (lists are hashed via cksum; matrices become data
#   frames; raws become integers) and returned alongside the value groups.
split.keyval = function(kv, size, lossy = FALSE) {
k = keys(kv)
v = values(kv)
if(is.null(v))
keyval(NULL, NULL)
else {
if(length.keyval(kv) == 0)
keyval(list(), list())
else {
if(is.null(k)) {
# synthesize chunk ids so each chunk is about `size` bytes
k = ceiling((1:rmr.length(v))/(rmr.length(v) /(object.size(v)/size)))
keyval(
NULL,
unname(rmr.split(v, k, lossy = lossy, keep.rownames = TRUE)))}
else {
k = keys(kv) # TODO are these two redundant?
v = values(kv)
# grouping index: a splittable stand-in for the keys
ind = {
if(is.list(k) && !is.data.frame(k))
cksum(k)
else {
if(is.matrix(k))
as.data.frame(k)
else {
if(is.raw(k))
as.integer(k)
else
k}}}
x = k
if(!has.rows(x))
names(x) = NULL
x = unique(x)
# one representative key per group, as a list matching the value groups
x =
switch(
class(x),
list = split(x, 1:length(x)),
data.frame = if(lossy) t.list(defactor(x)) else rmr.split(x, x , FALSE, keep.rownames = FALSE),
matrix = if(lossy) t.list(as.data.frame(x)) else rmr.split(x, as.data.frame(x), FALSE, keep.rownames = FALSE),
factor = as.list(as.character(x)),
as.list(x))
keyval(x, unname(rmr.split(v, ind, lossy = lossy, keep.rownames = TRUE)))}}}}
# Inverse of split.keyval: pair each key with each value group again and
# concatenate into a single flat keyval.
unsplit.keyval = function(kv) {
c.keyval(mapply(keyval, keys(kv), values(kv), SIMPLIFY = FALSE))}
# Apply FUN(key, values) to each key group of kv. With NULL keys the data
# is chunked by split.size bytes and FUN is called as FUN(NULL, chunk);
# split.size's default errors, so it must be supplied in the keyed... no,
# NULL-key... NOTE(review): split.size is only evaluated inside
# split.keyval, i.e. lazily -- the stop() default fires only when
# split.keyval actually needs it (NULL-key path).
reduce.keyval =
function(
kv,
FUN,
split.size =
stop("Must specify key when using reduce or combine functions")) {
k = keys(kv)
kvs = split.keyval(kv, split.size)
if(is.null(k))
lapply(values(kvs), function(v) FUN(NULL,v))
else
mapply(FUN, keys(kvs), values(kvs), SIMPLIFY = FALSE)}
| /pkg/R/keyval.R | no_license | andrewzhang1/rmr2 | R | false | false | 8,320 | r | # Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
has.rows = function(x) !is.null(nrow(x))
all.have.rows = Curry(all.predicate, P = has.rows)
rmr.length = NROW
sapply.rmr.length =
function(xx)
.Call("sapply_rmr_length", xx, PACKAGE = "rmr2")
sapply.rmr.length.lossy.data.frame =
function(xx)
.Call("sapply_rmr_length_lossy_data_frame", xx, PACKAGE = "rmr2")
rmr.equal =
function(xx, y) {
if(rmr.length(xx) == 0) logical()
else {
if(is.atomic(xx) && !is.matrix(xx)) xx == y
else {
if(is.matrix(xx) || is.data.frame(xx))
rowSums(xx == y[rep.int(1, rmr.length(xx)),, drop = FALSE]) == ncol(y)
else
sapply(xx, function(x) isTRUE(all.equal(list(x), y, check.attributes = FALSE)))}}}
length.keyval =
function(kv)
max(rmr.length(keys(kv)),
rmr.length(values(kv)))
keyval =
function(key, val = NULL) {
if(missing(val)) keyval(key = NULL, val = key)
else {
if(rmr.length(key) > 0 && rmr.length(val) > 0)
return(recycle.keyval(list(key = key, val = val)))
else {
if(rmr.length(key) == 0 && rmr.length(val) == 0)
return(list(key = NULL, val = NULL))
else {
if(is.null(key) && rmr.length(val) > 0)
return(list(key = NULL, val = val))}}
rmr.str(list(key, val));
stop("invalid key val combination")}}
keys = function(kv) kv$key
values = function(kv) kv$val
is.keyval =
function(x) {
is.list(x) &&
length(x) == 2 &&
!is.null(names(x)) &&
all(names(x) == qw(key, val))}
as.keyval =
function(x) {
if(is.keyval(x)) x
else keyval(x)}
rmr.slice =
function(x, r) {
if(has.rows(x))
x[r, , drop = FALSE]
else
x[r]}
rmr.recycle =
function(args) {
index =
suppressWarnings(
do.call(cbind, lapply(args, function(x) 1:rmr.length(x))))
mapply(
rmr.slice,
args,
split(index, col(index)),
SIMPLIFY = FALSE)}
recycle.keyval = rmr.recycle
slice.keyval =
function(kv, r) {
keyval(rmr.slice(keys(kv), r),
rmr.slice(values(kv), r))}
purge.nulls =
function(x)
.Call("null_purge", x, PACKAGE = "rmr2")
rbind.anything =
function(...) {
tryCatch(
rbind(...),
error = function(e) rbind.fill.fast(...))}
lapply.as.character =
function(xx)
.Call("lapply_as_character", xx, PACKAGE = "rmr2")
are.data.frame =
function(xx)
.Call("are_data_frame", xx, PACKAGE = "rmr2")
are.matrix =
function(xx)
.Call("are_matrix", xx, PACKAGE = "rmr2")
are.factor =
function(xx)
.Call("are_factor", xx, PACKAGE = "rmr2")
c.or.rbind =
Make.single.or.multi.arg(
function(x) {
if(is.null(x))
NULL
else {
x = purge.nulls(x)
if(length(x) == 0)
NULL
else {
if(any(are.data.frame(x))) {
X = do.call(rbind.fill.fast, lapply(x, as.data.frame))
rownames(X) = make.unique(unlist(sapply(x, rownames)))
X}
else {
if(any(are.matrix(x)))
do.call(rbind,x)
else {
if(all(are.factor(x)))
as.factor(do.call(c, lapply.as.character(x)))
else
do.call(c,x)}}}}})
# Repeat the elements of x according to n (passed straight to rep(), so n
# may be a scalar repeating the whole sequence or a per-element count),
# concatenating first with c.or.rbind and then slicing out the repeats.
c.or.rbind.rep =
function(x, n) {
# seq_along() instead of the original 1:length(x): for an empty x,
# 1:length(x) evaluates to c(1, 0) and produces bogus indices, while
# seq_along(x) correctly yields integer(0).
ind = rep(seq_along(x), n)
rmr.slice(c.or.rbind(x), ind)}
sapply.length.keyval =
function(kvs)
.Call("sapply_length_keyval", kvs, PACKAGE = "rmr2")
sapply.null.keys =
function(kvs)
.Call("sapply_null_keys", kvs, PACKAGE = "rmr2")
sapply.is.list =
function(l)
.Call("sapply_is_list", l, PACKAGE = "rmr2")
lapply.values =
function(kvs)
.Call("lapply_values", kvs, PACKAGE = "rmr2")
lapply.keys =
function(kvs)
.Call("lapply_keys", kvs, PACKAGE = "rmr2")
c.keyval =
Make.single.or.multi.arg(
function(kvs) {
zero.length = as.logical(sapply.length.keyval(kvs) == 0)
null.keys = as.logical(sapply.null.keys(kvs))
if(!(all(null.keys | zero.length) || !any(null.keys & !zero.length))) {
stop("can't mix NULL and not NULL key keyval pairs")}
vv = lapply.values(kvs)
kk = lapply.keys(kvs)
keyval(c.or.rbind(kk), c.or.rbind(vv))})
split.data.frame.fast =
function(x, ind, drop) {
y =
do.call(
Curry(
mapply,
function(...)
quickdf(list(...)),
SIMPLIFY=FALSE),
lapply(
x,
Curry(split, f = ind, drop = drop)))
rn = split(rownames(x), f = ind, drop = drop)
mapply(
function(a, na) {
rownames(a) = na
delevel(a)},
y,
rn,
SIMPLIFY = FALSE)}
split.data.frame.fastest =
function(x, ind, drop, keep.rownames)
t.list(
lapply(
if(keep.rownames) row.names.to.column(x) else x,
function(y)
split(
if(is.factor(y)) as.character(y) else y,
f = ind,
drop = drop)))
rmr.split =
function(x, ind, lossy, keep.rownames) {
spl =
switch(
class(x),
matrix = split.data.frame,
data.frame = {
if(lossy) Curry(split.data.frame.fastest, keep.rownames = keep.rownames)
else split.data.frame.fast},
split)
if(is.factor(x)) x = as.character(x)
y = spl(x,ind, drop = TRUE)
if (is.matrix(ind))
ind = as.data.frame(ind)
perm = NULL
perm[unlist(split(1:rmr.length(y), unique(ind), drop = TRUE))] = 1:rmr.length(y)
rmr.slice(y, perm)}
key.normalize= function(k) {
k = rmr.slice(k, 1)
if (is.data.frame(k) || is.matrix(k))
rownames(k) = NULL
if(!is.null(attributes(k)))
attributes(k) = attributes(k)[sort(names(attributes(k)))]
k}
# Append the row names of df as a new last column and return the
# augmented data frame (the new column gets a default name, e.g. "V2").
row.names.to.column =
function(df) {
new.col = ncol(df) + 1
df[, new.col] = rownames(df)
df
}
# Convert every factor element of the list (or data.frame) x to its
# character form, leaving all other elements untouched; returns a list.
defactor =
function(x)
lapply(x, function(col) {
if (is.factor(col)) as.character(col) else col
})
split.keyval = function(kv, size, lossy = FALSE) {
k = keys(kv)
v = values(kv)
if(is.null(v))
keyval(NULL, NULL)
else {
if(length.keyval(kv) == 0)
keyval(list(), list())
else {
if(is.null(k)) {
k = ceiling((1:rmr.length(v))/(rmr.length(v) /(object.size(v)/size)))
keyval(
NULL,
unname(rmr.split(v, k, lossy = lossy, keep.rownames = TRUE)))}
else {
k = keys(kv) # TODO are these two redundant?
v = values(kv)
ind = {
if(is.list(k) && !is.data.frame(k))
cksum(k)
else {
if(is.matrix(k))
as.data.frame(k)
else {
if(is.raw(k))
as.integer(k)
else
k}}}
x = k
if(!has.rows(x))
names(x) = NULL
x = unique(x)
x =
switch(
class(x),
list = split(x, 1:length(x)),
data.frame = if(lossy) t.list(defactor(x)) else rmr.split(x, x , FALSE, keep.rownames = FALSE),
matrix = if(lossy) t.list(as.data.frame(x)) else rmr.split(x, as.data.frame(x), FALSE, keep.rownames = FALSE),
factor = as.list(as.character(x)),
as.list(x))
keyval(x, unname(rmr.split(v, ind, lossy = lossy, keep.rownames = TRUE)))}}}}
unsplit.keyval = function(kv) {
c.keyval(mapply(keyval, keys(kv), values(kv), SIMPLIFY = FALSE))}
reduce.keyval =
function(
kv,
FUN,
split.size =
stop("Must specify key when using reduce or combine functions")) {
k = keys(kv)
kvs = split.keyval(kv, split.size)
if(is.null(k))
lapply(values(kvs), function(v) FUN(NULL,v))
else
mapply(FUN, keys(kvs), values(kvs), SIMPLIFY = FALSE)}
|
\name{doublingTime}
\Rdversion{1.1}
\alias{doublingTime}
\title{
Doubling time for genome projects
}
\description{
Calculates the doubling time of genome sequencing project releases
}
\usage{
doublingTime(x, subset, time = "days", curdate=TRUE)
}
\arguments{
\item{x}{ genomes data frame with class 'genomes'}
\item{subset}{logical vector indicating rows to keep }
\item{time}{ return doubling time in days (default), months, or
years }
\item{curdate}{include the current date in the calculation; if \code{FALSE},
the range of release dates is used instead }
}
%\details{ }
\value{
the doubling time
}
%\references{}
\author{ Chris Stubben
}
%\note{ }
%\seealso{ \code{\link{help}} }
\examples{
data(proks)
doublingTime(proks)
doublingTime(proks, status == 'Contig', time='months')
}
\keyword{ methods }
| /man/doublingTime.Rd | no_license | cstubben/genomes2 | R | false | false | 820 | rd | \name{doublingTime}
\Rdversion{1.1}
\alias{doublingTime}
\title{
Doubling time for genome projects
}
\description{
Calculates the doubling time of genome sequencing project releases
}
\usage{
doublingTime(x, subset, time = "days", curdate=TRUE)
}
\arguments{
\item{x}{ genomes data frame with class 'genomes'}
\item{subset}{logical vector indicating rows to keep }
\item{time}{ return doubling time in days (default), months, or
years }
\item{curdate}{include the current date in the calculation; if \code{FALSE},
the range of release dates is used instead }
}
%\details{ }
\value{
the doubling time
}
%\references{}
\author{ Chris Stubben
}
%\note{ }
%\seealso{ \code{\link{help}} }
\examples{
data(proks)
doublingTime(proks)
doublingTime(proks, status == 'Contig', time='months')
}
\keyword{ methods }
|
#' Set all missing values to indicated value
#'
#' Quickly set all missing values to indicated value.
#' @param data input data, in \link{data.table} format only.
#' @param value a single value or a list of two values to be set to. See 'Details'.
#' @param exclude column index or name to be excluded.
#' @keywords set_missing
#' @aliases SetNaTo
#' @details The class of \code{value} will determine what type of columns to be set, e.g., if \code{value} is 0, then missing values for continuous features will be set.
#' When supplying a list of two values, only one numeric and one non-numeric is allowed.
#' @details \bold{This function updates \link{data.table} object directly.} Otherwise, output data will be returned matching input object class.
#' @import data.table
#' @export set_missing SetNaTo
#' @examples
#' # Load packages
#' library(data.table)
#'
#' # Generate missing values in iris data
#' dt <- data.table(iris)
#' for (j in 1:4) set(dt, i = sample.int(150, j * 30), j, value = NA_integer_)
#' set(dt, i = sample.int(150, 25), 5L, value = NA_character_)
#'
#' # Set all missing values to 0L and unknown
#' dt2 <- copy(dt)
#' set_missing(dt2, list(0L, "unknown"))
#'
#' # Set missing numerical values to 0L
#' dt3 <- copy(dt)
#' set_missing(dt3, 0L)
#'
#' # Set missing discrete values to unknown
#' dt4 <- copy(dt)
#' set_missing(dt4, "unknown")
#'
#' # Set missing values excluding some columns
#' dt5 <- copy(dt)
#' set_missing(dt5, 0L, 1L:2L)
#' set_missing(dt5, 0L, names(dt5)[3L:4L])
#'
#' # Return from non-data.table input
#' set_missing(airquality, 999999L)
set_missing <- function(data, value, exclude = NULL) {
if (!(length(value) %in% seq(2))) stop("Please specify one single value or a list of two values!")
## Check if input is data.table
is_data_table <- is.data.table(data)
## Detect input data class
data_class <- class(data)
## Set data to data.table
if (!is_data_table) data <- data.table(data)
# resolve `exclude` to column indices (names are matched against names(data);
# a NULL exclude falls through to an empty index vector)
if (!is.numeric(exclude)) {
exclude_ind <- which(names(data) %in% exclude)
} else {
exclude_ind <- exclude
}
if (length(value) == 1) {
# single value: its class selects which columns to touch --
# numeric value -> numeric columns, otherwise -> non-numeric columns
if (is.numeric(value)) {
col_ind <- which(sapply(data, is.numeric))
} else {
col_ind <- which(!sapply(data, is.numeric))
}
for (j in setdiff(col_ind, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
# data.table::set updates by reference, no copy
set(data, i = which(is.na(data[[j]])), j = j, value = value)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", value))
}
} else {
# two values: must be a list with exactly one numeric (for continuous
# columns) and one non-numeric (for discrete columns)
if (!is.list(value)) stop("Value must be a list of two!")
val_ind <- sapply(value, is.numeric)
if (val_ind[1] == val_ind[2]) stop("Please set one numerical value!")
val_c <- value[[which(val_ind)]]
val_d <- value[[which(!val_ind)]]
col_c <- which(sapply(data, is.numeric))
col_d <- which(!sapply(data, is.numeric))
for (j in setdiff(col_c, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
set(data, i = which(is.na(data[[j]])), j = j, value = val_c)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", val_c))
}
for (j in setdiff(col_d, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
set(data, i = which(is.na(data[[j]])), j = j, value = val_d)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", val_d))
}
}
## Set data class back to original
# non-data.table input: return a converted copy (data.table input was
# already modified in place, so nothing is returned in that case)
if (!is_data_table) {
class(data) <- data_class
return(data)
}
}
# Deprecated alias for set_missing(); kept for backward compatibility.
# Emits a deprecation warning, then delegates unchanged.
SetNaTo <- function(data, value, exclude = NULL) {
.Deprecated("set_missing")
set_missing(data = data, value = value, exclude = exclude)
}
| /R/set_missing.r | no_license | jcboost/DataExplorer | R | false | false | 3,722 | r | #' Set all missing values to indicated value
#'
#' Quickly set all missing values to indicated value.
#' @param data input data, in \link{data.table} format only.
#' @param value a single value or a list of two values to be set to. See 'Details'.
#' @param exclude column index or name to be excluded.
#' @keywords set_missing
#' @aliases SetNaTo
#' @details The class of \code{value} will determine what type of columns to be set, e.g., if \code{value} is 0, then missing values for continuous features will be set.
#' When supplying a list of two values, only one numeric and one non-numeric is allowed.
#' @details \bold{This function updates \link{data.table} object directly.} Otherwise, output data will be returned matching input object class.
#' @import data.table
#' @export set_missing SetNaTo
#' @examples
#' # Load packages
#' library(data.table)
#'
#' # Generate missing values in iris data
#' dt <- data.table(iris)
#' for (j in 1:4) set(dt, i = sample.int(150, j * 30), j, value = NA_integer_)
#' set(dt, i = sample.int(150, 25), 5L, value = NA_character_)
#'
#' # Set all missing values to 0L and unknown
#' dt2 <- copy(dt)
#' set_missing(dt2, list(0L, "unknown"))
#'
#' # Set missing numerical values to 0L
#' dt3 <- copy(dt)
#' set_missing(dt3, 0L)
#'
#' # Set missing discrete values to unknown
#' dt4 <- copy(dt)
#' set_missing(dt4, "unknown")
#'
#' # Set missing values excluding some columns
#' dt5 <- copy(dt)
#' set_missing(dt4, 0L, 1L:2L)
#' set_missing(dt4, 0L, names(dt5)[3L:4L])
#'
#' # Return from non-data.table input
#' set_missing(airquality, 999999L)
set_missing <- function(data, value, exclude = NULL) {
if (!(length(value) %in% seq(2))) stop("Please specify one single value or a list of two values!")
## Check if input is data.table
is_data_table <- is.data.table(data)
## Detect input data class
data_class <- class(data)
## Set data to data.table
if (!is_data_table) data <- data.table(data)
if (!is.numeric(exclude)) {
exclude_ind <- which(names(data) %in% exclude)
} else {
exclude_ind <- exclude
}
if (length(value) == 1) {
if (is.numeric(value)) {
col_ind <- which(sapply(data, is.numeric))
} else {
col_ind <- which(!sapply(data, is.numeric))
}
for (j in setdiff(col_ind, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
set(data, i = which(is.na(data[[j]])), j = j, value = value)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", value))
}
} else {
if (!is.list(value)) stop("Value must be a list of two!")
val_ind <- sapply(value, is.numeric)
if (val_ind[1] == val_ind[2]) stop("Please set one numerical value!")
val_c <- value[[which(val_ind)]]
val_d <- value[[which(!val_ind)]]
col_c <- which(sapply(data, is.numeric))
col_d <- which(!sapply(data, is.numeric))
for (j in setdiff(col_c, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
set(data, i = which(is.na(data[[j]])), j = j, value = val_c)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", val_c))
}
for (j in setdiff(col_d, exclude_ind)) {
num_missing <- sum(is.na(data[[j]]))
set(data, i = which(is.na(data[[j]])), j = j, value = val_d)
if (num_missing > 0) message(paste0("Column [", names(data)[j], "]: Set ", num_missing, " missing values to ", val_d))
}
}
## Set data class back to original
if (!is_data_table) {
class(data) <- data_class
return(data)
}
}
SetNaTo <- function(data, value, exclude = NULL) {
.Deprecated("set_missing")
set_missing(data = data, value = value, exclude = exclude)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abm6.R
\name{make_schedule}
\alias{make_schedule}
\title{Make schedule}
\usage{
make_schedule(time = 30, type = "base", total_days = 5, df)
}
\arguments{
\item{time}{number of days; defaults to 30}
\item{type}{"base", "On/off", "A/B", "Remote"; defaults to "base"}
\item{total_days}{number of days in school; defaults to 5}
\item{df}{data frame from make_school()}
}
\value{
d Returns a n x time data frame that indicates whether an individual is
in the school building at a particular time
}
\description{
Make a schedule of when individuals in the school community are
present/absent
}
| /1 - R package/BackToSchool/man/make_schedule.Rd | no_license | mccorvie/BackToSchool2 | R | false | true | 669 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abm6.R
\name{make_schedule}
\alias{make_schedule}
\title{Make schedule}
\usage{
make_schedule(time = 30, type = "base", total_days = 5, df)
}
\arguments{
\item{time}{number of days; defaults to 30}
\item{type}{"base", "On/off", "A/B", "Remote"; defaults to "base"}
\item{total_days}{number of days in school; defaults to 5}
\item{df}{data frame from make_school()}
}
\value{
d Returns a n x time data frame that indicates whether an individual is
in the school building at a particular time
}
\description{
Make a schedule of when individuals in the school community are
present/absent
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.