content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArmadilloConnection.R
\name{dsAssignResource,ArmadilloConnection-method}
\alias{dsAssignResource,ArmadilloConnection-method}
\title{Assign a resource}
\usage{
\S4method{dsAssignResource}{ArmadilloConnection}(conn, symbol, resource, async = TRUE)
}
\arguments{
\item{conn}{An object that inherits from \code{\link{DSConnection-class}}.}
\item{symbol}{Name of the R symbol.}
\item{resource}{Fully qualified name of a resource reference in the data
repository.}
\item{async}{Whether the result of the call should be retrieved
asynchronously.}
}
\value{
A \code{\link{ArmadilloResult-class}} object.
}
\description{
Assign a resource in the DataSHIELD R session.
}
| /man/dsAssignResource-ArmadilloConnection-method.Rd | no_license | sidohaakma/molgenis-r-datashield | R | false | true | 742 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArmadilloConnection.R
\name{dsAssignResource,ArmadilloConnection-method}
\alias{dsAssignResource,ArmadilloConnection-method}
\title{Assign a resource}
\usage{
\S4method{dsAssignResource}{ArmadilloConnection}(conn, symbol, resource, async = TRUE)
}
\arguments{
\item{conn}{An object that inherits from \code{\link{DSConnection-class}}.}
\item{symbol}{Name of the R symbol.}
\item{resource}{Fully qualified name of a resource reference in the data
repository.}
\item{async}{Whether the result of the call should be retrieved
asynchronously.}
}
\value{
A \code{\link{ArmadilloResult-class}} object.
}
\description{
Assign a resource in the DataSHIELD R session.
}
|
bandwidth <- function(wave, min_freq=1000, plot = FALSE, method = "quartile") {
if (method == "quartile") {
a <- orthophonia::frequencySpectrumPowerQuartiles(wave, min_freq, plot)
if (is.null(a)) {
return(NULL)
}
return (a[[2]] - a[[1]])
}
} | /R/analysis.R | no_license | orthoptera-aao/orthophonia | R | false | false | 267 | r | bandwidth <- function(wave, min_freq=1000, plot = FALSE, method = "quartile") {
if (method == "quartile") {
a <- orthophonia::frequencySpectrumPowerQuartiles(wave, min_freq, plot)
if (is.null(a)) {
return(NULL)
}
return (a[[2]] - a[[1]])
}
} |
# ALADYM Age length based dynamic model - version 12.3
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# ALADYM is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Function to reload the values for the production according to the
# seed value
# ------------------------------------------------------------------------------
#
reload_EMPTY_VESSELS_fore_table<- function(w) {
# fleet.GT_fore
VESSELS_fore <<- list()
VESSELS_foreIndex <<- 0
VESSELS_fore.model <<- gtkListStoreNew("gchararray", rep("gdouble", 12), "gboolean")
vess_matrix <- data.frame(matrix(0, nrow=length(years_forecast), ncol=13))
colnames(vess_matrix) <- c("year",MONTHS)
vess_matrix$year <- years_forecast
for (r in 1:nrow(vess_matrix)) {
vess_temp <- as.list(vess_matrix[r,])
VESSELS_fore <<- c(VESSELS_fore, list(vess_temp))
}
fleet.VESSELS_fore <<- VESSELS_fore
for (i in 1:length(VESSELS_fore)) {
iter <- VESSELS_fore.model$append()$iter
VESSELS_fore.model$set(iter,0, VESSELS_fore[[i]]$year)
for (e in 1:length(MONTHS)) {
VESSELS_fore.model$set(iter, e, as.double(VESSELS_fore[[i]][e+1]))
}
VESSELS_fore.model$set(iter,13,TRUE)
}
VESSELS_fore.treeview$destroy()
VESSELS_fore.treeview <<- gtkTreeViewNewWithModel(VESSELS_fore.model)
VESSELS_fore.treeview$setRulesHint(TRUE)
VESSELS_fore.treeview$getSelection()$setMode("single")
VESSELS_fore.add_columns(VESSELS_fore.treeview)
VESSELS_fore.sw$add(VESSELS_fore.treeview)
}
reload_VESSELS_fore_table<- function(w) {
# fleet.GT_fore
VESSELS_fore <<- list()
VESSELS_foreIndex <<- 0
VESSELS_fore.model <<- gtkListStoreNew("gchararray", rep("gdouble", 12), "gboolean")
vess_matrix <- fleet.VESSELS_fore
for (r in 1:nrow(vess_matrix)) {
vess_temp <- as.list(vess_matrix[r,])
VESSELS_fore <<- c(VESSELS_fore, list(vess_temp))
}
for (i in 1:length(VESSELS_fore)) {
iter <- VESSELS_fore.model$append()$iter
VESSELS_fore.model$set(iter,0, VESSELS_fore[[i]]$year)
for (e in 1:length(MONTHS)) {
VESSELS_fore.model$set(iter, e, as.double(VESSELS_fore[[i]][e+1]))
}
VESSELS_fore.model$set(iter,13,TRUE)
}
VESSELS_fore.treeview$destroy()
VESSELS_fore.treeview <<- gtkTreeViewNewWithModel(VESSELS_fore.model)
VESSELS_fore.treeview$setRulesHint(TRUE)
VESSELS_fore.treeview$getSelection()$setMode("single")
VESSELS_fore.add_columns(VESSELS_fore.treeview)
VESSELS_fore.sw$add(VESSELS_fore.treeview)
}
| /BEMTOOL-ver2.5-2018_0901/src/biol/bmtALADYM/ALADYM-ver12.3-2017_0501/gui/forecast/vesselsFun_fore/reload_VESSELS_fore_table.r | no_license | gresci/BEMTOOL2.5 | R | false | false | 3,009 | r | # ALADYM Age length based dynamic model - version 12.3
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# ALADYM is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Function to reload the values for the production according to the
# seed value
# ------------------------------------------------------------------------------
#
reload_EMPTY_VESSELS_fore_table<- function(w) {
# fleet.GT_fore
VESSELS_fore <<- list()
VESSELS_foreIndex <<- 0
VESSELS_fore.model <<- gtkListStoreNew("gchararray", rep("gdouble", 12), "gboolean")
vess_matrix <- data.frame(matrix(0, nrow=length(years_forecast), ncol=13))
colnames(vess_matrix) <- c("year",MONTHS)
vess_matrix$year <- years_forecast
for (r in 1:nrow(vess_matrix)) {
vess_temp <- as.list(vess_matrix[r,])
VESSELS_fore <<- c(VESSELS_fore, list(vess_temp))
}
fleet.VESSELS_fore <<- VESSELS_fore
for (i in 1:length(VESSELS_fore)) {
iter <- VESSELS_fore.model$append()$iter
VESSELS_fore.model$set(iter,0, VESSELS_fore[[i]]$year)
for (e in 1:length(MONTHS)) {
VESSELS_fore.model$set(iter, e, as.double(VESSELS_fore[[i]][e+1]))
}
VESSELS_fore.model$set(iter,13,TRUE)
}
VESSELS_fore.treeview$destroy()
VESSELS_fore.treeview <<- gtkTreeViewNewWithModel(VESSELS_fore.model)
VESSELS_fore.treeview$setRulesHint(TRUE)
VESSELS_fore.treeview$getSelection()$setMode("single")
VESSELS_fore.add_columns(VESSELS_fore.treeview)
VESSELS_fore.sw$add(VESSELS_fore.treeview)
}
reload_VESSELS_fore_table<- function(w) {
# fleet.GT_fore
VESSELS_fore <<- list()
VESSELS_foreIndex <<- 0
VESSELS_fore.model <<- gtkListStoreNew("gchararray", rep("gdouble", 12), "gboolean")
vess_matrix <- fleet.VESSELS_fore
for (r in 1:nrow(vess_matrix)) {
vess_temp <- as.list(vess_matrix[r,])
VESSELS_fore <<- c(VESSELS_fore, list(vess_temp))
}
for (i in 1:length(VESSELS_fore)) {
iter <- VESSELS_fore.model$append()$iter
VESSELS_fore.model$set(iter,0, VESSELS_fore[[i]]$year)
for (e in 1:length(MONTHS)) {
VESSELS_fore.model$set(iter, e, as.double(VESSELS_fore[[i]][e+1]))
}
VESSELS_fore.model$set(iter,13,TRUE)
}
VESSELS_fore.treeview$destroy()
VESSELS_fore.treeview <<- gtkTreeViewNewWithModel(VESSELS_fore.model)
VESSELS_fore.treeview$setRulesHint(TRUE)
VESSELS_fore.treeview$getSelection()$setMode("single")
VESSELS_fore.add_columns(VESSELS_fore.treeview)
VESSELS_fore.sw$add(VESSELS_fore.treeview)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/online_gst_detail_similarity.R
\name{similarity_geneset}
\alias{similarity_geneset}
\title{Query similarity gene sets}
\usage{
similarity_geneset(geneSetName)
}
\arguments{
\item{geneSetName}{one gene set name}
}
\value{
similarity gene sets
}
\description{
Query similarity gene sets
}
\examples{
\donttest{
x <- similarity_geneset('REACTOME_DEGRADATION_OF_AXIN')
x |>
msig_view()
}
}
| /man/similarity_geneset.Rd | no_license | cran/msig | R | false | true | 469 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/online_gst_detail_similarity.R
\name{similarity_geneset}
\alias{similarity_geneset}
\title{Query similarity gene sets}
\usage{
similarity_geneset(geneSetName)
}
\arguments{
\item{geneSetName}{one gene set name}
}
\value{
similarity gene sets
}
\description{
Query similarity gene sets
}
\examples{
\donttest{
x <- similarity_geneset('REACTOME_DEGRADATION_OF_AXIN')
x |>
msig_view()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interactionGraphs.R
\name{ig}
\alias{ig}
\title{Constructs Interaction Graph (S3 class)}
\usage{
ig(n, e)
}
\arguments{
\item{n}{ig.nodes (\code{a list of igNode objects})}
\item{e}{ig.edges (\code{a list of igEdge objects})}
}
\value{
An instance of the \code{ig} class
}
\description{
Constructs Interaction Graph (S3 class)
}
| /man/ig.Rd | no_license | peleplay/integr | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interactionGraphs.R
\name{ig}
\alias{ig}
\title{Constructs Interaction Graph (S3 class)}
\usage{
ig(n, e)
}
\arguments{
\item{n}{ig.nodes (\code{a list of igNode objects})}
\item{e}{ig.edges (\code{a list of igEdge objects})}
}
\value{
An instance of the \code{ig} class
}
\description{
Constructs Interaction Graph (S3 class)
}
|
\name{bootstrap.effectivemass}
\alias{bootstrap.effectivemass}
\title{Computes effective masses with bootstrapping errors}
\description{
Generates bootstrap samples for effective mass values computed from an
object of class \code{cf} (a correlation function)
}
\usage{
bootstrap.effectivemass(cf, type="solve", weight.factor=1)
}
\arguments{
\item{cf}{
a correlation function as an object of type \code{cf}, preferably
after a call to \code{\link{bootstrap.cf}}. If the latter has not
been called yet, it will be called in this function.
}
\item{type}{
The function to be used to compute the effective mass
values. Possibilities are "acosh", "solve", "log", "temporal",
"shifted" and "weighted". While the first three assume normal
cosh behaviour of
the correlation function, "temporal" is desigend to remove an
additional constant stemming from temporal states in two particle
correlation functions. The same for "shifted" and "weighted", the
latter for the case of two particle energies with the two particle
having different energies. In the latter case only the leading
polution is removed by \code{removeTemporal.cf} and taken into
account here.
}
\item{weight.factor}{
relative weight for type "weighted" only, see details
}
}
\value{
An object of class \code{effectivemass} is invisibly returned. It has
objects:
\code{effMass}:\cr
The computed effective mass values as a vector of length
\code{Time/2}. For \code{type="acosh"} also the first value is
\code{NA}, because this definition requires three time slices.
\code{deffMass}:\cr
The computed bootstrap errors for the effective masses of the same
length as \code{effMass}.
\code{effMass.tsboot}:\cr
The boostrap samples of the effective masses as an array of dimension
RxN, where \code{R=boot.R} is the number of bootstrap samples and
\code{N=(Time/2+1)}.
and \code{boot.R},
\code{boot.l}, \code{Time}
}
\details{
A number of types is implemented to compute effective mass values from
the correlation function:
"solve": the ratio\cr
\eqn{C(t+1) / C(t) = \cosh(-m*(t+1)) / \cosh(-m*t)}\cr
is numerically solved for m.
"acosh": the effective mass is computed from\cr
\eqn{m=acosh((C(t-1)+C(t+1)) / (2C(t)))}\cr
Note that this definition is less tolerant against noise.
"log": the effective mass is defined via\cr
\eqn{m=\log(C(t) / C(t+1))}\cr
which has artifacts of the periodicity at large t-values.
"temporal": the ratio\cr
\eqn{[C(t)-C(t+1)] / [C(t-1)-C(t)] = [\cosh(-m*(t))-\cosh(-m*(t+1))] / [\cosh(-m*(t-1))-\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)}.
"shifted": like "temporal", but the differences \eqn{C(t)-C(t+1)}
are assumed to be taken already at the correlator matrix level using
\code{removeTemporal.cf} and hence the ratio\cr
\eqn{[C(t+1)] / [C(t)] = [\cosh(-m*(t))-\cosh(-m*(t+1))] /
[\cosh(-m*(t-1))-\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)}.
"weighted": like "shifted", but now there is an additional weight
factor \eqn{w} from \code{removeTemporal.cf} to be taken into account,
such that the ratio\cr
\eqn{[C(t+1)] / [C(t)] = [\cosh(-m*(t))-w*\cosh(-m*(t+1))] /
[\cosh(-m*(t-1))-w*\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)} with \eqn{w} as input.
}
\references{
arXiv:1203.6041
}
\seealso{
\code{\link{fit.effectivemass}}, \code{\link{bootstrap.cf}},
\code{removeTemporal.cf}
}
\examples{
data(samplecf)
samplecf <- bootstrap.cf(cf=samplecf, boot.R=1500, boot.l=2, seed=1442556)
effmass <- bootstrap.effectivemass(cf=samplecf)
summary(effmass)
plot(effmass, ylim=c(0.14,0.15))
}
\author{Carsten Urbach, \email{curbach@gmx.de}}
| /man/bootstrap.effectivemass.Rd | no_license | pittlerf/hadron | R | false | false | 3,731 | rd | \name{bootstrap.effectivemass}
\alias{bootstrap.effectivemass}
\title{Computes effective masses with bootstrapping errors}
\description{
Generates bootstrap samples for effective mass values computed from an
object of class \code{cf} (a correlation function)
}
\usage{
bootstrap.effectivemass(cf, type="solve", weight.factor=1)
}
\arguments{
\item{cf}{
a correlation function as an object of type \code{cf}, preferably
after a call to \code{\link{bootstrap.cf}}. If the latter has not
been called yet, it will be called in this function.
}
\item{type}{
The function to be used to compute the effective mass
values. Possibilities are "acosh", "solve", "log", "temporal",
"shifted" and "weighted". While the first three assume normal
cosh behaviour of
the correlation function, "temporal" is desigend to remove an
additional constant stemming from temporal states in two particle
correlation functions. The same for "shifted" and "weighted", the
latter for the case of two particle energies with the two particle
having different energies. In the latter case only the leading
polution is removed by \code{removeTemporal.cf} and taken into
account here.
}
\item{weight.factor}{
relative weight for type "weighted" only, see details
}
}
\value{
An object of class \code{effectivemass} is invisibly returned. It has
objects:
\code{effMass}:\cr
The computed effective mass values as a vector of length
\code{Time/2}. For \code{type="acosh"} also the first value is
\code{NA}, because this definition requires three time slices.
\code{deffMass}:\cr
The computed bootstrap errors for the effective masses of the same
length as \code{effMass}.
\code{effMass.tsboot}:\cr
The boostrap samples of the effective masses as an array of dimension
RxN, where \code{R=boot.R} is the number of bootstrap samples and
\code{N=(Time/2+1)}.
and \code{boot.R},
\code{boot.l}, \code{Time}
}
\details{
A number of types is implemented to compute effective mass values from
the correlation function:
"solve": the ratio\cr
\eqn{C(t+1) / C(t) = \cosh(-m*(t+1)) / \cosh(-m*t)}\cr
is numerically solved for m.
"acosh": the effective mass is computed from\cr
\eqn{m=acosh((C(t-1)+C(t+1)) / (2C(t)))}\cr
Note that this definition is less tolerant against noise.
"log": the effective mass is defined via\cr
\eqn{m=\log(C(t) / C(t+1))}\cr
which has artifacts of the periodicity at large t-values.
"temporal": the ratio\cr
\eqn{[C(t)-C(t+1)] / [C(t-1)-C(t)] = [\cosh(-m*(t))-\cosh(-m*(t+1))] / [\cosh(-m*(t-1))-\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)}.
"shifted": like "temporal", but the differences \eqn{C(t)-C(t+1)}
are assumed to be taken already at the correlator matrix level using
\code{removeTemporal.cf} and hence the ratio\cr
\eqn{[C(t+1)] / [C(t)] = [\cosh(-m*(t))-\cosh(-m*(t+1))] /
[\cosh(-m*(t-1))-\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)}.
"weighted": like "shifted", but now there is an additional weight
factor \eqn{w} from \code{removeTemporal.cf} to be taken into account,
such that the ratio\cr
\eqn{[C(t+1)] / [C(t)] = [\cosh(-m*(t))-w*\cosh(-m*(t+1))] /
[\cosh(-m*(t-1))-w*\cosh(-m(t))]}\cr
is numerically solved for \eqn{m(t)} with \eqn{w} as input.
}
\references{
arXiv:1203.6041
}
\seealso{
\code{\link{fit.effectivemass}}, \code{\link{bootstrap.cf}},
\code{removeTemporal.cf}
}
\examples{
data(samplecf)
samplecf <- bootstrap.cf(cf=samplecf, boot.R=1500, boot.l=2, seed=1442556)
effmass <- bootstrap.effectivemass(cf=samplecf)
summary(effmass)
plot(effmass, ylim=c(0.14,0.15))
}
\author{Carsten Urbach, \email{curbach@gmx.de}}
|
test_that("countOrSum() works", {
x <- data.frame(
Group = rep(c("A", "B", "C"), 4),
Even = rep(c(FALSE, TRUE), 6),
Value = seq_len(12)
)
y_1 <- countOrSum(x, "Group")
y_2 <- countOrSum(x, c("Group", "Even"))
y_3 <- countOrSum(x, "Group", sum.up = "Value")
y_4 <- countOrSum(x, c("Group", "Even"), sum.up = "Value")
expect_true(all(y_1 == 4))
expect_identical(dim(y_1), 3L)
expect_identical(dim(y_2), c(3L, 2L))
expect_identical(dim(y_3), 3L)
expect_identical(dim(y_4), c(3L, 2L))
n <- nrow(x)
expect_identical(sum(y_1), n)
expect_identical(sum(y_2), n)
S <- sum(x$Value)
expect_identical(sum(y_3), S)
expect_identical(sum(y_4), S)
})
| /tests/testthat/test-function-countOrSum.R | permissive | KWB-R/kwb.utils | R | false | false | 696 | r | test_that("countOrSum() works", {
x <- data.frame(
Group = rep(c("A", "B", "C"), 4),
Even = rep(c(FALSE, TRUE), 6),
Value = seq_len(12)
)
y_1 <- countOrSum(x, "Group")
y_2 <- countOrSum(x, c("Group", "Even"))
y_3 <- countOrSum(x, "Group", sum.up = "Value")
y_4 <- countOrSum(x, c("Group", "Even"), sum.up = "Value")
expect_true(all(y_1 == 4))
expect_identical(dim(y_1), 3L)
expect_identical(dim(y_2), c(3L, 2L))
expect_identical(dim(y_3), 3L)
expect_identical(dim(y_4), c(3L, 2L))
n <- nrow(x)
expect_identical(sum(y_1), n)
expect_identical(sum(y_2), n)
S <- sum(x$Value)
expect_identical(sum(y_3), S)
expect_identical(sum(y_4), S)
})
|
#' Get/Set Cluster Names by Marker Gene Expression
#'
#' \code{get.cluster.names} uses predefined marker genes to assign clusters with
#' putative cell type or state labels.
#'
#' @param environment \code{environment} object
#' @param types data frame associating cell type or state with marker genes
#' @param min.fold minimum fold change to consider a marker as overexpressed
#' @param max.Qval maximum FDR q value to consider a marker as overexpressed
#' @param print whether to print output calculations
#' @return \code{get.cluster.names} returns a vector containing assigned cluster
#' name labels
#' @export
#' @examples
#' \donttest{
#' LCMV1 <- setup_LCMV_example()
#' LCMV1 <- get.variable.genes(LCMV1, min.mean = 0.1, min.frac.cells = 0,
#' min.dispersion.scaled = 0.1)
#' LCMV1 <- PCA(LCMV1)
#' LCMV1 <- cluster.analysis(LCMV1)
#' types = rbind(
#' data.frame(type='Tfh',gene=c('Tcf7','Cxcr5','Bcl6')),
#' data.frame(type='Th1',gene=c('Cxcr6','Ifng','Tbx21')),
#' data.frame(type='Tcmp',gene=c('Ccr7','Bcl2','Tcf7')),
#' data.frame(type='Treg',gene=c('Foxp3','Il2ra')),
#' data.frame(type='Tmem',gene=c('Il7r','Ccr7')),
#' data.frame(type='CD8',gene=c('Cd8a')),
#' data.frame(type='CD4', gene = c("Cd4")),
#' data.frame(type='Cycle',gene=c('Mki67','Top2a','Birc5'))
#' )
#' summarize(LCMV1)
#' cluster_names <- get.cluster.names(LCMV1, types, min.fold = 1.0, max.Qval = 0.01)
#' LCMV1 <- set.cluster.names(LCMV1, names = cluster_names)
#' }
get.cluster.names <- function(environment, types, min.fold = 1.25, max.Qval = 0.1,
print = T) {
precomputed <- readRDS(file.path(environment$res.data.path, paste("main", "all",
"diff.exp.rds", sep = ".")))
limma.all <- precomputed$limma.all
if (print)
print(summary(limma.all))
diff.exp <- limma.all[limma.all$fold > min.fold & limma.all$QValue < max.Qval,
]
if (print)
print(summary(diff.exp))
cluster <- 1
cluster.names <- array("Unknown", environment$clustering$nclusters)
for (cluster in seq(environment$clustering$nclusters)) {
cluster.diff <- diff.exp[diff.exp$cluster == cluster, ]
cluster.name <- get.cluster.names.with.diff(cluster.diff, types, print)
if (print)
print(cluster.name)
if (!(length(cluster.names) == 1 && is.na(cluster.name)))
cluster.names[cluster] <- paste(cluster.name, collapse = "_")
}
cluster.names
for (name in unique(cluster.names)) {
match <- cluster.names == name
if (sum(match) > 1)
cluster.names[match] <- paste(name, seq(sum(match)), sep = "_")
}
return(cluster.names)
}
get.cluster.names.with.diff <- function(cluster.diff, types, print) {
types$gene <- as.vector(types$gene)
minimum.genes.to.qualify <- table(types$type)/2
expression <- cbind(types, cluster.diff[match(types$gene, cluster.diff$gene),
])
if (print)
print(expression[!is.na(expression$fold), ])
table <- sort(table(expression$type[!is.na(expression$fold)]) - minimum.genes.to.qualify,
decreasing = T)
if (print)
print(table)
if (sum(table > 0) == 0)
return("Unknown")
table <- table[table > 0]
cluster.name <- names(table)
return(cluster.name)
}
#' Set Cluster Names in Environment
#'
#' \code{set.cluster.names} saves the cluster names in storage and in the \code{environment} object
#'
#' @param names cluster names defined in get.cluster.names
#' @return \code{set.cluster.names} returns an \code{environment} object coded
#' with cluster names
#' @export
#' @describeIn get.cluster.names set annotations to clusters
set.cluster.names <- function(environment, names) {
cluster.name.map <- data.frame(id = seq(length(names)), name = names)
environment$cluster.names <- cluster.names <- names[environment$clustering$membership]
saveRDS(list(cluster.names = cluster.names, cluster.name.map = cluster.name.map),
file = file.path(environment$res.data.path, "cluster.names.rds"))
utils::write.csv(cluster.name.map, file = file.path(environment$work.path, "cluster.name.map.csv"))
print(table(environment$cluster.names))
return(environment)
}
load.cluster.names <- function(environment) {
precomputed <- readRDS(file.path(environment$res.data.path, "cluster.names.rds"))
environment$cluster.names <- precomputed$cluster.names
print(table(environment$cluster.names))
return(environment)
}
remove.cluster.names <- function(environment) {
environment$cluster.names <- environment$clustering$membership
return(environment)
}
#' Remove selected clusters
#'
#' Remove selected clusters from the environment object.
#'
#' @param environment The \code{environment} object
#' @param remove.clusters A character vector of the clusters to be removed
#' @return An environment object with selected clusters removed
#' @export
#' @examples
#' LCMV1 <- setup_LCMV_example()
#' LCMV1 <- filter_cluster_data(LCMV1, "1")
filter_cluster_data <- function(environment, remove.clusters) {
membership <- as.vector(environment$clustering$membership)
keep <- !membership %in% remove.clusters
filter.data(environment, keep)
}
filter.data <- function(environment, keep) {
data.file <- file.path(environment$baseline.data.path, "data.rds")
precomputed <- readRDS(data.file)
genes.filter <- precomputed$genes.filter
counts <- precomputed$counts
normalized <- precomputed$normalized
dataset.labels <- precomputed$dataset.labels
origins <- precomputed$origins
experiments <- precomputed$experiments
rm(precomputed)
file.rename(data.file, paste(data.file, format(Sys.time(), "%a_%b_%e_%Y__%H_%M_%S"),
sep = "---"))
counts <- counts[, keep]
genes.filter <- genes.filter & apply(counts, 1, stats::var) > 0
normalized <- normalized[, keep]
dataset.labels <- dataset.labels[keep]
origins <- origins[keep]
experiments <- experiments[keep]
unlink(environment$baseline.data.path, recursive = T, force = T)
dir.create(environment$baseline.data.path)
cache <- file.path(environment$baseline.data.path, "data.rds")
saveRDS(list(genes.filter = genes.filter, counts = counts, normalized = normalized,
dataset.labels = dataset.labels, origins = origins, experiments = experiments),
file = cache)
file.rename(environment$work.path, paste(environment$work.path, "pre.filter",
format(Sys.time(), "%a_%b_%e_%Y__%H_%M_%S"), sep = "_"))
}
filter.robust.clusters <- function(environment, robust.clusters) {
precomputed <- readRDS(file.path(environment$baseline.data.path, "preclustered.datasets.rds"))
genes.filter <- precomputed$genes.filter
counts <- precomputed$counts
normalized <- precomputed$normalized
dataset.labels <- precomputed$dataset.labels
origins <- precomputed$origins
experiments <- precomputed$experiments
HVG <- precomputed$HVG
clustering <- precomputed$clustering
merged.diff.exp <- precomputed$merged.diff.exp
merged.original.clustering <- precomputed$merged.original.clustering
rm(precomputed)
membership <- as.vector(environment$clustering$membership)
keep <- membership %in% robust.clusters
counts <- counts[, keep]
normalized <- normalized[, keep]
genes.filter <- genes.filter & apply(counts, 1, stats::var) > 0
dataset.labels <- dataset.labels[keep]
origins <- origins[keep]
experiments <- experiments[keep]
HVG <- NA
clustering <- clustering[keep]
merged.original.clustering <- merged.original.clustering[keep]
merged.diff.exp <- NA
dir <- dirname(environment$work.path)
new.dir <- file.path(dirname(dir), paste("filtered", basename(dir), sep = "_"),
"data")
dir.create(new.dir, recursive = T)
saveRDS(list(genes.filter = genes.filter, counts = counts, normalized = normalized,
dataset.labels = dataset.labels, origins = origins, experiments = experiments,
HVG = HVG, clustering = clustering, merged.diff.exp = merged.diff.exp, merged.original.clustering = merged.original.clustering),
file = file.path(new.dir, "preclustered.datasets.rds"))
}
| /R/clustering.R | permissive | asmagen/robustSingleCell | R | false | false | 8,176 | r |
#' Get/Set Cluster Names by Marker Gene Expression
#'
#' \code{get.cluster.names} uses predefined marker genes to assign clusters with
#' putative cell type or state labels.
#'
#' @param environment \code{environment} object
#' @param types data frame associating cell type or state with marker genes
#' @param min.fold minimum fold change to consider a marker as overexpressed
#' @param max.Qval maximum FDR q value to consider a marker as overexpressed
#' @param print whether to print output calculations
#' @return \code{get.cluster.names} returns a vector containing assigned cluster
#' name labels
#' @export
#' @examples
#' \donttest{
#' LCMV1 <- setup_LCMV_example()
#' LCMV1 <- get.variable.genes(LCMV1, min.mean = 0.1, min.frac.cells = 0,
#' min.dispersion.scaled = 0.1)
#' LCMV1 <- PCA(LCMV1)
#' LCMV1 <- cluster.analysis(LCMV1)
#' types = rbind(
#' data.frame(type='Tfh',gene=c('Tcf7','Cxcr5','Bcl6')),
#' data.frame(type='Th1',gene=c('Cxcr6','Ifng','Tbx21')),
#' data.frame(type='Tcmp',gene=c('Ccr7','Bcl2','Tcf7')),
#' data.frame(type='Treg',gene=c('Foxp3','Il2ra')),
#' data.frame(type='Tmem',gene=c('Il7r','Ccr7')),
#' data.frame(type='CD8',gene=c('Cd8a')),
#' data.frame(type='CD4', gene = c("Cd4")),
#' data.frame(type='Cycle',gene=c('Mki67','Top2a','Birc5'))
#' )
#' summarize(LCMV1)
#' cluster_names <- get.cluster.names(LCMV1, types, min.fold = 1.0, max.Qval = 0.01)
#' LCMV1 <- set.cluster.names(LCMV1, names = cluster_names)
#' }
get.cluster.names <- function(environment, types, min.fold = 1.25, max.Qval = 0.1,
print = T) {
precomputed <- readRDS(file.path(environment$res.data.path, paste("main", "all",
"diff.exp.rds", sep = ".")))
limma.all <- precomputed$limma.all
if (print)
print(summary(limma.all))
diff.exp <- limma.all[limma.all$fold > min.fold & limma.all$QValue < max.Qval,
]
if (print)
print(summary(diff.exp))
cluster <- 1
cluster.names <- array("Unknown", environment$clustering$nclusters)
for (cluster in seq(environment$clustering$nclusters)) {
cluster.diff <- diff.exp[diff.exp$cluster == cluster, ]
cluster.name <- get.cluster.names.with.diff(cluster.diff, types, print)
if (print)
print(cluster.name)
if (!(length(cluster.names) == 1 && is.na(cluster.name)))
cluster.names[cluster] <- paste(cluster.name, collapse = "_")
}
cluster.names
for (name in unique(cluster.names)) {
match <- cluster.names == name
if (sum(match) > 1)
cluster.names[match] <- paste(name, seq(sum(match)), sep = "_")
}
return(cluster.names)
}
get.cluster.names.with.diff <- function(cluster.diff, types, print) {
types$gene <- as.vector(types$gene)
minimum.genes.to.qualify <- table(types$type)/2
expression <- cbind(types, cluster.diff[match(types$gene, cluster.diff$gene),
])
if (print)
print(expression[!is.na(expression$fold), ])
table <- sort(table(expression$type[!is.na(expression$fold)]) - minimum.genes.to.qualify,
decreasing = T)
if (print)
print(table)
if (sum(table > 0) == 0)
return("Unknown")
table <- table[table > 0]
cluster.name <- names(table)
return(cluster.name)
}
#' Set Cluster Names in Environment
#'
#' \code{set.cluster.names} saves the cluster names in storage and in the \code{environment} object
#'
#' @param names cluster names defined in get.cluster.names
#' @return \code{set.cluster.names} returns an \code{environment} object coded
#' with cluster names
#' @export
#' @describeIn get.cluster.names set annotations to clusters
set.cluster.names <- function(environment, names) {
cluster.name.map <- data.frame(id = seq(length(names)), name = names)
environment$cluster.names <- cluster.names <- names[environment$clustering$membership]
saveRDS(list(cluster.names = cluster.names, cluster.name.map = cluster.name.map),
file = file.path(environment$res.data.path, "cluster.names.rds"))
utils::write.csv(cluster.name.map, file = file.path(environment$work.path, "cluster.name.map.csv"))
print(table(environment$cluster.names))
return(environment)
}
load.cluster.names <- function(environment) {
precomputed <- readRDS(file.path(environment$res.data.path, "cluster.names.rds"))
environment$cluster.names <- precomputed$cluster.names
print(table(environment$cluster.names))
return(environment)
}
remove.cluster.names <- function(environment) {
environment$cluster.names <- environment$clustering$membership
return(environment)
}
#' Remove selected clusters
#'
#' Remove selected clusters from the environment object.
#'
#' @param environment The \code{environment} object
#' @param remove.clusters A character vector of the clusters to be removed
#' @return An environment object with selected clusters removed
#' @export
#' @examples
#' LCMV1 <- setup_LCMV_example()
#' LCMV1 <- filter_cluster_data(LCMV1, "1")
filter_cluster_data <- function(environment, remove.clusters) {
membership <- as.vector(environment$clustering$membership)
keep <- !membership %in% remove.clusters
filter.data(environment, keep)
}
filter.data <- function(environment, keep) {
data.file <- file.path(environment$baseline.data.path, "data.rds")
precomputed <- readRDS(data.file)
genes.filter <- precomputed$genes.filter
counts <- precomputed$counts
normalized <- precomputed$normalized
dataset.labels <- precomputed$dataset.labels
origins <- precomputed$origins
experiments <- precomputed$experiments
rm(precomputed)
file.rename(data.file, paste(data.file, format(Sys.time(), "%a_%b_%e_%Y__%H_%M_%S"),
sep = "---"))
counts <- counts[, keep]
genes.filter <- genes.filter & apply(counts, 1, stats::var) > 0
normalized <- normalized[, keep]
dataset.labels <- dataset.labels[keep]
origins <- origins[keep]
experiments <- experiments[keep]
unlink(environment$baseline.data.path, recursive = T, force = T)
dir.create(environment$baseline.data.path)
cache <- file.path(environment$baseline.data.path, "data.rds")
saveRDS(list(genes.filter = genes.filter, counts = counts, normalized = normalized,
dataset.labels = dataset.labels, origins = origins, experiments = experiments),
file = cache)
file.rename(environment$work.path, paste(environment$work.path, "pre.filter",
format(Sys.time(), "%a_%b_%e_%Y__%H_%M_%S"), sep = "_"))
}
filter.robust.clusters <- function(environment, robust.clusters) {
precomputed <- readRDS(file.path(environment$baseline.data.path, "preclustered.datasets.rds"))
genes.filter <- precomputed$genes.filter
counts <- precomputed$counts
normalized <- precomputed$normalized
dataset.labels <- precomputed$dataset.labels
origins <- precomputed$origins
experiments <- precomputed$experiments
HVG <- precomputed$HVG
clustering <- precomputed$clustering
merged.diff.exp <- precomputed$merged.diff.exp
merged.original.clustering <- precomputed$merged.original.clustering
rm(precomputed)
membership <- as.vector(environment$clustering$membership)
keep <- membership %in% robust.clusters
counts <- counts[, keep]
normalized <- normalized[, keep]
genes.filter <- genes.filter & apply(counts, 1, stats::var) > 0
dataset.labels <- dataset.labels[keep]
origins <- origins[keep]
experiments <- experiments[keep]
HVG <- NA
clustering <- clustering[keep]
merged.original.clustering <- merged.original.clustering[keep]
merged.diff.exp <- NA
dir <- dirname(environment$work.path)
new.dir <- file.path(dirname(dir), paste("filtered", basename(dir), sep = "_"),
"data")
dir.create(new.dir, recursive = T)
saveRDS(list(genes.filter = genes.filter, counts = counts, normalized = normalized,
dataset.labels = dataset.labels, origins = origins, experiments = experiments,
HVG = HVG, clustering = clustering, merged.diff.exp = merged.diff.exp, merged.original.clustering = merged.original.clustering),
file = file.path(new.dir, "preclustered.datasets.rds"))
}
|
context("Gene expression normalization")
## Generate gene expression
nb.genes = 100
nb.cells = 10
mat = matrix(rpois(nb.cells*nb.genes, sample.int(3, nb.cells, TRUE)),
nb.genes, nb.cells, byrow=TRUE)
colnames(mat) = paste0('barcode', 1:nb.cells)
tot.raw = colSums(mat)
df = data.frame(symbol=paste0('gene', 1:nb.genes), stringsAsFactors=FALSE)
df = cbind(df, mat)
test_that("normalize using the total method", {
norm.df = norm_ge(df, method='total')
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
expect_true(all(abs(tot.norm-tot.norm[1])<.0000001))
})
test_that("normalize using the tmm method", {
norm.df = norm_ge(df, method='tmm')
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
})
test_that("normalize using the tmm method in parallel", {
norm.df = norm_ge(df, nb_cores=2)
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
})
| /tests/testthat/test_norm.R | permissive | jmonlong/scCNAutils | R | false | false | 969 | r | context("Gene expression normalization")
## Generate gene expression
nb.genes = 100
nb.cells = 10
mat = matrix(rpois(nb.cells*nb.genes, sample.int(3, nb.cells, TRUE)),
nb.genes, nb.cells, byrow=TRUE)
colnames(mat) = paste0('barcode', 1:nb.cells)
tot.raw = colSums(mat)
df = data.frame(symbol=paste0('gene', 1:nb.genes), stringsAsFactors=FALSE)
df = cbind(df, mat)
test_that("normalize using the total method", {
norm.df = norm_ge(df, method='total')
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
expect_true(all(abs(tot.norm-tot.norm[1])<.0000001))
})
test_that("normalize using the tmm method", {
norm.df = norm_ge(df, method='tmm')
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
})
test_that("normalize using the tmm method in parallel", {
norm.df = norm_ge(df, nb_cores=2)
tot.norm = colSums(norm.df[, colnames(mat)])
expect_gt(sd(tot.raw), sd(tot.norm))
})
|
setwd("./hw4")
## 1. The American Community Survey distributes downloadable data about
## United States communities. Download the 2006 microdata survey about housing
## for the state of Idaho using download.file() from here:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv
## and load the data into R. The code book, describing the variable names is here:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
## Apply strsplit() to split all the names of the data frame on the characters
## "wgtp". What is the value of the 123 element of the resulting list?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(url, "./hid.csv")
data <- read.csv("./hid.csv")
ans1 <- strsplit(names(data), "wgtp")[123]
## 2. Load the Gross Domestic Product data for the 190 ranked countries
## in this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
## Remove the commas from the GDP numbers in millions of dollars
## and average them. What is the average?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(url,"./gdp.csv")
data <- read.csv("./gdp.csv", skip = 5, header = FALSE, nrows = 190,
encoding = "UTF-8")
data$V5 <- as.numeric(gsub(",","",data$V5))
ans2 <- mean(data$V5)
## 3. In the data set from Question 2 what is a regular expression
## that would allow you to count the number of countries whose name
## begins with "United"?
## Assume that the variable with the country names in it is named countryNames.
## How many countries begin with United?
ans3 <- length(grep("^United",data$V4))
## 4. Load the Gross Domestic Product data for the 190 ranked
## countries in this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
## Load the educational data from this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv
## Match the data based on the country shortcode.
## Of the countries for which the end of the fiscal year is available,
## how many end in June?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(url,"./edstats.csv")
edstats <- read.csv("./edstats.csv")
gdp <- data
gdp.ed <- merge(gdp, edstats, by.x = c("V1"), by.y = c("CountryCode"))
gdp.ed.fisc <- gdp.ed[grep("^Fiscal year end",gdp.ed$Special.Notes),]
ans4 <- length(grep("June",gdp.ed.fisc$Special.Notes))
## 5. You can use the quantmod (http://www.quantmod.com/) package
## to get historical stock prices for publicly traded companies
## on the NASDAQ and NYSE. Use the following code to download data
## on Amazon's stock price and get the times the data was sampled.
library(quantmod)
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
## How many values were collected in 2012? How many values
## were collected on Mondays in 2012?
sampleTimes2012 <- sampleTimes[grep("2012", sampleTimes)]
sampleTimes2012Day <- weekdays(sampleTimes2012)
ans5 <- c(length(grep("2012", sampleTimes)), length(grep("Monday", sampleTimes2012Day)))
| /03_Data-Cleaning/assignments/hw4/data-cleaning_quiz4.R | no_license | angeliu24601/datasciencecoursera | R | false | false | 3,171 | r | setwd("./hw4")
## 1. The American Community Survey distributes downloadable data about
## United States communities. Download the 2006 microdata survey about housing
## for the state of Idaho using download.file() from here:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv
## and load the data into R. The code book, describing the variable names is here:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
## Apply strsplit() to split all the names of the data frame on the characters
## "wgtp". What is the value of the 123 element of the resulting list?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(url, "./hid.csv")
data <- read.csv("./hid.csv")
ans1 <- strsplit(names(data), "wgtp")[123]
## 2. Load the Gross Domestic Product data for the 190 ranked countries
## in this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
## Remove the commas from the GDP numbers in millions of dollars
## and average them. What is the average?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(url,"./gdp.csv")
data <- read.csv("./gdp.csv", skip = 5, header = FALSE, nrows = 190,
encoding = "UTF-8")
data$V5 <- as.numeric(gsub(",","",data$V5))
ans2 <- mean(data$V5)
## 3. In the data set from Question 2 what is a regular expression
## that would allow you to count the number of countries whose name
## begins with "United"?
## Assume that the variable with the country names in it is named countryNames.
## How many countries begin with United?
ans3 <- length(grep("^United",data$V4))
## 4. Load the Gross Domestic Product data for the 190 ranked
## countries in this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
## Load the educational data from this data set:
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv
## Match the data based on the country shortcode.
## Of the countries for which the end of the fiscal year is available,
## how many end in June?
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(url,"./edstats.csv")
edstats <- read.csv("./edstats.csv")
gdp <- data
gdp.ed <- merge(gdp, edstats, by.x = c("V1"), by.y = c("CountryCode"))
gdp.ed.fisc <- gdp.ed[grep("^Fiscal year end",gdp.ed$Special.Notes),]
ans4 <- length(grep("June",gdp.ed.fisc$Special.Notes))
## 5. You can use the quantmod (http://www.quantmod.com/) package
## to get historical stock prices for publicly traded companies
## on the NASDAQ and NYSE. Use the following code to download data
## on Amazon's stock price and get the times the data was sampled.
library(quantmod)
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
## How many values were collected in 2012? How many values
## were collected on Mondays in 2012?
sampleTimes2012 <- sampleTimes[grep("2012", sampleTimes)]
sampleTimes2012Day <- weekdays(sampleTimes2012)
ans5 <- c(length(grep("2012", sampleTimes)), length(grep("Monday", sampleTimes2012Day)))
|
\name{ARFIMAroll-class}
\Rdversion{1.1}
\docType{class}
\alias{ARFIMAroll-class}
\alias{as.data.frame,ARFIMAroll-method}
\alias{report,ARFIMAroll-method}
\alias{fpm,ARFIMAroll-method}
\alias{coef,ARFIMAroll-method}
\alias{resume,ARFIMAroll-method}
\alias{show,ARFIMAroll-method}
\title{class: ARFIMA Rolling Forecast Class}
\description{
Class for the ARFIMA rolling forecast.}
\section{Slots}{
\describe{
\item{\code{forecast}:}{Object of class \code{"vector"} }
\item{\code{model}:}{Object of class \code{"vector"} }
}
}
\section{Extends}{
Class \code{"\linkS4class{ARFIMA}"}, directly.
Class \code{"\linkS4class{rGARCH}"}, by class "ARFIMA", distance 2.
}
\section{Methods}{
\describe{
\item{as.data.frame}{\code{signature(x = "ARFIMAroll")}: extracts various
values from object (see note). }
\item{resume}{\code{signature(object = "ARFIMAroll")}:
Resumes a rolling backtest which has non-converged windows using
alternative solver and control parameters.}
\item{fpm}{\code{signature(object = "ARFIMAroll")}:
Forecast performance measures.}
\item{coef}{\code{signature(object = "ARFIMAroll")}:
Extracts the list of coefficients for each estimated window in the
rolling backtest.}
\item{report}{\code{signature(object = "ARFIMAroll")}: roll backtest reports
(see note).}
\item{show}{\code{signature(object = "ARFIMAroll")}:
Summary.}
}
}
\note{
The \code{as.data.frame} extractor method allows the extraction of either the
conditional forecast density or the VaR. It takes additional argument
\code{which} with valid values either \dQuote{density} or \dQuote{VaR}.\cr
The \code{coef} method will return a list of the coefficients and their robust
standard errors (assuming the keep.coef argument was set to TRUE in the
ugarchroll function), and the ending date of each estimation window.\cr
The \code{report} method takes the following additional arguments:\cr
1.\emph{type} for the report type. Valid values are \dQuote{VaR} for the VaR
report based on the unconditional and conditional coverage tests for exceedances
(discussed below) and \dQuote{fpm} for forecast performance measures.\cr
2.\emph{VaR.alpha} (for the VaR backtest report) is the tail probability and
defaults to 0.01.\cr
3.\emph{conf.level} the confidence level upon which the conditional coverage
hypothesis test will be based on (defaults to 0.95).\cr
Kupiec's unconditional coverage test looks at whether the amount of expected
versus actual exceedances given the tail probability of VaR actually occur as
predicted, while the conditional coverage test of Christoffersen is a joint test
of the unconditional coverage and the independence of the exceedances. Both the
joint and the separate unconditional test are reported since it is always
possible that the joint test passes while failing either the independence or
unconditional coverage test.
The \code{fpm} method (separately from report) takes additional logical argument
\emph{summary}, which when TRUE will return the mean squared error (MSE),
mean absolute error (MAE) and directional accuracy of the forecast versus
realized returns. When FALSE, it will return a data.frame of the time series
of squared (SE) errors, absolute errors (AE), directional hits (HITS), and a
VaR Loss function described in Gonzalez-Rivera, Lee, and Mishra (2004)
for each coverage level where it was calculated. This can then be compared, with
the VaR loss of competing models using such tests as the model confidence set
(MCS) of Hansen, Lunde and Nason (2011).
}
\author{Alexios Ghalanos}
\keyword{classes} | /man/ARFIMAroll-class.Rd | no_license | Dwj359582058/rugarch | R | false | false | 3,676 | rd | \name{ARFIMAroll-class}
\Rdversion{1.1}
\docType{class}
\alias{ARFIMAroll-class}
\alias{as.data.frame,ARFIMAroll-method}
\alias{report,ARFIMAroll-method}
\alias{fpm,ARFIMAroll-method}
\alias{coef,ARFIMAroll-method}
\alias{resume,ARFIMAroll-method}
\alias{show,ARFIMAroll-method}
\title{class: ARFIMA Rolling Forecast Class}
\description{
Class for the ARFIMA rolling forecast.}
\section{Slots}{
\describe{
\item{\code{forecast}:}{Object of class \code{"vector"} }
\item{\code{model}:}{Object of class \code{"vector"} }
}
}
\section{Extends}{
Class \code{"\linkS4class{ARFIMA}"}, directly.
Class \code{"\linkS4class{rGARCH}"}, by class "ARFIMA", distance 2.
}
\section{Methods}{
\describe{
\item{as.data.frame}{\code{signature(x = "ARFIMAroll")}: extracts various
values from object (see note). }
\item{resume}{\code{signature(object = "ARFIMAroll")}:
Resumes a rolling backtest which has non-converged windows using
alternative solver and control parameters.}
\item{fpm}{\code{signature(object = "ARFIMAroll")}:
Forecast performance measures.}
\item{coef}{\code{signature(object = "ARFIMAroll")}:
Extracts the list of coefficients for each estimated window in the
rolling backtest.}
\item{report}{\code{signature(object = "ARFIMAroll")}: roll backtest reports
(see note).}
\item{show}{\code{signature(object = "ARFIMAroll")}:
Summary.}
}
}
\note{
The \code{as.data.frame} extractor method allows the extraction of either the
conditional forecast density or the VaR. It takes additional argument
\code{which} with valid values either \dQuote{density} or \dQuote{VaR}.\cr
The \code{coef} method will return a list of the coefficients and their robust
standard errors (assuming the keep.coef argument was set to TRUE in the
ugarchroll function), and the ending date of each estimation window.\cr
The \code{report} method takes the following additional arguments:\cr
1.\emph{type} for the report type. Valid values are \dQuote{VaR} for the VaR
report based on the unconditional and conditional coverage tests for exceedances
(discussed below) and \dQuote{fpm} for forecast performance measures.\cr
2.\emph{VaR.alpha} (for the VaR backtest report) is the tail probability and
defaults to 0.01.\cr
3.\emph{conf.level} the confidence level upon which the conditional coverage
hypothesis test will be based on (defaults to 0.95).\cr
Kupiec's unconditional coverage test looks at whether the amount of expected
versus actual exceedances given the tail probability of VaR actually occur as
predicted, while the conditional coverage test of Christoffersen is a joint test
of the unconditional coverage and the independence of the exceedances. Both the
joint and the separate unconditional test are reported since it is always
possible that the joint test passes while failing either the independence or
unconditional coverage test.
The \code{fpm} method (separately from report) takes additional logical argument
\emph{summary}, which when TRUE will return the mean squared error (MSE),
mean absolute error (MAE) and directional accuracy of the forecast versus
realized returns. When FALSE, it will return a data.frame of the time series
of squared (SE) errors, absolute errors (AE), directional hits (HITS), and a
VaR Loss function described in Gonzalez-Rivera, Lee, and Mishra (2004)
for each coverage level where it was calculated. This can then be compared, with
the VaR loss of competing models using such tests as the model confidence set
(MCS) of Hansen, Lunde and Nason (2011).
}
\author{Alexios Ghalanos}
\keyword{classes} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_matrix.R
\name{get_matrix}
\alias{get_matrix}
\title{Create a matrix with extracted data from a bigWig file within a specified region around a peak.}
\usage{
get_matrix(bed = NULL, bw_files = NULL, bw_path = NULL, op_dir = NULL,
up = 2500, down = 2500, pos = "", binsize = 25)
}
\arguments{
\item{bed}{A file in bed format. Default value is NULL.}
\item{bw_files}{One or a character vector with multiple files in bigWig format. Default value is NULL.}
\item{bw_path}{The path to directory where bwtool is installed on the computer. Default value is NULL.}
\item{op_dir}{The path to the operation directory currently used. Default value is NULL.}
\item{up}{Number of basepairs from peak to 5' end. Default value is 2500.}
\item{down}{Number of basepairs from peak to 3' end.Default value is 2500.}
\item{pos}{Reference position of the region around a peak. Possibilities: '-starts' and '-ends'. Default value is '' and means a centered reference position.}
\item{binsize}{Binsize of how many basepairs the avergae will be calculated. Default value is 25.}
}
\value{
result list with matrices and additional information about the input of the function
}
\description{
Create a matrix with extracted data from bigWig files. Region around peak which should be observed can be specified. Returns a list with the matrix and the inserted parameters (region, binsize, reference position) and filenames (bed file andbigWig files).
}
| /man/get_matrix.Rd | no_license | PoisonAlien/chipAnalyser | R | false | true | 1,515 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_matrix.R
\name{get_matrix}
\alias{get_matrix}
\title{Create a matrix with extracted data from a bigWig file within a specified region around a peak.}
\usage{
get_matrix(bed = NULL, bw_files = NULL, bw_path = NULL, op_dir = NULL,
up = 2500, down = 2500, pos = "", binsize = 25)
}
\arguments{
\item{bed}{A file in bed format. Default value is NULL.}
\item{bw_files}{One or a character vector with multiple files in bigWig format. Default value is NULL.}
\item{bw_path}{The path to directory where bwtool is installed on the computer. Default value is NULL.}
\item{op_dir}{The path to the operation directory currently used. Default value is NULL.}
\item{up}{Number of basepairs from peak to 5' end. Default value is 2500.}
\item{down}{Number of basepairs from peak to 3' end.Default value is 2500.}
\item{pos}{Reference position of the region around a peak. Possibilities: '-starts' and '-ends'. Default value is '' and means a centered reference position.}
\item{binsize}{Binsize of how many basepairs the avergae will be calculated. Default value is 25.}
}
\value{
result list with matrices and additional information about the input of the function
}
\description{
Create a matrix with extracted data from bigWig files. Region around peak which should be observed can be specified. Returns a list with the matrix and the inserted parameters (region, binsize, reference position) and filenames (bed file andbigWig files).
}
|
simulatedata <-
function(x,y, num.mc){
listvls <- c()
for(i in 1:num.mc){
x_null <- rnorm(length(x),0,1)
y_null <- rnorm(length(y),0,1)
test_stat<-.C("CWrapper1", n1=as.integer(length(x_null)),n2=as.integer(length(y_null)),y1=as.double(x_null),y2=as.double(y_null),test_stat=as.double(1))$test_stat
listvls <- c(listvls,test_stat)
}
return(listvls)
}
| /tsc/R/simulatedata.R | no_license | ingted/R-Examples | R | false | false | 388 | r | simulatedata <-
function(x,y, num.mc){
listvls <- c()
for(i in 1:num.mc){
x_null <- rnorm(length(x),0,1)
y_null <- rnorm(length(y),0,1)
test_stat<-.C("CWrapper1", n1=as.integer(length(x_null)),n2=as.integer(length(y_null)),y1=as.double(x_null),y2=as.double(y_null),test_stat=as.double(1))$test_stat
listvls <- c(listvls,test_stat)
}
return(listvls)
}
|
NutritionStudy60 = subset(NutritionStudy, Age>59)
xyplot(Alcohol ~ Calories, data=subset(NutritionStudy60, Alcohol<25))
cor(Alcohol ~ Calories, data=subset(NutritionStudy60, Alcohol<25))
| /inst/snippets/Example2.38b.R | no_license | stacyderuiter/Lock5withR | R | false | false | 188 | r | NutritionStudy60 = subset(NutritionStudy, Age>59)
xyplot(Alcohol ~ Calories, data=subset(NutritionStudy60, Alcohol<25))
cor(Alcohol ~ Calories, data=subset(NutritionStudy60, Alcohol<25))
|
# install.packages("shiny")
# install.packages("ggplot2")
# install.packages("maps")
# install.packages("dplyr")
# install.packages("RColorBrewer")
# install.packages("ggpubr")
# install.packages("shinyWidgets")
# install.packages("fmsb")
library(shiny)
library(ggplot2)
library(maps)
library(dplyr)
library(RColorBrewer)
library(ggpubr)
library(shinyWidgets)
library(fmsb)
# Read dataset
df <- read.csv("europe.csv")
# Normalize the data
min_max_norm <- function(x) {
(x - min(x)) / (max(x) - min(x))
}
df_scaled <- df %>%
mutate(Area = min_max_norm(Area)) %>%
mutate(GDP = min_max_norm(GDP)) %>%
mutate(Inflation = min_max_norm(Inflation)) %>%
mutate(Life.expect = min_max_norm(Life.expect)) %>%
mutate(Military = min_max_norm(Military)) %>%
mutate(Pop.growth = min_max_norm(Pop.growth)) %>%
mutate(Unemployment = min_max_norm(Unemployment))
# Create World Map
world <- map_data("world")
worldmap <- ggplot() + theme(
panel.background = element_rect(fill = "white",
color = NA),
panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank()
)
# Create Europe boundaries
europe <- worldmap + coord_fixed(xlim = c(-20, 42.5),
ylim = c(36, 70.1),
ratio = 1.5)
# Joining our data with geopoints of the countries
joinMap <- full_join(df, world, by = c("Country" = "region"))
ui <- fluidPage(
# App title ----
titlePanel(
h1("Visualization Practical Work", align = "center")
),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
selectInput('var',
'Select variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment"))
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Map
plotOutput(outputId = "map"),
)
),
sidebarLayout(
# Sidebar panel for inputs
sidebarPanel(
radioButtons("radioButton_graph","Type of graph",
c("Histogram","Correlation between two variables")),
selectInput('corr_first', 'First Variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment")),
conditionalPanel(
condition = "input.radioButton_graph != 'Histogram'",
selectInput('corr_second', 'Second Variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment"),
selected = "Inflation")),
),
# Main panel for displaying outputs
mainPanel(
# Output: Correlation Matrix
plotOutput(outputId = "correlation_matrix")
)
),
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
radioButtons("radioButton_country","",
c("Country exploration","Country comparison")),
selectInput('country_1',
'Select Country',
df$Country),
conditionalPanel(
condition = "input.radioButton_country != 'Country exploration'",
selectInput('country_2', 'Select Country',
df$Country,
selected = "Belgium")),
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Country
plotOutput(outputId = "country"),
)
),
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
# Map output
output$map <- renderPlot({
symbol <- sym(input$var)
europe2 <-
europe + geom_polygon(data = joinMap,
aes(
fill = !! symbol,
x = long,
y = lat,
group = group
),
color = "grey70") +
scale_colour_gradient()
plot(europe2)
})
# Correlation-Histogram output
output$correlation_matrix <- renderPlot({
corr_first <- sym(input$corr_first)
corr_second <- sym(input$corr_second)
# If only a variable is selected, the histogram is shown
if(input$radioButton_graph=="Histogram")
{
ggplot(df %>% select(!! corr_first), aes(x=!! corr_first)) +
geom_histogram(color="black", fill="#2e608a") +
theme(
# Deleting the background
panel.background = element_rect(fill = "transparent", colour = NA),
plot.background = element_rect(fill = "transparent", colour = NA),
legend.background = element_rect(fill = "transparent", colour = NA),
legend.box.background = element_rect(fill = "transparent", colour = NA),
# Hide panel borders and add grid lines
panel.border = element_blank(),
panel.grid.major = element_line(colour = "grey"),
panel.grid.minor = element_line(colour = "grey"),
# Change axis line
axis.line = element_line(colour = "black"))
}
# If two different variables are selected, the correlation scatterplot is shown
else if (input$radioButton_graph == "Correlation between two variables"){
ggscatter(df_scaled %>% select(c(!! corr_first, !! corr_second)),
x = input$corr_first, y = input$corr_second,
# Add regressin line
add = "reg.line",
# Customize reg. line
add.params = list(color = "blue", fill = "white"),
# Add confidence interval
conf.int = TRUE
) + stat_cor(method = "pearson",
label.x = 0,
label.y = 0)
}
})
# Country output
output$country <- renderPlot({
if(input$radioButton_country=="Country exploration")
{
df_scaled <- df_scaled %>%
filter(Country==input$country_1) %>%
select(-Country)
df_scaled <- rbind(rep(1,7) , rep(0,7) , df_scaled)
radarchart(df_scaled ,
#Axist Type
axistype=1 ,
#custom polygon
pcol=rgb(0.19,0.39,0.59,0.9) , pfcol=rgb(0.2,0.55,0.94,0.4) , plwd=4 ,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,100,25), cglwd=0.8,
#custom labels
vlcex=0.8
)
}
else if (input$radioButton_country=="Country comparison")
{
df_scaled <- df_scaled %>%
filter(Country==input$country_1 | Country==input$country_2)
rownames(df_scaled) <- df_scaled[,1]
df_scaled <- df_scaled %>%
select(-Country)
df_scaled <- rbind(rep(1,7) , rep(0,7) , df_scaled)
colors_border=c(rgb(0.19,0.39,0.59,0.9), rgb(0.79,0.22,0.08,0.9))
colors_in=c( rgb(0.2,0.55,0.94,0.4), rgb(0.98,0.27,0.1,0.4))
radarchart( df_scaled , axistype=1 ,
#custom polygon
pcol=colors_border , pfcol=colors_in , plwd=4 , plty=1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,100,25), cglwd=0.8,
#custom labels
vlcex=0.8
)
# Add a legend
legend(x=1.5, y=1, legend = rownames(df_scaled[-c(1,2),]), bty = "n", pch=20 , col=colors_in , text.col = "grey", cex=1.2, pt.cex=3)
}
})
}
shinyApp(ui = ui, server = server)
| /app.R | no_license | juanluisrto/shiny-app | R | false | false | 8,148 | r | # install.packages("shiny")
# install.packages("ggplot2")
# install.packages("maps")
# install.packages("dplyr")
# install.packages("RColorBrewer")
# install.packages("ggpubr")
# install.packages("shinyWidgets")
# install.packages("fmsb")
library(shiny)
library(ggplot2)
library(maps)
library(dplyr)
library(RColorBrewer)
library(ggpubr)
library(shinyWidgets)
library(fmsb)
# Read dataset
df <- read.csv("europe.csv")
# Normalize the data
min_max_norm <- function(x) {
(x - min(x)) / (max(x) - min(x))
}
df_scaled <- df %>%
mutate(Area = min_max_norm(Area)) %>%
mutate(GDP = min_max_norm(GDP)) %>%
mutate(Inflation = min_max_norm(Inflation)) %>%
mutate(Life.expect = min_max_norm(Life.expect)) %>%
mutate(Military = min_max_norm(Military)) %>%
mutate(Pop.growth = min_max_norm(Pop.growth)) %>%
mutate(Unemployment = min_max_norm(Unemployment))
# Create World Map
world <- map_data("world")
worldmap <- ggplot() + theme(
panel.background = element_rect(fill = "white",
color = NA),
panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank()
)
# Create Europe boundaries
europe <- worldmap + coord_fixed(xlim = c(-20, 42.5),
ylim = c(36, 70.1),
ratio = 1.5)
# Joining our data with geopoints of the countries
joinMap <- full_join(df, world, by = c("Country" = "region"))
ui <- fluidPage(
# App title ----
titlePanel(
h1("Visualization Practical Work", align = "center")
),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
selectInput('var',
'Select variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment"))
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Map
plotOutput(outputId = "map"),
)
),
sidebarLayout(
# Sidebar panel for inputs
sidebarPanel(
radioButtons("radioButton_graph","Type of graph",
c("Histogram","Correlation between two variables")),
selectInput('corr_first', 'First Variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment")),
conditionalPanel(
condition = "input.radioButton_graph != 'Histogram'",
selectInput('corr_second', 'Second Variable',
c("Gross Domestic Product"="GDP",
"Inflation",
"Life Expectancy" = "Life.expect",
"Military",
"Population Growth" = "Pop.growth",
"Unemployment"),
selected = "Inflation")),
),
# Main panel for displaying outputs
mainPanel(
# Output: Correlation Matrix
plotOutput(outputId = "correlation_matrix")
)
),
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
radioButtons("radioButton_country","",
c("Country exploration","Country comparison")),
selectInput('country_1',
'Select Country',
df$Country),
conditionalPanel(
condition = "input.radioButton_country != 'Country exploration'",
selectInput('country_2', 'Select Country',
df$Country,
selected = "Belgium")),
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Country
plotOutput(outputId = "country"),
)
),
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
# Map output
output$map <- renderPlot({
symbol <- sym(input$var)
europe2 <-
europe + geom_polygon(data = joinMap,
aes(
fill = !! symbol,
x = long,
y = lat,
group = group
),
color = "grey70") +
scale_colour_gradient()
plot(europe2)
})
# Correlation-Histogram output
output$correlation_matrix <- renderPlot({
corr_first <- sym(input$corr_first)
corr_second <- sym(input$corr_second)
# If only a variable is selected, the histogram is shown
if(input$radioButton_graph=="Histogram")
{
ggplot(df %>% select(!! corr_first), aes(x=!! corr_first)) +
geom_histogram(color="black", fill="#2e608a") +
theme(
# Deleting the background
panel.background = element_rect(fill = "transparent", colour = NA),
plot.background = element_rect(fill = "transparent", colour = NA),
legend.background = element_rect(fill = "transparent", colour = NA),
legend.box.background = element_rect(fill = "transparent", colour = NA),
# Hide panel borders and add grid lines
panel.border = element_blank(),
panel.grid.major = element_line(colour = "grey"),
panel.grid.minor = element_line(colour = "grey"),
# Change axis line
axis.line = element_line(colour = "black"))
}
# If two different variables are selected, the correlation scatterplot is shown
else if (input$radioButton_graph == "Correlation between two variables"){
ggscatter(df_scaled %>% select(c(!! corr_first, !! corr_second)),
x = input$corr_first, y = input$corr_second,
# Add regressin line
add = "reg.line",
# Customize reg. line
add.params = list(color = "blue", fill = "white"),
# Add confidence interval
conf.int = TRUE
) + stat_cor(method = "pearson",
label.x = 0,
label.y = 0)
}
})
# Country output
output$country <- renderPlot({
if(input$radioButton_country=="Country exploration")
{
df_scaled <- df_scaled %>%
filter(Country==input$country_1) %>%
select(-Country)
df_scaled <- rbind(rep(1,7) , rep(0,7) , df_scaled)
radarchart(df_scaled ,
#Axist Type
axistype=1 ,
#custom polygon
pcol=rgb(0.19,0.39,0.59,0.9) , pfcol=rgb(0.2,0.55,0.94,0.4) , plwd=4 ,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,100,25), cglwd=0.8,
#custom labels
vlcex=0.8
)
}
else if (input$radioButton_country=="Country comparison")
{
df_scaled <- df_scaled %>%
filter(Country==input$country_1 | Country==input$country_2)
rownames(df_scaled) <- df_scaled[,1]
df_scaled <- df_scaled %>%
select(-Country)
df_scaled <- rbind(rep(1,7) , rep(0,7) , df_scaled)
colors_border=c(rgb(0.19,0.39,0.59,0.9), rgb(0.79,0.22,0.08,0.9))
colors_in=c( rgb(0.2,0.55,0.94,0.4), rgb(0.98,0.27,0.1,0.4))
radarchart( df_scaled , axistype=1 ,
#custom polygon
pcol=colors_border , pfcol=colors_in , plwd=4 , plty=1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,100,25), cglwd=0.8,
#custom labels
vlcex=0.8
)
# Add a legend
legend(x=1.5, y=1, legend = rownames(df_scaled[-c(1,2),]), bty = "n", pch=20 , col=colors_in , text.col = "grey", cex=1.2, pt.cex=3)
}
})
}
shinyApp(ui = ui, server = server)
|
#=======================================================================================
# cargamos las LIBRERIAS y las FUNCIONES
#=======================================================================================
library(readxl)
library(RODBC)
library(dplyr)
library(tidyr)
library(cluster)
library(NbClust)
library(data.table)
#=======================================================================================
# Encuentra la posición del primer dia uno
#
#=======================================================================================
encuentra.uno <- function(v){
i <- 1 # Inicializamos el contador
while(v[i]!=1){
i <- i+1
}
return(i)
}
#=======================================================================================
## Encuentra la posición del ultimo dia siete ##
#=======================================================================================
encuentra.usiete <- function(v){
j <- length(v) # Inicializamos el contador
while(v[j]!=7){
j <- j-1
}
return(j)
}
#=======================================================================================
## Vamos a crear la variable semana completa ##
#=======================================================================================
semana.completa <- function(v){
x <- encuentra.uno(v) # Encontramos el 1er uno
y <- encuentra.usiete(v) # Encontramos el ultimo siete
x1 <- v[x:y] # Recortamos la variable a las semanas completas
z0 <- 0 # Inicializamos z0
f <- (y-(x-1))/7 # Encontramos el nro de semanas completas
for (i in 1:f) {
zi <- rep(i,7)
z0 <- c(z0, zi)
}
return(c(rep(0,(x-1)),z0[-1],rep(f+1,length(v)-y)))
}
#=======================================================================================
## Creación suma semanal y densiadad semanal##
#=======================================================================================
dens.diaria <- function(serie){
suma.semana <- serie %>% group_by(semana) %>% summarise(suma_unidades=sum(Unidades))
serie <- merge(x=serie,y=suma.semana) %>%
arrange((FECHA)) %>%
mutate(dens_diaria=(Unidades/suma_unidades))
rm(suma.semana)
#serie <- serie %>% select(-suma_unidades)
return(serie)
}
#=======================================================================================
# Creación de la funcion reordenamiento
#=======================================================================================
reordenamiento <- function(serie){
dia.densidad <- serie %>% select(semana,dia_semana,dens_diaria)
# Queremos que los nombres de las variables de día:
dia <- c("lunes","martes","miercoles","jueves","viernes","sabado","domingo")
dia_semana <- c(1:7)
dia.codigo <- as.data.frame(cbind(dia,dia_semana))
# Ahora lo unimos con la serie:
dia.semana2 <- merge(dia.densidad,dia.codigo, by="dia_semana") %>%
arrange(semana) %>% select(semana,dia,dens_diaria)
# Vamos ahora a usar los días como variables:
dia.densidad2 <- unique(dia.semana2) %>%
spread(key=dia, value = dens_diaria) %>%
select(semana,lunes:miercoles,jueves,viernes,sabado,domingo) %>%
filter(semana != 0)
# Guardamos nuestro resultado
serie <- dia.densidad2
rm(dia.densidad,dia,dia_semana, dia.codigo)
return(serie)
}
#=======================================================================================
# Cargamos la función numerog
#=======================================================================================
numerog <- function(datos){
sumas.entre <- function(datos){
set.seed(6)
wcss = vector()
for (i in 1:5) {
wcss[i] = sum(kmeans(datos,i)$withinss)
}
return(wcss)
}
wcss <- sumas.entre(datos)
diferencias <- function(sumas){
diferencia = vector()
for (i in 1:length(sumas)-1){
diferencia[i]=(sumas[i]-sumas[i+1])/sumas[1]
}
return(diferencia)
}
numero.grupos <- function(diferencias){
numero <- 0
i <- 1
while (diferencias[i] > 0.08) {
numero=numero+1
i=i+1
}
return(numero+1)
}
numerog <- numero.grupos(diferencias = diferencias(sumas = wcss))
return(numerog)
}
#=======================================================================================
# Cargamos la funcion td() para CONSULTA SQL
#=======================================================================================
td <- function (server = 'SQL Server', uid = 'tclondono', pwd = 'tclondono01',
query ){
char <- paste("Driver=Teradata;DBCName=", server, ";UID=", uid, "PWD=", pwd);
#ch <- odbcDriverConnect(char); # Crea conexión
ch <- odbcConnect(dsn= server, uid = uid, pwd = pwd)
data <- sqlQuery(ch, query); # Ejecuta consulta
odbcClose(ch); # Cierra conexión
rm(char, ch); # Remueve variables
return(data); # Devuelve resultados de la consulta
}
query <- paste(readLines("D:/3.Scripts R/Piloto_PGC_FRESCOS/prueba_consulta_SQL.txt"),
collapse = " ",warn=FALSE) # el script sin ; y con un enter al final
#query <- gsub("__VAR1__", "20, 85, 146", query) # para cambiar cosas en el query
#data <- as_tibble(td(query=query)) # para que le guarde otros atributos
#=======================================================================================
## Creación densidad comercial ##
#=======================================================================================
dens.comercial <- function(dc){
# Calculamos la suma de unidades movilizadas en la semana:
suma.semana <- dc %>% group_by(semana) %>% summarise(suma_unidades=sum(S_Unidades))
# Unimos la suma de unidades y calculamos la densidad por agrupación comercial:
dc <- merge(x=dc,y=suma.semana) %>%
mutate(dens_comercial=(S_Unidades/suma_unidades))
return(dc)
}
#=======================================================================================
# Cargamos la funcion CENTROS para calcular el promedio de la densidad comercial
#=======================================================================================
centros <- function(filtro_historico){
# Agrupamos y sumamos las unidades en cada agrupación:
filtro_historico <- filtro_historico %>% group_by(semana,Agrupacion_ID) %>%
summarise(S_Unidades=sum(Unidades))
# Calculamos las densidades comerciales:
filtro_historico <- dens.comercial(filtro_historico)
dc2 <- filtro_historico %>% select(Agrupacion_ID,dens_comercial,semana)
# Ubicamos las densidades de secos y frescos en dos columnas
dc2 <- spread(dc2,key=Agrupacion_ID, value = dens_comercial)
# Borramos los NAS EN CASO DE QUE EXISTAN
if(sum(names(dc2)=="4001")==0){dc2$"4001"<-0}
if(sum(names(dc2)=="4002")==0){dc2$"4002"<-0}
dc2$"4001" = ifelse(is.na(dc2$"4001"), 0, dc2$"4001")
dc2$"4002" = ifelse(is.na(dc2$"4001"), 0, dc2$"4002")
#=====================================================================================
# Vamos a calcular el promedio con kmedias n=1
#=====================================================================================
kmeans = kmeans(x = dc2[2:3], centers = 1, nstart=25)
#=====================================================================================
# Guardamos el centros
centro <- as.data.frame(kmeans$centers)
#=====================================================================================
return(centro)
}
#=======================================================================================
# Esta función calcula la desagregacion comercial basado en el centros
#=======================================================================================
desag.co <- function(filtro_proyeccion,centros){
desag_co <- filtro_proyeccion %>%
mutate(frescos=Unidades*centros[1,1],
secos=Unidades*centros[1,2])
return(desag_co)
}
#=======================================================================================
#=======================================================================================
# FUNCIONES DE LA DESAGREGACION TEMPORAL
#=======================================================================================
cruzar.fechas <- function(consolidado_cedis,mes){
conso_ce <- consolidado_cedis %>% separate(Semana, into = c("S","semana"), sep = " ")
# Vamos a ubicar en una columna los agrupamientos
conso_ce <- conso_ce %>% gather(frescos,secos,key="AGRUPAMIENTO",value="UNIDADES" )
# Vamos a ordenar las variables para que los primeros campos conformen una serie
conso_ce <- conso_ce %>% select(-S,-Unidades,-GEN,CEDI,FLUJO,AGRUPAMIENTO,TAREA,
semana,UNIDADES) %>% arrange(CEDI,semana)
# CRUZAR FECHAS DEL MES
conso_ce$semana <- as.numeric(conso_ce$semana) # Pasamos semana a numerica
cruze <- inner_join(x=conso_ce,y=Marzo[1:2], by="semana")
# Vamos a CODIFICAR EL TIPO DE MERCANCIA
cruze$AGRUPAMIENTO <- ifelse(cruze$AGRUPAMIENTO=="frescos",4001,4002)
return(cruze)
}
#=======================================================================================
densidad.semana <- function(filtro_hi_m, dia_codigo, m){
dias_semana <- f_hi_m %>% group_by(semana) %>% summarise(n=n())
# filtramos los dias que coinciden con los dias de la semana
dias_semana <- dias_semana %>% filter(n==m)
# Ahora debemos HALLAR las semanas del HISTORICO que empiezan el "4"
semanas_iguales <- f_hi_m %>% filter(semana %in% dias_semana$semana)
# vamos a calcular los centros de estas semanas
semanas_iguales <- dens.diaria(semanas_iguales)
semanas_iguales <- semanas_iguales %>% select(semana,dia_semana,dens_diaria)
# Unimos con los nombres de los dias
semanas_iguales <- inner_join(semanas_iguales,dia.codigo, by="dia_semana") %>%
select(semana,dens_diaria,dia)
densidad_semana <- semanas_iguales %>%
spread(key = dia, value = dens_diaria)
return(densidad_semana)
}
#=======================================================================================
centro.dia <- function(densidad_semana){
kmedias <- kmeans(densidad_semana[2:ncol(densidad_semana)], centers=1)
centro <- as.data.frame(kmedias$centers)
# Volvemos como codigo la columna dia.semana
dia.codigo$dia_semana <- as.numeric(dia.codigo$dia_semana)
# Ordenamos el centro
centro <- gather(centro, key = dia, value = densidad)
# Utilizamos sólo los codigos de los dias que pertenecen a la semana
dia.codigo2 <- dia.codigo %>% filter(dia %in% centro$dia) # esto organiza los dias
dia.codigo2$dia <- as.character(dia.codigo2$dia)
# gUARDAMOS el CONSOLIADADO DE LA DESAGREGACION COMERCIAL
centro_dia <- inner_join(dia.codigo2,centro,by="dia")
return(centro_dia)
}
| /R/x1.R | no_license | CristianDataScience/MUM | R | false | false | 10,696 | r | #=======================================================================================
# cargamos las LIBRERIAS y las FUNCIONES
#=======================================================================================
library(readxl)
library(RODBC)
library(dplyr)
library(tidyr)
library(cluster)
library(NbClust)
library(data.table)
#=======================================================================================
# Encuentra la posición del primer dia uno
#
#=======================================================================================
encuentra.uno <- function(v){
i <- 1 # Inicializamos el contador
while(v[i]!=1){
i <- i+1
}
return(i)
}
#=======================================================================================
## Encuentra la posición del ultimo dia siete ##
#=======================================================================================
encuentra.usiete <- function(v){
j <- length(v) # Inicializamos el contador
while(v[j]!=7){
j <- j-1
}
return(j)
}
#=======================================================================================
## Vamos a crear la variable semana completa ##
#=======================================================================================
semana.completa <- function(v){
x <- encuentra.uno(v) # Encontramos el 1er uno
y <- encuentra.usiete(v) # Encontramos el ultimo siete
x1 <- v[x:y] # Recortamos la variable a las semanas completas
z0 <- 0 # Inicializamos z0
f <- (y-(x-1))/7 # Encontramos el nro de semanas completas
for (i in 1:f) {
zi <- rep(i,7)
z0 <- c(z0, zi)
}
return(c(rep(0,(x-1)),z0[-1],rep(f+1,length(v)-y)))
}
#=======================================================================================
## Creación suma semanal y densiadad semanal##
#=======================================================================================
dens.diaria <- function(serie){
suma.semana <- serie %>% group_by(semana) %>% summarise(suma_unidades=sum(Unidades))
serie <- merge(x=serie,y=suma.semana) %>%
arrange((FECHA)) %>%
mutate(dens_diaria=(Unidades/suma_unidades))
rm(suma.semana)
#serie <- serie %>% select(-suma_unidades)
return(serie)
}
#=======================================================================================
# Creación de la funcion reordenamiento
#=======================================================================================
reordenamiento <- function(serie){
dia.densidad <- serie %>% select(semana,dia_semana,dens_diaria)
# Queremos que los nombres de las variables de día:
dia <- c("lunes","martes","miercoles","jueves","viernes","sabado","domingo")
dia_semana <- c(1:7)
dia.codigo <- as.data.frame(cbind(dia,dia_semana))
# Ahora lo unimos con la serie:
dia.semana2 <- merge(dia.densidad,dia.codigo, by="dia_semana") %>%
arrange(semana) %>% select(semana,dia,dens_diaria)
# Vamos ahora a usar los días como variables:
dia.densidad2 <- unique(dia.semana2) %>%
spread(key=dia, value = dens_diaria) %>%
select(semana,lunes:miercoles,jueves,viernes,sabado,domingo) %>%
filter(semana != 0)
# Guardamos nuestro resultado
serie <- dia.densidad2
rm(dia.densidad,dia,dia_semana, dia.codigo)
return(serie)
}
#=======================================================================================
# Cargamos la función numerog
#=======================================================================================
numerog <- function(datos){
sumas.entre <- function(datos){
set.seed(6)
wcss = vector()
for (i in 1:5) {
wcss[i] = sum(kmeans(datos,i)$withinss)
}
return(wcss)
}
wcss <- sumas.entre(datos)
diferencias <- function(sumas){
diferencia = vector()
for (i in 1:length(sumas)-1){
diferencia[i]=(sumas[i]-sumas[i+1])/sumas[1]
}
return(diferencia)
}
numero.grupos <- function(diferencias){
numero <- 0
i <- 1
while (diferencias[i] > 0.08) {
numero=numero+1
i=i+1
}
return(numero+1)
}
numerog <- numero.grupos(diferencias = diferencias(sumas = wcss))
return(numerog)
}
#=======================================================================================
# Cargamos la funcion td() para CONSULTA SQL
#=======================================================================================
td <- function (server = 'SQL Server', uid = 'tclondono', pwd = 'tclondono01',
query ){
char <- paste("Driver=Teradata;DBCName=", server, ";UID=", uid, "PWD=", pwd);
#ch <- odbcDriverConnect(char); # Crea conexión
ch <- odbcConnect(dsn= server, uid = uid, pwd = pwd)
data <- sqlQuery(ch, query); # Ejecuta consulta
odbcClose(ch); # Cierra conexión
rm(char, ch); # Remueve variables
return(data); # Devuelve resultados de la consulta
}
query <- paste(readLines("D:/3.Scripts R/Piloto_PGC_FRESCOS/prueba_consulta_SQL.txt"),
collapse = " ",warn=FALSE) # el script sin ; y con un enter al final
#query <- gsub("__VAR1__", "20, 85, 146", query) # para cambiar cosas en el query
#data <- as_tibble(td(query=query)) # para que le guarde otros atributos
#=======================================================================================
## Creación densidad comercial ##
#=======================================================================================
dens.comercial <- function(dc){
# Calculamos la suma de unidades movilizadas en la semana:
suma.semana <- dc %>% group_by(semana) %>% summarise(suma_unidades=sum(S_Unidades))
# Unimos la suma de unidades y calculamos la densidad por agrupación comercial:
dc <- merge(x=dc,y=suma.semana) %>%
mutate(dens_comercial=(S_Unidades/suma_unidades))
return(dc)
}
#=======================================================================================
# Cargamos la funcion CENTROS para calcular el promedio de la densidad comercial
#=======================================================================================
centros <- function(filtro_historico){
# Agrupamos y sumamos las unidades en cada agrupación:
filtro_historico <- filtro_historico %>% group_by(semana,Agrupacion_ID) %>%
summarise(S_Unidades=sum(Unidades))
# Calculamos las densidades comerciales:
filtro_historico <- dens.comercial(filtro_historico)
dc2 <- filtro_historico %>% select(Agrupacion_ID,dens_comercial,semana)
# Ubicamos las densidades de secos y frescos en dos columnas
dc2 <- spread(dc2,key=Agrupacion_ID, value = dens_comercial)
# Borramos los NAS EN CASO DE QUE EXISTAN
if(sum(names(dc2)=="4001")==0){dc2$"4001"<-0}
if(sum(names(dc2)=="4002")==0){dc2$"4002"<-0}
dc2$"4001" = ifelse(is.na(dc2$"4001"), 0, dc2$"4001")
dc2$"4002" = ifelse(is.na(dc2$"4001"), 0, dc2$"4002")
#=====================================================================================
# Vamos a calcular el promedio con kmedias n=1
#=====================================================================================
kmeans = kmeans(x = dc2[2:3], centers = 1, nstart=25)
#=====================================================================================
# Guardamos el centros
centro <- as.data.frame(kmeans$centers)
#=====================================================================================
return(centro)
}
#=======================================================================================
# Esta función calcula la desagregacion comercial basado en el centros
#=======================================================================================
desag.co <- function(filtro_proyeccion,centros){
desag_co <- filtro_proyeccion %>%
mutate(frescos=Unidades*centros[1,1],
secos=Unidades*centros[1,2])
return(desag_co)
}
#=======================================================================================
#=======================================================================================
# FUNCIONES DE LA DESAGREGACION TEMPORAL
#=======================================================================================
cruzar.fechas <- function(consolidado_cedis,mes){
conso_ce <- consolidado_cedis %>% separate(Semana, into = c("S","semana"), sep = " ")
# Vamos a ubicar en una columna los agrupamientos
conso_ce <- conso_ce %>% gather(frescos,secos,key="AGRUPAMIENTO",value="UNIDADES" )
# Vamos a ordenar las variables para que los primeros campos conformen una serie
conso_ce <- conso_ce %>% select(-S,-Unidades,-GEN,CEDI,FLUJO,AGRUPAMIENTO,TAREA,
semana,UNIDADES) %>% arrange(CEDI,semana)
# CRUZAR FECHAS DEL MES
conso_ce$semana <- as.numeric(conso_ce$semana) # Pasamos semana a numerica
cruze <- inner_join(x=conso_ce,y=Marzo[1:2], by="semana")
# Vamos a CODIFICAR EL TIPO DE MERCANCIA
cruze$AGRUPAMIENTO <- ifelse(cruze$AGRUPAMIENTO=="frescos",4001,4002)
return(cruze)
}
#=======================================================================================
densidad.semana <- function(filtro_hi_m, dia_codigo, m){
dias_semana <- f_hi_m %>% group_by(semana) %>% summarise(n=n())
# filtramos los dias que coinciden con los dias de la semana
dias_semana <- dias_semana %>% filter(n==m)
# Ahora debemos HALLAR las semanas del HISTORICO que empiezan el "4"
semanas_iguales <- f_hi_m %>% filter(semana %in% dias_semana$semana)
# vamos a calcular los centros de estas semanas
semanas_iguales <- dens.diaria(semanas_iguales)
semanas_iguales <- semanas_iguales %>% select(semana,dia_semana,dens_diaria)
# Unimos con los nombres de los dias
semanas_iguales <- inner_join(semanas_iguales,dia.codigo, by="dia_semana") %>%
select(semana,dens_diaria,dia)
densidad_semana <- semanas_iguales %>%
spread(key = dia, value = dens_diaria)
return(densidad_semana)
}
#=======================================================================================
centro.dia <- function(densidad_semana){
kmedias <- kmeans(densidad_semana[2:ncol(densidad_semana)], centers=1)
centro <- as.data.frame(kmedias$centers)
# Volvemos como codigo la columna dia.semana
dia.codigo$dia_semana <- as.numeric(dia.codigo$dia_semana)
# Ordenamos el centro
centro <- gather(centro, key = dia, value = densidad)
# Utilizamos sólo los codigos de los dias que pertenecen a la semana
dia.codigo2 <- dia.codigo %>% filter(dia %in% centro$dia) # esto organiza los dias
dia.codigo2$dia <- as.character(dia.codigo2$dia)
# gUARDAMOS el CONSOLIADADO DE LA DESAGREGACION COMERCIAL
centro_dia <- inner_join(dia.codigo2,centro,by="dia")
return(centro_dia)
}
|
library(openxlsx)
source("ag_bestCombination.R")
# Put the address of the comparison file here
original <- "/home/leandro/R/brooks/data/Metrics.xlsx"
orig_file <- read.xlsx(xlsxFile = original, sheet = "gD2-285")
# Put the directory containing the results of the analyzes here
dir <- "/home/leandro/R/brooks/data/GD2-285/"
orig_groups <- unique(orig_file$cluster)
files <- list.files(path = dir)
results <- list(); parc_results <- list(); index <- 1; results_names <- NULL
for (f in files) {
#f <- files[42]
temp <- read.xlsx(xlsxFile = paste0(dir, f), sheet = "Table metrics")
groups <- unique(temp$Groups)
#cat("Name of file:",f,"\n")
#cat("Total elements:",length(orig_file$cluster),"\n")
score <- matrix(NA, length(groups), length(orig_groups))
colnames(score) <- orig_groups
rownames(score) <- groups
for (orig_class in orig_groups) {
#orig_class <- orig_groups[1]
orig_index <- which(orig_file$cluster == orig_class)
#cat("\n\n")
#cat(orig_class,":\n\n")
for(temp_class in groups){
m_match <- 0; n_match <- 0; match <- 0; mismatch <- 0
#temp_class <- groups[1]
temp_index <- which(temp$Groups == temp_class)
n_match <- intersect(orig_file$mAb[orig_index],
gsub(" ","",temp[temp_index,1]))
temp_size <- length(gsub(" ","",temp[temp_index,1]))
match <- length(n_match)
mismatch <- temp_size - match
m <- length(orig_index)
n <- nrow(orig_file) - m
k <- mismatch + match
#cat(orig_class,"(",length(orig_index),") -", temp_class,"(",match+mismatch,") -> match:", match, "; mismatch:", mismatch,"\n")
#cat("dhyper(",match,",",m,",",n,",", k,"):")
if(match > 0){ prob <- dhyper(match, m, n, k) }
else{ prob <- 1 }
#cat(prob,"\n\n")
score[temp_class,orig_class] <- prob
#m <- 10; n <- 7; k <- 8
#x <- 0:(k+1)
#browser()
}
}
best_score <- ag_bestCombination(score,200,10)
results[[index]] <- best_score
results_names <- c(results_names, f)
parc_results[[index]] <- score
index <- index + 1
#browser()
}
names(results) <- results_names
names(parc_results) <- results_names
valid_list <- list(); valid_index <- NULL
for(i in 1:length(results)){
sel_result <- results[[i]]
sel_parc_result <- parc_results[[i]]
temp <- NULL; group_teste <- NULL; group_valid <- NULL
for(j in 1:ncol(sel_result)){
x <- sel_result[1,j]; y <- sel_result[2,j]
temp[j] <- sel_parc_result[x,y]
group_teste[j] <- rownames(sel_parc_result)[x]
group_valid[j] <- colnames(sel_parc_result)[y]
}
valid_list[[i]] <- cbind(group_valid, group_teste)
valid_index[i] <- sum(temp) + (length(orig_groups) - nrow(valid_list[[i]]))
#valid_index[i] <- prod(temp) + (length(orig_groups) - nrow(valid_list[[i]]))
}
names(valid_list) <- results_names
min_indexes <- which(valid_index == min(valid_index))
cat("## Best strategies identified:\n\n")
for(i in 1:length(min_indexes)){
f <- files[min_indexes[i]]
cat("## Name of file:",f,"\n")
temp <- read.xlsx(xlsxFile = paste0(dir, f), sheet = "Settings")
apply_clust <- gsub("[.]"," ",temp[1,1])
apply_clust <- substr(apply_clust,1,nchar(apply_clust)-1)
cat(apply_clust,": ",temp[1,2],"\n")
dist_metric <- gsub("[.]"," ",temp[2,1])
dist_metric <- substr(dist_metric,1,nchar(dist_metric)-1)
cat(dist_metric,": ",temp[2,2],"\n")
type_analise <- gsub("[.]"," ",temp[3,1])
type_analise <- substr(type_analise,1,nchar(type_analise)-1)
cat(type_analise,": ",temp[3,2],"\n")
algorithm <- gsub("[.]"," ",temp[4,1])
algorithm <- substr(algorithm,1,nchar(algorithm)-1)
cat(algorithm,": ",temp[4,2],"\n")
if(nrow(temp) > 4){
cutting <- type_analise <- gsub("[.]"," ",temp[5,1])
cutting <- substr(cutting,1,nchar(cutting)-2)
cat(cutting,": ",temp[5,2],"\n")
}
cat("\nGroups identified in the validation sample and the corresponding groups identified in the test sample.\n\n")
print(valid_list[[min_indexes[i]]])
cat("\nHit rate : ",valid_index[min_indexes[i]])
cat("\n\n")
}
color <- rep("gray",length(valid_list))
color[min_indexes] <- rep("red",length(min_indexes))
names(valid_index) <- results_names
par(mar=c(10,7,2,1)+0.6,mgp=c(5,1,0))
barplot(valid_index,
main="Ratios",
ylab="sum of probabilities",
xlab="",
col = color,
las=2)
| /NBClust_program/compare.R | no_license | FargCart/Profile-Generator-Gui | R | false | false | 4,403 | r | library(openxlsx)
source("ag_bestCombination.R")
# Put the address of the comparison file here
original <- "/home/leandro/R/brooks/data/Metrics.xlsx"
orig_file <- read.xlsx(xlsxFile = original, sheet = "gD2-285")
# Put the directory containing the results of the analyzes here
dir <- "/home/leandro/R/brooks/data/GD2-285/"
orig_groups <- unique(orig_file$cluster)
files <- list.files(path = dir)
results <- list(); parc_results <- list(); index <- 1; results_names <- NULL
for (f in files) {
#f <- files[42]
temp <- read.xlsx(xlsxFile = paste0(dir, f), sheet = "Table metrics")
groups <- unique(temp$Groups)
#cat("Name of file:",f,"\n")
#cat("Total elements:",length(orig_file$cluster),"\n")
score <- matrix(NA, length(groups), length(orig_groups))
colnames(score) <- orig_groups
rownames(score) <- groups
for (orig_class in orig_groups) {
#orig_class <- orig_groups[1]
orig_index <- which(orig_file$cluster == orig_class)
#cat("\n\n")
#cat(orig_class,":\n\n")
for(temp_class in groups){
m_match <- 0; n_match <- 0; match <- 0; mismatch <- 0
#temp_class <- groups[1]
temp_index <- which(temp$Groups == temp_class)
n_match <- intersect(orig_file$mAb[orig_index],
gsub(" ","",temp[temp_index,1]))
temp_size <- length(gsub(" ","",temp[temp_index,1]))
match <- length(n_match)
mismatch <- temp_size - match
m <- length(orig_index)
n <- nrow(orig_file) - m
k <- mismatch + match
#cat(orig_class,"(",length(orig_index),") -", temp_class,"(",match+mismatch,") -> match:", match, "; mismatch:", mismatch,"\n")
#cat("dhyper(",match,",",m,",",n,",", k,"):")
if(match > 0){ prob <- dhyper(match, m, n, k) }
else{ prob <- 1 }
#cat(prob,"\n\n")
score[temp_class,orig_class] <- prob
#m <- 10; n <- 7; k <- 8
#x <- 0:(k+1)
#browser()
}
}
best_score <- ag_bestCombination(score,200,10)
results[[index]] <- best_score
results_names <- c(results_names, f)
parc_results[[index]] <- score
index <- index + 1
#browser()
}
names(results) <- results_names
names(parc_results) <- results_names
valid_list <- list(); valid_index <- NULL
for(i in 1:length(results)){
sel_result <- results[[i]]
sel_parc_result <- parc_results[[i]]
temp <- NULL; group_teste <- NULL; group_valid <- NULL
for(j in 1:ncol(sel_result)){
x <- sel_result[1,j]; y <- sel_result[2,j]
temp[j] <- sel_parc_result[x,y]
group_teste[j] <- rownames(sel_parc_result)[x]
group_valid[j] <- colnames(sel_parc_result)[y]
}
valid_list[[i]] <- cbind(group_valid, group_teste)
valid_index[i] <- sum(temp) + (length(orig_groups) - nrow(valid_list[[i]]))
#valid_index[i] <- prod(temp) + (length(orig_groups) - nrow(valid_list[[i]]))
}
names(valid_list) <- results_names
min_indexes <- which(valid_index == min(valid_index))
cat("## Best strategies identified:\n\n")
for(i in 1:length(min_indexes)){
f <- files[min_indexes[i]]
cat("## Name of file:",f,"\n")
temp <- read.xlsx(xlsxFile = paste0(dir, f), sheet = "Settings")
apply_clust <- gsub("[.]"," ",temp[1,1])
apply_clust <- substr(apply_clust,1,nchar(apply_clust)-1)
cat(apply_clust,": ",temp[1,2],"\n")
dist_metric <- gsub("[.]"," ",temp[2,1])
dist_metric <- substr(dist_metric,1,nchar(dist_metric)-1)
cat(dist_metric,": ",temp[2,2],"\n")
type_analise <- gsub("[.]"," ",temp[3,1])
type_analise <- substr(type_analise,1,nchar(type_analise)-1)
cat(type_analise,": ",temp[3,2],"\n")
algorithm <- gsub("[.]"," ",temp[4,1])
algorithm <- substr(algorithm,1,nchar(algorithm)-1)
cat(algorithm,": ",temp[4,2],"\n")
if(nrow(temp) > 4){
cutting <- type_analise <- gsub("[.]"," ",temp[5,1])
cutting <- substr(cutting,1,nchar(cutting)-2)
cat(cutting,": ",temp[5,2],"\n")
}
cat("\nGroups identified in the validation sample and the corresponding groups identified in the test sample.\n\n")
print(valid_list[[min_indexes[i]]])
cat("\nHit rate : ",valid_index[min_indexes[i]])
cat("\n\n")
}
color <- rep("gray",length(valid_list))
color[min_indexes] <- rep("red",length(min_indexes))
names(valid_index) <- results_names
par(mar=c(10,7,2,1)+0.6,mgp=c(5,1,0))
barplot(valid_index,
main="Ratios",
ylab="sum of probabilities",
xlab="",
col = color,
las=2)
|
library(devtools)
load_all("FLRcppAdolc")
document("FLRcppAdolc")
#library(FLCore)
#library(FLRcppAdolc)
#***************************************************************************
nzrl <- read.csv("NZRL.csv")
saa <- read.csv("SAA.csv")
nnh <- read.csv("NNH.csv")
#flspCpp(SEXP C_sexp, SEXP I_sexp, SEXP r_sexp, SEXP p_sexp, SEXP k_sexp)
r <- 0.0659
p <- 1
k <- 129000
nz <- flspCpp(nzrl$catch, nzrl$cpue, r, p, k)
nz$ll
r <- 0.328
p <- 1
k <- 239.6
sa <- flspCpp(saa$catch, saa$cpue, r, p, k)
flsp_wrapper <- function(log_params,catch,cpue){
#browser()
r <- exp(log_params["r"])
k <- exp(log_params["k"])
sp <- flspCpp(catch, cpue, r, 1, k)
cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-sp$ll)
}
flsp_wrapper_grad <- function(log_params,catch,cpue){
#browser()
r <- exp(log_params["r"])
k <- exp(log_params["k"])
sp <- flspCpp(catch, cpue, r, 1, k)
#cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-c(sp$ll_grad_r, sp$ll_grad_k))
}
# Without gradient
optim_nz <- optim(log(c(r=0.07,k=100000)),fn=flsp_wrapper,method="BFGS", catch = nzrl$catch, cpue=nzrl$cpue)
# With gradient
optim_nz <- optim(log(c(r=0.07,k=100000)),fn=flsp_wrapper, gr=flsp_wrapper_grad, method="BFGS", catch = nzrl$catch, cpue=nzrl$cpue)
# Nelder-Mead
optim_nz <- optim(log(c(r=0.07,k=300)),fn=flsp_wrapper,method="Nelder-Mead", catch = nzrl$catch/1000, cpue=nzrl$cpue)
optim_sa <- optim(log(c(r=0.3,k=240)),fn=flsp_wrapper,method="Nelder-Mead", catch = saa$catch, cpue=saa$cpue)
optim_sa <- optim(log(c(r=0.3,k=240)),fn=flsp_wrapper,method="BFGS", catch = saa$catch, cpue=saa$cpue)
library(FLCore)
load("om.RData")
om_c <- c(catch(om))
om_i <- c(stock(om))
r <- 0.7
p <- 1
k <- 100000
test <- flspCpp(om_c, om_i, r, p, k)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper,method="Nelder-Mead", catch = om_c, cpue=om_i)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper,method="BFGS", catch = om_c, cpue=om_i)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper, gr=flsp_wrapper_grad, method="BFGS", catch = om_c, cpue=om_i)
nlom <- nlminb(log(c(r=r, k=k)), objective = flsp_wrapper, gradient=flsp_wrapper_grad, catch = om_c, cpue=om_i)
nlom2 <- nlminb(log(c(r=r, k=k)), objective = flsp_wrapper, catch = om_c, cpue=om_i)
supp_fun <- function(pars){
r <- pars[1]
k <- pars[2]
if (r<0.6|| r > 0.8 || k < 1e-5 || k > 1e9)
return(FALSE)
return(TRUE)
}
flsp_run_wrapper <- function(params){
#browser()
r <- (params[1])
k <- (params[2])
sp <- flspCpp(om_c, om_i, r, 1, k)
cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-sp$ll)
}
test <- flspCpp(om_c, om_i, exp(nlom2$par["r"]), 1, exp(nlom2$par["k"]))
exp(nlom$par)
exp(nlom2$par)
library(Rtwalk)
runom <- Runtwalk(5000, dim = 2, Obj = flsp_run_wrapper, x0=c(0.7,100000), xp0=c(0.6,150000), Supp=supp_fun)
| /S2-RunMSE/SPTest/sp.R | no_license | iotcwpm/MSE-Training | R | false | false | 2,862 | r | library(devtools)
load_all("FLRcppAdolc")
document("FLRcppAdolc")
#library(FLCore)
#library(FLRcppAdolc)
#***************************************************************************
nzrl <- read.csv("NZRL.csv")
saa <- read.csv("SAA.csv")
nnh <- read.csv("NNH.csv")
#flspCpp(SEXP C_sexp, SEXP I_sexp, SEXP r_sexp, SEXP p_sexp, SEXP k_sexp)
r <- 0.0659
p <- 1
k <- 129000
nz <- flspCpp(nzrl$catch, nzrl$cpue, r, p, k)
nz$ll
r <- 0.328
p <- 1
k <- 239.6
sa <- flspCpp(saa$catch, saa$cpue, r, p, k)
flsp_wrapper <- function(log_params,catch,cpue){
#browser()
r <- exp(log_params["r"])
k <- exp(log_params["k"])
sp <- flspCpp(catch, cpue, r, 1, k)
cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-sp$ll)
}
flsp_wrapper_grad <- function(log_params,catch,cpue){
#browser()
r <- exp(log_params["r"])
k <- exp(log_params["k"])
sp <- flspCpp(catch, cpue, r, 1, k)
#cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-c(sp$ll_grad_r, sp$ll_grad_k))
}
# Without gradient
optim_nz <- optim(log(c(r=0.07,k=100000)),fn=flsp_wrapper,method="BFGS", catch = nzrl$catch, cpue=nzrl$cpue)
# With gradient
optim_nz <- optim(log(c(r=0.07,k=100000)),fn=flsp_wrapper, gr=flsp_wrapper_grad, method="BFGS", catch = nzrl$catch, cpue=nzrl$cpue)
# Nelder-Mead
optim_nz <- optim(log(c(r=0.07,k=300)),fn=flsp_wrapper,method="Nelder-Mead", catch = nzrl$catch/1000, cpue=nzrl$cpue)
optim_sa <- optim(log(c(r=0.3,k=240)),fn=flsp_wrapper,method="Nelder-Mead", catch = saa$catch, cpue=saa$cpue)
optim_sa <- optim(log(c(r=0.3,k=240)),fn=flsp_wrapper,method="BFGS", catch = saa$catch, cpue=saa$cpue)
library(FLCore)
load("om.RData")
om_c <- c(catch(om))
om_i <- c(stock(om))
r <- 0.7
p <- 1
k <- 100000
test <- flspCpp(om_c, om_i, r, p, k)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper,method="Nelder-Mead", catch = om_c, cpue=om_i)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper,method="BFGS", catch = om_c, cpue=om_i)
optim_om <- optim(log(c(r=r,k=k)),fn=flsp_wrapper, gr=flsp_wrapper_grad, method="BFGS", catch = om_c, cpue=om_i)
nlom <- nlminb(log(c(r=r, k=k)), objective = flsp_wrapper, gradient=flsp_wrapper_grad, catch = om_c, cpue=om_i)
nlom2 <- nlminb(log(c(r=r, k=k)), objective = flsp_wrapper, catch = om_c, cpue=om_i)
supp_fun <- function(pars){
r <- pars[1]
k <- pars[2]
if (r<0.6|| r > 0.8 || k < 1e-5 || k > 1e9)
return(FALSE)
return(TRUE)
}
flsp_run_wrapper <- function(params){
#browser()
r <- (params[1])
k <- (params[2])
sp <- flspCpp(om_c, om_i, r, 1, k)
cat("r: ", r, " k: ", k, "ll: ", sp$ll, "\n")
return(-sp$ll)
}
test <- flspCpp(om_c, om_i, exp(nlom2$par["r"]), 1, exp(nlom2$par["k"]))
exp(nlom$par)
exp(nlom2$par)
library(Rtwalk)
runom <- Runtwalk(5000, dim = 2, Obj = flsp_run_wrapper, x0=c(0.7,100000), xp0=c(0.6,150000), Supp=supp_fun)
|
library(shiny)
ui <- fluidPage(
sliderInput(inputId = "num",
label = "choose a number",
value = 25, min = 1, max = 100),
plotOutput("hist")
)
server <- function(input, output) {}
shinyApp(ui = ui, server = server) | /shiny template.R | no_license | dynastang/shinyTemplate | R | false | false | 261 | r | library(shiny)
ui <- fluidPage(
sliderInput(inputId = "num",
label = "choose a number",
value = 25, min = 1, max = 100),
plotOutput("hist")
)
server <- function(input, output) {}
shinyApp(ui = ui, server = server) |
dat<- read.table("exdata_data_household_power_consumption\\household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## change format string to date
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
dat <- dat[complete.cases(dat),]
## filter data between correct date
dat <- subset(dat,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
dateTime <- paste(dat$Date, dat$Time)
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
dat<- dat[ ,!(names(dat) %in% c("Date","Time"))]
## Add DateTime column
dat <- cbind(dateTime, dat)
## Format dateTime Column
dat$dateTime <- as.POSIXct(dateTime)
plot(dat$Sub_metering_1~dat$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
lines(dat$Sub_metering_3~dat$dateTime,col='Blue')
lines(dat$Sub_metering_2~dat$dateTime,col='Red')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
| /plot3.R | no_license | fredyvel/ExData_Plotting1 | R | false | false | 1,173 | r |
dat<- read.table("exdata_data_household_power_consumption\\household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## change format string to date
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
dat <- dat[complete.cases(dat),]
## filter data between correct date
dat <- subset(dat,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
dateTime <- paste(dat$Date, dat$Time)
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
dat<- dat[ ,!(names(dat) %in% c("Date","Time"))]
## Add DateTime column
dat <- cbind(dateTime, dat)
## Format dateTime Column
dat$dateTime <- as.POSIXct(dateTime)
plot(dat$Sub_metering_1~dat$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
lines(dat$Sub_metering_3~dat$dateTime,col='Blue')
lines(dat$Sub_metering_2~dat$dateTime,col='Red')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
|
\name{plot.compgraph}
\alias{plot.compgraph}
\title{plot.compgraph}
\usage{
plot.compgraph(G, hilite.cbtime = FALSE,
suppress.vertex.labels = FALSE, ...)
}
\arguments{
\item{G}{A composite graph object}
\item{hilite.cbtime}{A boolean value}
\item{...}{Parameters to be passed to plot.igraph for g1
and g2}
}
\description{
Plot a composite graph
}
\details{
Plot a composite graph
}
\examples{
plot(ercg(20, 0.5))
}
| /man/plot.compgraph.Rd | no_license | beanumber/compgraph | R | false | false | 436 | rd | \name{plot.compgraph}
\alias{plot.compgraph}
\title{plot.compgraph}
\usage{
plot.compgraph(G, hilite.cbtime = FALSE,
suppress.vertex.labels = FALSE, ...)
}
\arguments{
\item{G}{A composite graph object}
\item{hilite.cbtime}{A boolean value}
\item{...}{Parameters to be passed to plot.igraph for g1
and g2}
}
\description{
Plot a composite graph
}
\details{
Plot a composite graph
}
\examples{
plot(ercg(20, 0.5))
}
|
## ----check_packages, echo=FALSE, messages=FALSE, warning=FALSE-----------
required <- c("raster", "rgdal", "rgeos", "sp")
if (!all(unlist(lapply(required, function(pkg) requireNamespace(pkg, quietly = TRUE)))))
knitr::opts_chunk$set(eval = FALSE, collapse = TRUE, comment = "#>", fig.align = "center", fig.width = 5, fig.height = 5)
library(sp)
## ---- eval=FALSE, message=FALSE------------------------------------------
# library(raster)
# RP0 <- getData(country = "Philippines", level = 0)
# RP1 <- getData(country = "Philippines", level = 1)
## ---- eval=FALSE---------------------------------------------------------
# Central_Luzon <- RP1[RP1@data$NAME_1 == "Pampanga" |
# RP1@data$NAME_1 == "Tarlac" |
# RP1@data$NAME_1 == "Pangasinan" |
# RP1@data$NAME_1 == "La Union" |
# RP1@data$NAME_1 == "Nueva Ecija" |
# RP1@data$NAME_1 == "Bulacan", ]
## ---- eval=FALSE---------------------------------------------------------
# library(rgeos)
# RP0 <- gSimplify(RP0, tol = 0.05)
## ---- eval=FALSE---------------------------------------------------------
# library(ggplot2)
# library(grid)
# library(gridExtra)
# library(sp)
#
# # get center coordinates of provinces in Central Luzon
# CL_names <- data.frame(coordinates(Central_Luzon))
#
# # this is then used to label the procinces on the map
# CL_names$label <- Central_Luzon@data$NAME_1
#
# # Main map
# p1 <- ggplot() +
# geom_polygon(data = Central_Luzon,
# aes(x = long,
# y = lat,
# group = group),
# colour = "grey10",
# fill = "#fff7bc") +
# geom_text(data = CL_names, aes(x = X1,
# y = X2,
# label = label),
# size = 2,
# colour = "grey20") +
# theme(axis.text.y = element_text(angle = 90,
# hjust = 0.5)) +
# ggtitle("Central Luzon Provinces Surveyed") +
# theme_bw() +
# xlab("Longitude") +
# ylab("Latitude") +
# coord_map()
#
# # Inset map
# p2 <- ggplot() +
# geom_polygon(data = RP0, aes(long, lat, group = group),
# colour = "grey10",
# fill = "#fff7bc") +
# coord_equal() +
# theme_bw() +
# labs(x = NULL, y = NULL) +
# geom_rect(aes(xmin = extent(Central_Luzon)[1],
# xmax = extent(Central_Luzon)[2],
# ymin = extent(Central_Luzon)[3],
# ymax = extent(Central_Luzon)[4]),
# alpha = 0,
# colour = "red",
# size = 0.7,
# linetype = 1) +
# theme(axis.text.x = element_blank(),
# axis.text.y = element_blank(),
# axis.ticks = element_blank(),
# axis.title.x = element_blank(),
# axis.title.y = element_blank(),
# plot.margin = unit(c(0, 0, 0 ,0), "mm"))
#
# grid.newpage()
# # plot area for the main map
# v1 <- viewport(width = 1, height = 1, x = 0.5, y = 0.5)
#
# # plot area for the inset map
# v2 <- viewport(width = 0.28, height = 0.28, x = 0.67, y = 0.79)
#
# # print the map object
# print(p1, vp = v1)
# print(p2, vp = v2)
## ---- eval=FALSE---------------------------------------------------------
# library(GSODR)
#
# # load the station metadata file from GSODR (this loads `isd_history` in your
# # R sesion)
# load(system.file("extdata", "isd_history.rda", package = "GSODR"))
#
# isd_history <- as.data.frame(isd_history)
#
# # convert to a spatial object to find stations within the states
# coordinates(isd_history) <- ~ LON + LAT
# proj4string(isd_history) <- proj4string(Central_Luzon)
#
# # what are the coordinates? We use the row numbers from this to match the
# # `stations` data.frame
# station_coords <- coordinates(isd_history[Central_Luzon, ])
#
# # get row numbers as an object
# rows <- as.numeric(row.names(station_coords))
#
# # create a data frame of only the stations which rows have been identified
# loop_stations <- as.data.frame(isd_history)[rows, ]
#
# # subset stations that match our criteria for years
# loop_stations <- loop_stations[loop_stations$BEGIN <= 19600101 &
# loop_stations$END >= 20151231, ]
#
# print(loop_stations[, c(1:2, 3, 7:12)])
## ---- station_locations, eval=FALSE--------------------------------------
# p1 +
# geom_point(data = loop_stations,
# aes(x = LON,
# y = LAT),
# size = 2) +
# geom_text(data = loop_stations,
# aes(x = LON,
# y = LAT,
# label = STN_NAME),
# alpha = 0.6,
# size = 2,
# position = position_nudge(0.1, -0.05)) +
# ggtitle("Station locations")
## ---- eval=FALSE---------------------------------------------------------
# PHL <- get_GSOD(station = loop_stations[, 12], years = 1960:2015)
## ---- eval=FALSE---------------------------------------------------------
# years <- 1960:2015
#
# loop_stations <- eval(parse(text = loop_stations[, 12]))
#
# # create file list
# loop_stations <- do.call(
# paste0, c(expand.grid(loop_stations, "-", years, ".op.gz"))
# )
#
# local_files <- list.files(path = "./GSOD", full.names = TRUE, recursive = TRUE)
# local_files <- local_files[basename(local_files) %in% loop_stations]
#
# loop_data <- reformat_GSOD(file_list = local_files)
#
# readr::write_csv(loop_data, path = "Loop_Survey_Weather_1960-2015", path = "./")
## ----cleanup GADM files, eval=TRUE, echo=FALSE, message=FALSE------------
unlink("GADM_2.8_PHL_adm0.rds")
unlink("GADM_2.8_PHL_adm1.rds")
| /data/genthat_extracted_code/GSODR/vignettes/use_case_1.R | no_license | surayaaramli/typeRrh | R | false | false | 5,788 | r | ## ----check_packages, echo=FALSE, messages=FALSE, warning=FALSE-----------
required <- c("raster", "rgdal", "rgeos", "sp")
if (!all(unlist(lapply(required, function(pkg) requireNamespace(pkg, quietly = TRUE)))))
knitr::opts_chunk$set(eval = FALSE, collapse = TRUE, comment = "#>", fig.align = "center", fig.width = 5, fig.height = 5)
library(sp)
## ---- eval=FALSE, message=FALSE------------------------------------------
# library(raster)
# RP0 <- getData(country = "Philippines", level = 0)
# RP1 <- getData(country = "Philippines", level = 1)
## ---- eval=FALSE---------------------------------------------------------
# Central_Luzon <- RP1[RP1@data$NAME_1 == "Pampanga" |
# RP1@data$NAME_1 == "Tarlac" |
# RP1@data$NAME_1 == "Pangasinan" |
# RP1@data$NAME_1 == "La Union" |
# RP1@data$NAME_1 == "Nueva Ecija" |
# RP1@data$NAME_1 == "Bulacan", ]
## ---- eval=FALSE---------------------------------------------------------
# library(rgeos)
# RP0 <- gSimplify(RP0, tol = 0.05)
## ---- eval=FALSE---------------------------------------------------------
# library(ggplot2)
# library(grid)
# library(gridExtra)
# library(sp)
#
# # get center coordinates of provinces in Central Luzon
# CL_names <- data.frame(coordinates(Central_Luzon))
#
# # this is then used to label the procinces on the map
# CL_names$label <- Central_Luzon@data$NAME_1
#
# # Main map
# p1 <- ggplot() +
# geom_polygon(data = Central_Luzon,
# aes(x = long,
# y = lat,
# group = group),
# colour = "grey10",
# fill = "#fff7bc") +
# geom_text(data = CL_names, aes(x = X1,
# y = X2,
# label = label),
# size = 2,
# colour = "grey20") +
# theme(axis.text.y = element_text(angle = 90,
# hjust = 0.5)) +
# ggtitle("Central Luzon Provinces Surveyed") +
# theme_bw() +
# xlab("Longitude") +
# ylab("Latitude") +
# coord_map()
#
# # Inset map
# p2 <- ggplot() +
# geom_polygon(data = RP0, aes(long, lat, group = group),
# colour = "grey10",
# fill = "#fff7bc") +
# coord_equal() +
# theme_bw() +
# labs(x = NULL, y = NULL) +
# geom_rect(aes(xmin = extent(Central_Luzon)[1],
# xmax = extent(Central_Luzon)[2],
# ymin = extent(Central_Luzon)[3],
# ymax = extent(Central_Luzon)[4]),
# alpha = 0,
# colour = "red",
# size = 0.7,
# linetype = 1) +
# theme(axis.text.x = element_blank(),
# axis.text.y = element_blank(),
# axis.ticks = element_blank(),
# axis.title.x = element_blank(),
# axis.title.y = element_blank(),
# plot.margin = unit(c(0, 0, 0 ,0), "mm"))
#
# grid.newpage()
# # plot area for the main map
# v1 <- viewport(width = 1, height = 1, x = 0.5, y = 0.5)
#
# # plot area for the inset map
# v2 <- viewport(width = 0.28, height = 0.28, x = 0.67, y = 0.79)
#
# # print the map object
# print(p1, vp = v1)
# print(p2, vp = v2)
## ---- eval=FALSE---------------------------------------------------------
# library(GSODR)
#
# # load the station metadata file from GSODR (this loads `isd_history` in your
# # R sesion)
# load(system.file("extdata", "isd_history.rda", package = "GSODR"))
#
# isd_history <- as.data.frame(isd_history)
#
# # convert to a spatial object to find stations within the states
# coordinates(isd_history) <- ~ LON + LAT
# proj4string(isd_history) <- proj4string(Central_Luzon)
#
# # what are the coordinates? We use the row numbers from this to match the
# # `stations` data.frame
# station_coords <- coordinates(isd_history[Central_Luzon, ])
#
# # get row numbers as an object
# rows <- as.numeric(row.names(station_coords))
#
# # create a data frame of only the stations which rows have been identified
# loop_stations <- as.data.frame(isd_history)[rows, ]
#
# # subset stations that match our criteria for years
# loop_stations <- loop_stations[loop_stations$BEGIN <= 19600101 &
# loop_stations$END >= 20151231, ]
#
# print(loop_stations[, c(1:2, 3, 7:12)])
## ---- station_locations, eval=FALSE--------------------------------------
# p1 +
# geom_point(data = loop_stations,
# aes(x = LON,
# y = LAT),
# size = 2) +
# geom_text(data = loop_stations,
# aes(x = LON,
# y = LAT,
# label = STN_NAME),
# alpha = 0.6,
# size = 2,
# position = position_nudge(0.1, -0.05)) +
# ggtitle("Station locations")
## ---- eval=FALSE---------------------------------------------------------
# PHL <- get_GSOD(station = loop_stations[, 12], years = 1960:2015)
## ---- eval=FALSE---------------------------------------------------------
# years <- 1960:2015
#
# loop_stations <- eval(parse(text = loop_stations[, 12]))
#
# # create file list
# loop_stations <- do.call(
# paste0, c(expand.grid(loop_stations, "-", years, ".op.gz"))
# )
#
# local_files <- list.files(path = "./GSOD", full.names = TRUE, recursive = TRUE)
# local_files <- local_files[basename(local_files) %in% loop_stations]
#
# loop_data <- reformat_GSOD(file_list = local_files)
#
# readr::write_csv(loop_data, path = "Loop_Survey_Weather_1960-2015", path = "./")
## ----cleanup GADM files, eval=TRUE, echo=FALSE, message=FALSE------------
unlink("GADM_2.8_PHL_adm0.rds")
unlink("GADM_2.8_PHL_adm1.rds")
|
#' Subset columns in a \code{\link{taxmap}} object
#'
#' Subsets \code{taxon_data} columns in a \code{\link{taxmap}} object. Takes and returns a
#' \code{\link{taxmap}} object. Any column name that appears in \code{taxon_data(.data)} can be
#' used as if it was a vector on its own. See \code{\link[dplyr]{select}} for more information.
#'
#' @param .data \code{\link{taxmap}}
#' @param ... One or more column names to return in the new object. This can be one of three things:
#' \describe{ \item{\code{expression with unquoted column name}}{The name of a column in
#' \code{taxon_data} typed as if it was a varaible on its own.} \item{\code{numeric}}{Indexes of
#' columns in \code{taxon_data}} } To match column names with a character vector, use
#' \code{matches("my_col_name")}. To match a logical vector, convert it to a column index using
#' \code{\link{which}}.
#'
#' @return An object of type \code{\link{taxmap}}
#'
#' @family dplyr-like functions
#'
#' @examples
#' # subset taxon columns
#' select_taxa(unite_ex_data_3, name)
#'
#' @export
select_taxa <- function(.data, ...) {
.data$taxon_data <- dplyr::bind_cols(.data$taxon_data[ , c("taxon_ids", "supertaxon_ids"), drop = FALSE],
dplyr::select(.data$taxon_data, ...))
return(.data)
}
#' Subset columns in a \code{\link{taxmap}} object
#'
#' Subsets \code{obs_data} columns in a \code{\link{taxmap}} object. Takes and returns a
#' \code{\link{taxmap}} object. Any column name that appears in \code{obs_data(.data)} can be
#' used as if it was a vector on its own. See \code{\link[dplyr]{select}} for more information.
#'
#' @param .data \code{\link{taxmap}}
#' @param ... One or more column names to return in the new object. This can be one of three things:
#' \describe{ \item{\code{expression with unquoted column name}}{The name of a column in
#' \code{taxon_data} typed as if it was a varaible on its own.} \item{\code{numeric}}{Indexes of
#' columns in \code{taxon_data}} } To match column names with a character vector, use
#' \code{matches("my_col_name")}. To match a logical vector, convert it to a column index using
#' \code{\link{which}}.
#'
#' @return An object of type \code{\link{taxmap}}
#'
#' @family dplyr-like functions
#'
#' @examples
#' # subset observation columns
#' select_obs(unite_ex_data_3, other_id, seq_id)
#'
#' @export
select_obs <- function(.data, ...) {
.data$obs_data <- dplyr::bind_cols(.data$obs_data[ , c("obs_taxon_ids"), drop = FALSE],
dplyr::select(.data$obs_data, ...))
return(.data)
} | /R/taxmap--select.R | permissive | seninp/metacoder | R | false | false | 2,624 | r | #' Subset columns in a \code{\link{taxmap}} object
#'
#' Subsets \code{taxon_data} columns in a \code{\link{taxmap}} object. Takes and returns a
#' \code{\link{taxmap}} object. Any column name that appears in \code{taxon_data(.data)} can be
#' used as if it was a vector on its own. See \code{\link[dplyr]{select}} for more information.
#'
#' @param .data \code{\link{taxmap}}
#' @param ... One or more column names to return in the new object. This can be one of three things:
#' \describe{ \item{\code{expression with unquoted column name}}{The name of a column in
#' \code{taxon_data} typed as if it was a varaible on its own.} \item{\code{numeric}}{Indexes of
#' columns in \code{taxon_data}} } To match column names with a character vector, use
#' \code{matches("my_col_name")}. To match a logical vector, convert it to a column index using
#' \code{\link{which}}.
#'
#' @return An object of type \code{\link{taxmap}}
#'
#' @family dplyr-like functions
#'
#' @examples
#' # subset taxon columns
#' select_taxa(unite_ex_data_3, name)
#'
#' @export
select_taxa <- function(.data, ...) {
.data$taxon_data <- dplyr::bind_cols(.data$taxon_data[ , c("taxon_ids", "supertaxon_ids"), drop = FALSE],
dplyr::select(.data$taxon_data, ...))
return(.data)
}
#' Subset columns in a \code{\link{taxmap}} object
#'
#' Subsets \code{obs_data} columns in a \code{\link{taxmap}} object. Takes and returns a
#' \code{\link{taxmap}} object. Any column name that appears in \code{obs_data(.data)} can be
#' used as if it was a vector on its own. See \code{\link[dplyr]{select}} for more information.
#'
#' @param .data \code{\link{taxmap}}
#' @param ... One or more column names to return in the new object. This can be one of three things:
#' \describe{ \item{\code{expression with unquoted column name}}{The name of a column in
#' \code{taxon_data} typed as if it was a varaible on its own.} \item{\code{numeric}}{Indexes of
#' columns in \code{taxon_data}} } To match column names with a character vector, use
#' \code{matches("my_col_name")}. To match a logical vector, convert it to a column index using
#' \code{\link{which}}.
#'
#' @return An object of type \code{\link{taxmap}}
#'
#' @family dplyr-like functions
#'
#' @examples
#' # subset observation columns
#' select_obs(unite_ex_data_3, other_id, seq_id)
#'
#' @export
select_obs <- function(.data, ...) {
.data$obs_data <- dplyr::bind_cols(.data$obs_data[ , c("obs_taxon_ids"), drop = FALSE],
dplyr::select(.data$obs_data, ...))
return(.data)
} |
library(ggplot2)
library(tikzDevice)
library(grid)
#read data from Edmondo
y.prices<-read.csv("e_world_yearly_prices.csv")
years<-seq(1992,1992+nrow(y.prices)-1)
#plot(years+0.5,y.prices$WorldPrice,type="l")
#read data from FAO
data<-read.csv("wheat_prices.csv",sep=";",na.strings="--")
types<-levels(data$info)
type<-1
data.type<-data[which(as.character(data$info)==types[type]),]
first.year<-as.numeric(substr(as.character(data.type$year[1]),1,4))
price.ts<-as.numeric(t(data.type[,3:14]))
month.ts<-first.year+1/24+(5+seq(1,length(price.ts)))/12
#read data from indexmundi
oil.wheat.data<-read.csv("crude_oil_wheat_indexmundi.csv")
month.number<-(4+seq(1,nrow(oil.wheat.data)))%%12
month.number[which(month.number==0)]<-12
month.addendum<-(month.number)/12-1/24
time<-oil.wheat.data[,2]+month.addendum
start.year<-1991
end.year<-2013
later.than.start<-which(time>start.year)
before.than.end<-which(time<(end.year+1))
wheat.growing.time.in.months<-6
start.position<-later.than.start[1]-wheat.growing.time.in.months
end.position<-before.than.end[length(before.than.end)]-wheat.growing.time.in.months
#plot(oil.wheat.data[,2]+month.addendum,0.4*(oil.wheat.data[,3]/oil.wheat.data[1,3]-1),type="l")
#lines(oil.wheat.data[,2]+month.addendum,(oil.wheat.data[,4]/oil.wheat.data[1,4]-1),col=2)
tofileflag<-FALSE
#tofileflag<-TRUE
filename<-"fig_prova"
fileextention<-".fig"
completefilename<-paste(filename,fileextention,sep="")
#if(tofileflag){
#xfig(file=completefilename,width=6.0,height=5.0)
#}
#plot(years+0.5,y.prices$WorldPriceWeighted,xlab="dollars",ylab="time",type="l",ylim=c(0,450))
#lines(month.ts,price.ts,col=2)
#lines(oil.wheat.data[,2]+month.addendum,oil.wheat.data[,4],col=3)
#lines(oil.wheat.data[,2]+month.addendum,oil.wheat.data[,3],col=4)
#lines(c(2008.3,2008.3),c(0,450))
#grid()
toplot<-data.frame(time=years+0.5,wpw=y.prices$WorldPriceWeighted)
oil.data<-data.frame(time=oil.wheat.data[,2]+month.addendum,oil=oil.wheat.data[,3])
usa.wheat.data<-data.frame(time=month.ts,wheat=price.ts)
#forylim<-c(smoothed_mean1$y,smoothed_mean2$y,smoothed_mean3$y)
#a<-ggplot(data=toplot)+theme_bw()+coord_cartesian(xlim=c(1,2550),ylim=c(min(forylim)*0.99,max(forylim)*1.01))+labs(y="n. defualts")+theme(panel.border = element_rect(color="black",size=1),panel.grid.major=element_line(colour="black",linetype="dashed"))
a<-ggplot(data=toplot)+theme_bw()+coord_cartesian(xlim=c(1993,2013),ylim=c(0,450))+labs(y="price US \\$")+theme(panel.border = element_rect(color="black",size=1),panel.grid.major=element_line(colour="black",linetype="dotted"))
a<-a+geom_line(aes(x=time,y=wpw),col="red")
a<-a+geom_text(aes(x=time,y=wpw), label="$\\clubsuit$",size=3,colour="red", data=subset(toplot, (toplot$time+0.5) %% 5 == 1))
a<-a+geom_line(data=oil.data,aes(x=time,y=oil),col="blue")
a<-a+geom_text(aes(x=time,y=oil), label="$\\spadesuit$",size=3,colour="blue", data=subset(oil.data, seq(36,36+length(oil.data$time)) %% 60 ==1 ))
a<-a+geom_line(data=usa.wheat.data,aes(x=time,y=wheat),col="green4")
a<-a+geom_text(aes(x=time,y=wheat), label="$\\bullet$",size=5,colour="green4", data=subset(usa.wheat.data, seq(44,44+length(oil.data$time)) %% 60 ==1 ))
#create white area for legenda
a<-a+annotate("rect",xmin=1992,xmax=2006,ymin=340,ymax=470,fill="white")
#legenda
a<-a+annotate("text",x=1993,y=450,label="$\\bullet$",colour="green4",size=5,hjust=0)+annotate("text",x=1994,y=450,label="wheat \\tiny{USA, monthly, metric ton}",colour="green4",size=4,hjust=0)
a<-a+annotate("text",x=1993,y=410,label="$\\clubsuit$",colour="red",size=3,hjust=0)+annotate("text",x=1994,y=410,label="wheat \\tiny{average, annual, metric ton}",colour="red",size=4,hjust=0)
a<-a+annotate("text",x=1993,y=370,label="$\\spadesuit$",colour="blue",size=3,hjust=0)+annotate("text",x=1994,y=370,label="crude oil \\tiny{average, monthly, barrel}",colour="blue",size=4,hjust=0)
#a<-a+geom_ribbon(aes(x=time,ymin=miny3,ymax=maxy3),alpha=0.3,fill="red")
#a<-a+geom_text(aes(x=time,y=miny3), label="$\\clubsuit$",alpha=0.3,size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy3), label="$\\clubsuit$",alpha=0.3,size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_ribbon(aes(x=time,ymin=miny2,ymax=maxy2),alpha=0.3,fill="blue")
#a<-a+geom_text(aes(x=time,y=miny2), label="$\\spadesuit$",alpha=0.3,size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy2), label="$\\spadesuit$",alpha=0.3,size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_ribbon(aes(x=time,ymin=miny1,ymax=maxy1),alpha=0.3,fill="green4")
#a<-a+geom_text(aes(x=time,y=miny1), label="$\\bullet$",alpha=0.3,size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy1), label="$\\bullet$",alpha=0.3,size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy3), label="$\\clubsuit$",size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy2), label="$\\spadesuit$",size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy1), label="$\\bullet$",size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+annotate("rect",xmin=10,xmax=2100,ymin=2.05,ymax=2.25,fill="white")
#a<-a+annotate("text",x=50,y=2.16,label="$\\eta=0.13$, $\\theta=0.05$ ",size=4,hjust=0)
#a<-a+annotate("text",x=50,y=2.1,label="$\\bullet$",colour="green4",size=5,hjust=0)+annotate("text",x=150,y=2.1,label="low $A$",colour="green4",size=4,hjust=0)
#a<-a+annotate("text",x=750,y=2.1,label="$\\spadesuit$",colour="blue",size=3,hjust=0)+annotate("text",x=850,y=2.1,label="mid $A$",colour="blue",size=4,hjust=0)
#a<-a+annotate("text",x=1450,y=2.1,label="$\\clubsuit$",colour="red",size=3,hjust=0)+annotate("text",x=1550,y=2.1,label="high $A$",colour="red",size=4,hjust=0)
#figFileNames<-readLines("figFileNames.txt")
#thisFig<-3
#fileName<-figFileNames[thisFig]
fileName<-"fig_observed_prices.tex"
toFileFlag<-T
if(toFileFlag){
tikz(fileName,width=4,height=3,standAlone=T)
}
plot(a)
if(toFileFlag){
dev.off()
system(paste("pdflatex",fileName));
}
| /scripts/plot/r_plot_observed_prices.R | no_license | gfgprojects/cms_wheat | R | false | false | 6,192 | r | library(ggplot2)
library(tikzDevice)
library(grid)
#read data from Edmondo
y.prices<-read.csv("e_world_yearly_prices.csv")
years<-seq(1992,1992+nrow(y.prices)-1)
#plot(years+0.5,y.prices$WorldPrice,type="l")
#read data from FAO
data<-read.csv("wheat_prices.csv",sep=";",na.strings="--")
types<-levels(data$info)
type<-1
data.type<-data[which(as.character(data$info)==types[type]),]
first.year<-as.numeric(substr(as.character(data.type$year[1]),1,4))
price.ts<-as.numeric(t(data.type[,3:14]))
month.ts<-first.year+1/24+(5+seq(1,length(price.ts)))/12
#read data from indexmundi
oil.wheat.data<-read.csv("crude_oil_wheat_indexmundi.csv")
month.number<-(4+seq(1,nrow(oil.wheat.data)))%%12
month.number[which(month.number==0)]<-12
month.addendum<-(month.number)/12-1/24
time<-oil.wheat.data[,2]+month.addendum
start.year<-1991
end.year<-2013
later.than.start<-which(time>start.year)
before.than.end<-which(time<(end.year+1))
wheat.growing.time.in.months<-6
start.position<-later.than.start[1]-wheat.growing.time.in.months
end.position<-before.than.end[length(before.than.end)]-wheat.growing.time.in.months
#plot(oil.wheat.data[,2]+month.addendum,0.4*(oil.wheat.data[,3]/oil.wheat.data[1,3]-1),type="l")
#lines(oil.wheat.data[,2]+month.addendum,(oil.wheat.data[,4]/oil.wheat.data[1,4]-1),col=2)
tofileflag<-FALSE
#tofileflag<-TRUE
filename<-"fig_prova"
fileextention<-".fig"
completefilename<-paste(filename,fileextention,sep="")
#if(tofileflag){
#xfig(file=completefilename,width=6.0,height=5.0)
#}
#plot(years+0.5,y.prices$WorldPriceWeighted,xlab="dollars",ylab="time",type="l",ylim=c(0,450))
#lines(month.ts,price.ts,col=2)
#lines(oil.wheat.data[,2]+month.addendum,oil.wheat.data[,4],col=3)
#lines(oil.wheat.data[,2]+month.addendum,oil.wheat.data[,3],col=4)
#lines(c(2008.3,2008.3),c(0,450))
#grid()
toplot<-data.frame(time=years+0.5,wpw=y.prices$WorldPriceWeighted)
oil.data<-data.frame(time=oil.wheat.data[,2]+month.addendum,oil=oil.wheat.data[,3])
usa.wheat.data<-data.frame(time=month.ts,wheat=price.ts)
#forylim<-c(smoothed_mean1$y,smoothed_mean2$y,smoothed_mean3$y)
#a<-ggplot(data=toplot)+theme_bw()+coord_cartesian(xlim=c(1,2550),ylim=c(min(forylim)*0.99,max(forylim)*1.01))+labs(y="n. defualts")+theme(panel.border = element_rect(color="black",size=1),panel.grid.major=element_line(colour="black",linetype="dashed"))
a<-ggplot(data=toplot)+theme_bw()+coord_cartesian(xlim=c(1993,2013),ylim=c(0,450))+labs(y="price US \\$")+theme(panel.border = element_rect(color="black",size=1),panel.grid.major=element_line(colour="black",linetype="dotted"))
a<-a+geom_line(aes(x=time,y=wpw),col="red")
a<-a+geom_text(aes(x=time,y=wpw), label="$\\clubsuit$",size=3,colour="red", data=subset(toplot, (toplot$time+0.5) %% 5 == 1))
a<-a+geom_line(data=oil.data,aes(x=time,y=oil),col="blue")
a<-a+geom_text(aes(x=time,y=oil), label="$\\spadesuit$",size=3,colour="blue", data=subset(oil.data, seq(36,36+length(oil.data$time)) %% 60 ==1 ))
a<-a+geom_line(data=usa.wheat.data,aes(x=time,y=wheat),col="green4")
a<-a+geom_text(aes(x=time,y=wheat), label="$\\bullet$",size=5,colour="green4", data=subset(usa.wheat.data, seq(44,44+length(oil.data$time)) %% 60 ==1 ))
#create white area for legenda
a<-a+annotate("rect",xmin=1992,xmax=2006,ymin=340,ymax=470,fill="white")
#legenda
a<-a+annotate("text",x=1993,y=450,label="$\\bullet$",colour="green4",size=5,hjust=0)+annotate("text",x=1994,y=450,label="wheat \\tiny{USA, monthly, metric ton}",colour="green4",size=4,hjust=0)
a<-a+annotate("text",x=1993,y=410,label="$\\clubsuit$",colour="red",size=3,hjust=0)+annotate("text",x=1994,y=410,label="wheat \\tiny{average, annual, metric ton}",colour="red",size=4,hjust=0)
a<-a+annotate("text",x=1993,y=370,label="$\\spadesuit$",colour="blue",size=3,hjust=0)+annotate("text",x=1994,y=370,label="crude oil \\tiny{average, monthly, barrel}",colour="blue",size=4,hjust=0)
#a<-a+geom_ribbon(aes(x=time,ymin=miny3,ymax=maxy3),alpha=0.3,fill="red")
#a<-a+geom_text(aes(x=time,y=miny3), label="$\\clubsuit$",alpha=0.3,size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy3), label="$\\clubsuit$",alpha=0.3,size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_ribbon(aes(x=time,ymin=miny2,ymax=maxy2),alpha=0.3,fill="blue")
#a<-a+geom_text(aes(x=time,y=miny2), label="$\\spadesuit$",alpha=0.3,size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy2), label="$\\spadesuit$",alpha=0.3,size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_ribbon(aes(x=time,ymin=miny1,ymax=maxy1),alpha=0.3,fill="green4")
#a<-a+geom_text(aes(x=time,y=miny1), label="$\\bullet$",alpha=0.3,size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=maxy1), label="$\\bullet$",alpha=0.3,size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy3), label="$\\clubsuit$",size=3,colour="red", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy2), label="$\\spadesuit$",size=3,colour="blue", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+geom_text(aes(x=time,y=avy1), label="$\\bullet$",size=5,colour="green4", data=subset(toplot, (time+250) %% 500 == 1))
#a<-a+annotate("rect",xmin=10,xmax=2100,ymin=2.05,ymax=2.25,fill="white")
#a<-a+annotate("text",x=50,y=2.16,label="$\\eta=0.13$, $\\theta=0.05$ ",size=4,hjust=0)
#a<-a+annotate("text",x=50,y=2.1,label="$\\bullet$",colour="green4",size=5,hjust=0)+annotate("text",x=150,y=2.1,label="low $A$",colour="green4",size=4,hjust=0)
#a<-a+annotate("text",x=750,y=2.1,label="$\\spadesuit$",colour="blue",size=3,hjust=0)+annotate("text",x=850,y=2.1,label="mid $A$",colour="blue",size=4,hjust=0)
#a<-a+annotate("text",x=1450,y=2.1,label="$\\clubsuit$",colour="red",size=3,hjust=0)+annotate("text",x=1550,y=2.1,label="high $A$",colour="red",size=4,hjust=0)
#figFileNames<-readLines("figFileNames.txt")
#thisFig<-3
#fileName<-figFileNames[thisFig]
fileName<-"fig_observed_prices.tex"
toFileFlag<-T
if(toFileFlag){
tikz(fileName,width=4,height=3,standAlone=T)
}
plot(a)
if(toFileFlag){
dev.off()
system(paste("pdflatex",fileName));
}
|
##########################################################################################
# BAYESIAN ORDINATIO AND REGRESSION ANALYSIS PREDICTIONS
##########################################################################################
require(boral)
source(file.path(PD,"boralPredict.r"))
require(abind)
##########################################################################################
for (j in 1:3) {
nsites <- nrow(x_valid[[j]])
nsp <- ncol(y_valid[[j]])
for (m in 1:2) {
modelfile <- file.path(FD, set_no, paste0("brl",
m,
"_",
j,
"_",
dataN[sz]))
if (MCMC2) {
modelfile <- paste0(modelfile, "_MCMC2")
}
load(file = paste0(modelfile, ".RData"))
if (m==1) { brl <- brl1 }
if (m==2) { brl <- brl2 }
Xv <- x_valid[[j]][,-1]
linpred_boral <- boralPredict(brl,
newX = Xv,
predict.type = "marginal")
boral_PAs <- linpred_boral
for(k in 1:dim(linpred_boral)[3]) {
boral_PAs[,,k] <- matrix(rbinom(length(linpred_boral[,,k]),
1,
prob = pnorm(linpred_boral[,,k])),
nrow = dim(linpred_boral)[1],
ncol = dim(linpred_boral)[2])
}
set.seed(17)
smplREPs <- sample(1:dim(boral_PAs)[3],
REPs,
replace = T)
boral_PAs <- boral_PAs[,,smplREPs]
filebody <- paste0("boral", m, "_PAs_", j, "_", dataN[sz])
if (commSP) {
filebody <- paste0(filebody, "_commSP")
}
if (MCMC2) {
filebody <- paste0(filebody, "_MCMC2")
}
save(boral_PAs, file = file.path(PD2,
set_no,
paste0(filebody, ".RData")))
rm(brl)
rm(linpred_boral)
rm(boral_PAs)
gc()
}
}
##########################################################################################
| /PREDICT/predict.boral.r | no_license | davan690/SDM-comparison | R | false | false | 2,155 | r | ##########################################################################################
# BAYESIAN ORDINATIO AND REGRESSION ANALYSIS PREDICTIONS
##########################################################################################
require(boral)
source(file.path(PD,"boralPredict.r"))
require(abind)
##########################################################################################
for (j in 1:3) {
nsites <- nrow(x_valid[[j]])
nsp <- ncol(y_valid[[j]])
for (m in 1:2) {
modelfile <- file.path(FD, set_no, paste0("brl",
m,
"_",
j,
"_",
dataN[sz]))
if (MCMC2) {
modelfile <- paste0(modelfile, "_MCMC2")
}
load(file = paste0(modelfile, ".RData"))
if (m==1) { brl <- brl1 }
if (m==2) { brl <- brl2 }
Xv <- x_valid[[j]][,-1]
linpred_boral <- boralPredict(brl,
newX = Xv,
predict.type = "marginal")
boral_PAs <- linpred_boral
for(k in 1:dim(linpred_boral)[3]) {
boral_PAs[,,k] <- matrix(rbinom(length(linpred_boral[,,k]),
1,
prob = pnorm(linpred_boral[,,k])),
nrow = dim(linpred_boral)[1],
ncol = dim(linpred_boral)[2])
}
set.seed(17)
smplREPs <- sample(1:dim(boral_PAs)[3],
REPs,
replace = T)
boral_PAs <- boral_PAs[,,smplREPs]
filebody <- paste0("boral", m, "_PAs_", j, "_", dataN[sz])
if (commSP) {
filebody <- paste0(filebody, "_commSP")
}
if (MCMC2) {
filebody <- paste0(filebody, "_MCMC2")
}
save(boral_PAs, file = file.path(PD2,
set_no,
paste0(filebody, ".RData")))
rm(brl)
rm(linpred_boral)
rm(boral_PAs)
gc()
}
}
##########################################################################################
|
library(dplyr)
#Read data from file. From first reading chunks of 10000, data of interest starts
#beyond row 60000
power<-read.table('household_power_consumption.txt',skip=60000, nrows=10000, sep=";",
na.strings=c("?"),colClasses=c("character","character",rep("numeric",7)),
col.names=c("Date","Time","GlobalActivePower","GlobalReactivePower",
"Voltage","GlobalIntensity","SubMetering1","SubMetering2",
"SubMetering3"))
power<-tbl_df(power)
power<-filter(power, Date =="1/2/2007" | Date == "2/2/2007")
power<-mutate(power, Date = as.Date(Date,"%d/%m/%Y"))
power<-mutate(power, Time = as.POSIXct(strptime(Time,"%H:%M:%S")))
#Fix date offset in the time field
offset<-(as.POSIXct(power$Date[1])-power$Time[1])+0.333334
offset<-c(rep(offset,1440),rep(offset+1,1440))
power<-mutate(power, Time = Time + offset*60*60*24)
#Generate plot 2 - A line plot
png(filename = "plot2.png", width = 480, height = 480)
par(mar = c(6,6,5,4),cex.axis=0.75,cex.lab=0.75,cex.main=.9)
plot(power$GlobalActivePower~power$Time,ylab="Global Active Power (kilowatts)",xlab="",
type="l")
dev.off()
| /plot2.R | no_license | kgChem/ExData_Plotting1 | R | false | false | 1,177 | r | library(dplyr)
#Read data from file. From first reading chunks of 10000, data of interest starts
#beyond row 60000
power<-read.table('household_power_consumption.txt',skip=60000, nrows=10000, sep=";",
na.strings=c("?"),colClasses=c("character","character",rep("numeric",7)),
col.names=c("Date","Time","GlobalActivePower","GlobalReactivePower",
"Voltage","GlobalIntensity","SubMetering1","SubMetering2",
"SubMetering3"))
power<-tbl_df(power)
power<-filter(power, Date =="1/2/2007" | Date == "2/2/2007")
power<-mutate(power, Date = as.Date(Date,"%d/%m/%Y"))
power<-mutate(power, Time = as.POSIXct(strptime(Time,"%H:%M:%S")))
#Fix date offset in the time field
offset<-(as.POSIXct(power$Date[1])-power$Time[1])+0.333334
offset<-c(rep(offset,1440),rep(offset+1,1440))
power<-mutate(power, Time = Time + offset*60*60*24)
#Generate plot 2 - A line plot
png(filename = "plot2.png", width = 480, height = 480)
par(mar = c(6,6,5,4),cex.axis=0.75,cex.lab=0.75,cex.main=.9)
plot(power$GlobalActivePower~power$Time,ylab="Global Active Power (kilowatts)",xlab="",
type="l")
dev.off()
|
golem_sys <- function(
...,
lib.loc = NULL,
mustWork = FALSE
) {
system.file(
...,
package = "golem",
lib.loc = lib.loc,
mustWork = mustWork
)
}
# from usethis https://github.com/r-lib/usethis/
darkgrey <- function(x) {
x <- crayon::make_style("darkgrey")(x)
}
create_if_needed <- function(
path,
type = c("file", "directory"),
content = NULL
) {
type <- match.arg(type)
# Check if file or dir already exist
if (type == "file") {
dont_exist <- Negate(fs_file_exists)(path)
} else if (type == "directory") {
dont_exist <- Negate(fs_dir_exists)(path)
}
# If it doesn't exist, ask if we are allowed
# to create it
if (dont_exist) {
if (interactive()) {
ask <- yesno(
sprintf(
"The %s %s doesn't exist, create?",
basename(path),
type
)
)
# Return early if the user doesn't allow
if (!ask) {
return(FALSE)
} else {
# Create the file
if (type == "file") {
fs_file_create(path)
write(content, path, append = TRUE)
} else if (type == "directory") {
fs_dir_create(path, recurse = TRUE)
}
}
} else {
stop(
sprintf(
"The %s %s doesn't exist.",
basename(path),
type
)
)
}
}
# TRUE means that file exists (either
# created or already there)
return(TRUE)
}
check_file_exist <- function(file) {
res <- TRUE
if (fs_file_exists(file)) {
if (interactive()) {
res <- yesno("This file already exists, override?")
} else {
res <- TRUE
}
}
return(res)
}
# internal
replace_word <- function(
file,
pattern,
replace
) {
suppressWarnings(tx <- readLines(file))
tx2 <- gsub(
pattern = pattern,
replacement = replace,
x = tx
)
writeLines(
tx2,
con = file
)
}
remove_comments <- function(file) {
lines <- readLines(file)
lines_without_comment <- c()
for (line in lines) {
lines_without_comment <- append(
lines_without_comment,
gsub("(\\s*#+[^'@].*$| #+[^#].*$)", "", line)
)
}
lines_without_comment <- lines_without_comment[lines_without_comment != ""]
writeLines(text = lines_without_comment, con = file)
}
#' @importFrom cli cat_bullet
cat_green_tick <- function(...) {
cat_bullet(
...,
bullet = "tick",
bullet_col = "green"
)
}
#' @importFrom cli cat_bullet
cat_red_bullet <- function(...) {
cat_bullet(
...,
bullet = "bullet",
bullet_col = "red"
)
}
#' @importFrom cli cat_bullet
cat_info <- function(...) {
cat_bullet(
...,
bullet = "arrow_right",
bullet_col = "grey"
)
}
cat_exists <- function(where) {
cat_red_bullet(
sprintf(
"[Skipped] %s already exists.",
basename(where)
)
)
cat_info(
sprintf(
"If you want replace it, remove the %s file first.",
basename(where)
)
)
}
cat_dir_necessary <- function() {
cat_red_bullet(
"File not added (needs a valid directory)"
)
}
cat_start_download <- function() {
cat_line("")
cat_rule("Initiating file download")
}
cat_downloaded <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s downloaded at %s",
file,
where
)
)
}
cat_start_copy <- function() {
cat_line("")
cat_rule("Copying file")
}
cat_copied <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s copied to %s",
file,
where
)
)
}
cat_created <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s created at %s",
file,
where
)
)
}
# File made dance
cat_automatically_linked <- function() {
cat_green_tick(
"File automatically linked in `golem_add_external_resources()`."
)
}
open_or_go_to <- function(
where,
open_file
) {
if (
rstudioapi::isAvailable() &&
open_file &&
rstudioapi::hasFun("navigateToFile")
) {
rstudioapi::navigateToFile(where)
} else {
cat_red_bullet(
sprintf(
"Go to %s",
where
)
)
}
invisible(where)
}
desc_exist <- function(pkg) {
fs_file_exists(
paste0(pkg, "/DESCRIPTION")
)
}
after_creation_message_js <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (
fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$script(src="www/%s.js")`',
name
)
)
} else {
cat_automatically_linked()
}
}
}
after_creation_message_css <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$link(rel="stylesheet", type="text/css", href="www/.css")`',
name
)
)
} else {
cat_automatically_linked()
}
}
}
after_creation_message_sass <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'After compile your Sass file, to link your css file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$link(rel="stylesheet", type="text/css", href="www/.css")`'
)
)
}
}
}
after_creation_message_html_template <- function(
pkg,
dir,
name
) {
cat_line("")
cat_rule("To use this html file as a template, add the following code in your UI:")
cat_line(darkgrey("htmlTemplate("))
cat_line(darkgrey(sprintf(' app_sys("app/www/%s.html"),', file_path_sans_ext(name))))
cat_line(darkgrey(" body = tagList()"))
cat_line(darkgrey(" # add here other template arguments"))
cat_line(darkgrey(")"))
}
file_created_dance <- function(
where,
fun,
pkg,
dir,
name,
open_file,
open_or_go_to = TRUE,
catfun = cat_created
) {
catfun(where)
fun(pkg, dir, name)
if (open_or_go_to) {
open_or_go_to(
where = where,
open_file = open_file
)
} else {
return(invisible(where))
}
}
file_already_there_dance <- function(
where,
open_file
) {
cat_green_tick("File already exists.")
open_or_go_to(
where = where,
open_file = open_file
)
}
# Minor toolings
if_not_null <- function(x, ...) {
if (!is.null(x)) {
force(...)
}
}
set_name <- function(x, y) {
names(x) <- y
x
}
# FROM tools::file_path_sans_ext() & tools::file_ext
file_path_sans_ext <- function(x) {
sub("([^.]+)\\.[[:alnum:]]+$", "\\1", x)
}
file_ext <- function(x) {
pos <- regexpr("\\.([[:alnum:]]+)$", x)
ifelse(pos > -1L, substring(x, pos + 1L), "")
}
#' @importFrom utils menu
yesno <- function(...) {
cat(paste0(..., collapse = ""))
menu(c("Yes", "No")) == 1
}
# Checking that a package is installed
check_is_installed <- function(
pak,
...
) {
if (
!requireNamespace(pak, ..., quietly = TRUE)
) {
stop(
sprintf(
"The {%s} package is required to run this function.\nYou can install it with `install.packages('%s')`.",
pak,
pak
),
call. = FALSE
)
}
}
required_version <- function(
pak,
version
) {
if (
utils::packageVersion(pak) < version
) {
stop(
sprintf(
"This function require the version '%s' of the {%s} package.\nYou can update with `install.packages('%s')`.",
version,
pak,
pak
),
call. = FALSE
)
}
}
add_sass_code <- function(where, dir, name) {
if (fs_file_exists(where)) {
if (fs_file_exists("dev/run_dev.R")) {
lines <- readLines("dev/run_dev.R")
new_lines <- append(
x = lines,
values = c(
"# Sass code compilation",
sprintf(
'sass::sass(input = sass::sass_file("%s/%s.sass"), output = "%s/%s.css", cache = NULL)',
dir,
name,
dir,
name
),
""
),
after = 0
)
writeLines(
text = new_lines,
con = "dev/run_dev.R"
)
cat_green_tick(
"Code added in run_dev.R to compile your Sass file to CSS file."
)
}
}
}
#' Check if a module already exists
#'
#' Assumes it is called at the root of a golem project.
#'
#' @param module A character string. The name of a potentially existing module
#' @return Boolean. Does the module exist or not ?
#' @noRd
is_existing_module <- function(module) {
existing_module_files <- list.files("R/", pattern = "^mod_")
existing_module_names <- sub(
"^mod_([[:alnum:]_]+)\\.R$",
"\\1",
existing_module_files
)
module %in% existing_module_names
}
# This function is used for checking
# that the name argument of the function
# creating files is not of length() > 1
check_name_length <- function(name) {
stop_if(
name,
~ length(.x) > 1,
sprintf(
"`name` should be of length 1. Got %d.",
length(name)
)
)
}
| /R/utils.R | permissive | Cervangirard/golem | R | false | false | 9,362 | r | golem_sys <- function(
...,
lib.loc = NULL,
mustWork = FALSE
) {
system.file(
...,
package = "golem",
lib.loc = lib.loc,
mustWork = mustWork
)
}
# from usethis https://github.com/r-lib/usethis/
darkgrey <- function(x) {
x <- crayon::make_style("darkgrey")(x)
}
create_if_needed <- function(
path,
type = c("file", "directory"),
content = NULL
) {
type <- match.arg(type)
# Check if file or dir already exist
if (type == "file") {
dont_exist <- Negate(fs_file_exists)(path)
} else if (type == "directory") {
dont_exist <- Negate(fs_dir_exists)(path)
}
# If it doesn't exist, ask if we are allowed
# to create it
if (dont_exist) {
if (interactive()) {
ask <- yesno(
sprintf(
"The %s %s doesn't exist, create?",
basename(path),
type
)
)
# Return early if the user doesn't allow
if (!ask) {
return(FALSE)
} else {
# Create the file
if (type == "file") {
fs_file_create(path)
write(content, path, append = TRUE)
} else if (type == "directory") {
fs_dir_create(path, recurse = TRUE)
}
}
} else {
stop(
sprintf(
"The %s %s doesn't exist.",
basename(path),
type
)
)
}
}
# TRUE means that file exists (either
# created or already there)
return(TRUE)
}
check_file_exist <- function(file) {
res <- TRUE
if (fs_file_exists(file)) {
if (interactive()) {
res <- yesno("This file already exists, override?")
} else {
res <- TRUE
}
}
return(res)
}
# internal
replace_word <- function(
file,
pattern,
replace
) {
suppressWarnings(tx <- readLines(file))
tx2 <- gsub(
pattern = pattern,
replacement = replace,
x = tx
)
writeLines(
tx2,
con = file
)
}
remove_comments <- function(file) {
lines <- readLines(file)
lines_without_comment <- c()
for (line in lines) {
lines_without_comment <- append(
lines_without_comment,
gsub("(\\s*#+[^'@].*$| #+[^#].*$)", "", line)
)
}
lines_without_comment <- lines_without_comment[lines_without_comment != ""]
writeLines(text = lines_without_comment, con = file)
}
#' @importFrom cli cat_bullet
cat_green_tick <- function(...) {
cat_bullet(
...,
bullet = "tick",
bullet_col = "green"
)
}
#' @importFrom cli cat_bullet
cat_red_bullet <- function(...) {
cat_bullet(
...,
bullet = "bullet",
bullet_col = "red"
)
}
#' @importFrom cli cat_bullet
cat_info <- function(...) {
cat_bullet(
...,
bullet = "arrow_right",
bullet_col = "grey"
)
}
cat_exists <- function(where) {
cat_red_bullet(
sprintf(
"[Skipped] %s already exists.",
basename(where)
)
)
cat_info(
sprintf(
"If you want replace it, remove the %s file first.",
basename(where)
)
)
}
cat_dir_necessary <- function() {
cat_red_bullet(
"File not added (needs a valid directory)"
)
}
cat_start_download <- function() {
cat_line("")
cat_rule("Initiating file download")
}
cat_downloaded <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s downloaded at %s",
file,
where
)
)
}
cat_start_copy <- function() {
cat_line("")
cat_rule("Copying file")
}
cat_copied <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s copied to %s",
file,
where
)
)
}
cat_created <- function(
where,
file = "File"
) {
cat_green_tick(
sprintf(
"%s created at %s",
file,
where
)
)
}
# File made dance
cat_automatically_linked <- function() {
cat_green_tick(
"File automatically linked in `golem_add_external_resources()`."
)
}
open_or_go_to <- function(
where,
open_file
) {
if (
rstudioapi::isAvailable() &&
open_file &&
rstudioapi::hasFun("navigateToFile")
) {
rstudioapi::navigateToFile(where)
} else {
cat_red_bullet(
sprintf(
"Go to %s",
where
)
)
}
invisible(where)
}
desc_exist <- function(pkg) {
fs_file_exists(
paste0(pkg, "/DESCRIPTION")
)
}
after_creation_message_js <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (
fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$script(src="www/%s.js")`',
name
)
)
} else {
cat_automatically_linked()
}
}
}
after_creation_message_css <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$link(rel="stylesheet", type="text/css", href="www/.css")`',
name
)
)
} else {
cat_automatically_linked()
}
}
}
after_creation_message_sass <- function(
pkg,
dir,
name
) {
if (
desc_exist(pkg)
) {
if (fs_path_abs(dir) != fs_path_abs("inst/app/www") &
utils::packageVersion("golem") < "0.2.0"
) {
cat_red_bullet(
sprintf(
'After compile your Sass file, to link your css file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$link(rel="stylesheet", type="text/css", href="www/.css")`'
)
)
}
}
}
after_creation_message_html_template <- function(
pkg,
dir,
name
) {
cat_line("")
cat_rule("To use this html file as a template, add the following code in your UI:")
cat_line(darkgrey("htmlTemplate("))
cat_line(darkgrey(sprintf(' app_sys("app/www/%s.html"),', file_path_sans_ext(name))))
cat_line(darkgrey(" body = tagList()"))
cat_line(darkgrey(" # add here other template arguments"))
cat_line(darkgrey(")"))
}
file_created_dance <- function(
where,
fun,
pkg,
dir,
name,
open_file,
open_or_go_to = TRUE,
catfun = cat_created
) {
catfun(where)
fun(pkg, dir, name)
if (open_or_go_to) {
open_or_go_to(
where = where,
open_file = open_file
)
} else {
return(invisible(where))
}
}
file_already_there_dance <- function(
where,
open_file
) {
cat_green_tick("File already exists.")
open_or_go_to(
where = where,
open_file = open_file
)
}
# Minor toolings
if_not_null <- function(x, ...) {
if (!is.null(x)) {
force(...)
}
}
set_name <- function(x, y) {
names(x) <- y
x
}
# FROM tools::file_path_sans_ext() & tools::file_ext
file_path_sans_ext <- function(x) {
sub("([^.]+)\\.[[:alnum:]]+$", "\\1", x)
}
file_ext <- function(x) {
pos <- regexpr("\\.([[:alnum:]]+)$", x)
ifelse(pos > -1L, substring(x, pos + 1L), "")
}
#' @importFrom utils menu
yesno <- function(...) {
cat(paste0(..., collapse = ""))
menu(c("Yes", "No")) == 1
}
# Checking that a package is installed
check_is_installed <- function(
pak,
...
) {
if (
!requireNamespace(pak, ..., quietly = TRUE)
) {
stop(
sprintf(
"The {%s} package is required to run this function.\nYou can install it with `install.packages('%s')`.",
pak,
pak
),
call. = FALSE
)
}
}
required_version <- function(
pak,
version
) {
if (
utils::packageVersion(pak) < version
) {
stop(
sprintf(
"This function require the version '%s' of the {%s} package.\nYou can update with `install.packages('%s')`.",
version,
pak,
pak
),
call. = FALSE
)
}
}
add_sass_code <- function(where, dir, name) {
if (fs_file_exists(where)) {
if (fs_file_exists("dev/run_dev.R")) {
lines <- readLines("dev/run_dev.R")
new_lines <- append(
x = lines,
values = c(
"# Sass code compilation",
sprintf(
'sass::sass(input = sass::sass_file("%s/%s.sass"), output = "%s/%s.css", cache = NULL)',
dir,
name,
dir,
name
),
""
),
after = 0
)
writeLines(
text = new_lines,
con = "dev/run_dev.R"
)
cat_green_tick(
"Code added in run_dev.R to compile your Sass file to CSS file."
)
}
}
}
#' Check if a module already exists
#'
#' Assumes it is called at the root of a golem project.
#'
#' @param module A character string. The name of a potentially existing module
#' @return Boolean. Does the module exist or not ?
#' @noRd
is_existing_module <- function(module) {
existing_module_files <- list.files("R/", pattern = "^mod_")
existing_module_names <- sub(
"^mod_([[:alnum:]_]+)\\.R$",
"\\1",
existing_module_files
)
module %in% existing_module_names
}
# This function is used for checking
# that the name argument of the function
# creating files is not of length() > 1
check_name_length <- function(name) {
stop_if(
name,
~ length(.x) > 1,
sprintf(
"`name` should be of length 1. Got %d.",
length(name)
)
)
}
|
#' Computes completeness values of the dataset
#'
#' Computes completeness values for each cell. Currently returns Chao2 index of
#' species richness.
#'
#' After dividing the extent of the dataset in cells (via the
#' \code{\link{get_cell_id}} function), the function calculates the Chao2 estimator
#' of species richness. Given the nature of the calculations, a minimum number of
#' records must be present on each cell to properly compute the index. If there
#' are too few records in the cells, the function is unable to finish, and it
#' throws an error.
#'
#' This function produces a plot of number of species versus completeness index to
#' give an idea of output. The data frame returned can be used to visualize the
#' completeness of the data using \code{\link{map_grid}} function with ptype as
#' "complete".
#'
#' @import sqldf
#' @importFrom stats na.omit
#' @importFrom graphics plot
#' @param indf input data frame containing biodiversity data set
#' @param recs minimum number of records per grid cell required to make the
#' calculations. Default is 50. If there are too few records, the function
#' throws an error.
#' @param gridscale plot the map grids at specific degree scale. Default is 1.
#' Currently valid values are 1 and 0.1.
#' @return data.frame with the columns \itemize{ \item{"Cell_id"}{ id of the cell}
#' \item{"nrec"}{ Number of records in the cell} \item{"Sobs"}{ Number of Observed species}
#' \item{"Sest"}{ Estimated number of species} \item{"c"}{ Completeness ratio the cell}
#' \item {"Centi_cell_id"}{ Cell ids for 0.1 degree cells}
#'
#' Plots a graph of Number of species vs completeness }
#' @examples
#' \dontrun{
#' bd_complete(inat)
#' }
#' @seealso \code{\link{get_cell_id}}
#' @export
bd_complete <- function(indf,
recs = 50,
gridscale = 1) {
centigrid <- FALSE
if (gridscale == 0.1) {
centigrid <- TRUE
}
if (!(gridscale == 1 | gridscale == 0.1)) {
stop("Only values accepted currently are 1 or 0.1")
}
if (centigrid) {
indf$Cell_id <- (indf$Cell_id * 100) + indf$Centi_cell_id
}
dat1 <-
sqldf(
"select Scientific_name, Date_collected, Cell_id from indf group by Scientific_name, Date_collected, Cell_id"
)
dat2 <-
sqldf("select cell_id,count(*) as cell_ct from dat1 group by cell_id")
dat3 <- sqldf(paste("select * from dat2 where cell_ct > ", recs))
dat1 <- na.omit(dat1)
dat2 <- na.omit(dat2)
dat3 <- na.omit(dat3)
retmat <- NULL
if (dim(dat3)[1] < 1) {
stop("Too few data records to compute completeness")
}
for (i in 1:dim(dat3)[1]) {
Cell_id <- dat3$Cell_id[i]
nrec <- dat3$cell_ct[i]
cset <- dat1[which(dat1$Cell_id == dat3$Cell_id[i]), ]
csum <-
sqldf("select Scientific_name, count(*) as sct from cset group by Scientific_name")
Q1 <-
as.numeric(sqldf("select count(*) from csum where sct = 1 "))
Q2 <- sqldf("select count(*) from csum where sct = 2 ")
m <-
sqldf("select count(*) from ( select * from cset group by Date_collected )")
Sobs <-
as.numeric(sqldf(
"select count(*) from ( select * from cset group by Scientific_name)"
))
if (Sobs > 0) {
Sest <-
as.numeric(Sobs + (((m - 1) / m) * ((Q1 * (Q1 - 1)) / (2 * (Q2 + 1)))))
c <- Sobs / Sest
retmat <- rbind(retmat, c(Cell_id, nrec, Sobs, Sest, c))
}
}
retmat <- as.data.frame(retmat)
names(retmat) <- c("Cell_id", "nrec", "Sobs", "Sest", "c")
plot(
retmat$Sobs,
retmat$c,
main = "Completeness vs number of species",
xlab = "Number of species",
ylab = "Completeness"
)
if (centigrid) {
retmat$Centi_cell_id <- retmat$Cell_id %% 100
retmat$Cell_id <- retmat$Cell_id %/% 100
}
return(retmat)
}
| /R/bdcomplete.R | no_license | thiloshon/bdvis | R | false | false | 3,782 | r | #' Computes completeness values of the dataset
#'
#' Computes completeness values for each cell. Currently returns Chao2 index of
#' species richness.
#'
#' After dividing the extent of the dataset in cells (via the
#' \code{\link{get_cell_id}} function), the function calculates the Chao2 estimator
#' of species richness. Given the nature of the calculations, a minimum number of
#' records must be present on each cell to properly compute the index. If there
#' are too few records in the cells, the function is unable to finish, and it
#' throws an error.
#'
#' This function produces a plot of number of species versus completeness index to
#' give an idea of output. The data frame returned can be used to visualize the
#' completeness of the data using \code{\link{map_grid}} function with ptype as
#' "complete".
#'
#' @import sqldf
#' @importFrom stats na.omit
#' @importFrom graphics plot
#' @param indf input data frame containing biodiversity data set
#' @param recs minimum number of records per grid cell required to make the
#' calculations. Default is 50. If there are too few records, the function
#' throws an error.
#' @param gridscale plot the map grids at specific degree scale. Default is 1.
#' Currently valid values are 1 and 0.1.
#' @return data.frame with the columns \itemize{ \item{"Cell_id"}{ id of the cell}
#' \item{"nrec"}{ Number of records in the cell} \item{"Sobs"}{ Number of Observed species}
#' \item{"Sest"}{ Estimated number of species} \item{"c"}{ Completeness ratio the cell}
#' \item {"Centi_cell_id"}{ Cell ids for 0.1 degree cells}
#'
#' Plots a graph of Number of species vs completeness }
#' @examples
#' \dontrun{
#' bd_complete(inat)
#' }
#' @seealso \code{\link{get_cell_id}}
#' @export
bd_complete <- function(indf,
recs = 50,
gridscale = 1) {
centigrid <- FALSE
if (gridscale == 0.1) {
centigrid <- TRUE
}
if (!(gridscale == 1 | gridscale == 0.1)) {
stop("Only values accepted currently are 1 or 0.1")
}
if (centigrid) {
indf$Cell_id <- (indf$Cell_id * 100) + indf$Centi_cell_id
}
dat1 <-
sqldf(
"select Scientific_name, Date_collected, Cell_id from indf group by Scientific_name, Date_collected, Cell_id"
)
dat2 <-
sqldf("select cell_id,count(*) as cell_ct from dat1 group by cell_id")
dat3 <- sqldf(paste("select * from dat2 where cell_ct > ", recs))
dat1 <- na.omit(dat1)
dat2 <- na.omit(dat2)
dat3 <- na.omit(dat3)
retmat <- NULL
if (dim(dat3)[1] < 1) {
stop("Too few data records to compute completeness")
}
for (i in 1:dim(dat3)[1]) {
Cell_id <- dat3$Cell_id[i]
nrec <- dat3$cell_ct[i]
cset <- dat1[which(dat1$Cell_id == dat3$Cell_id[i]), ]
csum <-
sqldf("select Scientific_name, count(*) as sct from cset group by Scientific_name")
Q1 <-
as.numeric(sqldf("select count(*) from csum where sct = 1 "))
Q2 <- sqldf("select count(*) from csum where sct = 2 ")
m <-
sqldf("select count(*) from ( select * from cset group by Date_collected )")
Sobs <-
as.numeric(sqldf(
"select count(*) from ( select * from cset group by Scientific_name)"
))
if (Sobs > 0) {
Sest <-
as.numeric(Sobs + (((m - 1) / m) * ((Q1 * (Q1 - 1)) / (2 * (Q2 + 1)))))
c <- Sobs / Sest
retmat <- rbind(retmat, c(Cell_id, nrec, Sobs, Sest, c))
}
}
retmat <- as.data.frame(retmat)
names(retmat) <- c("Cell_id", "nrec", "Sobs", "Sest", "c")
plot(
retmat$Sobs,
retmat$c,
main = "Completeness vs number of species",
xlab = "Number of species",
ylab = "Completeness"
)
if (centigrid) {
retmat$Centi_cell_id <- retmat$Cell_id %% 100
retmat$Cell_id <- retmat$Cell_id %/% 100
}
return(retmat)
}
|
#' @importFrom rlang .data
#' @title Load weightlifting logs
#' @description Loads weightlifting logs in CSV format into a data frame
#'
#' @param datadir A directory containing weightlifting logs in CSV format. The expected format is \code{date, exercise, variant, set1weight, set1reps, ..., setNweight, setNreps}
#' @param files A list of files containing weightlifting logs in CSV format. Not currently implemented.
#' @param header Whether the CSV file contains a header. Passed to read.csv.
#' @return A data frame containing a weightlifting log with one set of an exercise per row. The program name listed in the data frame will correspond to the name of the CSV file from which the data was read.
#'
#' @export
load_csv_data <- function(files = NA, datadir = NA, header = TRUE) {
# if (is.na(datadir)) {
# stop("You must enter a directory for your weightlifting files.")
# }
out.file <- data.frame()
if (! is.na(files)) {
# Load data from files passed to function (interactive Rshiny)
# files = files
} else if (! is.na(datadir)) {
# Load data from datadir
files <- dir(datadir, pattern = ".csv")
files <- paste0(datadir, "/", files)
} else {
stop("You must enter either a set of files or a directory path.")
}
for (i in 1:length(files)) {
file <- utils::read.csv(
paste(files[i], sep=""),
header = header,
sep = ",",
stringsAsFactors = FALSE,
strip.white = TRUE
)
file$program <- sub(".+\\/(.+?)[.csv]*$", "\\1", files[i]) # set program to name of file
file <- file[, c("program", setdiff(names(file), c("program")))] # Move program to first column
if (requireNamespace("tidyr", quietly = TRUE) &
requireNamespace("dplyr", quietly = TRUE) &
requireNamespace("readr", quietly = TRUE)) {
col.classes <- sapply(file, class)
numeric.cols <- names(col.classes[col.classes == "numeric" | col.classes == "integer"])
logical.cols <- names(col.classes[col.classes == "logical"])
if (all(is.na(file[, logical.cols]))) {
file <- dplyr::select(file, -logical.cols)
}
file <- tidyr::gather(file, "key", "value", numeric.cols, na.rm = TRUE)
file <- dplyr::mutate(file, set = readr::parse_number(.data$key))
file <- dplyr::mutate(file, key = ifelse(grepl("rep", .data$key), "reps", "weight"))
file <- tidyr::spread(file, .data$key, .data$value)
out.file <- dplyr::bind_rows(out.file, file)
}
else {
requireNamespace("tidyr", quietly = F)
requireNamespace("dplyr", quietly = F)
requireNamespace("readr", quietly = F)
set.cols <- grep("set", names(file), ignore.case = TRUE, value = TRUE)
other.cols <- names(file[, ! names(file) %in% set.cols])
# Get max set number
set.num <- max(as.numeric(sub("set[_ ]*(\\d+).+", "\\1", set.cols)), na.rm = TRUE)
# Loop through sets and add to dataframe
for (j in seq(1:set.num)) {
this.set <- grep(paste("set[_ ]*", j, "", sep = ""), set.cols, ignore.case = TRUE, value = TRUE)[1:2]
temp <- file[ , c(other.cols, this.set)]
if (grepl("reps", this.set[1], ignore.case = TRUE)) {
names(temp) <- c(other.cols, "reps","weight")
} else {
names(temp) <- c(other.cols, "weight","reps")
}
temp$set <- j
out.file <- rbind(out.file, temp)
}
}
}
# Return merged dataframe, removing NA values
out.file$date <- as.Date(out.file$date)
out.file[! is.na(out.file$weight) & ! is.na(out.file$reps), ]
}
#' Checks to see if a data frame is a valid weightlifting log. Looks for names.
#' @export
#'
#' @param weightlifting.log A data frame containing at least the following elements: \code{program, date, exercise, variant, reps, weight}
#' @return Boolean
is_valid_weightlifting_log <- function(weightlifting.log = NULL) {
if (is.null(weightlifting.log)) {
stop("Please provide a valid weightlifting log.")
}
if (! all(c("program", "date", "exercise", "equipment", "variant", "reps", "weight") %in% names(weightlifting.log))) {
stop("Please provide a weightlifting log that includes program, date, exercise, equipment, variant, reps, and weight.")
}
return(TRUE)
}
| /R/load_csv_data.R | no_license | titaniumtroop/rweightlifting | R | false | false | 4,250 | r | #' @importFrom rlang .data
#' @title Load weightlifting logs
#' @description Loads weightlifting logs in CSV format into a data frame
#'
#' @param datadir A directory containing weightlifting logs in CSV format. The expected format is \code{date, exercise, variant, set1weight, set1reps, ..., setNweight, setNreps}
#' @param files A list of files containing weightlifting logs in CSV format. Not currently implemented.
#' @param header Whether the CSV file contains a header. Passed to read.csv.
#' @return A data frame containing a weightlifting log with one set of an exercise per row. The program name listed in the data frame will correspond to the name of the CSV file from which the data was read.
#'
#' @export
load_csv_data <- function(files = NA, datadir = NA, header = TRUE) {
# if (is.na(datadir)) {
# stop("You must enter a directory for your weightlifting files.")
# }
out.file <- data.frame()
if (! is.na(files)) {
# Load data from files passed to function (interactive Rshiny)
# files = files
} else if (! is.na(datadir)) {
# Load data from datadir
files <- dir(datadir, pattern = ".csv")
files <- paste0(datadir, "/", files)
} else {
stop("You must enter either a set of files or a directory path.")
}
for (i in 1:length(files)) {
file <- utils::read.csv(
paste(files[i], sep=""),
header = header,
sep = ",",
stringsAsFactors = FALSE,
strip.white = TRUE
)
file$program <- sub(".+\\/(.+?)[.csv]*$", "\\1", files[i]) # set program to name of file
file <- file[, c("program", setdiff(names(file), c("program")))] # Move program to first column
if (requireNamespace("tidyr", quietly = TRUE) &
requireNamespace("dplyr", quietly = TRUE) &
requireNamespace("readr", quietly = TRUE)) {
col.classes <- sapply(file, class)
numeric.cols <- names(col.classes[col.classes == "numeric" | col.classes == "integer"])
logical.cols <- names(col.classes[col.classes == "logical"])
if (all(is.na(file[, logical.cols]))) {
file <- dplyr::select(file, -logical.cols)
}
file <- tidyr::gather(file, "key", "value", numeric.cols, na.rm = TRUE)
file <- dplyr::mutate(file, set = readr::parse_number(.data$key))
file <- dplyr::mutate(file, key = ifelse(grepl("rep", .data$key), "reps", "weight"))
file <- tidyr::spread(file, .data$key, .data$value)
out.file <- dplyr::bind_rows(out.file, file)
}
else {
requireNamespace("tidyr", quietly = F)
requireNamespace("dplyr", quietly = F)
requireNamespace("readr", quietly = F)
set.cols <- grep("set", names(file), ignore.case = TRUE, value = TRUE)
other.cols <- names(file[, ! names(file) %in% set.cols])
# Get max set number
set.num <- max(as.numeric(sub("set[_ ]*(\\d+).+", "\\1", set.cols)), na.rm = TRUE)
# Loop through sets and add to dataframe
for (j in seq(1:set.num)) {
this.set <- grep(paste("set[_ ]*", j, "", sep = ""), set.cols, ignore.case = TRUE, value = TRUE)[1:2]
temp <- file[ , c(other.cols, this.set)]
if (grepl("reps", this.set[1], ignore.case = TRUE)) {
names(temp) <- c(other.cols, "reps","weight")
} else {
names(temp) <- c(other.cols, "weight","reps")
}
temp$set <- j
out.file <- rbind(out.file, temp)
}
}
}
# Return merged dataframe, removing NA values
out.file$date <- as.Date(out.file$date)
out.file[! is.na(out.file$weight) & ! is.na(out.file$reps), ]
}
#' Checks to see if a data frame is a valid weightlifting log. Looks for names.
#' @export
#'
#' @param weightlifting.log A data frame containing at least the following elements: \code{program, date, exercise, variant, reps, weight}
#' @return Boolean
is_valid_weightlifting_log <- function(weightlifting.log = NULL) {
if (is.null(weightlifting.log)) {
stop("Please provide a valid weightlifting log.")
}
if (! all(c("program", "date", "exercise", "equipment", "variant", "reps", "weight") %in% names(weightlifting.log))) {
stop("Please provide a weightlifting log that includes program, date, exercise, equipment, variant, reps, and weight.")
}
return(TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfa.R
\name{test.lige}
\alias{test.lige}
\title{test.lige}
\usage{
test.lige(cfaobj, allt, se = T, h)
}
\arguments{
\item{cfaobj}{a CFA object}
\item{allt}{all the values of the treatment variable in the dataset}
\item{se}{boolean whether or not to compute standard errors}
\item{h}{a bandwidth}
}
\value{
a CFASE object
}
\description{
test if the local intergnerational elasticity is the same across
all values of the treatment variable
}
\examples{
\dontrun{
data(igm)
tvals <- seq(10,12,length.out=8)
yvals <- seq(quantile(igm$lcfincome, .05), quantile(igm$lcfincome, .95), length.out=50)
## obtain counterfactual results
out <- cfa2(lcfincome ~ lfincome, tvals, yvals, igm, method1="qr",
xformla2=~HEDUC, method2="qr", iters=10, tau1=seq(.05,.95,.05),
tau2=seq(.05,.95,.05))
test.lige(out$cfa1, allt=igm$lfincome, h=0.5)
}
}
| /man/test.lige.Rd | no_license | WeigeHuangEcon/ccfa | R | false | true | 914 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfa.R
\name{test.lige}
\alias{test.lige}
\title{test.lige}
\usage{
test.lige(cfaobj, allt, se = T, h)
}
\arguments{
\item{cfaobj}{a CFA object}
\item{allt}{all the values of the treatment variable in the dataset}
\item{se}{boolean whether or not to compute standard errors}
\item{h}{a bandwidth}
}
\value{
a CFASE object
}
\description{
test if the local intergnerational elasticity is the same across
all values of the treatment variable
}
\examples{
\dontrun{
data(igm)
tvals <- seq(10,12,length.out=8)
yvals <- seq(quantile(igm$lcfincome, .05), quantile(igm$lcfincome, .95), length.out=50)
## obtain counterfactual results
out <- cfa2(lcfincome ~ lfincome, tvals, yvals, igm, method1="qr",
xformla2=~HEDUC, method2="qr", iters=10, tau1=seq(.05,.95,.05),
tau2=seq(.05,.95,.05))
test.lige(out$cfa1, allt=igm$lfincome, h=0.5)
}
}
|
### R code from vignette source 'TR8_workflow.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: dryad (eval = FALSE)
###################################################
## ## the readxl package is needed
## ## library(readxl)
## ## store the url of the dryad package
## url<-"http://datadryad.org/bitstream/handle/
## 10255/dryad.65646/MEE-13-11-651R2_data.xlsx?sequence=1"
## ## choose the extension for the temp file where
## ## data will be stored
## tmp = tempfile(fileext = ".xlsx")
## ## download the data
## download.file(url = url, destfile = tmp)
##
## ## we first read the "metadata" sheet from the xlsx file
## ## (the row containing the species names start from
## ## row 13
## metadata<-read_excel(path=tmp,sheet="metadata",skip=12,col_names=F)
## ## lets rename the column of this dataset
## names(metadata)<-c("Col1","Col2")
##
## ## then read the vegetation data
## veg_data <-readWorksheetFromFile(file = tmp, sheet = "data.txt")
## ## only the columns from 11 to 123 contains the species data
## veg_data<-veg_data[,11:123]
## ## round veg_data numbers to the second digit
## veg_data<-round(veg_data,digits = 2)
## ## read the dataset with the environmental variables
## env_data<-read_excel(path = tmp, sheet = "data.txt")
## ## and select only the column from 1 to 4 which contain
## ## the data of interest
## env_data<-env_data[,1:4]
###################################################
### code chunk number 2: taxize (eval = FALSE)
###################################################
## library(taxize)
## check_names<-tnrs(metadata$Col2,source="iPlant_TNRS")
###################################################
### code chunk number 3: discarded (eval = FALSE)
###################################################
## setdiff(metadata$Col2,check_names$submittedname)
###################################################
### code chunk number 4: taxize2 (eval = FALSE)
###################################################
## issues<-with(check_names,check_names[score!="1",])
## issues[,c("submittedname","score","acceptedname","authority")]
###################################################
### code chunk number 5: substitution (eval = FALSE)
###################################################
## library(plyr)
## ## we use the revalue function in the plyr package
## ## to fix all the above mentioned issues
## metadata$Col2<-revalue(metadata$Col2,
## c("Taraxacum officinale!!!!!"="Taraxacum officinale F.H. Wigg."))
## metadata$Col2<-revalue(metadata$Col2,
## c("Polygonum mite (=Persicaria laxiflora)"="Persicaria mitis (Schrank) Assenov"))
## metadata$Col2<-revalue(metadata$Col2,
## c("Fallopia convolvulus (L.) A. Löwe"="Fallopia convolvulus (L.) Á. Löve"))
## metadata$Col2<-revalue(metadata$Col2,
## c("Setaria pumila (Poir.) Schult."="Setaria pumila (Poir.) Roem. & Schult."))
## metadata$Col2<-revalue(metadata$Col2,
## c("Phleum pratense agg."="Phleum pratense L."))
###################################################
### code chunk number 6: taxize_2 (eval = FALSE)
###################################################
## check_names<-tnrs(metadata$Col2,source="iPlant_TNRS")
## issues<-with(check_names,check_names[score!="1",])
## issues[,c("submittedname","acceptedname","score")]
###################################################
### code chunk number 7: two (eval = FALSE)
###################################################
## final_dataframe<-merge(metadata,check_names,
## by.x = "Col2",by.y="submittedname")
###################################################
### code chunk number 8: three (eval = FALSE)
###################################################
## final_dataframe<-final_dataframe[
## !final_dataframe$Col2%in%issues$submittedname,]
###################################################
### code chunk number 9: tr8_ex (eval = FALSE)
###################################################
## species_names<-final_dataframe$acceptedname
## my_traits<-c("h_max","le_area","leaf_mass","li_form_B","strategy")
## retrieved_traits<-tr8(species_list = species_names,download_list = my_traits)
###################################################
### code chunk number 10: cirsium (eval = FALSE)
###################################################
## ## we extract the data from the object returned by tr8()
## traits<-extract_traits(retrieved_traits)
## ## first I convert the column to character
## traits$h_max<-as.character(traits$h_max)
## traits$h_max[which(row.names(traits)=="Convolvulus arvensis")]<-"42.5"
###################################################
### code chunk number 11: convert (eval = FALSE)
###################################################
## traits$h_max<-as.numeric(traits$h_max)
###################################################
### code chunk number 12: leArea (eval = FALSE)
###################################################
## traits$le_area<-revalue(traits$le_area,
## c("0.1-1"=0.55,
## "1-10"=5.5,
## "10-100"=55,
## "100-1000"=550,
## "1-10;0.1-1"=1,
## "10-100;1-10"=10,
## "100-1000;10-100"=100,
## "10-100;100-1000"=100))
## ## and convert them to numeric
## traits$le_area<-as.numeric(as.character(traits$le_area))
###################################################
### code chunk number 13: liform (eval = FALSE)
###################################################
##
##
## traits$li_form_B<-revalue(traits$li_form_B,
## c("C (Chamaephyte) - H (Hemicryptophyte)"="C - H",
## "G (Geophyte)"="G",
## "G (Geophyte) - H (Hemicryptophyte)"="G - H",
## "H (Hemicryptophyte)"="H",
## "H (Hemicryptophyte) - T (Therophyte)"="H - T",
## "M (Macrophanerophyte)"="M",
## "M (Macrophanerophyte) - N (Nanophanerophyte)"="M - N",
## "T (Therophyte)"="T"))
## ## convert it to factor
## traits$li_form_B<-as.factor(traits$li_form_B)
###################################################
### code chunk number 14: strategy (eval = FALSE)
###################################################
## traits$strategy<-revalue(traits$strategy,c("c (competitors)"="c",
## "cr (competitors/ruderals)"="cr",
## "cs (competitors/stress-tolerators)"="cs",
## "csr (competitors/stress-tolerators/ruderals)"="csr",
## "r (ruderals)"="r"))
## traits$strategy<-as.factor(traits$strategy)
###################################################
### code chunk number 15: a (eval = FALSE)
###################################################
## row.names(traits)<-mapvalues(row.names(traits),
## from=final_dataframe$acceptedname,to=final_dataframe$Col1)
###################################################
### code chunk number 16: b (eval = FALSE)
###################################################
## traits<-traits[complete.cases(traits),]
###################################################
### code chunk number 17: c (eval = FALSE)
###################################################
## vegetation<-veg_data[,names(veg_data)%in%row.names(traits)]
###################################################
### code chunk number 18: d (eval = FALSE)
###################################################
## library(ade4)
## coa<-dudi.coa(vegetation,scannf=F)
###################################################
### code chunk number 19: e (eval = FALSE)
###################################################
## hil.traits<-dudi.hillsmith(traits,row.w=coa$cw,scannf = FALSE)
###################################################
### code chunk number 20: f (eval = FALSE)
###################################################
## ##select which columns have at least one non-zero value
## selection<-colSums(vegetation)>0
## ## and now we choose only those columns
## vegetation<-vegetation[,selection]
###################################################
### code chunk number 21: g (eval = FALSE)
###################################################
## traits<-traits[row.names(traits)%in%names(vegetation),]
###################################################
### code chunk number 22: hh (eval = FALSE)
###################################################
## vegetation<- vegetation[,order(names(vegetation))]
## traits<-traits[order(row.names(traits)),]
###################################################
### code chunk number 23: h (eval = FALSE)
###################################################
## coa<-dudi.coa(vegetation,scannf=F)
## traits.hill<-dudi.hillsmith(traits,row.w=coa$cw,scannf = F)
###################################################
### code chunk number 24: i (eval = FALSE)
###################################################
## env.hill<-dudi.hillsmith(env_data,row.w=coa$lw,scannf = FALSE)
###################################################
### code chunk number 25: l (eval = FALSE)
###################################################
## env_data$Treat<-as.factor(env_data$Treat)
###################################################
### code chunk number 26: i (eval = FALSE)
###################################################
## env.hill<-dudi.hillsmith(env_data,row.w=coa$lw,scannf = FALSE)
###################################################
### code chunk number 27: l (eval = FALSE)
###################################################
## rlq_tr8<-rlq(env.hill,coa,traits.hill,scannf = F)
###################################################
### code chunk number 28: m (eval = FALSE)
###################################################
## plot(rlq_tr8)
###################################################
### code chunk number 29: m (eval = FALSE)
###################################################
## clust<-hclust(dist(rlq_tr8$lQ),method="ward.D2")
## plot(clust,sub="Ward minimum variance clustering",xlab="TR8 tutorial")
###################################################
### code chunk number 30: o (eval = FALSE)
###################################################
## rect.hclust(clust,k=6)
###################################################
### code chunk number 31: p (eval = FALSE)
###################################################
## cuts<-cutree(clust,6)
###################################################
### code chunk number 32: q (eval = FALSE)
###################################################
## s.class(rlq_tr8$lQ,as.factor(cuts),col=1:6)
## s.arrow(rlq_tr8$c1,add.plot = TRUE)
###################################################
### code chunk number 33: aa (eval = FALSE)
###################################################
## par(mfrow=c(3,2))
## plot(traits$h_max~as.factor(cuts),main="Maxim height",
## ylab="max height",border = 1:6,xlab="Group number")
## plot(traits$le_area~as.factor(cuts),main="Leaf area",
## ylab="leaf area",border = 1:6,xlab="Group number")
## plot(traits$leaf_mass~as.factor(cuts),main="Leaf mass",
## ylab="leaf mass",border = 1:6,xlab="Group number")
## plot(table(cuts,traits$strategy),main="CSR strategy",
## ylab="strategy",border = 1:6,xlab="Group number")
## plot(table(cuts,traits$li_form_B),main="Life form",
## ylab="life form",border = 1:6,xlab="Group number")
## par(mfrow=c(1,1))
| /TR8/inst/doc/TR8_workflow.R | no_license | ingted/R-Examples | R | false | false | 11,138 | r | ### R code from vignette source 'TR8_workflow.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: dryad (eval = FALSE)
###################################################
## ## the readxl package is needed
## ## library(readxl)
## ## store the url of the dryad package
## url<-"http://datadryad.org/bitstream/handle/
## 10255/dryad.65646/MEE-13-11-651R2_data.xlsx?sequence=1"
## ## choose the extension for the temp file where
## ## data will be stored
## tmp = tempfile(fileext = ".xlsx")
## ## download the data
## download.file(url = url, destfile = tmp)
##
## ## we first read the "metadata" sheet from the xlsx file
## ## (the row containing the species names start from
## ## row 13
## metadata<-read_excel(path=tmp,sheet="metadata",skip=12,col_names=F)
## ## lets rename the column of this dataset
## names(metadata)<-c("Col1","Col2")
##
## ## then read the vegetation data
## veg_data <-readWorksheetFromFile(file = tmp, sheet = "data.txt")
## ## only the columns from 11 to 123 contains the species data
## veg_data<-veg_data[,11:123]
## ## round veg_data numbers to the second digit
## veg_data<-round(veg_data,digits = 2)
## ## read the dataset with the environmental variables
## env_data<-read_excel(path = tmp, sheet = "data.txt")
## ## and select only the column from 1 to 4 which contain
## ## the data of interest
## env_data<-env_data[,1:4]
###################################################
### code chunk number 2: taxize (eval = FALSE)
###################################################
## library(taxize)
## check_names<-tnrs(metadata$Col2,source="iPlant_TNRS")
###################################################
### code chunk number 3: discarded (eval = FALSE)
###################################################
## setdiff(metadata$Col2,check_names$submittedname)
###################################################
### code chunk number 4: taxize2 (eval = FALSE)
###################################################
## issues<-with(check_names,check_names[score!="1",])
## issues[,c("submittedname","score","acceptedname","authority")]
###################################################
### code chunk number 5: substitution (eval = FALSE)
###################################################
## library(plyr)
## ## we use the revalue function in the plyr package
## ## to fix all the above mentioned issues
## metadata$Col2<-revalue(metadata$Col2,
## c("Taraxacum officinale!!!!!"="Taraxacum officinale F.H. Wigg."))
## metadata$Col2<-revalue(metadata$Col2,
## c("Polygonum mite (=Persicaria laxiflora)"="Persicaria mitis (Schrank) Assenov"))
## metadata$Col2<-revalue(metadata$Col2,
## c("Fallopia convolvulus (L.) A. Löwe"="Fallopia convolvulus (L.) Á. Löve"))
## metadata$Col2<-revalue(metadata$Col2,
## c("Setaria pumila (Poir.) Schult."="Setaria pumila (Poir.) Roem. & Schult."))
## metadata$Col2<-revalue(metadata$Col2,
## c("Phleum pratense agg."="Phleum pratense L."))
###################################################
### code chunk number 6: taxize_2 (eval = FALSE)
###################################################
## check_names<-tnrs(metadata$Col2,source="iPlant_TNRS")
## issues<-with(check_names,check_names[score!="1",])
## issues[,c("submittedname","acceptedname","score")]
###################################################
### code chunk number 7: two (eval = FALSE)
###################################################
## final_dataframe<-merge(metadata,check_names,
## by.x = "Col2",by.y="submittedname")
###################################################
### code chunk number 8: three (eval = FALSE)
###################################################
## final_dataframe<-final_dataframe[
## !final_dataframe$Col2%in%issues$submittedname,]
###################################################
### code chunk number 9: tr8_ex (eval = FALSE)
###################################################
## species_names<-final_dataframe$acceptedname
## my_traits<-c("h_max","le_area","leaf_mass","li_form_B","strategy")
## retrieved_traits<-tr8(species_list = species_names,download_list = my_traits)
###################################################
### code chunk number 10: cirsium (eval = FALSE)
###################################################
## ## we extract the data from the object returned by tr8()
## traits<-extract_traits(retrieved_traits)
## ## first I convert the column to character
## traits$h_max<-as.character(traits$h_max)
## traits$h_max[which(row.names(traits)=="Convolvulus arvensis")]<-"42.5"
###################################################
### code chunk number 11: convert (eval = FALSE)
###################################################
## traits$h_max<-as.numeric(traits$h_max)
###################################################
### code chunk number 12: leArea (eval = FALSE)
###################################################
## traits$le_area<-revalue(traits$le_area,
## c("0.1-1"=0.55,
## "1-10"=5.5,
## "10-100"=55,
## "100-1000"=550,
## "1-10;0.1-1"=1,
## "10-100;1-10"=10,
## "100-1000;10-100"=100,
## "10-100;100-1000"=100))
## ## and convert them to numeric
## traits$le_area<-as.numeric(as.character(traits$le_area))
###################################################
### code chunk number 13: liform (eval = FALSE)
###################################################
##
##
## traits$li_form_B<-revalue(traits$li_form_B,
## c("C (Chamaephyte) - H (Hemicryptophyte)"="C - H",
## "G (Geophyte)"="G",
## "G (Geophyte) - H (Hemicryptophyte)"="G - H",
## "H (Hemicryptophyte)"="H",
## "H (Hemicryptophyte) - T (Therophyte)"="H - T",
## "M (Macrophanerophyte)"="M",
## "M (Macrophanerophyte) - N (Nanophanerophyte)"="M - N",
## "T (Therophyte)"="T"))
## ## convert it to factor
## traits$li_form_B<-as.factor(traits$li_form_B)
###################################################
### code chunk number 14: strategy (eval = FALSE)
###################################################
## traits$strategy<-revalue(traits$strategy,c("c (competitors)"="c",
## "cr (competitors/ruderals)"="cr",
## "cs (competitors/stress-tolerators)"="cs",
## "csr (competitors/stress-tolerators/ruderals)"="csr",
## "r (ruderals)"="r"))
## traits$strategy<-as.factor(traits$strategy)
###################################################
### code chunk number 15: a (eval = FALSE)
###################################################
## row.names(traits)<-mapvalues(row.names(traits),
## from=final_dataframe$acceptedname,to=final_dataframe$Col1)
###################################################
### code chunk number 16: b (eval = FALSE)
###################################################
## traits<-traits[complete.cases(traits),]
###################################################
### code chunk number 17: c (eval = FALSE)
###################################################
## vegetation<-veg_data[,names(veg_data)%in%row.names(traits)]
###################################################
### code chunk number 18: d (eval = FALSE)
###################################################
## library(ade4)
## coa<-dudi.coa(vegetation,scannf=F)
###################################################
### code chunk number 19: e (eval = FALSE)
###################################################
## hil.traits<-dudi.hillsmith(traits,row.w=coa$cw,scannf = FALSE)
###################################################
### code chunk number 20: f (eval = FALSE)
###################################################
## ##select which columns have at least one non-zero value
## selection<-colSums(vegetation)>0
## ## and now we choose only those columns
## vegetation<-vegetation[,selection]
###################################################
### code chunk number 21: g (eval = FALSE)
###################################################
## traits<-traits[row.names(traits)%in%names(vegetation),]
###################################################
### code chunk number 22: hh (eval = FALSE)
###################################################
## vegetation<- vegetation[,order(names(vegetation))]
## traits<-traits[order(row.names(traits)),]
###################################################
### code chunk number 23: h (eval = FALSE)
###################################################
## coa<-dudi.coa(vegetation,scannf=F)
## traits.hill<-dudi.hillsmith(traits,row.w=coa$cw,scannf = F)
###################################################
### code chunk number 24: i (eval = FALSE)
###################################################
## env.hill<-dudi.hillsmith(env_data,row.w=coa$lw,scannf = FALSE)
###################################################
### code chunk number 25: l (eval = FALSE)
###################################################
## env_data$Treat<-as.factor(env_data$Treat)
###################################################
### code chunk number 26: i (eval = FALSE)
###################################################
## env.hill<-dudi.hillsmith(env_data,row.w=coa$lw,scannf = FALSE)
###################################################
### code chunk number 27: l (eval = FALSE)
###################################################
## rlq_tr8<-rlq(env.hill,coa,traits.hill,scannf = F)
###################################################
### code chunk number 28: m (eval = FALSE)
###################################################
## plot(rlq_tr8)
###################################################
### code chunk number 29: m (eval = FALSE)
###################################################
## clust<-hclust(dist(rlq_tr8$lQ),method="ward.D2")
## plot(clust,sub="Ward minimum variance clustering",xlab="TR8 tutorial")
###################################################
### code chunk number 30: o (eval = FALSE)
###################################################
## rect.hclust(clust,k=6)
###################################################
### code chunk number 31: p (eval = FALSE)
###################################################
## cuts<-cutree(clust,6)
###################################################
### code chunk number 32: q (eval = FALSE)
###################################################
## s.class(rlq_tr8$lQ,as.factor(cuts),col=1:6)
## s.arrow(rlq_tr8$c1,add.plot = TRUE)
###################################################
### code chunk number 33: aa (eval = FALSE)
###################################################
## par(mfrow=c(3,2))
## plot(traits$h_max~as.factor(cuts),main="Maxim height",
## ylab="max height",border = 1:6,xlab="Group number")
## plot(traits$le_area~as.factor(cuts),main="Leaf area",
## ylab="leaf area",border = 1:6,xlab="Group number")
## plot(traits$leaf_mass~as.factor(cuts),main="Leaf mass",
## ylab="leaf mass",border = 1:6,xlab="Group number")
## plot(table(cuts,traits$strategy),main="CSR strategy",
## ylab="strategy",border = 1:6,xlab="Group number")
## plot(table(cuts,traits$li_form_B),main="Life form",
## ylab="life form",border = 1:6,xlab="Group number")
## par(mfrow=c(1,1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cognitoidentityprovider_operations.R
\name{describe_identity_provider}
\alias{describe_identity_provider}
\title{Gets information about a specific identity provider}
\usage{
describe_identity_provider(UserPoolId, ProviderName)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{ProviderName}{[required] The identity provider name.}
}
\description{
Gets information about a specific identity provider.
}
\section{Accepted Parameters}{
\preformatted{describe_identity_provider(
UserPoolId = "string",
ProviderName = "string"
)
}
}
| /service/paws.cognitoidentityprovider/man/describe_identity_provider.Rd | permissive | CR-Mercado/paws | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cognitoidentityprovider_operations.R
\name{describe_identity_provider}
\alias{describe_identity_provider}
\title{Gets information about a specific identity provider}
\usage{
describe_identity_provider(UserPoolId, ProviderName)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{ProviderName}{[required] The identity provider name.}
}
\description{
Gets information about a specific identity provider.
}
\section{Accepted Parameters}{
\preformatted{describe_identity_provider(
UserPoolId = "string",
ProviderName = "string"
)
}
}
|
### Pre-impute data ###
preimputation<-function(data, imp.method='mean'){
if(imp.method=='mean'){
data<-mice(data, method = 'mean',printFlag = F)
datapreimput<-complete(data)
}
if(imp.method=='locf'){
data<-t(db.prov)
data<-na.locf(data)
data<-t(data)
data<-mice(data, method = 'mean', printFlag = F)
datapreimput<-complete(data)
}
return(datapreimput)
}
| /R/preimputation.R | permissive | helpstatanalysis/mvls0.1 | R | false | false | 394 | r | ### Pre-impute data ###
preimputation<-function(data, imp.method='mean'){
if(imp.method=='mean'){
data<-mice(data, method = 'mean',printFlag = F)
datapreimput<-complete(data)
}
if(imp.method=='locf'){
data<-t(db.prov)
data<-na.locf(data)
data<-t(data)
data<-mice(data, method = 'mean', printFlag = F)
datapreimput<-complete(data)
}
return(datapreimput)
}
|
# Load and process raw SAPIA database
# Import database
#raw_data <- readxl::read_xlsx("./data_raw/tAP_Main.xlsx")
raw_data <- readr::read_csv2("./data_raw/SAPIA_raw_database_march_2020.csv",
col_types = cols(.default = "c",
"Record_number" = "n",
"DecLat" = "n",
"DecLong" = "n"))
# Clean dataset
sapia_plant_db <- raw_data %>%
# Clean column names
janitor::clean_names() %>%
# Add year column
dplyr::mutate(year = as.numeric(stringr::str_sub(date,
start = 1,
end = 4))) %>%
# Select only columns of interest, and rename some columns
dplyr::select(plant_species = sapia_tax_id,
record_number,
year,
country,
region,
qdgc = x1_4_deg_sq,
longitude = dec_long,
latitude = dec_lat,
locality = locality_route,
density = abun,
#agent_name,
#agent_release,
#agent_abundance = abundance,
host_damage) %>%
# Sort alphabetically and by year
dplyr::arrange(plant_species, year) %>%
# Remove Invader Absent
dplyr::filter(plant_species != "Invader Absent") %>%
# Extract degrees for long/lat from QDGC
dplyr::mutate(lat_cell = (as.numeric(stringr::str_sub(qdgc,
start = 1,
end = 2)) * -1),
lon_cell = (as.numeric(stringr::str_sub(qdgc,
start = 3,
end = 4))),
big_square = stringr::str_sub(qdgc,
start = 5,
end = 5),
small_square = stringr::str_sub(qdgc,
start = 6,
end = 6)) %>%
# Calculate midpoints of latitude QDGC
dplyr::mutate(
lat_mp = dplyr::case_when(
big_square %in% c("A", "B") ~ as.numeric(lat_cell - 0.000),
big_square %in% c("C", "D") ~ as.numeric(lat_cell - 0.300)),
lat_mp = case_when(
small_square %in% c("A", "B") ~ as.numeric(lat_mp - 0.075),
small_square %in% c("C", "D") ~ as.numeric(lat_mp - 0.225))) %>%
# Calculate midpoints of longitude QDGC ( in degree minutes)
dplyr::mutate(
lon_mp = dplyr::case_when(
big_square %in% c("A", "C") ~ as.numeric(lon_cell + 0.000),
big_square %in% c("B", "D") ~ as.numeric(lon_cell + 0.300)),
lon_mp = case_when(
small_square %in% c("A", "C") ~ as.numeric(lon_mp + 0.075),
small_square %in% c("B", "D") ~ as.numeric(lon_mp + 0.225))) %>%
# Extract lat and lon minutes to convert to decimal degrees
dplyr::mutate(lat_mins = as.numeric(stringr::str_sub(lat_mp, start = -3)) / 10,
lon_mins = as.numeric(stringr::str_sub(lon_mp, start = -3)) / 10) %>%
# Convert lat and lon minutes to decimal degrees
dplyr::mutate(lat_dec = lat_mins / 60,
lon_dec = lon_mins / 60) %>%
# Extract lat and lon degrees
dplyr::mutate(lat_deg = as.numeric(stringr::str_sub(lat_mp,
start = 1,
end = 3)),
lon_deg = as.numeric(stringr::str_sub(lon_mp,
start = 1,
end = 2))) %>%
# Calculate final latitude and longitude for QDGC's (decimal degrees)
dplyr::mutate(lat_qdgc = lat_deg - lat_dec,
lon_qdgc = lon_deg + lon_dec) %>%
# Drop columns with qdgc calculations
dplyr::select(-(lat_cell:lon_deg)) %>%
# Convert existing lat/long columns to numeric
dplyr::mutate(longitude = as.numeric(longitude),
latitude = as.numeric(latitude)) %>%
# Combine lat/long mid-points with actual GPS co-ords
# If a record has actual GPS coords, then we drop the QDGC coords
# If no coords, then impute QDGC coords.
dplyr::mutate(
latitude = dplyr::case_when(
!is.na(latitude) ~ as.numeric(latitude),
is.na(latitude) ~ as.numeric(lat_qdgc)),
longitude = dplyr::case_when(
!is.na(longitude) ~ as.numeric(longitude),
is.na(longitude) ~ as.numeric(lon_qdgc))) %>%
# Drop columns with qdgc calculations
dplyr::select(-(lat_qdgc:lon_qdgc)) %>%
# Remove the rows that have no QDGC or coords (very few)
tidyr::drop_na(qdgc)
# Save processed data to PC
write_excel_csv2(sapia_plant_db,
"./data_proc/sapia_db_clean.csv")
###########################################################################
###########################################################################
###########################################################################
| /R/process_clean_import_data.R | no_license | guysutton/sapiaR | R | false | false | 5,109 | r | # Load and process raw SAPIA database
# Import database
#raw_data <- readxl::read_xlsx("./data_raw/tAP_Main.xlsx")
raw_data <- readr::read_csv2("./data_raw/SAPIA_raw_database_march_2020.csv",
col_types = cols(.default = "c",
"Record_number" = "n",
"DecLat" = "n",
"DecLong" = "n"))
# Clean dataset
sapia_plant_db <- raw_data %>%
# Clean column names
janitor::clean_names() %>%
# Add year column
dplyr::mutate(year = as.numeric(stringr::str_sub(date,
start = 1,
end = 4))) %>%
# Select only columns of interest, and rename some columns
dplyr::select(plant_species = sapia_tax_id,
record_number,
year,
country,
region,
qdgc = x1_4_deg_sq,
longitude = dec_long,
latitude = dec_lat,
locality = locality_route,
density = abun,
#agent_name,
#agent_release,
#agent_abundance = abundance,
host_damage) %>%
# Sort alphabetically and by year
dplyr::arrange(plant_species, year) %>%
# Remove Invader Absent
dplyr::filter(plant_species != "Invader Absent") %>%
# Extract degrees for long/lat from QDGC
dplyr::mutate(lat_cell = (as.numeric(stringr::str_sub(qdgc,
start = 1,
end = 2)) * -1),
lon_cell = (as.numeric(stringr::str_sub(qdgc,
start = 3,
end = 4))),
big_square = stringr::str_sub(qdgc,
start = 5,
end = 5),
small_square = stringr::str_sub(qdgc,
start = 6,
end = 6)) %>%
# Calculate midpoints of latitude QDGC
dplyr::mutate(
lat_mp = dplyr::case_when(
big_square %in% c("A", "B") ~ as.numeric(lat_cell - 0.000),
big_square %in% c("C", "D") ~ as.numeric(lat_cell - 0.300)),
lat_mp = case_when(
small_square %in% c("A", "B") ~ as.numeric(lat_mp - 0.075),
small_square %in% c("C", "D") ~ as.numeric(lat_mp - 0.225))) %>%
# Calculate midpoints of longitude QDGC ( in degree minutes)
dplyr::mutate(
lon_mp = dplyr::case_when(
big_square %in% c("A", "C") ~ as.numeric(lon_cell + 0.000),
big_square %in% c("B", "D") ~ as.numeric(lon_cell + 0.300)),
lon_mp = case_when(
small_square %in% c("A", "C") ~ as.numeric(lon_mp + 0.075),
small_square %in% c("B", "D") ~ as.numeric(lon_mp + 0.225))) %>%
# Extract lat and lon minutes to convert to decimal degrees
dplyr::mutate(lat_mins = as.numeric(stringr::str_sub(lat_mp, start = -3)) / 10,
lon_mins = as.numeric(stringr::str_sub(lon_mp, start = -3)) / 10) %>%
# Convert lat and lon minutes to decimal degrees
dplyr::mutate(lat_dec = lat_mins / 60,
lon_dec = lon_mins / 60) %>%
# Extract lat and lon degrees
dplyr::mutate(lat_deg = as.numeric(stringr::str_sub(lat_mp,
start = 1,
end = 3)),
lon_deg = as.numeric(stringr::str_sub(lon_mp,
start = 1,
end = 2))) %>%
# Calculate final latitude and longitude for QDGC's (decimal degrees)
dplyr::mutate(lat_qdgc = lat_deg - lat_dec,
lon_qdgc = lon_deg + lon_dec) %>%
# Drop columns with qdgc calculations
dplyr::select(-(lat_cell:lon_deg)) %>%
# Convert existing lat/long columns to numeric
dplyr::mutate(longitude = as.numeric(longitude),
latitude = as.numeric(latitude)) %>%
# Combine lat/long mid-points with actual GPS co-ords
# If a record has actual GPS coords, then we drop the QDGC coords
# If no coords, then impute QDGC coords.
dplyr::mutate(
latitude = dplyr::case_when(
!is.na(latitude) ~ as.numeric(latitude),
is.na(latitude) ~ as.numeric(lat_qdgc)),
longitude = dplyr::case_when(
!is.na(longitude) ~ as.numeric(longitude),
is.na(longitude) ~ as.numeric(lon_qdgc))) %>%
# Drop columns with qdgc calculations
dplyr::select(-(lat_qdgc:lon_qdgc)) %>%
# Remove the rows that have no QDGC or coords (very few)
tidyr::drop_na(qdgc)
# Save processed data to PC
write_excel_csv2(sapia_plant_db,
"./data_proc/sapia_db_clean.csv")
###########################################################################
###########################################################################
###########################################################################
|
MOSS.Hierarchical <-
function (startList = NULL, p = 0.2, alpha = 1, c = 0.1, cPrime = 0.0001, q = 0.1, replicates = 5, data) {
tools <- list()
varNames <- colnames(data)[which(colnames(data)!= "freq")]
n <- length(varNames)
varSets <- decToBin (0:(2**n-1),n)
colnames(varSets) <- varNames
lenVarSets <- rowSums(varSets)
nVarSets <- 2 ** n
# lattice
downLinks <- array(NA, c(nVarSets,n))
nDownLinks <- lenVarSets
upLinks <- array(NA,c(nVarSets,n))
nUpLinks <- n - lenVarSets
# downLinks
for(i in 1:nVarSets) {
k = 1
for(j in 1:n) {
if(varSets[i,j] == 1) {
varSets[i,j] <- 0
downLinks[i,k] <- binToDec(varSets[i,]) + 1
k <- k + 1
varSets[i,j] <- 1
}
}
}
# upLinks
for(i in 1:nVarSets) {
k = 1
for(j in 1:n) {
if(varSets[i,j] == 0) {
varSets[i,j] <- 1
upLinks[i,k] <- binToDec(varSets[i,]) + 1
k <- k + 1
varSets[i,j] <- 0
}
}
}
tools <- list(varNames = varNames, n = n, varSets = varSets, lenVarSets = lenVarSets, nVarSets = nVarSets, downLinks = downLinks, nDownLinks = nDownLinks, upLinks = upLinks, nUpLinks = nUpLinks)
postData <- priorData <- data
postData$freq <- data$freq + alpha / length(data$freq)
priorData$freq <- array (alpha / length(data$freq), c(length(data$freq)))
sizeOfStartList <- length(startList)
masterList <- c()
for (r in 1:replicates) {
models <- matrix (nrow = 1, ncol = nVarSets)
generators <- matrix (nrow = 1, ncol = nVarSets)
dualGenerators <- matrix (nrow = 1, ncol = nVarSets)
models[1,] <- randomHierModel(p, tools)
generators[1,] <- findGenerators(models[1,], tools)
dualGenerators[1,] <- findDualGenerators(models[1,], tools)
formulas <- findFormula(generators[1,], tools)
logMargLik <- logLaplace(formulas, postData, tools) - logLaplace(formulas, priorData, tools)
explored <- 0
iteration <- 1
#cat ("\n")
while(1) {
numUnExploredModels <- sum(1 - explored)
#outputMessage1 <- paste ("replicate [", r, "], ", "iteration [", iteration, "].", sep = "")
#outputMessage2 <- paste ("models in list [", length(formulas), "], ", "not studied [", numUnExploredModels, "].", sep = "")
#cat(outputMessage1, "\n", outputMessage2, "\n\n", sep = "")
if (sum(explored) == length(explored)) {
prettyHierFormulas <- vector(mode = "character", length(formulas))
for (i in 1:length(prettyHierFormulas))
prettyHierFormulas[i] <- prettyHierFormula (generators[i,], tools)
currentList <- data.frame(V1 = prettyHierFormulas, V2 = formulas, V3 = logMargLik, stringsAsFactors = F)
currentList <- currentList [logMargLik >= log(c) + max(logMargLik),]
masterList <- rbind(masterList, currentList)
break
}
unExploredModels <- which(explored == 0)
if (length(unExploredModels) == 1) {
m <- unExploredModels
}
else {
unExploredPostProb <- logMargLik[explored == 0]
unExploredPostProb <- unExploredPostProb - max(unExploredPostProb)
unExploredPostProb <- exp(unExploredPostProb)
m <- sample (x = unExploredModels, size = 1, prob = unExploredPostProb)
}
explored[m] <- 1
neighbourList <- findHierNeighbours (models[m,], generators[m,], dualGenerators[m,], tools)
for (i in 1:dim(neighbourList)[1]) {
currentNeighbourGenerators <- findGenerators (neighbourList[i,], tools)
currentNeighbourFormula <- findFormula(currentNeighbourGenerators, tools)
inList <- currentNeighbourFormula %in% formulas
if (!inList) {
models <- rbind(models, neighbourList[i,])
formulas <- c(formulas, currentNeighbourFormula)
generators <- rbind(generators, currentNeighbourGenerators)
currentNeighbourDualGenerators <- findDualGenerators(currentNeighbourGenerators, tools)
dualGenerators <- rbind(dualGenerators, currentNeighbourDualGenerators)
logMargLik <- c(logMargLik, logLaplace(as.formula(currentNeighbourFormula), postData, tools) - logLaplace(as.formula(currentNeighbourFormula), priorData, tools))
explored <- c(explored, 0)
}
}
criteria1 <- logMargLik >= log(cPrime) + max(logMargLik)
criteria2 <- ifelse(logMargLik >= log(c) + max(logMargLik), 1, rbinom(1,1,1-q))
toKeep <- criteria1 & criteria2
models <- models[toKeep,,drop = F]
formulas <- formulas[toKeep]
generators <- generators[toKeep,,drop = F]
dualGenerators <- dualGenerators[toKeep,,drop = F]
logMargLik <- logMargLik[toKeep]
explored <- explored[toKeep]
iteration <- iteration + 1
}
explored <- rep(0,length(explored))
}
masterList <- unique(masterList)
masterList <- masterList[order(masterList$V3, decreasing = T),]
row.names(masterList) <- rep(1:dim(masterList)[1])
return(masterList[1,,drop = F])
}
| /genMOSSplus/R/MOSS.Hierarchical.R | no_license | ingted/R-Examples | R | false | false | 5,065 | r | MOSS.Hierarchical <-
function (startList = NULL, p = 0.2, alpha = 1, c = 0.1, cPrime = 0.0001, q = 0.1, replicates = 5, data) {
tools <- list()
varNames <- colnames(data)[which(colnames(data)!= "freq")]
n <- length(varNames)
varSets <- decToBin (0:(2**n-1),n)
colnames(varSets) <- varNames
lenVarSets <- rowSums(varSets)
nVarSets <- 2 ** n
# lattice
downLinks <- array(NA, c(nVarSets,n))
nDownLinks <- lenVarSets
upLinks <- array(NA,c(nVarSets,n))
nUpLinks <- n - lenVarSets
# downLinks
for(i in 1:nVarSets) {
k = 1
for(j in 1:n) {
if(varSets[i,j] == 1) {
varSets[i,j] <- 0
downLinks[i,k] <- binToDec(varSets[i,]) + 1
k <- k + 1
varSets[i,j] <- 1
}
}
}
# upLinks
for(i in 1:nVarSets) {
k = 1
for(j in 1:n) {
if(varSets[i,j] == 0) {
varSets[i,j] <- 1
upLinks[i,k] <- binToDec(varSets[i,]) + 1
k <- k + 1
varSets[i,j] <- 0
}
}
}
tools <- list(varNames = varNames, n = n, varSets = varSets, lenVarSets = lenVarSets, nVarSets = nVarSets, downLinks = downLinks, nDownLinks = nDownLinks, upLinks = upLinks, nUpLinks = nUpLinks)
postData <- priorData <- data
postData$freq <- data$freq + alpha / length(data$freq)
priorData$freq <- array (alpha / length(data$freq), c(length(data$freq)))
sizeOfStartList <- length(startList)
masterList <- c()
for (r in 1:replicates) {
models <- matrix (nrow = 1, ncol = nVarSets)
generators <- matrix (nrow = 1, ncol = nVarSets)
dualGenerators <- matrix (nrow = 1, ncol = nVarSets)
models[1,] <- randomHierModel(p, tools)
generators[1,] <- findGenerators(models[1,], tools)
dualGenerators[1,] <- findDualGenerators(models[1,], tools)
formulas <- findFormula(generators[1,], tools)
logMargLik <- logLaplace(formulas, postData, tools) - logLaplace(formulas, priorData, tools)
explored <- 0
iteration <- 1
#cat ("\n")
while(1) {
numUnExploredModels <- sum(1 - explored)
#outputMessage1 <- paste ("replicate [", r, "], ", "iteration [", iteration, "].", sep = "")
#outputMessage2 <- paste ("models in list [", length(formulas), "], ", "not studied [", numUnExploredModels, "].", sep = "")
#cat(outputMessage1, "\n", outputMessage2, "\n\n", sep = "")
if (sum(explored) == length(explored)) {
prettyHierFormulas <- vector(mode = "character", length(formulas))
for (i in 1:length(prettyHierFormulas))
prettyHierFormulas[i] <- prettyHierFormula (generators[i,], tools)
currentList <- data.frame(V1 = prettyHierFormulas, V2 = formulas, V3 = logMargLik, stringsAsFactors = F)
currentList <- currentList [logMargLik >= log(c) + max(logMargLik),]
masterList <- rbind(masterList, currentList)
break
}
unExploredModels <- which(explored == 0)
if (length(unExploredModels) == 1) {
m <- unExploredModels
}
else {
unExploredPostProb <- logMargLik[explored == 0]
unExploredPostProb <- unExploredPostProb - max(unExploredPostProb)
unExploredPostProb <- exp(unExploredPostProb)
m <- sample (x = unExploredModels, size = 1, prob = unExploredPostProb)
}
explored[m] <- 1
neighbourList <- findHierNeighbours (models[m,], generators[m,], dualGenerators[m,], tools)
for (i in 1:dim(neighbourList)[1]) {
currentNeighbourGenerators <- findGenerators (neighbourList[i,], tools)
currentNeighbourFormula <- findFormula(currentNeighbourGenerators, tools)
inList <- currentNeighbourFormula %in% formulas
if (!inList) {
models <- rbind(models, neighbourList[i,])
formulas <- c(formulas, currentNeighbourFormula)
generators <- rbind(generators, currentNeighbourGenerators)
currentNeighbourDualGenerators <- findDualGenerators(currentNeighbourGenerators, tools)
dualGenerators <- rbind(dualGenerators, currentNeighbourDualGenerators)
logMargLik <- c(logMargLik, logLaplace(as.formula(currentNeighbourFormula), postData, tools) - logLaplace(as.formula(currentNeighbourFormula), priorData, tools))
explored <- c(explored, 0)
}
}
criteria1 <- logMargLik >= log(cPrime) + max(logMargLik)
criteria2 <- ifelse(logMargLik >= log(c) + max(logMargLik), 1, rbinom(1,1,1-q))
toKeep <- criteria1 & criteria2
models <- models[toKeep,,drop = F]
formulas <- formulas[toKeep]
generators <- generators[toKeep,,drop = F]
dualGenerators <- dualGenerators[toKeep,,drop = F]
logMargLik <- logMargLik[toKeep]
explored <- explored[toKeep]
iteration <- iteration + 1
}
explored <- rep(0,length(explored))
}
masterList <- unique(masterList)
masterList <- masterList[order(masterList$V3, decreasing = T),]
row.names(masterList) <- rep(1:dim(masterList)[1])
return(masterList[1,,drop = F])
}
|
#scrape dogtime.com
library(tidyverse)
library(rvest)
#function to scrape alle elements also missing elements
scrape_css<-function(css,group,html_page){
txt<-html_page %>%
html_nodes(group) %>%
lapply(.%>% html_nodes(css) %>% html_text() %>% ifelse(identical(.,character(0)),NA,.)) %>%
unlist()
return(txt)
}
#function to scrape alle attributes also missing elements
scrape_css_attr<-function(css,group,attribute,html_page){
txt<-html_page %>%
html_nodes(group) %>%
lapply(.%>% html_nodes(css) %>% html_attr(attribute) %>% ifelse(identical(.,character(0)),NA,.)) %>%
unlist()
return(txt)
}
#Get data of one element one level deeper
get_element_data<-function(link){
if(!is.na(link)){
#read the page
html<-read_html(link)
#delay of 2 seconds between requests
Sys.sleep(2)
#here I have to change for the one level deeper
#read the friendly towards stranger
Friendly_towards_strangers<-html %>%
html_node(".paws:nth-child(3) .child-characteristic:nth-child(5) .characteristic-star-block") %>%
html_text()
# #read the dog friendly
# Dog_Friendly<-html %>%
# html_node(".paws:nth-child(3) .child-characteristic:nth-child(4) .characteristic-star-block") %>%
# html_text()
#
# #read the Affectionate_with_family
# Affectionate_with_family<-html %>%
# html_node(".paws:nth-child(3) .parent-characteristic+ .child-characteristic .characteristic-star-block") %>%
# html_text()
#
# #read the Potential_to_Mouthiness
# Potential_to_Mouthiness<-html %>%
# html_node(".paws:nth-child(5) .child-characteristic:nth-child(4) .characteristic-star-block") %>%
# html_text()
#
# #read the Size
# Size<-html %>%
# html_node(".paws:nth-child(4) .child-characteristic:nth-child(7) .characteristic-star-block") %>%
# html_text()
#
#add everything to a tibble and return the tibble
return(tibble(Friendly_towards_strangers=Friendly_towards_strangers))
}
}
#Get all elements from 1 page
get_elements_from_url<-function(url){
#scrape all elements
html_page<-read_html(url)
#delay of 2 seconds between requests
Sys.sleep(2)
#get the house type
Type_of_Breed<-scrape_css(".list-item-title",".list-item",html_page)
#get all urls to go one level deeper
element_urls<-scrape_css_attr(".list-item-title",".list-item","href",html_page)
#Get all content of one episode (one level deeper)
element_data_detail<-element_urls %>%
# Apply to all URLs
map(get_element_data) %>%
# Combine the tibbles into one tibble
bind_rows()
# Combine into a tibble
elements_data<-tibble(Type_of_Breed = Type_of_Breed,element_urls=element_urls)
# Get rid of all the NA's (advertisements, which are links but don't contain data)
# Complete cases gives FALSE back when the column (in this case column 2), contains a NA
elements_data_overview <- elements_data[complete.cases(elements_data[,2]), ]
# Combine the page data en detail data into a tibble and return
return(bind_cols(elements_data_overview,element_data_detail))
}
#call the function and write the returned tibble to friends
Breeds<-get_elements_from_url("https://dogtime.com/dog-breeds/profiles")
#show the data
View(Breeds)
write_rds(Breeds, "Breeds.rds")
| /Scrape_dog_breed_profiles.R | no_license | ESeverijns/DOGS-AS-PETS-AND-THEIR-INFLUENCE-ON-OWNERS | R | false | false | 3,373 | r | #scrape dogtime.com
library(tidyverse)
library(rvest)
#function to scrape alle elements also missing elements
scrape_css<-function(css,group,html_page){
txt<-html_page %>%
html_nodes(group) %>%
lapply(.%>% html_nodes(css) %>% html_text() %>% ifelse(identical(.,character(0)),NA,.)) %>%
unlist()
return(txt)
}
#function to scrape alle attributes also missing elements
scrape_css_attr<-function(css,group,attribute,html_page){
txt<-html_page %>%
html_nodes(group) %>%
lapply(.%>% html_nodes(css) %>% html_attr(attribute) %>% ifelse(identical(.,character(0)),NA,.)) %>%
unlist()
return(txt)
}
#Get data of one element one level deeper
get_element_data<-function(link){
if(!is.na(link)){
#read the page
html<-read_html(link)
#delay of 2 seconds between requests
Sys.sleep(2)
#here I have to change for the one level deeper
#read the friendly towards stranger
Friendly_towards_strangers<-html %>%
html_node(".paws:nth-child(3) .child-characteristic:nth-child(5) .characteristic-star-block") %>%
html_text()
# #read the dog friendly
# Dog_Friendly<-html %>%
# html_node(".paws:nth-child(3) .child-characteristic:nth-child(4) .characteristic-star-block") %>%
# html_text()
#
# #read the Affectionate_with_family
# Affectionate_with_family<-html %>%
# html_node(".paws:nth-child(3) .parent-characteristic+ .child-characteristic .characteristic-star-block") %>%
# html_text()
#
# #read the Potential_to_Mouthiness
# Potential_to_Mouthiness<-html %>%
# html_node(".paws:nth-child(5) .child-characteristic:nth-child(4) .characteristic-star-block") %>%
# html_text()
#
# #read the Size
# Size<-html %>%
# html_node(".paws:nth-child(4) .child-characteristic:nth-child(7) .characteristic-star-block") %>%
# html_text()
#
#add everything to a tibble and return the tibble
return(tibble(Friendly_towards_strangers=Friendly_towards_strangers))
}
}
#Get all elements from 1 page
get_elements_from_url<-function(url){
#scrape all elements
html_page<-read_html(url)
#delay of 2 seconds between requests
Sys.sleep(2)
#get the house type
Type_of_Breed<-scrape_css(".list-item-title",".list-item",html_page)
#get all urls to go one level deeper
element_urls<-scrape_css_attr(".list-item-title",".list-item","href",html_page)
#Get all content of one episode (one level deeper)
element_data_detail<-element_urls %>%
# Apply to all URLs
map(get_element_data) %>%
# Combine the tibbles into one tibble
bind_rows()
# Combine into a tibble
elements_data<-tibble(Type_of_Breed = Type_of_Breed,element_urls=element_urls)
# Get rid of all the NA's (advertisements, which are links but don't contain data)
# Complete cases gives FALSE back when the column (in this case column 2), contains a NA
elements_data_overview <- elements_data[complete.cases(elements_data[,2]), ]
# Combine the page data en detail data into a tibble and return
return(bind_cols(elements_data_overview,element_data_detail))
}
#call the function and write the returned tibble to friends
Breeds<-get_elements_from_url("https://dogtime.com/dog-breeds/profiles")
#show the data
View(Breeds)
write_rds(Breeds, "Breeds.rds")
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(5.6941917864458e+81, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615835054-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 269 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(5.6941917864458e+81, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
library("data.table")
setwd("/Users/mauramz/JHU/ExploratoryDataAnalysis/CourseProject1/ExData_Plotting1")
#Reads in data from file then subsets data for specified dates
powerDT <- data.table::fread(input = "household_power_consumption.txt"
, na.strings="?"
)
# Prevents Scientific Notation
powerDT[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
# Making a POSIXct date capable of being filtered and graphed by time of day
powerDT[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Filter Dates for 2007-02-01 and 2007-02-02
powerDT <- powerDT[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]
png("Plot_2.png", width=480, height=480)
## Plot 2
plot(x = powerDT[, dateTime]
, y = powerDT[, Global_active_power]
, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /Plot2.R | no_license | mauramz/ExData_Plotting1 | R | false | false | 899 | r | library("data.table")
setwd("/Users/mauramz/JHU/ExploratoryDataAnalysis/CourseProject1/ExData_Plotting1")
#Reads in data from file then subsets data for specified dates
powerDT <- data.table::fread(input = "household_power_consumption.txt"
, na.strings="?"
)
# Prevents Scientific Notation
powerDT[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
# Making a POSIXct date capable of being filtered and graphed by time of day
powerDT[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Filter Dates for 2007-02-01 and 2007-02-02
powerDT <- powerDT[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]
png("Plot_2.png", width=480, height=480)
## Plot 2
plot(x = powerDT[, dateTime]
, y = powerDT[, Global_active_power]
, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_score_plot.R
\name{simple.legend}
\alias{simple.legend}
\title{Plot simple legend}
\usage{
simple.legend(labels, colors)
}
\arguments{
\item{labels}{vector of legend labels}
\item{colors}{vector of legend colors}
}
\description{
Plot simple color legend
}
| /source/rCNV2/man/simple.legend.Rd | permissive | talkowski-lab/rCNV2 | R | false | true | 340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_score_plot.R
\name{simple.legend}
\alias{simple.legend}
\title{Plot simple legend}
\usage{
simple.legend(labels, colors)
}
\arguments{
\item{labels}{vector of legend labels}
\item{colors}{vector of legend colors}
}
\description{
Plot simple color legend
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enmtools.rf.R
\name{enmtools.rf}
\alias{enmtools.rf}
\title{Takes an emtools.species object with presence and background points, and builds a random forest model}
\usage{
enmtools.rf(species, env, f = NULL, test.prop = 0, eval = TRUE,
nback = 1000, report = NULL, overwrite = FALSE, rts.reps = 0, ...)
}
\arguments{
\item{species}{An enmtools.species object}
\item{env}{A raster or raster stack of environmental data.}
\item{f}{A formula for fitting the model}
\item{test.prop}{Proportion of data to withhold randomly for model evaluation, or "block" for spatially structured evaluation.}
\item{eval}{Determines whether model evaluation should be done. Turned on by default, but moses turns it off to speed things up.}
\item{nback}{Number of background points to draw from range or env, if background points aren't provided}
\item{report}{Optional name of an html file for generating reports}
\item{overwrite}{TRUE/FALSE whether to overwrite a report file if it already exists}
\item{rts.reps}{The number of replicates to do for a Raes and ter Steege-style test of significance}
\item{...}{Arguments to be passed to rf()}
}
\description{
Takes an emtools.species object with presence and background points, and builds a random forest model
}
\examples{
## NOT RUN
data(euro.worldclim)
data(iberolacerta.clade)
enmtools.rf(iberolacerta.clade$species$monticola, env = euro.worldclim, nback = 500)
}
| /man/enmtools.rf.Rd | no_license | helixcn/ENMTools | R | false | true | 1,487 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enmtools.rf.R
\name{enmtools.rf}
\alias{enmtools.rf}
\title{Takes an emtools.species object with presence and background points, and builds a random forest model}
\usage{
enmtools.rf(species, env, f = NULL, test.prop = 0, eval = TRUE,
nback = 1000, report = NULL, overwrite = FALSE, rts.reps = 0, ...)
}
\arguments{
\item{species}{An enmtools.species object}
\item{env}{A raster or raster stack of environmental data.}
\item{f}{A formula for fitting the model}
\item{test.prop}{Proportion of data to withhold randomly for model evaluation, or "block" for spatially structured evaluation.}
\item{eval}{Determines whether model evaluation should be done. Turned on by default, but moses turns it off to speed things up.}
\item{nback}{Number of background points to draw from range or env, if background points aren't provided}
\item{report}{Optional name of an html file for generating reports}
\item{overwrite}{TRUE/FALSE whether to overwrite a report file if it already exists}
\item{rts.reps}{The number of replicates to do for a Raes and ter Steege-style test of significance}
\item{...}{Arguments to be passed to rf()}
}
\description{
Takes an emtools.species object with presence and background points, and builds a random forest model
}
\examples{
## NOT RUN
data(euro.worldclim)
data(iberolacerta.clade)
enmtools.rf(iberolacerta.clade$species$monticola, env = euro.worldclim, nback = 500)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bw_hpi.R
\name{bw_hpi}
\alias{bw_hpi}
\title{Default Plug-in bandwidth selector using ks::Hpi function.}
\usage{
bw_hpi(x)
}
\arguments{
\item{x}{2d matrix of data values.}
}
\value{
A numeric vector of estimated x and y bandwidths. Must subset your data if you wish to obtain group specific bandwidths.
}
\description{
A simple wrapper for the ks::Hpi function.
}
\examples{
data("rodents")
# Subset the data for a single species
spec1<- rodents[rodents$Species == "Species1", ]
# Calculate the bandwidth
bw_hpi(as.matrix(spec1[, c("Ave_C", "Ave_N")]))
}
\author{
Shannon E. Albeke, Wyoming Geographic Information Science Center, University of Wyoming
}
| /man/bw_hpi.Rd | no_license | cran/rKIN | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bw_hpi.R
\name{bw_hpi}
\alias{bw_hpi}
\title{Default Plug-in bandwidth selector using ks::Hpi function.}
\usage{
bw_hpi(x)
}
\arguments{
\item{x}{2d matrix of data values.}
}
\value{
A numeric vector of estimated x and y bandwidths. Must subset your data if you wish to obtain group specific bandwidths.
}
\description{
A simple wrapper for the ks::Hpi function.
}
\examples{
data("rodents")
# Subset the data for a single species
spec1<- rodents[rodents$Species == "Species1", ]
# Calculate the bandwidth
bw_hpi(as.matrix(spec1[, c("Ave_C", "Ave_N")]))
}
\author{
Shannon E. Albeke, Wyoming Geographic Information Science Center, University of Wyoming
}
|
pdf("/Users/wiliarj/Desktop/temp/polyCrCg_heatmap.pdf")
library(latticeExtra)
breaks1 = 50000
breaks2 = 50
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/4fold.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "4fold polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "4fold polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/0fold.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "0fold polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "0fold polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/intron.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intron polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intron polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/intergene.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intergene polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intergene polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/3utr.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "3utr polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "3utr polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/5utr.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "5utr polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "5utr polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
dev.off()
| /random R/.svn/text-base/polyHeatmap.R.svn-base | no_license | bioCKO/science | R | false | false | 5,771 | pdf("/Users/wiliarj/Desktop/temp/polyCrCg_heatmap.pdf")
library(latticeExtra)
breaks1 = 50000
breaks2 = 50
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/4fold.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "4fold polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "4fold polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/0fold.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "0fold polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "0fold polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/intron.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intron polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intron polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/intergene.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intergene polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "intergene polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/3utr.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "3utr polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "3utr polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
########################
mydata = read.table("/Users/wiliarj/Desktop/temp/5utr.aligned.2.afs",header=T)
attach(mydata)
test = ifelse(Cg == 0 & Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks1)
ats = c()
max = max(freqs[test])
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks1), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "5utr polymorphism", xlim=c(-1,27), ylim=c(-1,25))
test = ifelse(Cg == 0 | Cr == 0, F, T)
freqs = Num_Sites/sum(Num_Sites[test])
col.l <- colorRampPalette(c('blue', 'green', 'purple', 'yellow', 'red'))(breaks2)
ats = c()
levelplot(freqs[test]~Cg[test]*Cr[test], at=do.breaks(c(0,max), breaks2), col.regions=col.l, xlab = "C. grandiflora", ylab = "C. rubella", main = "5utr polymorphism, neither fixed", xlim=c(0,27), ylim=c(0,25))
dev.off()
| |
# Getting and Cleaning Data Course Project
# run_analysis.R
#
# Purpose:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for
# each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
#
# Requirements:
# 1. Project dataset in current working directory "UCI HAR Dataset"
# The download_data.R script can be used to download and unzip data set.
# 2. Packages dplyr, data.table, tidyr
#
# Usage:
# Set working directory to getting-cleaning-data directory
# source("run_analysis.R")
#
# Initialize prerequisites
datasetPath <- "./UCI HAR Dataset"
library(dplyr)
# Read labels and features
activityLabels <- tbl_df(read.table(file.path(datasetPath, "activity_labels.txt")))
names(activityLabels) <- c("activityNumber", "activityName")
features <- tbl_df(read.table(file.path(datasetPath, "features.txt")))
names(features) <- c("featureNumber", "featureName")
# Read and merge training and test subjects (7352 + 2947 = 10299 rows x 1 column)
# Subject files: Each row identifies the subject who performed the activity for
# each window sample. Its range is from 1 to 30.
trainingSubjects <- tbl_df(read.table(file.path(datasetPath, "train", "subject_train.txt")))
testSubjects <- tbl_df(read.table(file.path(datasetPath, "test", "subject_test.txt")))
mergedSubjects <- rbind(trainingSubjects, testSubjects)
# Update column names and clean up
names(mergedSubjects) <- "subject"
rm("trainingSubjects")
rm("testSubjects")
# Read and merge training and test labels (7352 + 2947 = 10299 rows x 1 column)
# Labels: Each row contains the activity label. Its range is from 1 to 6 (see also activityLabels)
trainingLabels <- tbl_df(read.table(file.path(datasetPath, "train", "y_train.txt"))) # Note: lowercase y
testLabels <- tbl_df(read.table(file.path(datasetPath, "test", "y_test.txt"))) # Note: lowercase y
mergedLabels <- rbind(trainingLabels, testLabels)
# Update column names and clean up
names(mergedLabels) <- "activityNumber"
rm("trainingLabels")
rm("testLabels")
# Read and merge training and test data (7352 + 2947 = 10299 rows x 561 columns)
# Set: Each row contains a list of 561 features (see also featureLabels)
trainingSet <- tbl_df(read.table(file.path(datasetPath, "train", "X_train.txt"))) # Note: uppercase X
testSet <- tbl_df(read.table(file.path(datasetPath, "test", "X_test.txt"))) # Note: uppercase X
mergedSet <- rbind(trainingSet, testSet)
# Update column names and clean up
names(mergedSet) <- features$featureName
rm("trainingSet")
rm("testSet")
# Extract only measurements on mean and std deviation for each measurement
# 10299 rows x 79 columns (includes mean frequency components)
# meanStdFeatures <- grep("mean|std", features$featureName)
# mergedSet <- mergedSet[, meanStdFeatures]
# 10299 rows x 66 columns (excludes mean of frequency components)
meanStdFeatures <- grep("mean\\(\\)|std\\(\\)", features$featureName)
mergedSet <- mergedSet[, meanStdFeatures]
# Add columns for subject, activityNumber, and an activityName placeholder
mergedSet <- cbind(mergedSubjects, mergedLabels, data.frame(activityName=""), mergedSet)
# Use descriptive activity names
mergedSet$activityName <- activityLabels[mergedSet$activityNumber,]$activityName
# Remove activity number column
mergedSet$activityNumber <- NULL
# Appropriately label the data set with descriptive variable names.
names(mergedSet) <- tolower(names(mergedSet)) # all lowercase
names(mergedSet) <- gsub("activityname", "activity", names(mergedSet)) # "activity" is sufficient
names(mergedSet) <- gsub("-", "_", names(mergedSet)) # all underscores
names(mergedSet) <- gsub("\\(\\)", "", names(mergedSet)) # remove parentheses
names(mergedSet) <- gsub("^t", "time_", names(mergedSet)) # t stands for time
names(mergedSet) <- gsub("^f", "frequency_", names(mergedSet)) # f stands for frequency
names(mergedSet) <- gsub("_body", "_body_", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("_body_body", "_body_", names(mergedSet)) # remove duplicates
names(mergedSet) <- gsub("_gravity", "_gravity_", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("_acc", "_accelerometer_", names(mergedSet)) # acc stands for accelerometer
names(mergedSet) <- gsub("_gyro", "_gyroscope_", names(mergedSet)) # gyro stands for gyroscope
names(mergedSet) <- gsub("jerkmag", "jerk_mag", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("mag", "magnitude", names(mergedSet)) # mag stands for magnitude
names(mergedSet) <- gsub("__", "_", names(mergedSet)) # remove duplicates
# Sort column names alphabetically
mergedSet <- mergedSet[, order(names(mergedSet))]
# Make 'subject' the first column again
mergedSet <- mergedSet[, c("subject", setdiff(names(mergedSet), "subject"))]
# Save merged set
write.table(mergedSet, file = "merged.txt", row.name=FALSE)
# Create a second, independent tidy data set with the average of each variable for each activity and each subject.
summarySet <- group_by(mergedSet, subject, activity) %>%
summarize_each(funs(mean)) %>%
arrange(subject, activity)
# Save summary set
write.table(summarySet, file = "summary.txt", row.name=FALSE)
| /run_analysis.R | no_license | celeph/getting-cleaning-data | R | false | false | 5,629 | r | # Getting and Cleaning Data Course Project
# run_analysis.R
#
# Purpose:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for
# each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
#
# Requirements:
# 1. Project dataset in current working directory "UCI HAR Dataset"
# The download_data.R script can be used to download and unzip data set.
# 2. Packages dplyr, data.table, tidyr
#
# Usage:
# Set working directory to getting-cleaning-data directory
# source("run_analysis.R")
#
# Initialize prerequisites
datasetPath <- "./UCI HAR Dataset"
library(dplyr)
# Read labels and features
activityLabels <- tbl_df(read.table(file.path(datasetPath, "activity_labels.txt")))
names(activityLabels) <- c("activityNumber", "activityName")
features <- tbl_df(read.table(file.path(datasetPath, "features.txt")))
names(features) <- c("featureNumber", "featureName")
# Read and merge training and test subjects (7352 + 2947 = 10299 rows x 1 column)
# Subject files: Each row identifies the subject who performed the activity for
# each window sample. Its range is from 1 to 30.
trainingSubjects <- tbl_df(read.table(file.path(datasetPath, "train", "subject_train.txt")))
testSubjects <- tbl_df(read.table(file.path(datasetPath, "test", "subject_test.txt")))
mergedSubjects <- rbind(trainingSubjects, testSubjects)
# Update column names and clean up
names(mergedSubjects) <- "subject"
rm("trainingSubjects")
rm("testSubjects")
# Read and merge training and test labels (7352 + 2947 = 10299 rows x 1 column)
# Labels: Each row contains the activity label. Its range is from 1 to 6 (see also activityLabels)
trainingLabels <- tbl_df(read.table(file.path(datasetPath, "train", "y_train.txt"))) # Note: lowercase y
testLabels <- tbl_df(read.table(file.path(datasetPath, "test", "y_test.txt"))) # Note: lowercase y
mergedLabels <- rbind(trainingLabels, testLabels)
# Update column names and clean up
names(mergedLabels) <- "activityNumber"
rm("trainingLabels")
rm("testLabels")
# Read and merge training and test data (7352 + 2947 = 10299 rows x 561 columns)
# Set: Each row contains a list of 561 features (see also featureLabels)
trainingSet <- tbl_df(read.table(file.path(datasetPath, "train", "X_train.txt"))) # Note: uppercase X
testSet <- tbl_df(read.table(file.path(datasetPath, "test", "X_test.txt"))) # Note: uppercase X
mergedSet <- rbind(trainingSet, testSet)
# Update column names and clean up
names(mergedSet) <- features$featureName
rm("trainingSet")
rm("testSet")
# Extract only measurements on mean and std deviation for each measurement
# 10299 rows x 79 columns (includes mean frequency components)
# meanStdFeatures <- grep("mean|std", features$featureName)
# mergedSet <- mergedSet[, meanStdFeatures]
# 10299 rows x 66 columns (excludes mean of frequency components)
meanStdFeatures <- grep("mean\\(\\)|std\\(\\)", features$featureName)
mergedSet <- mergedSet[, meanStdFeatures]
# Add columns for subject, activityNumber, and an activityName placeholder
mergedSet <- cbind(mergedSubjects, mergedLabels, data.frame(activityName=""), mergedSet)
# Use descriptive activity names
mergedSet$activityName <- activityLabels[mergedSet$activityNumber,]$activityName
# Remove activity number column
mergedSet$activityNumber <- NULL
# Appropriately label the data set with descriptive variable names.
names(mergedSet) <- tolower(names(mergedSet)) # all lowercase
names(mergedSet) <- gsub("activityname", "activity", names(mergedSet)) # "activity" is sufficient
names(mergedSet) <- gsub("-", "_", names(mergedSet)) # all underscores
names(mergedSet) <- gsub("\\(\\)", "", names(mergedSet)) # remove parentheses
names(mergedSet) <- gsub("^t", "time_", names(mergedSet)) # t stands for time
names(mergedSet) <- gsub("^f", "frequency_", names(mergedSet)) # f stands for frequency
names(mergedSet) <- gsub("_body", "_body_", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("_body_body", "_body_", names(mergedSet)) # remove duplicates
names(mergedSet) <- gsub("_gravity", "_gravity_", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("_acc", "_accelerometer_", names(mergedSet)) # acc stands for accelerometer
names(mergedSet) <- gsub("_gyro", "_gyroscope_", names(mergedSet)) # gyro stands for gyroscope
names(mergedSet) <- gsub("jerkmag", "jerk_mag", names(mergedSet)) # separate with underscore
names(mergedSet) <- gsub("mag", "magnitude", names(mergedSet)) # mag stands for magnitude
names(mergedSet) <- gsub("__", "_", names(mergedSet)) # remove duplicates
# Sort column names alphabetically
mergedSet <- mergedSet[, order(names(mergedSet))]
# Make 'subject' the first column again
mergedSet <- mergedSet[, c("subject", setdiff(names(mergedSet), "subject"))]
# Save merged set
write.table(mergedSet, file = "merged.txt", row.name=FALSE)
# Create a second, independent tidy data set with the average of each variable for each activity and each subject.
summarySet <- group_by(mergedSet, subject, activity) %>%
summarize_each(funs(mean)) %>%
arrange(subject, activity)
# Save summary set
write.table(summarySet, file = "summary.txt", row.name=FALSE)
|
#Load data
NEI <- readRDS("D:\\Documents\\exdata_data_NEI_data\\summarySCC_PM25.rds")
SCC <- readRDS("D:\\Documents\\exdata_data_NEI_data\\Source_Classification_Code.rds")
#Merge data
head(NEI)
head(SCC)
mydata <- NEI
head(mydata)
##there are so many outliers!
summary(mydata$Emissions)
summary(subset(mydata,year == 1999)$Emissions)
summary(subset(mydata,year == 2002)$Emissions)
summary(subset(mydata,year == 2005)$Emissions)
summary(subset(mydata,year == 2008)$Emissions)
##it seems that the pollution was decreasing from 1999 to 2008
## but there are so many outliers, it is hard to recognize difference if we plot with outliers
## and we shold use log10
negtive <- mydata$Emissions < 1
png("plot1.png", width=480, height=480)
boxplot(log10(Emissions + 1) ~ year, data = mydata[negtive,], col ="green")
dev.off()
##Answer: total emissions from PM2.5 decreased in the United States from 1999 to 2008 | /plot1.R | no_license | HeJiaolong/Exploratory-Data-Analysis-Course-Project-2 | R | false | false | 930 | r | #Load data
NEI <- readRDS("D:\\Documents\\exdata_data_NEI_data\\summarySCC_PM25.rds")
SCC <- readRDS("D:\\Documents\\exdata_data_NEI_data\\Source_Classification_Code.rds")
#Merge data
head(NEI)
head(SCC)
mydata <- NEI
head(mydata)
##there are so many outliers!
summary(mydata$Emissions)
summary(subset(mydata,year == 1999)$Emissions)
summary(subset(mydata,year == 2002)$Emissions)
summary(subset(mydata,year == 2005)$Emissions)
summary(subset(mydata,year == 2008)$Emissions)
##it seems that the pollution was decreasing from 1999 to 2008
## but there are so many outliers, it is hard to recognize difference if we plot with outliers
## and we shold use log10
negtive <- mydata$Emissions < 1
png("plot1.png", width=480, height=480)
boxplot(log10(Emissions + 1) ~ year, data = mydata[negtive,], col ="green")
dev.off()
##Answer: total emissions from PM2.5 decreased in the United States from 1999 to 2008 |
library(tidyr)
library(panelsim)
#setwd("~/Box Sync/Between Effects/Simulation")
#source("tw_sim.R")
# to run code, first install R package from Github
# remotes::install_github("saudiwin/panelsim")
set.seed(22902)
## Changing the mean of the within-time slopes
sim1 <- tw_sim(iter=30, parallel=FALSE, arg="beta.mean", at=-2:5)
sim1 <- gather(sim1, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim1, path="sim1.csv")
## Changing the variance of the within-time slopes
sim2 <- tw_sim(iter=30, parallel=FALSE, arg="beta.sd", at=seq(0,1,by=.1))
sim2 <- gather(sim2, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim2, path="sim2.csv")
## Changing the variance of the within-case slopes
sim3 <- tw_sim(iter=30, parallel=FALSE, arg="gamma.sd", at=seq(0,1,by=.1))
sim3 <- gather(sim3, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim3, path="sim3.csv")
require(dplyr)
## Changing N and T, temporal autocorrelation
iterations <- 500
parallel <- TRUE
sim4 <- data.frame()
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=30, T=30, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(30,30)")
sim4 <- bind_rows(sim4, simtemp)
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=100, T=10, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(100,10)")
sim4 <- bind_rows(sim4, simtemp)
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=1000, T=3, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(1000,3)")
sim4 <- bind_rows(sim4, simtemp)
sim4 <- sim4 %>%
mutate(ac="AC =",
type = factor(type, levels=c("(30,30)","(100,10)","(1000,3)"))) %>%
unite(time.ac, ac, time.ac, sep=" ")
write_csv(sim4, path="sim4.csv")
source("sim_august2017_graphs.R")
| /simulation_code.R | no_license | ibrahimalnafrah/panelsim | R | false | false | 2,301 | r | library(tidyr)
library(panelsim)
#setwd("~/Box Sync/Between Effects/Simulation")
#source("tw_sim.R")
# to run code, first install R package from Github
# remotes::install_github("saudiwin/panelsim")
set.seed(22902)
## Changing the mean of the within-time slopes
sim1 <- tw_sim(iter=30, parallel=FALSE, arg="beta.mean", at=-2:5)
sim1 <- gather(sim1, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim1, path="sim1.csv")
## Changing the variance of the within-time slopes
sim2 <- tw_sim(iter=30, parallel=FALSE, arg="beta.sd", at=seq(0,1,by=.1))
sim2 <- gather(sim2, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim2, path="sim2.csv")
## Changing the variance of the within-case slopes
sim3 <- tw_sim(iter=30, parallel=FALSE, arg="gamma.sd", at=seq(0,1,by=.1))
sim3 <- gather(sim3, `Two-way FE`:`RE (v_t)`, key="Model", value="Coefficient",
factor_key=TRUE)
write_csv(sim3, path="sim3.csv")
require(dplyr)
## Changing N and T, temporal autocorrelation
iterations <- 500
parallel <- TRUE
sim4 <- data.frame()
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=30, T=30, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(30,30)")
sim4 <- bind_rows(sim4, simtemp)
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=100, T=10, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(100,10)")
sim4 <- bind_rows(sim4, simtemp)
simtemp <- tw_sim(iter=iterations, parallel=parallel, arg="time.ac", at=c(0,.25,.75,.95), N=1000, T=3, re_vt=FALSE)
simtemp <- gather(simtemp, `Two-way FE`:`RE (u_i)`, key="Model", value="Coefficient",
factor_key=TRUE)
simtemp <- mutate(simtemp, type="(1000,3)")
sim4 <- bind_rows(sim4, simtemp)
sim4 <- sim4 %>%
mutate(ac="AC =",
type = factor(type, levels=c("(30,30)","(100,10)","(1000,3)"))) %>%
unite(time.ac, ac, time.ac, sep=" ")
write_csv(sim4, path="sim4.csv")
source("sim_august2017_graphs.R")
|
toy_df_no_na <- data.frame(chocolate_brand = (c("Lindt", "Rakhat", "Lindt", "Richart", "Lindt")),
price = c(3, 4, 4, 6, 3))
toy_df_na <- data.frame(chocolate_brand = (c("Lindt", "Rakhat", "Lindt", "Richart", "not available")),
price = c(3, NA, 4, 6, 3))
# Test the type of input
test_that("When the input is not a dataframe, error message is expeted",{
expect_error(autoimpute_na("test"), "The input should be of type 'data.frame'")
})
# Test an input with no missing values
test_that("When the input of autoimpute_na() doe not have missing values, the output should be the original dataframe", {
expect_equal(autoimpute_na(toy_df_no_na), toy_df_no_na)
})
# Test an input with missing values
test_that("When the input of autoimpute_na() has missing values, the output should be an imputed dataframe", {
expect_equal(autoimpute_na(toy_df_na), toy_df_no_na)
})
| /tests/testthat/test-autoimpute_na.R | permissive | UBC-MDS/Rmleda | R | false | false | 913 | r | toy_df_no_na <- data.frame(chocolate_brand = (c("Lindt", "Rakhat", "Lindt", "Richart", "Lindt")),
price = c(3, 4, 4, 6, 3))
toy_df_na <- data.frame(chocolate_brand = (c("Lindt", "Rakhat", "Lindt", "Richart", "not available")),
price = c(3, NA, 4, 6, 3))
# Test the type of input
test_that("When the input is not a dataframe, error message is expeted",{
expect_error(autoimpute_na("test"), "The input should be of type 'data.frame'")
})
# Test an input with no missing values
test_that("When the input of autoimpute_na() doe not have missing values, the output should be the original dataframe", {
expect_equal(autoimpute_na(toy_df_no_na), toy_df_no_na)
})
# Test an input with missing values
test_that("When the input of autoimpute_na() has missing values, the output should be an imputed dataframe", {
expect_equal(autoimpute_na(toy_df_na), toy_df_no_na)
})
|
# You can test the following two methods using the following tracks IDs
# trackIDs = '1CUVN2kn7mW5FjkqXTR2W1,387r02a1k6RZ4cwFraHkee'
#' Building a function to access to the specific tracks
#'
#' This function is building the access to the specific tracks
#'
#'@param artistID a character of the ID of spotify tracks
#'@param access_token a charater of the access token
#'@param market a charater of the market country
#'@return a response
#'@author Yumeng Li, Mattias Karlsson, Ashraf Sarhan
#'@details This function is to get the acesss to the specific tracks
#'@references
#'\url{https://developer.spotify.com/web-api/authorization-guide/}
#'@seealso \code{\link{auth}}
#'@export
getTracks <- function(trackIDs, market, access_token) {
stopifnot(is.character(trackIDs),is.character(market),is.character(access_token))
HeaderValue = paste0('Bearer ', access_token)
URI = paste0('https://api.spotify.com/v1/tracks?ids=', trackIDs, '&market=', market)
response = GET(url = URI, add_headers(Authorization = HeaderValue))
if((status_code(response) %% 400) %in% c(1:99) ){
stop("Bad request")
} else if((status_code(response) %% 500) %in% c(1:99)){
stop("Server failed")
} else if((status_code(response) %% 300) %in% c(1:99)){
stop("Redirections")
} else if((status_code(response) %% 100) %in% c(1:99)){
stop("Information from server")
}
return(response)
}
getAudioFeatures <- function(trackIDs, access_token) {
HeaderValue = paste0('Bearer ', access_token)
URI = paste0('https://api.spotify.com/v1/audio-features?ids=', trackIDs)
response = GET(url = URI, add_headers(Authorization = HeaderValue))
if(status_code(response) == 400 ){
stop("Bad request")
} else if(status_code(response) == 500){
stop("Server failed")
} else if(status_code(response) == 300){
stop("Redirections")
} else if(status_code(response) == 100){
stop("Information from server")
}
return(response)
}
| /spotifyr/R/tracks.R | no_license | ashrafsarhan/adv-r | R | false | false | 1,942 | r | # You can test the following two methods using the following tracks IDs
# trackIDs = '1CUVN2kn7mW5FjkqXTR2W1,387r02a1k6RZ4cwFraHkee'
#' Building a function to access to the specific tracks
#'
#' This function is building the access to the specific tracks
#'
#'@param artistID a character of the ID of spotify tracks
#'@param access_token a charater of the access token
#'@param market a charater of the market country
#'@return a response
#'@author Yumeng Li, Mattias Karlsson, Ashraf Sarhan
#'@details This function is to get the acesss to the specific tracks
#'@references
#'\url{https://developer.spotify.com/web-api/authorization-guide/}
#'@seealso \code{\link{auth}}
#'@export
getTracks <- function(trackIDs, market, access_token) {
stopifnot(is.character(trackIDs),is.character(market),is.character(access_token))
HeaderValue = paste0('Bearer ', access_token)
URI = paste0('https://api.spotify.com/v1/tracks?ids=', trackIDs, '&market=', market)
response = GET(url = URI, add_headers(Authorization = HeaderValue))
if((status_code(response) %% 400) %in% c(1:99) ){
stop("Bad request")
} else if((status_code(response) %% 500) %in% c(1:99)){
stop("Server failed")
} else if((status_code(response) %% 300) %in% c(1:99)){
stop("Redirections")
} else if((status_code(response) %% 100) %in% c(1:99)){
stop("Information from server")
}
return(response)
}
getAudioFeatures <- function(trackIDs, access_token) {
HeaderValue = paste0('Bearer ', access_token)
URI = paste0('https://api.spotify.com/v1/audio-features?ids=', trackIDs)
response = GET(url = URI, add_headers(Authorization = HeaderValue))
if(status_code(response) == 400 ){
stop("Bad request")
} else if(status_code(response) == 500){
stop("Server failed")
} else if(status_code(response) == 300){
stop("Redirections")
} else if(status_code(response) == 100){
stop("Information from server")
}
return(response)
}
|
## This function simply cache the inverse matrix object of a given matrix
makeCacheMatrix <- function(b = matrix()) {
cnt <- NULL
set <- function(a){
b <<- a
cnt <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Desolve the matrix cached by makeCacheMatrix function
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if (!is.null(inv)){
message("retrieving cached matrix")
return(inv)
}
cached <- x$get()
inv <- solve (cached, ...)
x$setInverse(inv)
inv
}
| /cachematrix.R | no_license | DurararaKris/ProgrammingAssignment2 | R | false | false | 726 | r | ## This function simply cache the inverse matrix object of a given matrix
makeCacheMatrix <- function(b = matrix()) {
cnt <- NULL
set <- function(a){
b <<- a
cnt <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Desolve the matrix cached by makeCacheMatrix function
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if (!is.null(inv)){
message("retrieving cached matrix")
return(inv)
}
cached <- x$get()
inv <- solve (cached, ...)
x$setInverse(inv)
inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{showGridLines}
\alias{showGridLines}
\title{Set worksheet gridlines to show or hide.}
\usage{
showGridLines(wb, sheet, showGridLines = FALSE)
}
\arguments{
\item{wb}{A workbook object}
\item{sheet}{A name or index of a worksheet}
\item{showGridLines}{A logical. If \code{TRUE}, grid lines are hidden.}
}
\description{
Set worksheet gridlines to show or hide.
}
\examples{
wb <- loadWorkbook(file = system.file("loadExample.xlsx", package = "openxlsx"))
names(wb) ## list worksheets in workbook
showGridLines(wb, 1, showGridLines = FALSE)
showGridLines(wb, "testing", showGridLines = FALSE)
saveWorkbook(wb, "showGridLinesExample.xlsx", overwrite = TRUE)
}
\author{
Alexander Walker
}
| /man/showGridLines.Rd | no_license | ecortens/openxlsx | R | false | true | 782 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{showGridLines}
\alias{showGridLines}
\title{Set worksheet gridlines to show or hide.}
\usage{
showGridLines(wb, sheet, showGridLines = FALSE)
}
\arguments{
\item{wb}{A workbook object}
\item{sheet}{A name or index of a worksheet}
\item{showGridLines}{A logical. If \code{TRUE}, grid lines are hidden.}
}
\description{
Set worksheet gridlines to show or hide.
}
\examples{
wb <- loadWorkbook(file = system.file("loadExample.xlsx", package = "openxlsx"))
names(wb) ## list worksheets in workbook
showGridLines(wb, 1, showGridLines = FALSE)
showGridLines(wb, "testing", showGridLines = FALSE)
saveWorkbook(wb, "showGridLinesExample.xlsx", overwrite = TRUE)
}
\author{
Alexander Walker
}
|
pdf(file='S8A_c0_t3.pdf',width=4.5,height=4.5);
gstable=read.table('S8A_c0_t3.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=3
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("SOX9","CCR5","ROSA26","PROM1","AAVS1","LRIG1","CTRL","mKate2","EGFP","KRT20")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='3_vs_0 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
randval=val;
for(i in 1:20){
randval0=sample(val)
vindex=sort(which(names(randval0) %in% targetgenelist))
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(862.0493757976374,462.5302567852563),c(574.9949073922577,340.8721743553874),c(527.152495991361,226.10039847815258),c(460.7047023790046,325.9518434913469),c(827.496523119212,453.3485147150775),c(1966.8546909257502,1269.375841202217))
targetgene="SOX9"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1142.9020501325306,734.5393656143028),c(1924.328103013842,1257.8986636144937),c(1119.8668150135804,791.9252535529203),c(863.8213169606336,519.9161447238737),c(1274.0256961942473,856.1974480441718),c(1298.8328724761936,712.7327281976283),c(2594.121862626395,1844.3824383471635),c(1605.3786936745314,1147.7177587723481),c(1004.6906394188292,693.2215262984982),c(1518.553576687719,1007.6961922021217))
targetgene="CCR5"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5234.3141954906905,3556.777334435507),c(2201.636895022743,1550.5666921014424),c(2592.349921463399,1838.6438495533018),c(1809.151927419091,1273.9667122373064),c(1546.0186647141595,1170.6721139477952),c(2415.1558051637817,1440.3857872592969),c(1866.7400152164666,1080.0024110047796),c(1401.6054599299716,949.162586504732),c(639.6707598416178,508.4389671361502),c(1683.3441048463628,1181.0015737767462))
targetgene="ROSA26"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1717.0109869432902,1131.6497101495354),c(2475.4018047056516,1801.9168812725866),c(1592.08913495206,1101.8090484214542),c(2682.7189207762035,2022.2786909568774),c(1526.5273119212018,1034.0937006538857),c(3860.1738235871594,2704.0230396676525))
targetgene="PROM1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1676.2563401943783,1286.5916075838022),c(1727.6426339212671,1201.6604934346485),c(1192.5164026964235,791.9252535529203),c(778.7681411368173,542.8704998993206),c(1353.763048529075,921.6173602941956),c(657.3901714715796,479.74602316684155),c(996.7169041853464,679.44891319323),c(1991.6618672076968,1263.6372524083554),c(636.1268775156256,441.871337127354),c(870.9090816126183,580.7451859388082))
targetgene="AAVS1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1933.187808828823,1251.0123570618596),c(1000.2607865113388,710.4372926800835),c(1145.5599618770248,791.9252535529203),c(2039.5042786085933,1556.3052808953041),c(3188.6081228116104,2339.0487923780456),c(1111.0071091985994,734.5393656143028))
targetgene="LRIG1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1470.7111652868223,1093.7750241100478),c(1504.3780473837496,1039.8322894477474),c(4643.371817631467,3157.3715543827298),c(754.846935436369,561.2339840396783),c(1616.0103406525084,1166.0812429127056),c(2778.403743577997,2137.0504668341123),c(1801.1781921856082,1345.1252132811921),c(1646.1333404234433,1221.1716953337784),c(1572.597782159102,1230.3534374039573),c(2460.340304820184,1943.0861656015854))
targetgene="CTRL"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1011.7784040708138,927.3559490880573),c(1078.2261976831703,1117.8770970442672),c(1008.2345217448216,1069.6729511758285),c(1146.445932458523,980.1509659915853),c(864.7072875421317,871.1177789082122),c(1577.9136056480907,1605.6571445225152))
targetgene="mKate2"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(253.3875863084525,3243.450386290656),c(287.94043898687784,4241.964836422599),c(190.48367502208842,2593.842134825507),c(169.22038106613437,2287.40149323329),c(246.29982165646783,3621.0495289267583),c(842.5580230046795,3787.4686039487488))
targetgene="EGFP"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(411.9763203966099,2075.0737078604056),c(380.0813794626788,3078.179029027438),c(435.0115555155601,2397.5823980754353),c(166.56246932164012,1137.388298943397),c(551.0737016918093,4059.4777127777957),c(318.0634387578128,1744.5309933339693))
targetgene="KRT20"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=9
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("EGFP","KRT20","mKate2","SOX9","LRIG1","ROSA26","CTRL","AAVS1","PROM1","CCR5")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='3_vs_0 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
randval=val;
for(i in 1:20){
randval0=sample(val)
vindex=sort(which(names(randval0) %in% targetgenelist))
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(253.3875863084525,3243.450386290656),c(287.94043898687784,4241.964836422599),c(190.48367502208842,2593.842134825507),c(169.22038106613437,2287.40149323329),c(246.29982165646783,3621.0495289267583),c(842.5580230046795,3787.4686039487488))
targetgene="EGFP"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(411.9763203966099,2075.0737078604056),c(380.0813794626788,3078.179029027438),c(435.0115555155601,2397.5823980754353),c(166.56246932164012,1137.388298943397),c(551.0737016918093,4059.4777127777957),c(318.0634387578128,1744.5309933339693))
targetgene="KRT20"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1011.7784040708138,927.3559490880573),c(1078.2261976831703,1117.8770970442672),c(1008.2345217448216,1069.6729511758285),c(1146.445932458523,980.1509659915853),c(864.7072875421317,871.1177789082122),c(1577.9136056480907,1605.6571445225152))
targetgene="mKate2"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(862.0493757976374,462.5302567852563),c(574.9949073922577,340.8721743553874),c(527.152495991361,226.10039847815258),c(460.7047023790046,325.9518434913469),c(827.496523119212,453.3485147150775),c(1966.8546909257502,1269.375841202217))
targetgene="SOX9"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1933.187808828823,1251.0123570618596),c(1000.2607865113388,710.4372926800835),c(1145.5599618770248,791.9252535529203),c(2039.5042786085933,1556.3052808953041),c(3188.6081228116104,2339.0487923780456),c(1111.0071091985994,734.5393656143028))
targetgene="LRIG1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5234.3141954906905,3556.777334435507),c(2201.636895022743,1550.5666921014424),c(2592.349921463399,1838.6438495533018),c(1809.151927419091,1273.9667122373064),c(1546.0186647141595,1170.6721139477952),c(2415.1558051637817,1440.3857872592969),c(1866.7400152164666,1080.0024110047796),c(1401.6054599299716,949.162586504732),c(639.6707598416178,508.4389671361502),c(1683.3441048463628,1181.0015737767462))
targetgene="ROSA26"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1470.7111652868223,1093.7750241100478),c(1504.3780473837496,1039.8322894477474),c(4643.371817631467,3157.3715543827298),c(754.846935436369,561.2339840396783),c(1616.0103406525084,1166.0812429127056),c(2778.403743577997,2137.0504668341123),c(1801.1781921856082,1345.1252132811921),c(1646.1333404234433,1221.1716953337784),c(1572.597782159102,1230.3534374039573),c(2460.340304820184,1943.0861656015854))
targetgene="CTRL"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1676.2563401943783,1286.5916075838022),c(1727.6426339212671,1201.6604934346485),c(1192.5164026964235,791.9252535529203),c(778.7681411368173,542.8704998993206),c(1353.763048529075,921.6173602941956),c(657.3901714715796,479.74602316684155),c(996.7169041853464,679.44891319323),c(1991.6618672076968,1263.6372524083554),c(636.1268775156256,441.871337127354),c(870.9090816126183,580.7451859388082))
targetgene="AAVS1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1717.0109869432902,1131.6497101495354),c(2475.4018047056516,1801.9168812725866),c(1592.08913495206,1101.8090484214542),c(2682.7189207762035,2022.2786909568774),c(1526.5273119212018,1034.0937006538857),c(3860.1738235871594,2704.0230396676525))
targetgene="PROM1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1142.9020501325306,734.5393656143028),c(1924.328103013842,1257.8986636144937),c(1119.8668150135804,791.9252535529203),c(863.8213169606336,519.9161447238737),c(1274.0256961942473,856.1974480441718),c(1298.8328724761936,712.7327281976283),c(2594.121862626395,1844.3824383471635),c(1605.3786936745314,1147.7177587723481),c(1004.6906394188292,693.2215262984982),c(1518.553576687719,1007.6961922021217))
targetgene="CCR5"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
dev.off()
Sweave("S8A_c0_t3_summary.Rnw");
library(tools);
texi2dvi("S8A_c0_t3_summary.tex",pdf=TRUE);
| /Miniscreen05/MaGeCK_stats/S8A_c0_t3.R | no_license | davidchen0420/Miniscreen | R | false | false | 36,771 | r | pdf(file='S8A_c0_t3.pdf',width=4.5,height=4.5);
gstable=read.table('S8A_c0_t3.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=3
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("SOX9","CCR5","ROSA26","PROM1","AAVS1","LRIG1","CTRL","mKate2","EGFP","KRT20")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='3_vs_0 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
randval=val;
for(i in 1:20){
randval0=sample(val)
vindex=sort(which(names(randval0) %in% targetgenelist))
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(862.0493757976374,462.5302567852563),c(574.9949073922577,340.8721743553874),c(527.152495991361,226.10039847815258),c(460.7047023790046,325.9518434913469),c(827.496523119212,453.3485147150775),c(1966.8546909257502,1269.375841202217))
targetgene="SOX9"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1142.9020501325306,734.5393656143028),c(1924.328103013842,1257.8986636144937),c(1119.8668150135804,791.9252535529203),c(863.8213169606336,519.9161447238737),c(1274.0256961942473,856.1974480441718),c(1298.8328724761936,712.7327281976283),c(2594.121862626395,1844.3824383471635),c(1605.3786936745314,1147.7177587723481),c(1004.6906394188292,693.2215262984982),c(1518.553576687719,1007.6961922021217))
targetgene="CCR5"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5234.3141954906905,3556.777334435507),c(2201.636895022743,1550.5666921014424),c(2592.349921463399,1838.6438495533018),c(1809.151927419091,1273.9667122373064),c(1546.0186647141595,1170.6721139477952),c(2415.1558051637817,1440.3857872592969),c(1866.7400152164666,1080.0024110047796),c(1401.6054599299716,949.162586504732),c(639.6707598416178,508.4389671361502),c(1683.3441048463628,1181.0015737767462))
targetgene="ROSA26"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1717.0109869432902,1131.6497101495354),c(2475.4018047056516,1801.9168812725866),c(1592.08913495206,1101.8090484214542),c(2682.7189207762035,2022.2786909568774),c(1526.5273119212018,1034.0937006538857),c(3860.1738235871594,2704.0230396676525))
targetgene="PROM1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1676.2563401943783,1286.5916075838022),c(1727.6426339212671,1201.6604934346485),c(1192.5164026964235,791.9252535529203),c(778.7681411368173,542.8704998993206),c(1353.763048529075,921.6173602941956),c(657.3901714715796,479.74602316684155),c(996.7169041853464,679.44891319323),c(1991.6618672076968,1263.6372524083554),c(636.1268775156256,441.871337127354),c(870.9090816126183,580.7451859388082))
targetgene="AAVS1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1933.187808828823,1251.0123570618596),c(1000.2607865113388,710.4372926800835),c(1145.5599618770248,791.9252535529203),c(2039.5042786085933,1556.3052808953041),c(3188.6081228116104,2339.0487923780456),c(1111.0071091985994,734.5393656143028))
targetgene="LRIG1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1470.7111652868223,1093.7750241100478),c(1504.3780473837496,1039.8322894477474),c(4643.371817631467,3157.3715543827298),c(754.846935436369,561.2339840396783),c(1616.0103406525084,1166.0812429127056),c(2778.403743577997,2137.0504668341123),c(1801.1781921856082,1345.1252132811921),c(1646.1333404234433,1221.1716953337784),c(1572.597782159102,1230.3534374039573),c(2460.340304820184,1943.0861656015854))
targetgene="CTRL"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1011.7784040708138,927.3559490880573),c(1078.2261976831703,1117.8770970442672),c(1008.2345217448216,1069.6729511758285),c(1146.445932458523,980.1509659915853),c(864.7072875421317,871.1177789082122),c(1577.9136056480907,1605.6571445225152))
targetgene="mKate2"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(253.3875863084525,3243.450386290656),c(287.94043898687784,4241.964836422599),c(190.48367502208842,2593.842134825507),c(169.22038106613437,2287.40149323329),c(246.29982165646783,3621.0495289267583),c(842.5580230046795,3787.4686039487488))
targetgene="EGFP"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(411.9763203966099,2075.0737078604056),c(380.0813794626788,3078.179029027438),c(435.0115555155601,2397.5823980754353),c(166.56246932164012,1137.388298943397),c(551.0737016918093,4059.4777127777957),c(318.0634387578128,1744.5309933339693))
targetgene="KRT20"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=9
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("EGFP","KRT20","mKate2","SOX9","LRIG1","ROSA26","CTRL","AAVS1","PROM1","CCR5")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='3_vs_0 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
randval=val;
for(i in 1:20){
randval0=sample(val)
vindex=sort(which(names(randval0) %in% targetgenelist))
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(253.3875863084525,3243.450386290656),c(287.94043898687784,4241.964836422599),c(190.48367502208842,2593.842134825507),c(169.22038106613437,2287.40149323329),c(246.29982165646783,3621.0495289267583),c(842.5580230046795,3787.4686039487488))
targetgene="EGFP"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(411.9763203966099,2075.0737078604056),c(380.0813794626788,3078.179029027438),c(435.0115555155601,2397.5823980754353),c(166.56246932164012,1137.388298943397),c(551.0737016918093,4059.4777127777957),c(318.0634387578128,1744.5309933339693))
targetgene="KRT20"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1011.7784040708138,927.3559490880573),c(1078.2261976831703,1117.8770970442672),c(1008.2345217448216,1069.6729511758285),c(1146.445932458523,980.1509659915853),c(864.7072875421317,871.1177789082122),c(1577.9136056480907,1605.6571445225152))
targetgene="mKate2"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(862.0493757976374,462.5302567852563),c(574.9949073922577,340.8721743553874),c(527.152495991361,226.10039847815258),c(460.7047023790046,325.9518434913469),c(827.496523119212,453.3485147150775),c(1966.8546909257502,1269.375841202217))
targetgene="SOX9"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1933.187808828823,1251.0123570618596),c(1000.2607865113388,710.4372926800835),c(1145.5599618770248,791.9252535529203),c(2039.5042786085933,1556.3052808953041),c(3188.6081228116104,2339.0487923780456),c(1111.0071091985994,734.5393656143028))
targetgene="LRIG1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5234.3141954906905,3556.777334435507),c(2201.636895022743,1550.5666921014424),c(2592.349921463399,1838.6438495533018),c(1809.151927419091,1273.9667122373064),c(1546.0186647141595,1170.6721139477952),c(2415.1558051637817,1440.3857872592969),c(1866.7400152164666,1080.0024110047796),c(1401.6054599299716,949.162586504732),c(639.6707598416178,508.4389671361502),c(1683.3441048463628,1181.0015737767462))
targetgene="ROSA26"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1470.7111652868223,1093.7750241100478),c(1504.3780473837496,1039.8322894477474),c(4643.371817631467,3157.3715543827298),c(754.846935436369,561.2339840396783),c(1616.0103406525084,1166.0812429127056),c(2778.403743577997,2137.0504668341123),c(1801.1781921856082,1345.1252132811921),c(1646.1333404234433,1221.1716953337784),c(1572.597782159102,1230.3534374039573),c(2460.340304820184,1943.0861656015854))
targetgene="CTRL"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1676.2563401943783,1286.5916075838022),c(1727.6426339212671,1201.6604934346485),c(1192.5164026964235,791.9252535529203),c(778.7681411368173,542.8704998993206),c(1353.763048529075,921.6173602941956),c(657.3901714715796,479.74602316684155),c(996.7169041853464,679.44891319323),c(1991.6618672076968,1263.6372524083554),c(636.1268775156256,441.871337127354),c(870.9090816126183,580.7451859388082))
targetgene="AAVS1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1717.0109869432902,1131.6497101495354),c(2475.4018047056516,1801.9168812725866),c(1592.08913495206,1101.8090484214542),c(2682.7189207762035,2022.2786909568774),c(1526.5273119212018,1034.0937006538857),c(3860.1738235871594,2704.0230396676525))
targetgene="PROM1"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1142.9020501325306,734.5393656143028),c(1924.328103013842,1257.8986636144937),c(1119.8668150135804,791.9252535529203),c(863.8213169606336,519.9161447238737),c(1274.0256961942473,856.1974480441718),c(1298.8328724761936,712.7327281976283),c(2594.121862626395,1844.3824383471635),c(1605.3786936745314,1147.7177587723481),c(1004.6906394188292,693.2215262984982),c(1518.553576687719,1007.6961922021217))
targetgene="CCR5"
collabel=c("S57_MNSC508_F1","S60_MNSC508_F4")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
dev.off()
Sweave("S8A_c0_t3_summary.Rnw");
library(tools);
texi2dvi("S8A_c0_t3_summary.tex",pdf=TRUE);
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oa_readme.R
\name{oa_readme}
\alias{oa_readme}
\title{Print readme from one or more datasets}
\usage{
oa_readme(x)
}
\arguments{
\item{x}{input, either an object of class \code{oa} or a list of such
objects}
}
\value{
character string
}
\description{
Print readme from one or more datasets
}
\examples{
\dontrun{
# single
url1 <- "http://data.openaddresses.io/runs/33311/us/mi/ottawa.zip"
xx <- oa_get(url1)
oa_readme(xx)
cat(oa_readme(xx))
# many at once
url2 <- "http://data.openaddresses.io/runs/101436/us/ca/yolo.zip"
zz <- oa_get(url2)
oa_readme(list(xx, zz))
cat(oa_readme(list(xx, zz)), sep = "\\n\\n")
}
}
| /man/oa_readme.Rd | permissive | cran/openadds | R | false | true | 694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oa_readme.R
\name{oa_readme}
\alias{oa_readme}
\title{Print readme from one or more datasets}
\usage{
oa_readme(x)
}
\arguments{
\item{x}{input, either an object of class \code{oa} or a list of such
objects}
}
\value{
character string
}
\description{
Print readme from one or more datasets
}
\examples{
\dontrun{
# single
url1 <- "http://data.openaddresses.io/runs/33311/us/mi/ottawa.zip"
xx <- oa_get(url1)
oa_readme(xx)
cat(oa_readme(xx))
# many at once
url2 <- "http://data.openaddresses.io/runs/101436/us/ca/yolo.zip"
zz <- oa_get(url2)
oa_readme(list(xx, zz))
cat(oa_readme(list(xx, zz)), sep = "\\n\\n")
}
}
|
library(testthat)
library(equivtest)
test_check("equivtest")
| /tests/testthat.R | permissive | jwbowers/equivtest | R | false | false | 62 | r | library(testthat)
library(equivtest)
test_check("equivtest")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appmesh_operations.R
\name{appmesh_update_virtual_node}
\alias{appmesh_update_virtual_node}
\title{Updates an existing virtual node in a specified service mesh}
\usage{
appmesh_update_virtual_node(clientToken, meshName, spec,
virtualNodeName)
}
\arguments{
\item{clientToken}{Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request. Up to 36 letters, numbers, hyphens, and
underscores are allowed.}
\item{meshName}{[required] The name of the service mesh that the virtual node resides in.}
\item{spec}{[required] The new virtual node specification to apply. This overwrites the
existing data.}
\item{virtualNodeName}{[required] The name of the virtual node to update.}
}
\description{
Updates an existing virtual node in a specified service mesh.
}
\section{Request syntax}{
\preformatted{svc$update_virtual_node(
clientToken = "string",
meshName = "string",
spec = list(
backends = list(
list(
virtualService = list(
virtualServiceName = "string"
)
)
),
listeners = list(
list(
healthCheck = list(
healthyThreshold = 123,
intervalMillis = 123,
path = "string",
port = 123,
protocol = "http"|"tcp",
timeoutMillis = 123,
unhealthyThreshold = 123
),
portMapping = list(
port = 123,
protocol = "http"|"tcp"
)
)
),
logging = list(
accessLog = list(
file = list(
path = "string"
)
)
),
serviceDiscovery = list(
dns = list(
hostname = "string"
)
)
),
virtualNodeName = "string"
)
}
}
\keyword{internal}
| /cran/paws.networking/man/appmesh_update_virtual_node.Rd | permissive | peoplecure/paws | R | false | true | 1,789 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appmesh_operations.R
\name{appmesh_update_virtual_node}
\alias{appmesh_update_virtual_node}
\title{Updates an existing virtual node in a specified service mesh}
\usage{
appmesh_update_virtual_node(clientToken, meshName, spec,
virtualNodeName)
}
\arguments{
\item{clientToken}{Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request. Up to 36 letters, numbers, hyphens, and
underscores are allowed.}
\item{meshName}{[required] The name of the service mesh that the virtual node resides in.}
\item{spec}{[required] The new virtual node specification to apply. This overwrites the
existing data.}
\item{virtualNodeName}{[required] The name of the virtual node to update.}
}
\description{
Updates an existing virtual node in a specified service mesh.
}
\section{Request syntax}{
\preformatted{svc$update_virtual_node(
clientToken = "string",
meshName = "string",
spec = list(
backends = list(
list(
virtualService = list(
virtualServiceName = "string"
)
)
),
listeners = list(
list(
healthCheck = list(
healthyThreshold = 123,
intervalMillis = 123,
path = "string",
port = 123,
protocol = "http"|"tcp",
timeoutMillis = 123,
unhealthyThreshold = 123
),
portMapping = list(
port = 123,
protocol = "http"|"tcp"
)
)
),
logging = list(
accessLog = list(
file = list(
path = "string"
)
)
),
serviceDiscovery = list(
dns = list(
hostname = "string"
)
)
),
virtualNodeName = "string"
)
}
}
\keyword{internal}
|
# User options
use_gpu <- FALSE
make_args_from_build_script <- character(0L)
# For Windows, the package will be built with Visual Studio
# unless you set one of these to TRUE
use_mingw <- FALSE
use_msys2 <- FALSE
if (use_mingw && use_msys2) {
stop("Cannot use both MinGW and MSYS2. Please choose only one.")
}
if (.Machine$sizeof.pointer != 8L) {
stop("LightGBM only supports 64-bit R, please check the version of R and Rtools.")
}
R_int_UUID <- .Internal(internalsID())
R_ver <- as.double(R.Version()$major) + as.double(R.Version()$minor) / 10.0
if (!(R_int_UUID == "0310d4b8-ccb1-4bb8-ba94-d36a55f60262"
|| R_int_UUID == "2fdf6c18-697a-4ba7-b8ef-11c0d92f1327")) {
warning("Warning: unmatched R_INTERNALS_UUID, may not run normally.")
}
# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.run_shell_command <- function(cmd, args, strict = TRUE) {
on_windows <- .Platform$OS.type == "windows"
has_processx <- suppressMessages({
suppressWarnings({
require("processx") # nolint
})
})
if (has_processx && on_windows) {
result <- processx::run(
command = cmd
, args = args
, windows_verbatim_args = TRUE
, error_on_status = FALSE
, echo = TRUE
)
exit_code <- result$status
} else {
if (on_windows) {
message(paste0(
"Using system() to run shell commands. Installing "
, "'processx' with install.packages('processx') might "
, "make this faster."
))
}
cmd <- paste0(cmd, " ", paste0(args, collapse = " "))
exit_code <- system(cmd)
}
if (exit_code != 0L && isTRUE(strict)) {
stop(paste0("Command failed with exit code: ", exit_code))
}
return(invisible(exit_code))
}
# try to generate Visual Studio build files
.generate_vs_makefiles <- function(cmake_args) {
vs_versions <- c(
"Visual Studio 16 2019"
, "Visual Studio 15 2017"
, "Visual Studio 14 2015"
)
working_vs_version <- NULL
for (vs_version in vs_versions) {
message(sprintf("Trying '%s'", vs_version))
# if the build directory is not empty, clean it
if (file.exists("CMakeCache.txt")) {
file.remove("CMakeCache.txt")
}
vs_cmake_args <- c(
cmake_args
, "-G"
, shQuote(vs_version)
, "-A"
, "x64"
)
exit_code <- .run_shell_command("cmake", c(vs_cmake_args, ".."), strict = FALSE)
if (exit_code == 0L) {
message(sprintf("Successfully created build files for '%s'", vs_version))
return(invisible(TRUE))
}
}
return(invisible(FALSE))
}
# Move in CMakeLists.txt
write_succeeded <- file.copy(
"../inst/bin/CMakeLists.txt"
, "CMakeLists.txt"
, overwrite = TRUE
)
if (!write_succeeded) {
stop("Copying CMakeLists.txt failed")
}
# Get some paths
source_dir <- file.path(R_PACKAGE_SOURCE, "src", fsep = "/")
build_dir <- file.path(source_dir, "build", fsep = "/")
# Prepare building package
dir.create(
build_dir
, recursive = TRUE
, showWarnings = FALSE
)
setwd(build_dir)
use_visual_studio <- !(use_mingw || use_msys2)
# If using MSVC to build, pull in the script used
# to create R.def from R.dll
if (WINDOWS && use_visual_studio) {
write_succeeded <- file.copy(
"../../inst/make-r-def.R"
, file.path(build_dir, "make-r-def.R")
, overwrite = TRUE
)
if (!write_succeeded) {
stop("Copying make-r-def.R failed")
}
}
# Prepare installation steps
cmake_args <- NULL
build_cmd <- "make"
build_args <- c("_lightgbm", make_args_from_build_script)
lib_folder <- file.path(source_dir, fsep = "/")
# add in command-line arguments
# NOTE: build_r.R replaces the line below
command_line_args <- NULL
cmake_args <- c(cmake_args, command_line_args)
WINDOWS_BUILD_TOOLS <- list(
"MinGW" = c(
build_tool = "mingw32-make.exe"
, makefile_generator = "MinGW Makefiles"
)
, "MSYS2" = c(
build_tool = "make.exe"
, makefile_generator = "MSYS Makefiles"
)
)
if (use_mingw) {
windows_toolchain <- "MinGW"
} else if (use_msys2) {
windows_toolchain <- "MSYS2"
} else {
# Rtools 4.0 moved from MinGW to MSYS toolchain. If user tries
# Visual Studio install but that fails, fall back to the toolchain
# supported in Rtools
if (R_ver >= 4.0) {
windows_toolchain <- "MSYS2"
} else {
windows_toolchain <- "MinGW"
}
}
windows_build_tool <- WINDOWS_BUILD_TOOLS[[windows_toolchain]][["build_tool"]]
windows_makefile_generator <- WINDOWS_BUILD_TOOLS[[windows_toolchain]][["makefile_generator"]]
if (use_gpu) {
cmake_args <- c(cmake_args, "-DUSE_GPU=ON")
}
cmake_args <- c(cmake_args, "-D__BUILD_FOR_R=ON")
# Pass in R version, used to help find R executable for linking
R_version_string <- paste(
R.Version()[["major"]]
, R.Version()[["minor"]]
, sep = "."
)
r_version_arg <- sprintf("-DCMAKE_R_VERSION='%s'", R_version_string)
cmake_args <- c(cmake_args, r_version_arg)
# the checks below might already run `cmake -G`. If they do, set this flag
# to TRUE to avoid re-running it later
makefiles_already_generated <- FALSE
# Check if Windows installation (for gcc vs Visual Studio)
if (WINDOWS) {
if (!use_visual_studio) {
message(sprintf("Trying to build with %s", windows_toolchain))
# Must build twice for Windows due sh.exe in Rtools
cmake_args <- c(cmake_args, "-G", shQuote(windows_makefile_generator))
.run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE)
build_cmd <- windows_build_tool
build_args <- c("_lightgbm", make_args_from_build_script)
} else {
visual_studio_succeeded <- .generate_vs_makefiles(cmake_args)
if (!isTRUE(visual_studio_succeeded)) {
warning(sprintf("Building with Visual Studio failed. Attempting with %s", windows_toolchain))
# Must build twice for Windows due sh.exe in Rtools
cmake_args <- c(cmake_args, "-G", shQuote(windows_makefile_generator))
.run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE)
build_cmd <- windows_build_tool
build_args <- c("_lightgbm", make_args_from_build_script)
} else {
build_cmd <- "cmake"
build_args <- c("--build", ".", "--target", "_lightgbm", "--config", "Release")
lib_folder <- file.path(source_dir, "Release", fsep = "/")
makefiles_already_generated <- TRUE
}
}
} else {
.run_shell_command("cmake", c(cmake_args, ".."))
makefiles_already_generated <- TRUE
}
# generate build files
if (!makefiles_already_generated) {
.run_shell_command("cmake", c(cmake_args, ".."))
}
# build the library
message("Building lib_lightgbm")
.run_shell_command(build_cmd, build_args)
src <- file.path(lib_folder, paste0("lib_lightgbm", SHLIB_EXT), fsep = "/")
# Packages with install.libs.R need to copy some artifacts into the
# expected places in the package structure.
# see https://cran.r-project.org/doc/manuals/r-devel/R-exts.html#Package-subdirectories,
# especially the paragraph on install.libs.R
dest <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH), fsep = "/")
dir.create(dest, recursive = TRUE, showWarnings = FALSE)
if (file.exists(src)) {
message(paste0("Found library file: ", src, " to move to ", dest))
file.copy(src, dest, overwrite = TRUE)
symbols_file <- file.path(source_dir, "symbols.rds")
if (file.exists(symbols_file)) {
file.copy(symbols_file, dest, overwrite = TRUE)
}
} else {
stop(paste0("Cannot find lib_lightgbm", SHLIB_EXT))
}
# clean up the "build" directory
if (dir.exists(build_dir)) {
message("Removing 'build/' directory")
unlink(
x = build_dir
, recursive = TRUE
, force = TRUE
)
}
| /R-package/src/install.libs.R | permissive | NProkoptsev/LightGBM | R | false | false | 7,748 | r | # User options
use_gpu <- FALSE
make_args_from_build_script <- character(0L)
# For Windows, the package will be built with Visual Studio
# unless you set one of these to TRUE
use_mingw <- FALSE
use_msys2 <- FALSE
if (use_mingw && use_msys2) {
stop("Cannot use both MinGW and MSYS2. Please choose only one.")
}
if (.Machine$sizeof.pointer != 8L) {
stop("LightGBM only supports 64-bit R, please check the version of R and Rtools.")
}
R_int_UUID <- .Internal(internalsID())
R_ver <- as.double(R.Version()$major) + as.double(R.Version()$minor) / 10.0
if (!(R_int_UUID == "0310d4b8-ccb1-4bb8-ba94-d36a55f60262"
|| R_int_UUID == "2fdf6c18-697a-4ba7-b8ef-11c0d92f1327")) {
warning("Warning: unmatched R_INTERNALS_UUID, may not run normally.")
}
# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.run_shell_command <- function(cmd, args, strict = TRUE) {
on_windows <- .Platform$OS.type == "windows"
has_processx <- suppressMessages({
suppressWarnings({
require("processx") # nolint
})
})
if (has_processx && on_windows) {
result <- processx::run(
command = cmd
, args = args
, windows_verbatim_args = TRUE
, error_on_status = FALSE
, echo = TRUE
)
exit_code <- result$status
} else {
if (on_windows) {
message(paste0(
"Using system() to run shell commands. Installing "
, "'processx' with install.packages('processx') might "
, "make this faster."
))
}
cmd <- paste0(cmd, " ", paste0(args, collapse = " "))
exit_code <- system(cmd)
}
if (exit_code != 0L && isTRUE(strict)) {
stop(paste0("Command failed with exit code: ", exit_code))
}
return(invisible(exit_code))
}
# try to generate Visual Studio build files
.generate_vs_makefiles <- function(cmake_args) {
vs_versions <- c(
"Visual Studio 16 2019"
, "Visual Studio 15 2017"
, "Visual Studio 14 2015"
)
working_vs_version <- NULL
for (vs_version in vs_versions) {
message(sprintf("Trying '%s'", vs_version))
# if the build directory is not empty, clean it
if (file.exists("CMakeCache.txt")) {
file.remove("CMakeCache.txt")
}
vs_cmake_args <- c(
cmake_args
, "-G"
, shQuote(vs_version)
, "-A"
, "x64"
)
exit_code <- .run_shell_command("cmake", c(vs_cmake_args, ".."), strict = FALSE)
if (exit_code == 0L) {
message(sprintf("Successfully created build files for '%s'", vs_version))
return(invisible(TRUE))
}
}
return(invisible(FALSE))
}
# Move in CMakeLists.txt
write_succeeded <- file.copy(
"../inst/bin/CMakeLists.txt"
, "CMakeLists.txt"
, overwrite = TRUE
)
if (!write_succeeded) {
stop("Copying CMakeLists.txt failed")
}
# Get some paths
source_dir <- file.path(R_PACKAGE_SOURCE, "src", fsep = "/")
build_dir <- file.path(source_dir, "build", fsep = "/")
# Prepare building package
dir.create(
build_dir
, recursive = TRUE
, showWarnings = FALSE
)
setwd(build_dir)
use_visual_studio <- !(use_mingw || use_msys2)
# If using MSVC to build, pull in the script used
# to create R.def from R.dll
if (WINDOWS && use_visual_studio) {
write_succeeded <- file.copy(
"../../inst/make-r-def.R"
, file.path(build_dir, "make-r-def.R")
, overwrite = TRUE
)
if (!write_succeeded) {
stop("Copying make-r-def.R failed")
}
}
# Prepare installation steps
cmake_args <- NULL
build_cmd <- "make"
build_args <- c("_lightgbm", make_args_from_build_script)
lib_folder <- file.path(source_dir, fsep = "/")
# add in command-line arguments
# NOTE: build_r.R replaces the line below
command_line_args <- NULL
cmake_args <- c(cmake_args, command_line_args)
WINDOWS_BUILD_TOOLS <- list(
"MinGW" = c(
build_tool = "mingw32-make.exe"
, makefile_generator = "MinGW Makefiles"
)
, "MSYS2" = c(
build_tool = "make.exe"
, makefile_generator = "MSYS Makefiles"
)
)
if (use_mingw) {
windows_toolchain <- "MinGW"
} else if (use_msys2) {
windows_toolchain <- "MSYS2"
} else {
# Rtools 4.0 moved from MinGW to MSYS toolchain. If user tries
# Visual Studio install but that fails, fall back to the toolchain
# supported in Rtools
if (R_ver >= 4.0) {
windows_toolchain <- "MSYS2"
} else {
windows_toolchain <- "MinGW"
}
}
windows_build_tool <- WINDOWS_BUILD_TOOLS[[windows_toolchain]][["build_tool"]]
windows_makefile_generator <- WINDOWS_BUILD_TOOLS[[windows_toolchain]][["makefile_generator"]]
if (use_gpu) {
cmake_args <- c(cmake_args, "-DUSE_GPU=ON")
}
cmake_args <- c(cmake_args, "-D__BUILD_FOR_R=ON")
# Pass in R version, used to help find R executable for linking
R_version_string <- paste(
R.Version()[["major"]]
, R.Version()[["minor"]]
, sep = "."
)
r_version_arg <- sprintf("-DCMAKE_R_VERSION='%s'", R_version_string)
cmake_args <- c(cmake_args, r_version_arg)
# the checks below might already run `cmake -G`. If they do, set this flag
# to TRUE to avoid re-running it later
makefiles_already_generated <- FALSE
# Check if Windows installation (for gcc vs Visual Studio)
if (WINDOWS) {
if (!use_visual_studio) {
message(sprintf("Trying to build with %s", windows_toolchain))
# Must build twice for Windows due sh.exe in Rtools
cmake_args <- c(cmake_args, "-G", shQuote(windows_makefile_generator))
.run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE)
build_cmd <- windows_build_tool
build_args <- c("_lightgbm", make_args_from_build_script)
} else {
visual_studio_succeeded <- .generate_vs_makefiles(cmake_args)
if (!isTRUE(visual_studio_succeeded)) {
warning(sprintf("Building with Visual Studio failed. Attempting with %s", windows_toolchain))
# Must build twice for Windows due sh.exe in Rtools
cmake_args <- c(cmake_args, "-G", shQuote(windows_makefile_generator))
.run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE)
build_cmd <- windows_build_tool
build_args <- c("_lightgbm", make_args_from_build_script)
} else {
build_cmd <- "cmake"
build_args <- c("--build", ".", "--target", "_lightgbm", "--config", "Release")
lib_folder <- file.path(source_dir, "Release", fsep = "/")
makefiles_already_generated <- TRUE
}
}
} else {
.run_shell_command("cmake", c(cmake_args, ".."))
makefiles_already_generated <- TRUE
}
# generate build files
if (!makefiles_already_generated) {
.run_shell_command("cmake", c(cmake_args, ".."))
}
# build the library
message("Building lib_lightgbm")
.run_shell_command(build_cmd, build_args)
src <- file.path(lib_folder, paste0("lib_lightgbm", SHLIB_EXT), fsep = "/")
# Packages with install.libs.R need to copy some artifacts into the
# expected places in the package structure.
# see https://cran.r-project.org/doc/manuals/r-devel/R-exts.html#Package-subdirectories,
# especially the paragraph on install.libs.R
dest <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH), fsep = "/")
dir.create(dest, recursive = TRUE, showWarnings = FALSE)
if (file.exists(src)) {
message(paste0("Found library file: ", src, " to move to ", dest))
file.copy(src, dest, overwrite = TRUE)
symbols_file <- file.path(source_dir, "symbols.rds")
if (file.exists(symbols_file)) {
file.copy(symbols_file, dest, overwrite = TRUE)
}
} else {
stop(paste0("Cannot find lib_lightgbm", SHLIB_EXT))
}
# clean up the "build" directory
if (dir.exists(build_dir)) {
message("Removing 'build/' directory")
unlink(
x = build_dir
, recursive = TRUE
, force = TRUE
)
}
|
library(shiny)
library(shinycssloaders)
library(dygraphs)
library(DT)
source("app.R")
fluidPage(
sidebarPanel(
h3("Historische Preise"),
htmlOutput("station"),
verbatimTextOutput('openingtimes'),
selectInput("price", "Sorte:",
c("Diesel" = "Diesel",
"E10" = "E10",
"E5" = "E5")),
dateRangeInput("daterange", "Zeitraum:",
start = format(Sys.Date()-8,"%Y-%m-%d"),
end = format(Sys.Date()-1,"%Y-%m-%d"),
min="2014-06-09",
max=format(Sys.Date()-1,"%Y-%m-%d")
)
),
mainPanel(
withSpinner(dygraphOutput("dygraph"))
),
fluidRow(
column(12,
h4("Unterschied zum Durchschnittspreis im Zeitraum")
)
),
fluidRow(
column(12,
dataTableOutput('table')
)
),
fluidRow(
column(12,
p('(c) 2019',
tags$a(href='https://www.raphaelvolz.de/','Raphael Volz (raphael.volz@hs-pforzheim.de)'),' | ',
tags$a(href='https://github.com/volzinnovation/wanntanken','Open Source - Fork me on Github'),' | ',
tags$a(href='http://tankerkoenig.de','Daten von tankerkoenig.de unter CC-BY-SA 4.0')
)
)
)
) | /src/shiny-viewer/ui.R | no_license | volzinnovation/wanntanken | R | false | false | 1,359 | r | library(shiny)
library(shinycssloaders)
library(dygraphs)
library(DT)
source("app.R")
fluidPage(
sidebarPanel(
h3("Historische Preise"),
htmlOutput("station"),
verbatimTextOutput('openingtimes'),
selectInput("price", "Sorte:",
c("Diesel" = "Diesel",
"E10" = "E10",
"E5" = "E5")),
dateRangeInput("daterange", "Zeitraum:",
start = format(Sys.Date()-8,"%Y-%m-%d"),
end = format(Sys.Date()-1,"%Y-%m-%d"),
min="2014-06-09",
max=format(Sys.Date()-1,"%Y-%m-%d")
)
),
mainPanel(
withSpinner(dygraphOutput("dygraph"))
),
fluidRow(
column(12,
h4("Unterschied zum Durchschnittspreis im Zeitraum")
)
),
fluidRow(
column(12,
dataTableOutput('table')
)
),
fluidRow(
column(12,
p('(c) 2019',
tags$a(href='https://www.raphaelvolz.de/','Raphael Volz (raphael.volz@hs-pforzheim.de)'),' | ',
tags$a(href='https://github.com/volzinnovation/wanntanken','Open Source - Fork me on Github'),' | ',
tags$a(href='http://tankerkoenig.de','Daten von tankerkoenig.de unter CC-BY-SA 4.0')
)
)
)
) |
#
# Copyright 2007-2018 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
setClass(Class = "SymmMatrix",
representation = representation(),
contains = "MxMatrix")
setMethod("imxSymmetricMatrix", "SymmMatrix",
function(.Object) { return(TRUE) }
)
setMethod("imxSquareMatrix", "SymmMatrix",
function(.Object) { return(TRUE) }
)
populateSymmTriangle <- function(input, n, default, byrow, strname) {
len <- length(input)
if (len == n * n || len == 1) {
output <- matrix(input, n, n, byrow)
} else if (len == n * (n + 1) / 2) {
if(byrow) {
output <- matrix(default, n, n)
output[upper.tri(output, TRUE)] <- input
output[lower.tri(output)] <- t(output)[lower.tri(output)]
} else {
output <- matrix(default, n, n)
output[lower.tri(output, TRUE)] <- input
output[upper.tri(output)] <- t(output)[upper.tri(output)]
}
} else {
stop(paste(
"illegal number of elements (", len,
") for '", strname, "' matrix in Symmetric MxMatrix construction", sep=""),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
return(output)
}
setMethod("imxCreateMatrix", "SymmMatrix",
function(.Object, labels, values, free, lbound, ubound, nrow, ncol, byrow, name, condenseSlots,
joinKey, joinModel) {
if (nrow != ncol) {
stop(paste("non-square MxMatrix attempted in 'nrow' and 'ncol' arguments to",
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix"))),
call. = FALSE)
}
if (single.na(values)) {
values <- 0
}
if (is.vector(values)) {
values <- populateSymmTriangle(values, nrow, 0, byrow, 'values')
}
if(condenseSlots && all.false(free) && all.na(labels)){
labels <- as.character(NA)
free <- FALSE
}
else{
if (is.vector(labels)) {
labels <- populateSymmTriangle(labels, nrow, as.character(NA), byrow, 'labels')
}
if (is.vector(free)) {
free <- populateSymmTriangle(free, nrow, FALSE, byrow, 'free')
}}
if(condenseSlots && all.na(lbound)){lbound <- as.numeric(NA)}
else{if (is.vector(lbound)) {
lbound <- populateSymmTriangle(lbound, nrow, as.numeric(NA), byrow, 'lbound')
}}
if(condenseSlots && all.na(ubound)){ubound <- as.numeric(NA)}
else{if (is.vector(ubound)) {
ubound <- populateSymmTriangle(ubound, nrow, as.numeric(NA), byrow, 'ubound')
}}
return(callNextMethod(.Object, labels, values, free, lbound, ubound, nrow, ncol, byrow, name,
condenseSlots, joinKey, joinModel))
}
)
setMethod("imxVerifyMatrix", "SymmMatrix",
function(.Object) {
callNextMethod(.Object)
values <- .Object@values
free <- .Object@free
labels <- .Object@labels
lbound <- .Object@lbound
ubound <- .Object@ubound
mask <- is.na(values[upper.tri(values)])
if (any(is.na(t(values)[upper.tri(values)]) != mask)) {
stop(paste("NAs in 'values' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"are not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (any(values[upper.tri(values)][!mask] != t(values)[upper.tri(values)][!mask])) {
stop(paste("'values' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "), deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(free == t(free))) {
stop(paste("'free' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(labels == t(labels), na.rm = TRUE) && all(is.na(labels) == is.na(t(labels)))) {
stop(paste("'labels' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(lbound == t(lbound), na.rm = TRUE) && all(is.na(lbound) == is.na(t(lbound)))) {
stop(paste("'lbound' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(ubound == t(ubound), na.rm = TRUE) && all(is.na(ubound) == is.na(t(ubound)))) {
stop(paste("'ubound' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
}
)
| /R/SymmMatrix.R | no_license | OpenMx/OpenMx | R | false | false | 4,988 | r | #
# Copyright 2007-2018 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
setClass(Class = "SymmMatrix",
representation = representation(),
contains = "MxMatrix")
setMethod("imxSymmetricMatrix", "SymmMatrix",
function(.Object) { return(TRUE) }
)
setMethod("imxSquareMatrix", "SymmMatrix",
function(.Object) { return(TRUE) }
)
populateSymmTriangle <- function(input, n, default, byrow, strname) {
len <- length(input)
if (len == n * n || len == 1) {
output <- matrix(input, n, n, byrow)
} else if (len == n * (n + 1) / 2) {
if(byrow) {
output <- matrix(default, n, n)
output[upper.tri(output, TRUE)] <- input
output[lower.tri(output)] <- t(output)[lower.tri(output)]
} else {
output <- matrix(default, n, n)
output[lower.tri(output, TRUE)] <- input
output[upper.tri(output)] <- t(output)[upper.tri(output)]
}
} else {
stop(paste(
"illegal number of elements (", len,
") for '", strname, "' matrix in Symmetric MxMatrix construction", sep=""),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
return(output)
}
setMethod("imxCreateMatrix", "SymmMatrix",
function(.Object, labels, values, free, lbound, ubound, nrow, ncol, byrow, name, condenseSlots,
joinKey, joinModel) {
if (nrow != ncol) {
stop(paste("non-square MxMatrix attempted in 'nrow' and 'ncol' arguments to",
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix"))),
call. = FALSE)
}
if (single.na(values)) {
values <- 0
}
if (is.vector(values)) {
values <- populateSymmTriangle(values, nrow, 0, byrow, 'values')
}
if(condenseSlots && all.false(free) && all.na(labels)){
labels <- as.character(NA)
free <- FALSE
}
else{
if (is.vector(labels)) {
labels <- populateSymmTriangle(labels, nrow, as.character(NA), byrow, 'labels')
}
if (is.vector(free)) {
free <- populateSymmTriangle(free, nrow, FALSE, byrow, 'free')
}}
if(condenseSlots && all.na(lbound)){lbound <- as.numeric(NA)}
else{if (is.vector(lbound)) {
lbound <- populateSymmTriangle(lbound, nrow, as.numeric(NA), byrow, 'lbound')
}}
if(condenseSlots && all.na(ubound)){ubound <- as.numeric(NA)}
else{if (is.vector(ubound)) {
ubound <- populateSymmTriangle(ubound, nrow, as.numeric(NA), byrow, 'ubound')
}}
return(callNextMethod(.Object, labels, values, free, lbound, ubound, nrow, ncol, byrow, name,
condenseSlots, joinKey, joinModel))
}
)
setMethod("imxVerifyMatrix", "SymmMatrix",
function(.Object) {
callNextMethod(.Object)
values <- .Object@values
free <- .Object@free
labels <- .Object@labels
lbound <- .Object@lbound
ubound <- .Object@ubound
mask <- is.na(values[upper.tri(values)])
if (any(is.na(t(values)[upper.tri(values)]) != mask)) {
stop(paste("NAs in 'values' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"are not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (any(values[upper.tri(values)][!mask] != t(values)[upper.tri(values)][!mask])) {
stop(paste("'values' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "), deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(free == t(free))) {
stop(paste("'free' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(labels == t(labels), na.rm = TRUE) && all(is.na(labels) == is.na(t(labels)))) {
stop(paste("'labels' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(lbound == t(lbound), na.rm = TRUE) && all(is.na(lbound) == is.na(t(lbound)))) {
stop(paste("'lbound' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
if (!all(ubound == t(ubound), na.rm = TRUE) && all(is.na(ubound) == is.na(t(ubound)))) {
stop(paste("'ubound' matrix of Symmetric MxMatrix", omxQuotes(.Object@name),
"is not symmetric in "),
deparse(width.cutoff = 400L, imxLocateFunction("mxMatrix")),
call. = FALSE)
}
}
)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{evalPerformance}
\alias{evalPerformance}
\title{evalPerformance}
\usage{
evalPerformance(refset.calls, sample.outcomes)
}
\arguments{
\item{refset.calls}{is the data.frame returned by the \link{testUnknowns}
function. It contains column with sampleIDs, z-scores and calls for
T21, T18, T13 and the fetal sex}
\item{sample.outcomes}{data.frame with column names: Dx, Gender, SampleID}
}
\description{
This function takes in the summed counts, the baseline and an outcomes table
and calculates the overall performance on the dataset
}
\seealso{
\code{\link{testUnknowns}}
}
| /man/evalPerformance.Rd | no_license | biocyberman/RAPIDR | R | false | false | 633 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{evalPerformance}
\alias{evalPerformance}
\title{evalPerformance}
\usage{
evalPerformance(refset.calls, sample.outcomes)
}
\arguments{
\item{refset.calls}{is the data.frame returned by the \link{testUnknowns}
function. It contains column with sampleIDs, z-scores and calls for
T21, T18, T13 and the fetal sex}
\item{sample.outcomes}{data.frame with column names: Dx, Gender, SampleID}
}
\description{
This function takes in the summed counts, the baseline and an outcomes table
and calculates the overall performance on the dataset
}
\seealso{
\code{\link{testUnknowns}}
}
|
library(rmr)
library(hash)
# sample the data using the "look at the first few
# lines" method
# use the file() function, readChar function, and strsplit
# function to break the words up.
# then use R's tapply function to find the word counts
hist_order=order(unlist(word_count_samples), decreasing=T)
word_histogram = data.frame(freq=unlist(unname(word_count_samples))[hist_order],
word=names(word_count_samples)[hist_order])
# this barplot will let you see quite clearly that a good
# estimate of the "tail" threshold is about 5.
barplot(word_histogram$freq, names.arg=word_histogram$word)
# now that you know the threshold, set up a hash to test for it.
# use the > operator to find the high frequency words
# then save them in a hash, using the constant T as the value
# now we want to break the task up by the degree of parallelism in our cluster
num_slots = 10
rmr.options.set(backend="local")
partitioned_wordcount_map = function(null,line){
words = unlist(strsplit(line, split="\\s+", perl=T))
words = words[nzchar(words)]
high_freq_part=floor(runif(1)*num_slots)
# create a partition assigner function that
# checks if it's input is in the high frequency map
# using the is.null function. If it isn't,
# assign it to the partition 0. Otherwise, assign it to
# the partition high_freq_part. Do so by
# concatenating the word and the partition number
# using the c function
partitioned_words = lapply(words, partiton_assigner)
lapply(partitioned_words, function(word)keyval(word,1))
}
partitioned_wordcount_combine = function(word_and_parts, counts){
# sum the counts, but don't strip off the partition number
}
partitioned_wordcount_reduce = function(word_and_parts, counts){
# sum the counts, strip off the partition number
}
wordcount_reduce = function(words, counts){
# sum the counts again
}
phase_1_counts = mapreduce("~/Data/federalist_papers",
input.format="text",
map=partitioned_wordcount_map,
reduce = partitioned_wordcount_reduce,
combine = partitioned_wordcount_combine
)
result = from.dfs(mapreduce(phase_1_counts,
reduce=wordcount_reduce))
counts = unlist(lapply(result, function(kv) kv$val))
words = unlist(lapply(result, function(kv) kv$key))
orders = order(counts,decreasing=T)[1:50]
barplot(counts[orders], names.arg=words[orders] )
| /Fill-In/wordcount-zipfian.R | no_license | RodavLasIlad/rhadoop-examples | R | false | false | 2,372 | r | library(rmr)
library(hash)
# sample the data using the "look at the first few
# lines" method
# use the file() function, readChar function, and strsplit
# function to break the words up.
# then use R's tapply function to find the word counts
hist_order=order(unlist(word_count_samples), decreasing=T)
word_histogram = data.frame(freq=unlist(unname(word_count_samples))[hist_order],
word=names(word_count_samples)[hist_order])
# this barplot will let you see quite clearly that a good
# estimate of the "tail" threshold is about 5.
barplot(word_histogram$freq, names.arg=word_histogram$word)
# now that you know the threshold, set up a hash to test for it.
# use the > operator to find the high frequency words
# then save them in a hash, using the constant T as the value
# now we want to break the task up by the degree of parallelism in our cluster
num_slots = 10
rmr.options.set(backend="local")
partitioned_wordcount_map = function(null,line){
words = unlist(strsplit(line, split="\\s+", perl=T))
words = words[nzchar(words)]
high_freq_part=floor(runif(1)*num_slots)
# create a partition assigner function that
# checks if it's input is in the high frequency map
# using the is.null function. If it isn't,
# assign it to the partition 0. Otherwise, assign it to
# the partition high_freq_part. Do so by
# concatenating the word and the partition number
# using the c function
partitioned_words = lapply(words, partiton_assigner)
lapply(partitioned_words, function(word)keyval(word,1))
}
partitioned_wordcount_combine = function(word_and_parts, counts){
# sum the counts, but don't strip off the partition number
}
partitioned_wordcount_reduce = function(word_and_parts, counts){
# sum the counts, strip off the partition number
}
wordcount_reduce = function(words, counts){
# sum the counts again
}
phase_1_counts = mapreduce("~/Data/federalist_papers",
input.format="text",
map=partitioned_wordcount_map,
reduce = partitioned_wordcount_reduce,
combine = partitioned_wordcount_combine
)
result = from.dfs(mapreduce(phase_1_counts,
reduce=wordcount_reduce))
counts = unlist(lapply(result, function(kv) kv$val))
words = unlist(lapply(result, function(kv) kv$key))
orders = order(counts,decreasing=T)[1:50]
barplot(counts[orders], names.arg=words[orders] )
|
testlist <- list(x = numeric(0), y = c(1.82391396040758e-183, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) | /netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612798731-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 182 | r | testlist <- list(x = numeric(0), y = c(1.82391396040758e-183, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
########################################
# turn any pin heights into mm
# and name the column pin_height
########################################
height_to_mm <- function(data){
if(exists('pin_height_cm', data)) {
data <- data %>%
mutate(pin_height = pin_height_cm * 10) %>%
select(-pin_height_cm)
}
if(exists('pin_height_mm', data)){
data <- data %>%
mutate(pin_height = pin_height_mm) %>%
select(-pin_height_mm)
}
return(data)
}
######################################################
######################################################
#### Cumulative change (change since first reading)
######################################################
## calculate all levels of cumulative change, in one function?
## returns three data frames (puts them into the Global Environment)
# test
if(exists('dat')){
change_cumu_test_pin <<- dat %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(cumu = pin_height - pin_height[min(which(!is.na(pin_height)))]) %>% ##### need to make this the first pin reading that's not NA - not just [1]
select(-pin_height) %>%
ungroup()
change_cumu_test_arm <<- change_cumu_test_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_cumu = mean(cumu, na.rm = TRUE),
sd_cumu = sd(cumu, na.rm = TRUE),
se_cumu = sd(cumu, na.rm = TRUE)/sqrt(length(!is.na(cumu)))) %>%
ungroup()
}
## going to need better documentation, like what kind of data frame is needed as input (long); what column names are required; maybe some if statements to throw errors
calc_change_cumu <- function(dat) {
# by pin
change_cumu_pin <<- dat %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(cumu = pin_height - pin_height[1]) %>% ##### if there are nas in the first pin reading, maybe those pins should be excluded from further aggregation (at least this type of agg) - this will make those pins NA all the way through
# mutate(cumu = pin_height - pin_height[min(which(!is.na(pin_height)))]) %>% ##### subtract off the first pin reading that's not NA
select(-pin_height) %>%
ungroup()
# pins averaged up to arms
change_cumu_arm <<- change_cumu_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_cumu = mean(cumu, na.rm = TRUE),
sd_cumu = sd(cumu, na.rm = TRUE),
se_cumu = sd(cumu, na.rm = TRUE)/sqrt(length(!is.na(cumu)))) %>%
ungroup()
# arms averaged up to SETs
change_cumu_set <<- change_cumu_arm %>%
group_by(reserve, set_id, date) %>%
select(-arm_position, mean_value = mean_cumu) %>%
summarize(mean_cumu = mean(mean_value, na.rm = TRUE),
sd_cumu = sd(mean_value, na.rm = TRUE),
se_cumu = sd(mean_value, na.rm = TRUE)/sqrt(length(!is.na(mean_value)))) %>%
ungroup()
}
######################################################
######################################################
#### Incremental Change (change since last reading)
######################################################
calc_change_incr <- function(dat){
# by pin
change_incr_pin <<- dat %>%
arrange(reserve, set_id, arm_position, pin_number, date) %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(incr = pin_height - lag(pin_height, 1)) %>%
ungroup()
# pins averaged up to arms
change_incr_arm <<- change_incr_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_incr = mean(incr, na.rm = TRUE),
sd_incr = sd(incr, na.rm = TRUE),
se_incr = sd(incr, na.rm = TRUE)/sqrt(length(!is.na(incr)))) %>%
ungroup()
# arms averaged up to SETs
change_incr_set <<- change_incr_arm %>%
group_by(reserve, set_id, date) %>%
select(-arm_position, mean_value = mean_incr) %>%
summarize(mean_incr = mean(mean_value, na.rm = TRUE),
sd_incr = sd(mean_value, na.rm = TRUE),
se_incr = sd(mean_value, na.rm = TRUE)/sqrt(length(!is.na(mean_value)))) %>%
ungroup()
}
#######################################
### Graphs
#######################################
# maybe figure out how to make free y scales an option in the function call
## histogram, colored by arm
hist_by_arm <- function(data, columns = 4){
ggplot(data) +
geom_histogram(aes(pin_height, fill = as.factor(arm_position)), color = 'black') +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Histogram of raw pin heights by SET',
subtitle = 'colored by arm position; stacked',
x = 'Pin Height (mm)') +
theme_bw() +
scale_fill_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
#### raw pin readings
# by arm
plot_raw_arm <- function(data, columns = 4, pointsize = 2){
data %>%
group_by(set_id, arm_position, date) %>%
summarize(mean = mean(pin_height, na.rm = TRUE)) %>%
ggplot(aes(x = date, y = mean, col = as.factor(arm_position))) +
geom_point(size = pointsize) +
geom_line(alpha = 0.6) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Pin Height (raw measurement)',
x = 'Date',
y = 'Average pin height (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
# individual pins; choose a SET (put in quotes in function call)
plot_raw_pin <- function(data, set, columns = 2, pointsize = 2){
data %>%
filter(set_id == !!set) %>%
group_by(set_id, arm_position, pin_number, date) %>%
ggplot(aes(x = date, y = pin_height, col = as.factor(pin_number))) +
geom_point(size = pointsize) +
geom_line(alpha = 0.6) +
facet_wrap(~arm_position, ncol = columns) +
labs(title = 'Pin Height (raw measurement)',
subtitle = sym(set),
x = 'Date',
y = 'Measured pin height (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
##### cumulative change
## by arm
plot_cumu_arm <- function(columns = 4) {
ggplot(change_cumu_arm, aes(x = date, y = mean_cumu, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_line() +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Cumulative Change',
x = 'Date',
y = 'Change since first reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
## by set
plot_cumu_set <- function(columns = 4){
ggplot(change_cumu_set, aes(x = date, y = mean_cumu)) +
geom_line(col = 'lightsteelblue4') +
geom_smooth(se = FALSE, method = 'lm',
col = 'steelblue4', lty = 5, size = 1) +
geom_point(shape = 21,
fill = 'lightsteelblue1', col = 'steelblue3',
size = 3.5, alpha = 0.9) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Cumulative Change since first reading',
subtitle = 'dashed line is linear regression',
x = 'Date',
y = 'Change since first reading (mm)') +
theme_classic()
}
###### incremental change
plot_incr_arm <- function(columns = 4, set = NULL){
if(is.null(set)){
ggplot(change_incr_arm, aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
else{
change_incr_arm %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
}
# same thing, without free y scales
plot_incr_arm2 <- function(columns = 4, set = NULL){
if(is.null(set)){
ggplot(change_incr_arm, aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns) +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
else{
change_incr_arm %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns) +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
}
# by pin
plot_incr_pin <- function(set, columns = 2){
change_incr_pin %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = incr, col = as.factor(pin_number))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~arm_position, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
# same thing, without free y scales
plot_incr_pin2 <- function(set, columns = 2, pointsize = 2){
change_incr_pin %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = incr, col = as.factor(pin_number))) +
geom_point(size = pointsize) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~arm_position, ncol = columns) +
labs(title = paste0('Incremental Change at ', set),
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
| /R/000_functions.R | no_license | swmpkim/SETr_script_development | R | false | false | 12,130 | r | ########################################
# turn any pin heights into mm
# and name the column pin_height
########################################
height_to_mm <- function(data){
if(exists('pin_height_cm', data)) {
data <- data %>%
mutate(pin_height = pin_height_cm * 10) %>%
select(-pin_height_cm)
}
if(exists('pin_height_mm', data)){
data <- data %>%
mutate(pin_height = pin_height_mm) %>%
select(-pin_height_mm)
}
return(data)
}
######################################################
######################################################
#### Cumulative change (change since first reading)
######################################################
## calculate all levels of cumulative change, in one function?
## returns three data frames (puts them into the Global Environment)
# test
if(exists('dat')){
change_cumu_test_pin <<- dat %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(cumu = pin_height - pin_height[min(which(!is.na(pin_height)))]) %>% ##### need to make this the first pin reading that's not NA - not just [1]
select(-pin_height) %>%
ungroup()
change_cumu_test_arm <<- change_cumu_test_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_cumu = mean(cumu, na.rm = TRUE),
sd_cumu = sd(cumu, na.rm = TRUE),
se_cumu = sd(cumu, na.rm = TRUE)/sqrt(length(!is.na(cumu)))) %>%
ungroup()
}
## going to need better documentation, like what kind of data frame is needed as input (long); what column names are required; maybe some if statements to throw errors
calc_change_cumu <- function(dat) {
# by pin
change_cumu_pin <<- dat %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(cumu = pin_height - pin_height[1]) %>% ##### if there are nas in the first pin reading, maybe those pins should be excluded from further aggregation (at least this type of agg) - this will make those pins NA all the way through
# mutate(cumu = pin_height - pin_height[min(which(!is.na(pin_height)))]) %>% ##### subtract off the first pin reading that's not NA
select(-pin_height) %>%
ungroup()
# pins averaged up to arms
change_cumu_arm <<- change_cumu_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_cumu = mean(cumu, na.rm = TRUE),
sd_cumu = sd(cumu, na.rm = TRUE),
se_cumu = sd(cumu, na.rm = TRUE)/sqrt(length(!is.na(cumu)))) %>%
ungroup()
# arms averaged up to SETs
change_cumu_set <<- change_cumu_arm %>%
group_by(reserve, set_id, date) %>%
select(-arm_position, mean_value = mean_cumu) %>%
summarize(mean_cumu = mean(mean_value, na.rm = TRUE),
sd_cumu = sd(mean_value, na.rm = TRUE),
se_cumu = sd(mean_value, na.rm = TRUE)/sqrt(length(!is.na(mean_value)))) %>%
ungroup()
}
######################################################
######################################################
#### Incremental Change (change since last reading)
######################################################
calc_change_incr <- function(dat){
# by pin
change_incr_pin <<- dat %>%
arrange(reserve, set_id, arm_position, pin_number, date) %>%
group_by(reserve, set_id, arm_position, pin_number) %>%
mutate(incr = pin_height - lag(pin_height, 1)) %>%
ungroup()
# pins averaged up to arms
change_incr_arm <<- change_incr_pin %>%
group_by(reserve, set_id, arm_position, date) %>%
select(-pin_number) %>%
summarize(mean_incr = mean(incr, na.rm = TRUE),
sd_incr = sd(incr, na.rm = TRUE),
se_incr = sd(incr, na.rm = TRUE)/sqrt(length(!is.na(incr)))) %>%
ungroup()
# arms averaged up to SETs
change_incr_set <<- change_incr_arm %>%
group_by(reserve, set_id, date) %>%
select(-arm_position, mean_value = mean_incr) %>%
summarize(mean_incr = mean(mean_value, na.rm = TRUE),
sd_incr = sd(mean_value, na.rm = TRUE),
se_incr = sd(mean_value, na.rm = TRUE)/sqrt(length(!is.na(mean_value)))) %>%
ungroup()
}
#######################################
### Graphs
#######################################
# maybe figure out how to make free y scales an option in the function call
## histogram, colored by arm
hist_by_arm <- function(data, columns = 4){
ggplot(data) +
geom_histogram(aes(pin_height, fill = as.factor(arm_position)), color = 'black') +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Histogram of raw pin heights by SET',
subtitle = 'colored by arm position; stacked',
x = 'Pin Height (mm)') +
theme_bw() +
scale_fill_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
#### raw pin readings
# by arm
plot_raw_arm <- function(data, columns = 4, pointsize = 2){
data %>%
group_by(set_id, arm_position, date) %>%
summarize(mean = mean(pin_height, na.rm = TRUE)) %>%
ggplot(aes(x = date, y = mean, col = as.factor(arm_position))) +
geom_point(size = pointsize) +
geom_line(alpha = 0.6) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Pin Height (raw measurement)',
x = 'Date',
y = 'Average pin height (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
# individual pins; choose a SET (put in quotes in function call)
plot_raw_pin <- function(data, set, columns = 2, pointsize = 2){
data %>%
filter(set_id == !!set) %>%
group_by(set_id, arm_position, pin_number, date) %>%
ggplot(aes(x = date, y = pin_height, col = as.factor(pin_number))) +
geom_point(size = pointsize) +
geom_line(alpha = 0.6) +
facet_wrap(~arm_position, ncol = columns) +
labs(title = 'Pin Height (raw measurement)',
subtitle = sym(set),
x = 'Date',
y = 'Measured pin height (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
##### cumulative change
## by arm
plot_cumu_arm <- function(columns = 4) {
ggplot(change_cumu_arm, aes(x = date, y = mean_cumu, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_line() +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Cumulative Change',
x = 'Date',
y = 'Change since first reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
## by set
plot_cumu_set <- function(columns = 4){
ggplot(change_cumu_set, aes(x = date, y = mean_cumu)) +
geom_line(col = 'lightsteelblue4') +
geom_smooth(se = FALSE, method = 'lm',
col = 'steelblue4', lty = 5, size = 1) +
geom_point(shape = 21,
fill = 'lightsteelblue1', col = 'steelblue3',
size = 3.5, alpha = 0.9) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Cumulative Change since first reading',
subtitle = 'dashed line is linear regression',
x = 'Date',
y = 'Change since first reading (mm)') +
theme_classic()
}
###### incremental change
plot_incr_arm <- function(columns = 4, set = NULL){
if(is.null(set)){
ggplot(change_incr_arm, aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
else{
change_incr_arm %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
}
# same thing, without free y scales
plot_incr_arm2 <- function(columns = 4, set = NULL){
if(is.null(set)){
ggplot(change_incr_arm, aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns) +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
else{
change_incr_arm %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = mean_incr, col = as.factor(arm_position))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~set_id, ncol = columns) +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Arm Position') +
theme(legend.position = 'bottom')
}
}
# by pin
plot_incr_pin <- function(set, columns = 2){
change_incr_pin %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = incr, col = as.factor(pin_number))) +
geom_point(size = 2) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~arm_position, ncol = columns, scales = 'free_y') +
labs(title = 'Incremental Change',
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
# same thing, without free y scales
plot_incr_pin2 <- function(set, columns = 2, pointsize = 2){
change_incr_pin %>%
filter(set_id == !!set) %>%
ggplot(., aes(x = date, y = incr, col = as.factor(pin_number))) +
geom_point(size = pointsize) +
geom_hline(yintercept = 25, col = "red", size = 1) +
geom_hline(yintercept = -25, col = "red", size = 1) +
facet_wrap(~arm_position, ncol = columns) +
labs(title = paste0('Incremental Change at ', set),
subtitle = 'red lines at +/- 25 mm',
x = 'Date',
y = 'Change since previous reading (mm)') +
theme_bw() +
scale_color_discrete(name = 'Pin') +
theme(legend.position = 'bottom')
}
|
library(BB)
### Name: sane
### Title: Solving Large-Scale Nonlinear System of Equations
### Aliases: sane
### Keywords: multivariate
### ** Examples
trigexp <- function(x) {
# Test function No. 12 in the Appendix of LaCruz and Raydan (2003)
n <- length(x)
F <- rep(NA, n)
F[1] <- 3*x[1]^2 + 2*x[2] - 5 + sin(x[1] - x[2]) * sin(x[1] + x[2])
tn1 <- 2:(n-1)
F[tn1] <- -x[tn1-1] * exp(x[tn1-1] - x[tn1]) + x[tn1] * ( 4 + 3*x[tn1]^2) +
2 * x[tn1 + 1] + sin(x[tn1] - x[tn1 + 1]) * sin(x[tn1] + x[tn1 + 1]) - 8
F[n] <- -x[n-1] * exp(x[n-1] - x[n]) + 4*x[n] - 3
F
}
p0 <- rnorm(50)
sane(par=p0, fn=trigexp)
sane(par=p0, fn=trigexp, method=1)
######################################
brent <- function(x) {
n <- length(x)
tnm1 <- 2:(n-1)
F <- rep(NA, n)
F[1] <- 3 * x[1] * (x[2] - 2*x[1]) + (x[2]^2)/4
F[tnm1] <- 3 * x[tnm1] * (x[tnm1+1] - 2 * x[tnm1] + x[tnm1-1]) +
((x[tnm1+1] - x[tnm1-1])^2) / 4
F[n] <- 3 * x[n] * (20 - 2 * x[n] + x[n-1]) + ((20 - x[n-1])^2) / 4
F
}
p0 <- sort(runif(50, 0, 10))
sane(par=p0, fn=brent, control=list(trace=FALSE))
sane(par=p0, fn=brent, control=list(M=200, trace=FALSE))
| /data/genthat_extracted_code/BB/examples/sane.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,209 | r | library(BB)
### Name: sane
### Title: Solving Large-Scale Nonlinear System of Equations
### Aliases: sane
### Keywords: multivariate
### ** Examples
trigexp <- function(x) {
# Test function No. 12 in the Appendix of LaCruz and Raydan (2003)
n <- length(x)
F <- rep(NA, n)
F[1] <- 3*x[1]^2 + 2*x[2] - 5 + sin(x[1] - x[2]) * sin(x[1] + x[2])
tn1 <- 2:(n-1)
F[tn1] <- -x[tn1-1] * exp(x[tn1-1] - x[tn1]) + x[tn1] * ( 4 + 3*x[tn1]^2) +
2 * x[tn1 + 1] + sin(x[tn1] - x[tn1 + 1]) * sin(x[tn1] + x[tn1 + 1]) - 8
F[n] <- -x[n-1] * exp(x[n-1] - x[n]) + 4*x[n] - 3
F
}
p0 <- rnorm(50)
sane(par=p0, fn=trigexp)
sane(par=p0, fn=trigexp, method=1)
######################################
brent <- function(x) {
n <- length(x)
tnm1 <- 2:(n-1)
F <- rep(NA, n)
F[1] <- 3 * x[1] * (x[2] - 2*x[1]) + (x[2]^2)/4
F[tnm1] <- 3 * x[tnm1] * (x[tnm1+1] - 2 * x[tnm1] + x[tnm1-1]) +
((x[tnm1+1] - x[tnm1-1])^2) / 4
F[n] <- 3 * x[n] * (20 - 2 * x[n] + x[n-1]) + ((20 - x[n-1])^2) / 4
F
}
p0 <- sort(runif(50, 0, 10))
sane(par=p0, fn=brent, control=list(trace=FALSE))
sane(par=p0, fn=brent, control=list(M=200, trace=FALSE))
|
# The makeCacheMatrix function creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse
# 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(inverse) inverse <<- inverse
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# The cacheSolve function returns the inverse of the matrix returned by the makeCacheMatrix function.
# If the inverse has already been calculated, then cacheSolve retrieves the inverse from the cache.
# If not, cacheSolve computes and returns the inverted matrix.
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data)
x$setinverse(inverse)
inverse
}
| /cachematrix.R | no_license | tmcclure92/ProgrammingAssignment2 | R | false | false | 1,158 | r | # The makeCacheMatrix function creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse
# 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(inverse) inverse <<- inverse
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# The cacheSolve function returns the inverse of the matrix returned by the makeCacheMatrix function.
# If the inverse has already been calculated, then cacheSolve retrieves the inverse from the cache.
# If not, cacheSolve computes and returns the inverted matrix.
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data)
x$setinverse(inverse)
inverse
}
|
### Function to doawnload data from aclimate geoserver "https://geo.aclimate.org/geoserver/"
# Author: Rodriguez-Espinoza J., Mesa J.,
# https://github.com/jrodriguez88/
# 2022
#library(tidyverse)
#library(raster)
# Function to doawnload data from aclimate geoserver "https://geo.aclimate.org/geoserver/"
## Arguments
#crop <- "wheat"
#data_type <- "fertilizer"
##data_type <- "climate"
#format <- "geotiff"
##format <- "shp" # just works for some areas
#country <- "et" # et = ethiopia, co = colombia
#outpath <- "inputs/"
get_geoserver_data <- function(crop, data_type, country, format, outpath){
url_base <- "https://geo.aclimate.org/geoserver/"
if(data_type == "climate"){
data_type_string <- paste0("a", data_type, "_", country)
layers <- c("seasonal_country_et_probabilistic_above",
"seasonal_country_et_probabilistic_normal",
"seasonal_country_et_probabilistic_below")
names_tif <- paste0(outpath, "/", layers, ".tif")
bbox_raster <- "bbox=33.0%2C3.0%2C48.0%2C17.0&width=768&height=716"
} else if (data_type == "fertilizer"){
data_type_string <- paste0(data_type, "_", country)
layers <- c(paste0("et_", crop, "_nps_probabilistic_normal"),
paste0("et_", crop, "_urea_probabilistic_normal"))
bbox_raster <- "bbox=33.051816455%2C5.352336938999999%2C43.448502149%2C14.749081456&width=768&height=694"
names_tif <- paste0(outpath, "/", layers, ".tif")
} else {message("No recognizing data type")}
format_string <- if(format == "geotiff"){format
} else if (format == "shp"){
paste0("SHAPE-ZIP")
}
# bbox <- change by country
url <- paste0(url_base, data_type_string,
"/wms?service=WMS&version=1.1.0&request=GetMap&layers=",
data_type_string, "%3A", layers,
"&", bbox_raster, "&srs=EPSG%3A4326&styles=&format=image%2F",
format_string)
if(all(map_lgl(names_tif, file.exists))){message("Geoserver tif files already downloaded!")
} else {
walk2(url, names_tif, ~download.file(url = .x, destfile = .y, method = "curl"))
}
return(names_tif)
}
#Usage
#downloaded_tif <- get_geoserver_data(crop, "fertilizer", country, format, outpath)
#lat <- 9.37
#lon <- 42
extract_raster_geos <- function(raster_geos, lat, lon) {
data_raster <- raster_geos %>% map(raster) %>%
raster::stack() %>%
raster::extract(., SpatialPoints(cbind(lon, lat), proj4string = CRS("+init=epsg:4326"))) %>%
t() %>% #as.tibble()
as.data.frame() %>%
tibble::rownames_to_column(var = "file") %>%
tibble()
return(data_raster)
}
#https://geo.aclimate.org/geoserver/fertilizer_et/wms?service=WMS&version=1.1.0&request=GetMap&layers=fertilizer_et%3Aet_wheat_urea_probabilistic_normal&bbox=33.051816455%2C5.352336938999999%2C43.448502149%2C14.749081456&width=768&height=694&srs=EPSG%3A4326&styles=&format=image%2Fgeotiff
#https://geo.aclimate.org/geoserver/aclimate_et/wms?service=WMS&version=1.1.0&request=GetMap&layers=aclimate_et%3Aseasonal_country_et_probabilistic_above&bbox=33.0%2C3.0%2C48.0%2C17.0&width=768&height=716&srs=EPSG%3A4326&styles=&format=image%2Fgeotiff
#test <- extract_raster_geos(downloaded_tif, 35, 8.38)
# urea : Urea amount (kg/ha
# nps : NPS amount (kg/ha)
# apps_dap : Number application - Days after planting
# urea_split = Rate application
# nps_split : Rate application
convert_FertApp_dssat <- function(urea, nps, apps_dap = c(1, 40), urea_split = c(1/3, 2/3), nps_split = c(1, 0)){
base_tb <- bind_rows(tibble(dap = apps_dap, fert = "nps", value = nps * nps_split) %>%
dplyr::filter(value > 0),
tibble(dap = apps_dap, fert= "urea", value = urea * urea_split) %>%
dplyr::filter(value > 0))
fert_to_N <-function(fert, amount){
if(fert == "nps"){
N = amount*0.19 # https://www.tandfonline.com/doi/full/10.1080/23311932.2018.1439663
} else if(fert == "urea"){
N = amount*0.46
} else {
message("No detected Fertilizer")
N = -99
}
return(N)
}
fert_to_P <-function(fert, amount){
if(fert == "nps"){
P = amount*0.38
} else if(fert == "urea"){
P = -99
} else {
message("No detected Fertilizer")
P = -99
}
return(P)
}
# AP001 Broadcast, not incorporated
# AP002 Broadcast, incorporated
# FE005 Urea
# FE006 Diammonium phosphate (DAP)
# FE028 NPK - urea
base_tb <- base_tb %>%
mutate(N = map2_dbl(fert, value, fert_to_N),
P = map2_dbl(fert, value, fert_to_P),
FMCD = case_when(fert == "nps" ~ "FE006",
fert == "urea" ~"FE005",
TRUE ~ NA_character_),
FACD = case_when(dap < 5 ~ "AP002",
dap > 15 ~ "AP001",
TRUE ~ NA_character_),
FDEP = case_when(dap < 5 ~ 5,
dap > 15 ~ 1,
TRUE ~ NA_real_))
# De acuerdo a las recomendaciones: 2 aplicaciones,
# 1 app: (nps) + 1(urea)/3 -- Incorporated
# 2 app: 2(urea)/3 -- No incorporated
#*FERTILIZERS (INORGANIC)
#@F FDATE FMCD FACD FDEP FAMN FAMP FAMK FAMC FAMO FOCD FERNAME
# 1 1 FE006 AP002 5 10 20 -99 -99 -99 -99 fertApp
# 1 1 FE005 AP002 5 30 -99 -99 -99 -99 -99 fertApp
# 1 40 FE005 AP001 1 10 30 10 -99 -99 -99 fertApp
FDATE <- base_tb$dap
FMCD <- base_tb$FMCD
FACD <- base_tb$FACD
FDEP <- base_tb$FDEP
FAMN <- round(base_tb$N)
FAMP <- round(base_tb$P)
FAMK <- -99
FAMC <- -99
FAMO <- -99
FOCD <- -99
FERNAME <- "AgroClimR"
#
# fertilizer <- data.frame(F = 1, FDATE, FMCD, FACD, FDEP, FAMN, FAMP, FAMK,
# FAMC, FAMO, FOCD, FERNAME)
fertilizer <- tibble(F = 1, FDATE, FMCD, FACD, FDEP, FAMN, FAMP, FAMK,
FAMC, FAMO, FOCD, FERNAME)
return(fertilizer)
}
#c(FALSE , "auto", "fertapp")
| /dssat_API/dssat_scripts/03_connect_georserver.R | no_license | CIAT-DAPA/usaid_procesos_interfaz | R | false | false | 6,272 | r | ### Function to doawnload data from aclimate geoserver "https://geo.aclimate.org/geoserver/"
# Author: Rodriguez-Espinoza J., Mesa J.,
# https://github.com/jrodriguez88/
# 2022
#library(tidyverse)
#library(raster)
# Function to doawnload data from aclimate geoserver "https://geo.aclimate.org/geoserver/"
## Arguments
#crop <- "wheat"
#data_type <- "fertilizer"
##data_type <- "climate"
#format <- "geotiff"
##format <- "shp" # just works for some areas
#country <- "et" # et = ethiopia, co = colombia
#outpath <- "inputs/"
get_geoserver_data <- function(crop, data_type, country, format, outpath){
url_base <- "https://geo.aclimate.org/geoserver/"
if(data_type == "climate"){
data_type_string <- paste0("a", data_type, "_", country)
layers <- c("seasonal_country_et_probabilistic_above",
"seasonal_country_et_probabilistic_normal",
"seasonal_country_et_probabilistic_below")
names_tif <- paste0(outpath, "/", layers, ".tif")
bbox_raster <- "bbox=33.0%2C3.0%2C48.0%2C17.0&width=768&height=716"
} else if (data_type == "fertilizer"){
data_type_string <- paste0(data_type, "_", country)
layers <- c(paste0("et_", crop, "_nps_probabilistic_normal"),
paste0("et_", crop, "_urea_probabilistic_normal"))
bbox_raster <- "bbox=33.051816455%2C5.352336938999999%2C43.448502149%2C14.749081456&width=768&height=694"
names_tif <- paste0(outpath, "/", layers, ".tif")
} else {message("No recognizing data type")}
format_string <- if(format == "geotiff"){format
} else if (format == "shp"){
paste0("SHAPE-ZIP")
}
# bbox <- change by country
url <- paste0(url_base, data_type_string,
"/wms?service=WMS&version=1.1.0&request=GetMap&layers=",
data_type_string, "%3A", layers,
"&", bbox_raster, "&srs=EPSG%3A4326&styles=&format=image%2F",
format_string)
if(all(map_lgl(names_tif, file.exists))){message("Geoserver tif files already downloaded!")
} else {
walk2(url, names_tif, ~download.file(url = .x, destfile = .y, method = "curl"))
}
return(names_tif)
}
#Usage
#downloaded_tif <- get_geoserver_data(crop, "fertilizer", country, format, outpath)
#lat <- 9.37
#lon <- 42
extract_raster_geos <- function(raster_geos, lat, lon) {
data_raster <- raster_geos %>% map(raster) %>%
raster::stack() %>%
raster::extract(., SpatialPoints(cbind(lon, lat), proj4string = CRS("+init=epsg:4326"))) %>%
t() %>% #as.tibble()
as.data.frame() %>%
tibble::rownames_to_column(var = "file") %>%
tibble()
return(data_raster)
}
#https://geo.aclimate.org/geoserver/fertilizer_et/wms?service=WMS&version=1.1.0&request=GetMap&layers=fertilizer_et%3Aet_wheat_urea_probabilistic_normal&bbox=33.051816455%2C5.352336938999999%2C43.448502149%2C14.749081456&width=768&height=694&srs=EPSG%3A4326&styles=&format=image%2Fgeotiff
#https://geo.aclimate.org/geoserver/aclimate_et/wms?service=WMS&version=1.1.0&request=GetMap&layers=aclimate_et%3Aseasonal_country_et_probabilistic_above&bbox=33.0%2C3.0%2C48.0%2C17.0&width=768&height=716&srs=EPSG%3A4326&styles=&format=image%2Fgeotiff
#test <- extract_raster_geos(downloaded_tif, 35, 8.38)
# urea : Urea amount (kg/ha
# nps : NPS amount (kg/ha)
# apps_dap : Number application - Days after planting
# urea_split = Rate application
# nps_split : Rate application
convert_FertApp_dssat <- function(urea, nps, apps_dap = c(1, 40), urea_split = c(1/3, 2/3), nps_split = c(1, 0)){
base_tb <- bind_rows(tibble(dap = apps_dap, fert = "nps", value = nps * nps_split) %>%
dplyr::filter(value > 0),
tibble(dap = apps_dap, fert= "urea", value = urea * urea_split) %>%
dplyr::filter(value > 0))
fert_to_N <-function(fert, amount){
if(fert == "nps"){
N = amount*0.19 # https://www.tandfonline.com/doi/full/10.1080/23311932.2018.1439663
} else if(fert == "urea"){
N = amount*0.46
} else {
message("No detected Fertilizer")
N = -99
}
return(N)
}
fert_to_P <-function(fert, amount){
if(fert == "nps"){
P = amount*0.38
} else if(fert == "urea"){
P = -99
} else {
message("No detected Fertilizer")
P = -99
}
return(P)
}
# AP001 Broadcast, not incorporated
# AP002 Broadcast, incorporated
# FE005 Urea
# FE006 Diammonium phosphate (DAP)
# FE028 NPK - urea
base_tb <- base_tb %>%
mutate(N = map2_dbl(fert, value, fert_to_N),
P = map2_dbl(fert, value, fert_to_P),
FMCD = case_when(fert == "nps" ~ "FE006",
fert == "urea" ~"FE005",
TRUE ~ NA_character_),
FACD = case_when(dap < 5 ~ "AP002",
dap > 15 ~ "AP001",
TRUE ~ NA_character_),
FDEP = case_when(dap < 5 ~ 5,
dap > 15 ~ 1,
TRUE ~ NA_real_))
# De acuerdo a las recomendaciones: 2 aplicaciones,
# 1 app: (nps) + 1(urea)/3 -- Incorporated
# 2 app: 2(urea)/3 -- No incorporated
#*FERTILIZERS (INORGANIC)
#@F FDATE FMCD FACD FDEP FAMN FAMP FAMK FAMC FAMO FOCD FERNAME
# 1 1 FE006 AP002 5 10 20 -99 -99 -99 -99 fertApp
# 1 1 FE005 AP002 5 30 -99 -99 -99 -99 -99 fertApp
# 1 40 FE005 AP001 1 10 30 10 -99 -99 -99 fertApp
FDATE <- base_tb$dap
FMCD <- base_tb$FMCD
FACD <- base_tb$FACD
FDEP <- base_tb$FDEP
FAMN <- round(base_tb$N)
FAMP <- round(base_tb$P)
FAMK <- -99
FAMC <- -99
FAMO <- -99
FOCD <- -99
FERNAME <- "AgroClimR"
#
# fertilizer <- data.frame(F = 1, FDATE, FMCD, FACD, FDEP, FAMN, FAMP, FAMK,
# FAMC, FAMO, FOCD, FERNAME)
fertilizer <- tibble(F = 1, FDATE, FMCD, FACD, FDEP, FAMN, FAMP, FAMK,
FAMC, FAMO, FOCD, FERNAME)
return(fertilizer)
}
#c(FALSE , "auto", "fertapp")
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -1.17863395026857e-20, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609868226-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 832 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -1.17863395026857e-20, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
#' @importFrom dplyr filter
#' @importFrom ggplot2 geom_vline
#' @title Studentized Residuals vs Leverage Plot
#' @description Graph for detecting outliers and/or observations with high leverage.
#' @param model an object of class \code{lm}
#' @examples
#' model <- lm(read ~ write + math + science, data = hsb)
#' ols_rsdlev_plot(model)
#'
#' @export
#'
ols_rsdlev_plot <- function(model) {
if (!all(class(model) == 'lm')) {
stop('Please specify a OLS linear regression model.', call. = FALSE)
}
Observation <- NULL
leverage <- NULL
txt <- NULL
obs <- NULL
resp <- model %>% model.frame() %>% names() %>% `[`(1)
g <- rstudlev(model)
d <- g$levrstud
d <- d %>% mutate(txt = ifelse(Observation == 'normal', NA, obs))
f <- d %>% filter(., Observation == 'outlier') %>% select(obs, leverage, rstudent)
p <- ggplot(d, aes(leverage, rstudent, label = txt)) +
geom_point(shape = 1, aes(colour = Observation)) +
scale_color_manual(values = c("blue", "red", "green", "violet")) +
xlim(g$minx, g$maxx) + ylim(g$miny, g$maxy) +
xlab('Leverage') + ylab('RStudent') +
ggtitle(paste("Outlier and Leverage Diagnostics for", resp)) +
geom_hline(yintercept = c(2, -2), colour = 'maroon') +
geom_vline(xintercept = g$lev_thrsh, colour = 'maroon') +
geom_text(vjust = -1, size = 3, family="serif", fontface="italic", colour="darkred") +
annotate("text", x = Inf, y = Inf, hjust = 1.2, vjust = 2,
family="serif", fontface="italic", colour="darkred",
label = paste('Threshold:', round(g$lev_thrsh, 3)))
suppressWarnings(print(p))
colnames(f) <- c("Observation", "Leverage", "Studentized Residuals")
result <- list(leverage = f, threshold = round(g$lev_thrsh, 3))
invisible(result)
} | /R/ols-rstud-vs-lev-plot.R | no_license | SvetiStefan/olsrr | R | false | false | 1,767 | r | #' @importFrom dplyr filter
#' @importFrom ggplot2 geom_vline
#' @title Studentized Residuals vs Leverage Plot
#' @description Graph for detecting outliers and/or observations with high leverage.
#' @param model an object of class \code{lm}
#' @examples
#' model <- lm(read ~ write + math + science, data = hsb)
#' ols_rsdlev_plot(model)
#'
#' @export
#'
ols_rsdlev_plot <- function(model) {
if (!all(class(model) == 'lm')) {
stop('Please specify a OLS linear regression model.', call. = FALSE)
}
Observation <- NULL
leverage <- NULL
txt <- NULL
obs <- NULL
resp <- model %>% model.frame() %>% names() %>% `[`(1)
g <- rstudlev(model)
d <- g$levrstud
d <- d %>% mutate(txt = ifelse(Observation == 'normal', NA, obs))
f <- d %>% filter(., Observation == 'outlier') %>% select(obs, leverage, rstudent)
p <- ggplot(d, aes(leverage, rstudent, label = txt)) +
geom_point(shape = 1, aes(colour = Observation)) +
scale_color_manual(values = c("blue", "red", "green", "violet")) +
xlim(g$minx, g$maxx) + ylim(g$miny, g$maxy) +
xlab('Leverage') + ylab('RStudent') +
ggtitle(paste("Outlier and Leverage Diagnostics for", resp)) +
geom_hline(yintercept = c(2, -2), colour = 'maroon') +
geom_vline(xintercept = g$lev_thrsh, colour = 'maroon') +
geom_text(vjust = -1, size = 3, family="serif", fontface="italic", colour="darkred") +
annotate("text", x = Inf, y = Inf, hjust = 1.2, vjust = 2,
family="serif", fontface="italic", colour="darkred",
label = paste('Threshold:', round(g$lev_thrsh, 3)))
suppressWarnings(print(p))
colnames(f) <- c("Observation", "Leverage", "Studentized Residuals")
result <- list(leverage = f, threshold = round(g$lev_thrsh, 3))
invisible(result)
} |
setwd("C:/Users/Administrador/Downloads")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
POINT<-NEI[which(NEI$type=="POINT"),]
NONPOINT<-NEI[which(NEI$type=="NONPOINT"),]
ROAD<-NEI[which(NEI$type=="ON-ROAD"),]
NONROAD<-NEI[which(NEI$type=="NON-ROAD"),]
PTotal1999<-sum(POINT$Emissions[which(POINT$year==1999)])
PTotal2002<-sum(POINT$Emissions[which(POINT$year==2002)])
PTotal2005<-sum(POINT$Emissions[which(POINT$year==2005)])
PTotal2008<-sum(POINT$Emissions[which(POINT$year==2008)])
PTotales<-c(PTotal1999,PTotal2002,PTotal2005,PTotal2008)
NPTotal1999<-sum(NONPOINT$Emissions[which(NONPOINT$year==1999)])
NPTotal2002<-sum(NONPOINT$Emissions[which(NONPOINT$year==2002)])
NPTotal2005<-sum(NONPOINT$Emissions[which(NONPOINT$year==2005)])
NPTotal2008<-sum(NONPOINT$Emissions[which(NONPOINT$year==2008)])
NPTotales<-c(NPTotal1999,NPTotal2002,NPTotal2005,NPTotal2008)
RTotal1999<-sum(ROAD$Emissions[which(ROAD$year==1999)])
RTotal2002<-sum(ROAD$Emissions[which(ROAD$year==2002)])
RTotal2005<-sum(ROAD$Emissions[which(ROAD$year==2005)])
RTotal2008<-sum(ROAD$Emissions[which(ROAD$year==2008)])
RTotales<-c(RTotal1999,RTotal2002,RTotal2005,RTotal2008)
NRTotal1999<-sum(ROAD$Emissions[which(ROAD$year==1999)])
NRTotal2002<-sum(ROAD$Emissions[which(ROAD$year==2002)])
NRTotal2005<-sum(ROAD$Emissions[which(ROAD$year==2005)])
NRTotal2008<-sum(ROAD$Emissions[which(ROAD$year==2008)])
NRTotales<-c(NRTotal1999,NRTotal2002,NRTotal2005,NRTotal2008)
TOTALES<-as.numeric(c(PTotales,NPTotales,RTotales,NRTotales))
TOTALES<-cbind(TOTALES,rep(c("POINT","NON-POINT","ON-ROAD","NON-ROAD"),each=4),rep(c(1999,2002,2005,2008),4))
TOTALES<-as.data.frame(TOTALES)
names(TOTALES)<-c("Emisiones","type","year")
TOTALES$Emisiones<-as.character(TOTALES$Emisiones)
TOTALES$Emisiones<-as.numeric(TOTALES$Emisiones)
png(filename="Plot3.png",width = 480,height = 480, units = "px")
library(ggplot2)
bp <- ggplot(TOTALES, aes(x=year, y=Emisiones, group=type)) + geom_line() + facet_wrap(.~type) +
geom_line(data = TOTALES, aes(x=year, y=Emisiones))
bp
dev.off()
| /Project 2/Plot3.R | no_license | jhonarredondo/Exploratory-Data-Analysis | R | false | false | 2,125 | r | setwd("C:/Users/Administrador/Downloads")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
POINT<-NEI[which(NEI$type=="POINT"),]
NONPOINT<-NEI[which(NEI$type=="NONPOINT"),]
ROAD<-NEI[which(NEI$type=="ON-ROAD"),]
NONROAD<-NEI[which(NEI$type=="NON-ROAD"),]
PTotal1999<-sum(POINT$Emissions[which(POINT$year==1999)])
PTotal2002<-sum(POINT$Emissions[which(POINT$year==2002)])
PTotal2005<-sum(POINT$Emissions[which(POINT$year==2005)])
PTotal2008<-sum(POINT$Emissions[which(POINT$year==2008)])
PTotales<-c(PTotal1999,PTotal2002,PTotal2005,PTotal2008)
NPTotal1999<-sum(NONPOINT$Emissions[which(NONPOINT$year==1999)])
NPTotal2002<-sum(NONPOINT$Emissions[which(NONPOINT$year==2002)])
NPTotal2005<-sum(NONPOINT$Emissions[which(NONPOINT$year==2005)])
NPTotal2008<-sum(NONPOINT$Emissions[which(NONPOINT$year==2008)])
NPTotales<-c(NPTotal1999,NPTotal2002,NPTotal2005,NPTotal2008)
RTotal1999<-sum(ROAD$Emissions[which(ROAD$year==1999)])
RTotal2002<-sum(ROAD$Emissions[which(ROAD$year==2002)])
RTotal2005<-sum(ROAD$Emissions[which(ROAD$year==2005)])
RTotal2008<-sum(ROAD$Emissions[which(ROAD$year==2008)])
RTotales<-c(RTotal1999,RTotal2002,RTotal2005,RTotal2008)
NRTotal1999<-sum(ROAD$Emissions[which(ROAD$year==1999)])
NRTotal2002<-sum(ROAD$Emissions[which(ROAD$year==2002)])
NRTotal2005<-sum(ROAD$Emissions[which(ROAD$year==2005)])
NRTotal2008<-sum(ROAD$Emissions[which(ROAD$year==2008)])
NRTotales<-c(NRTotal1999,NRTotal2002,NRTotal2005,NRTotal2008)
TOTALES<-as.numeric(c(PTotales,NPTotales,RTotales,NRTotales))
TOTALES<-cbind(TOTALES,rep(c("POINT","NON-POINT","ON-ROAD","NON-ROAD"),each=4),rep(c(1999,2002,2005,2008),4))
TOTALES<-as.data.frame(TOTALES)
names(TOTALES)<-c("Emisiones","type","year")
TOTALES$Emisiones<-as.character(TOTALES$Emisiones)
TOTALES$Emisiones<-as.numeric(TOTALES$Emisiones)
png(filename="Plot3.png",width = 480,height = 480, units = "px")
library(ggplot2)
bp <- ggplot(TOTALES, aes(x=year, y=Emisiones, group=type)) + geom_line() + facet_wrap(.~type) +
geom_line(data = TOTALES, aes(x=year, y=Emisiones))
bp
dev.off()
|
care_package <- function(x){
library(stringr)
library(dplyr)
}
clean_who_number_spaces <-
function(x) {
str_replace_all(x, "([0-9]{1,3}) ([0-9]{3})", paste0("\\1", "", "\\2"))
str_replace_all(x," ","")
}
avg_get <-
function(x) {
test <- str_extract_all(x,"^[0-9]+\\.?[0-9]*\\[")
str_replace_all(test,"\\[","")
}
lower_get <-
function(x) {
test <- str_extract_all(x,"\\[[0-9]+\\.?[0-9]*")
str_replace_all(test,"\\[","")
}
upper_get <-
function(x) {
test <- str_extract_all(x,"[0-9]+\\.?[0-9]*\\]$")
str_replace_all(test,"\\]","")
}
fix_who_column <-
function(x) {
care_package(x)
prep <- clean_who_number_spaces(x)
avg <- as.numeric(avg_get(prep))
lower_bound <- as.numeric(lower_get(prep))
upper_bound <- as.numeric(upper_get(prep))
data.frame(avg,lower_bound,upper_bound)
} | /code/Jeng-functions.R | no_license | KaiJeng/msds597-week08 | R | false | false | 867 | r | care_package <- function(x){
library(stringr)
library(dplyr)
}
clean_who_number_spaces <-
function(x) {
str_replace_all(x, "([0-9]{1,3}) ([0-9]{3})", paste0("\\1", "", "\\2"))
str_replace_all(x," ","")
}
avg_get <-
function(x) {
test <- str_extract_all(x,"^[0-9]+\\.?[0-9]*\\[")
str_replace_all(test,"\\[","")
}
lower_get <-
function(x) {
test <- str_extract_all(x,"\\[[0-9]+\\.?[0-9]*")
str_replace_all(test,"\\[","")
}
upper_get <-
function(x) {
test <- str_extract_all(x,"[0-9]+\\.?[0-9]*\\]$")
str_replace_all(test,"\\]","")
}
fix_who_column <-
function(x) {
care_package(x)
prep <- clean_who_number_spaces(x)
avg <- as.numeric(avg_get(prep))
lower_bound <- as.numeric(lower_get(prep))
upper_bound <- as.numeric(upper_get(prep))
data.frame(avg,lower_bound,upper_bound)
} |
file_path<-"./household_power_consumption.txt"
myData<-read.table(file_path, sep=";", header = TRUE)
head(myData)
myData$Date= as.Date(myData$Date, "%d/%m/%Y")
#get set
myset<-myData[myData$Date >="2007-02-01"& myData$Date <="2007-02-02", ]
#add weekday to dataset
myset$FullDate<-with (myset,as.POSIXct(paste(myset$Date, myset$Time), format="%Y-%m-%d %H:%M:%S"))
sub1<-as.numeric(as.character(myset$Sub_metering_1))
sub2<-as.numeric(as.character(myset$Sub_metering_2))
sub3<-as.numeric(as.character(myset$Sub_metering_3))
par(mar=c(4,4,2,10))
plot(myset$FullDate, sub1, type="l",ylim=c(0, 40),yaxt="none",xlab="", ylab = "Energy sub metering")
axis(2, seq(0, 35, 10))
par(new=TRUE)
plot(myset$FullDate, sub2, type="l", ylim=c(0, 40), yaxt="none",xlab="", ylab = "Energy sub metering", col="red")
par(new=TRUE)
plot(myset$FullDate, sub3, type="l", ylim=c(0, 40), xlab="",yaxt="none", ylab = "Energy sub metering", col="blue")
legend("topright", lty=1,col = c("black", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
| /plot3.r | no_license | yaomisun/datasciencecoursera | R | false | false | 1,085 | r | file_path<-"./household_power_consumption.txt"
myData<-read.table(file_path, sep=";", header = TRUE)
head(myData)
myData$Date= as.Date(myData$Date, "%d/%m/%Y")
#get set
myset<-myData[myData$Date >="2007-02-01"& myData$Date <="2007-02-02", ]
#add weekday to dataset
myset$FullDate<-with (myset,as.POSIXct(paste(myset$Date, myset$Time), format="%Y-%m-%d %H:%M:%S"))
sub1<-as.numeric(as.character(myset$Sub_metering_1))
sub2<-as.numeric(as.character(myset$Sub_metering_2))
sub3<-as.numeric(as.character(myset$Sub_metering_3))
par(mar=c(4,4,2,10))
plot(myset$FullDate, sub1, type="l",ylim=c(0, 40),yaxt="none",xlab="", ylab = "Energy sub metering")
axis(2, seq(0, 35, 10))
par(new=TRUE)
plot(myset$FullDate, sub2, type="l", ylim=c(0, 40), yaxt="none",xlab="", ylab = "Energy sub metering", col="red")
par(new=TRUE)
plot(myset$FullDate, sub3, type="l", ylim=c(0, 40), xlab="",yaxt="none", ylab = "Energy sub metering", col="blue")
legend("topright", lty=1,col = c("black", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
|
####
# Generate qq plot for SMG results
#
###
# for example
#R --slave --args < qqplot.R music2_smg_test_detailed out.pdf
#
# Fetch command line arguments
args = commandArgs();
input = as.character(args[4])
output = as.character(args[5])
#input="smgs_detailed"
#output="qqplot.pdf"
read.table(input,header=T )->z
gc=function(p)
{
p=1-p
#pm=median(p)
pm=median(p[p>0 & p<1])
lambda=qchisq(pm,1)/qchisq(0.5,1)
x2=qchisq(p,1)/lambda
p=pchisq(x2,1)
p=1-p
p
}
z$P_CT_corrected=gc(z[,9])
z$FDR_CT_corrected=p.adjust(z$P_CT_corrected,method="fdr")
pdf(output,10,7 )
par(mfrow=c(1,2))
# plot 1: uncorrected
p=z[,9]
p = p[p>0]
p = p[p<1]
p = p[!is.na(p)]
OBS = sort(-log10(p))
EXP = sort(-log10(1:length(p)/length(p)))
plot(EXP, OBS, col="red", pch=20); abline(a=0,b=1, col="lightgray", lty=1, lwd=2)
title("SMG test qq-plot")
# plot 2: GC-corrected
p=z$P_CT_corrected
p = p[p>0]
p = p[p<1]
p = p[!is.na(p)]
OBS = sort(-log10(p))
EXP = sort(-log10(1:length(p)/length(p)))
plot(EXP, OBS, col="red", pch=20); abline(a=0,b=1, col="lightgray", lty=1, lwd=2)
title("SMG test qq-plot(GC corrected)")
dev.off()
#write.csv(z,paste(input,"corrected",sep="."),row.names=F,quote=F)
write.csv(z, file="smgs_detailed.corrected", row.names=F, quote=F)
| /lib/TGI/MuSiC2/Smg.pm.qqplot.correct.R | permissive | ding-lab/MuSiC2 | R | false | false | 1,305 | r | ####
# Generate qq plot for SMG results
#
###
# for example
#R --slave --args < qqplot.R music2_smg_test_detailed out.pdf
#
# Fetch command line arguments
args = commandArgs();
input = as.character(args[4])
output = as.character(args[5])
#input="smgs_detailed"
#output="qqplot.pdf"
read.table(input,header=T )->z
gc=function(p)
{
p=1-p
#pm=median(p)
pm=median(p[p>0 & p<1])
lambda=qchisq(pm,1)/qchisq(0.5,1)
x2=qchisq(p,1)/lambda
p=pchisq(x2,1)
p=1-p
p
}
z$P_CT_corrected=gc(z[,9])
z$FDR_CT_corrected=p.adjust(z$P_CT_corrected,method="fdr")
pdf(output,10,7 )
par(mfrow=c(1,2))
# plot 1: uncorrected
p=z[,9]
p = p[p>0]
p = p[p<1]
p = p[!is.na(p)]
OBS = sort(-log10(p))
EXP = sort(-log10(1:length(p)/length(p)))
plot(EXP, OBS, col="red", pch=20); abline(a=0,b=1, col="lightgray", lty=1, lwd=2)
title("SMG test qq-plot")
# plot 2: GC-corrected
p=z$P_CT_corrected
p = p[p>0]
p = p[p<1]
p = p[!is.na(p)]
OBS = sort(-log10(p))
EXP = sort(-log10(1:length(p)/length(p)))
plot(EXP, OBS, col="red", pch=20); abline(a=0,b=1, col="lightgray", lty=1, lwd=2)
title("SMG test qq-plot(GC corrected)")
dev.off()
#write.csv(z,paste(input,"corrected",sep="."),row.names=F,quote=F)
write.csv(z, file="smgs_detailed.corrected", row.names=F, quote=F)
|
library(tidyverse)
# ggplot2::mpg 패키지를 복사
mpg <- as.data.frame(ggplot2::mpg)
# 1) cty, hwy 변수의 평균을 파생 변수(avg_mpg)로 추가
mpg_added <- mutate(mpg, avg_mpg = (cty + hwy) / 2)
head(mpg_added)
# 2) avg_mpg 값 상위 3개 자동차 모델 정보 출력
mpg_added %>% arrange(desc(avg_mpg)) %>% head(n = 3)
# 3) 1), 2)와 같은 결과를 주는 pipe 연산자 호출 구문을 작성.
head(mpg)
mpg %>%
mutate(avg_mpg = (cty + hwy) / 2) %>%
arrange(desc(avg_mpg)) %>%
head(n = 3)
# summarise() 함수에서 사용되는 통계(집계) 함수들:
# n(): 빈도수
# mean(): 평균
# sd(): 표준편차(standard deviation)
# sum(): 합계
# min(): 최솟값
# max(): 최댓값
# median(): 중앙값
# mpg 데이터 프레임에서 연도별로 cty 컬럼에 위의 모든 함수 적용
mpg %>%
group_by(year) %>%
summarise(counts = n(), mean = mean(cty), sd = sd(cty),
sum = sum(cty), min = min(cty), max = max(cty),
median = median(cty))
# 회사별 suv의 시내연비와 고속도로 연비 평균을 구하고,
# 연비 평균의 내림차순 정렬했을 때 상위 5개를 출력
mpg %>%
filter(class == 'suv') %>% # suv 차종만 선택
mutate(avg_mpg = (cty + hwy) / 2) %>% # 시내/고속도로 연비 평균 추가
group_by(manufacturer) %>% # 자동차 회사별로 그룹 지어서
summarise(mean_total = mean(avg_mpg), counts = n()) %>% # 통합연비 평균
arrange(desc(mean_total)) %>% # 통합연비 내림차순 정렬
head(n = 5) # 상위 5위까지 출력
# class별 cty 평균을 출력.
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty))
# class별 cty 평균을 cty 평균 내림차순으로 출력.
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty)) %>%
arrange(desc(mean_cty))
# 자동차 회사별 hwy의 평균이 가장 높은 곳 1 ~ 3위를 출력.
mpg %>%
group_by(manufacturer) %>%
summarise(mean_hwy = mean(hwy)) %>%
arrange(desc(mean_hwy)) %>%
head(n = 3)
# 자동차 회사별 compact 자동차 차종 수를 내림차순으로 출력.
mpg %>%
filter(class == 'compact') %>%
group_by(manufacturer) %>%
summarise(counts = n()) %>%
arrange(desc(counts))
## ggplot2::midwest 데이터 프레임을 복사
midwest <- as.data.frame(ggplot2::midwest)
colnames(midwest)
# 1) 전체인구 대비 미성년 인구 백분율 변수를 추가하세요.
# (Hint) poptotal: 전체인구수, popadults: 성인인구수
# 미성년자 인구수 = poptotal - popadults
# 미성년자 인구 백분율 = (poptotal - popadults) / poptotal * 100 (%)
midwest_added <- mutate(midwest,
child_pct = (poptotal - popadults) / poptotal * 100)
# 2) 미성년 인구 비율이 높은 상위 5개 county의 미성년 인구 비율 출력.
midwest_added %>%
arrange(desc(child_pct)) %>%
head(n = 5) %>%
select(county, child_pct)
# 3) 미성년 인구 비율이 40% 이상이면, 'large',
# 30 ~ 40%이면, 'middle'
# 30% 미만이면, 'small'
# 값을 갖는 파생 변수를 추가하고,
# 각 비율 등급에는 몇 개 지역이 있는 지 찾아보세요.
midwest %>%
mutate(child_pct = (poptotal - popadults) / poptotal * 100,
child_grade = ifelse(child_pct >= 40, 'large',
ifelse(child_pct >= 30, 'middle', 'small'))) %>%
group_by(child_grade) %>%
summarise(n = n())
midwest_added$child_grade <-
ifelse(midwest_added$child_pct >= 40, 'large',
ifelse(midwest_added$child_pct >= 30, 'middle', 'small'))
table(midwest_added$child_grade)
midwest_added %>%
group_by(child_grade) %>%
summarise(count = n())
# 4) poptotal과 popasian 변수를 사용해서
# 전체인구 대비 아시아 인구 비율 파생 변수를 추가하고,
# 아시아 인구 비율 상위 10위까지의 county, 아시아 인구 비율을 출력.
midwest %>%
mutate(asian_ratio = popasian / poptotal * 100) %>%
arrange(desc(asian_ratio)) %>%
head(n = 10) %>%
select(county, state, asian_ratio)
| /lab_r/ex09_preprocessing.r | no_license | jade053/202007_itw_bd18 | R | false | false | 4,215 | r | library(tidyverse)
# ggplot2::mpg 패키지를 복사
mpg <- as.data.frame(ggplot2::mpg)
# 1) cty, hwy 변수의 평균을 파생 변수(avg_mpg)로 추가
mpg_added <- mutate(mpg, avg_mpg = (cty + hwy) / 2)
head(mpg_added)
# 2) avg_mpg 값 상위 3개 자동차 모델 정보 출력
mpg_added %>% arrange(desc(avg_mpg)) %>% head(n = 3)
# 3) 1), 2)와 같은 결과를 주는 pipe 연산자 호출 구문을 작성.
head(mpg)
mpg %>%
mutate(avg_mpg = (cty + hwy) / 2) %>%
arrange(desc(avg_mpg)) %>%
head(n = 3)
# summarise() 함수에서 사용되는 통계(집계) 함수들:
# n(): 빈도수
# mean(): 평균
# sd(): 표준편차(standard deviation)
# sum(): 합계
# min(): 최솟값
# max(): 최댓값
# median(): 중앙값
# mpg 데이터 프레임에서 연도별로 cty 컬럼에 위의 모든 함수 적용
mpg %>%
group_by(year) %>%
summarise(counts = n(), mean = mean(cty), sd = sd(cty),
sum = sum(cty), min = min(cty), max = max(cty),
median = median(cty))
# 회사별 suv의 시내연비와 고속도로 연비 평균을 구하고,
# 연비 평균의 내림차순 정렬했을 때 상위 5개를 출력
mpg %>%
filter(class == 'suv') %>% # suv 차종만 선택
mutate(avg_mpg = (cty + hwy) / 2) %>% # 시내/고속도로 연비 평균 추가
group_by(manufacturer) %>% # 자동차 회사별로 그룹 지어서
summarise(mean_total = mean(avg_mpg), counts = n()) %>% # 통합연비 평균
arrange(desc(mean_total)) %>% # 통합연비 내림차순 정렬
head(n = 5) # 상위 5위까지 출력
# class별 cty 평균을 출력.
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty))
# class별 cty 평균을 cty 평균 내림차순으로 출력.
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty)) %>%
arrange(desc(mean_cty))
# 자동차 회사별 hwy의 평균이 가장 높은 곳 1 ~ 3위를 출력.
mpg %>%
group_by(manufacturer) %>%
summarise(mean_hwy = mean(hwy)) %>%
arrange(desc(mean_hwy)) %>%
head(n = 3)
# 자동차 회사별 compact 자동차 차종 수를 내림차순으로 출력.
mpg %>%
filter(class == 'compact') %>%
group_by(manufacturer) %>%
summarise(counts = n()) %>%
arrange(desc(counts))
## ggplot2::midwest 데이터 프레임을 복사
midwest <- as.data.frame(ggplot2::midwest)
colnames(midwest)
# 1) 전체인구 대비 미성년 인구 백분율 변수를 추가하세요.
# (Hint) poptotal: 전체인구수, popadults: 성인인구수
# 미성년자 인구수 = poptotal - popadults
# 미성년자 인구 백분율 = (poptotal - popadults) / poptotal * 100 (%)
midwest_added <- mutate(midwest,
child_pct = (poptotal - popadults) / poptotal * 100)
# 2) 미성년 인구 비율이 높은 상위 5개 county의 미성년 인구 비율 출력.
midwest_added %>%
arrange(desc(child_pct)) %>%
head(n = 5) %>%
select(county, child_pct)
# 3) 미성년 인구 비율이 40% 이상이면, 'large',
# 30 ~ 40%이면, 'middle'
# 30% 미만이면, 'small'
# 값을 갖는 파생 변수를 추가하고,
# 각 비율 등급에는 몇 개 지역이 있는 지 찾아보세요.
midwest %>%
mutate(child_pct = (poptotal - popadults) / poptotal * 100,
child_grade = ifelse(child_pct >= 40, 'large',
ifelse(child_pct >= 30, 'middle', 'small'))) %>%
group_by(child_grade) %>%
summarise(n = n())
midwest_added$child_grade <-
ifelse(midwest_added$child_pct >= 40, 'large',
ifelse(midwest_added$child_pct >= 30, 'middle', 'small'))
table(midwest_added$child_grade)
midwest_added %>%
group_by(child_grade) %>%
summarise(count = n())
# 4) poptotal과 popasian 변수를 사용해서
# 전체인구 대비 아시아 인구 비율 파생 변수를 추가하고,
# 아시아 인구 비율 상위 10위까지의 county, 아시아 인구 비율을 출력.
midwest %>%
mutate(asian_ratio = popasian / poptotal * 100) %>%
arrange(desc(asian_ratio)) %>%
head(n = 10) %>%
select(county, state, asian_ratio)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ub_wn_ign.R
\name{ub_wn_ign}
\alias{ub_wn_ign}
\title{upper bound wilson, ignorable mi}
\usage{
ub_wn_ign(z, qhat, n_obs, rn)
}
\arguments{
\item{z}{numeric, quantile of t distribution coresponding to the desired
confidence level 1- alpha}
\item{qhat}{numeric}
\item{n_obs}{integer, number of observations}
\item{rn}{interger}
}
\value{
numeric
}
\description{
calculates lower bound of (1-alpha)100\% confidence interval
using Wilson's method following MI assuming ignorability
}
\examples{
ub_wn_ign(1.96, 0.8, 100, 0.7)
}
| /man/ub_wn_ign.Rd | permissive | yuliasidi/bin2mi | R | false | true | 606 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ub_wn_ign.R
\name{ub_wn_ign}
\alias{ub_wn_ign}
\title{upper bound wilson, ignorable mi}
\usage{
ub_wn_ign(z, qhat, n_obs, rn)
}
\arguments{
\item{z}{numeric, quantile of t distribution coresponding to the desired
confidence level 1- alpha}
\item{qhat}{numeric}
\item{n_obs}{integer, number of observations}
\item{rn}{interger}
}
\value{
numeric
}
\description{
calculates lower bound of (1-alpha)100\% confidence interval
using Wilson's method following MI assuming ignorability
}
\examples{
ub_wn_ign(1.96, 0.8, 100, 0.7)
}
|
library(mice)
library(parallel)
library(readr)
library(dplyr)
df <- read_csv("NHAMCS_2012-2015_2018-04-09.csv")
df$X1 <- NULL
cl <- makeCluster(7)
clusterSetRNGStream(cl,9956)
clusterExport(cl,"df")
clusterEvalQ(cl,library(mice))
imp_pars <- parLapply(cl=cl, X=1:7, fun=function(no){mice(df, m=1)})
stopCluster(cl)
imp_merged <- imp_pars[[1]]
for (n in 2:length(imp_pars)){imp_merged <- ibind(imp_merged,imp_pars[[n]])}
completed_df <- complete(imp_merged)
write_csv(completed_df, "NHAMCS_2012-2015_2018-04-09_imp.csv") | /imp_triage.R | no_license | cagancayco/ed-triage | R | false | false | 523 | r | library(mice)
library(parallel)
library(readr)
library(dplyr)
df <- read_csv("NHAMCS_2012-2015_2018-04-09.csv")
df$X1 <- NULL
cl <- makeCluster(7)
clusterSetRNGStream(cl,9956)
clusterExport(cl,"df")
clusterEvalQ(cl,library(mice))
imp_pars <- parLapply(cl=cl, X=1:7, fun=function(no){mice(df, m=1)})
stopCluster(cl)
imp_merged <- imp_pars[[1]]
for (n in 2:length(imp_pars)){imp_merged <- ibind(imp_merged,imp_pars[[n]])}
completed_df <- complete(imp_merged)
write_csv(completed_df, "NHAMCS_2012-2015_2018-04-09_imp.csv") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_node_attrs.R
\name{get_node_attrs}
\alias{get_node_attrs}
\title{Get node attribute values}
\usage{
get_node_attrs(x, node_attr, nodes = NULL)
}
\arguments{
\item{x}{either a graph object of class
\code{dgr_graph} that is created using
\code{create_graph}, or a node data frame.}
\item{node_attr}{the name of the attribute for which
to get values.}
\item{nodes}{an optional vector of node IDs for
filtering list of nodes present in the graph or
node data frame.}
}
\value{
a named vector of node attribute values for
the attribute given by \code{node_attr} by node ID.
}
\description{
From a graph object of class
\code{dgr_graph} or a node data frame, get node
attribute values for one or more nodes.
}
\examples{
\dontrun{
library(magrittr)
# With the `create_random_graph()` function, get
# a simple graph with a node attribute called
# `value`
random_graph <-
create_random_graph(
n = 4,
m = 4,
directed = TRUE,
fully_connected = TRUE,
set_seed = 20)
# Get all of the values from the `value` node
# attribute as a named vector
random_graph \%>\%
get_node_attrs("value")
#> 1 2 3 4
#> 9.0 8.0 3.0 5.5
# To only return node attribute values for specified
# nodes, use the `nodes` argument
random_graph \%>\%
get_node_attrs("value", nodes = c(1, 3))
#> 1 3
#> 9 3
}
}
| /man/get_node_attrs.Rd | no_license | dy-kim/DiagrammeR | R | false | true | 1,389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_node_attrs.R
\name{get_node_attrs}
\alias{get_node_attrs}
\title{Get node attribute values}
\usage{
get_node_attrs(x, node_attr, nodes = NULL)
}
\arguments{
\item{x}{either a graph object of class
\code{dgr_graph} that is created using
\code{create_graph}, or a node data frame.}
\item{node_attr}{the name of the attribute for which
to get values.}
\item{nodes}{an optional vector of node IDs for
filtering list of nodes present in the graph or
node data frame.}
}
\value{
a named vector of node attribute values for
the attribute given by \code{node_attr} by node ID.
}
\description{
From a graph object of class
\code{dgr_graph} or a node data frame, get node
attribute values for one or more nodes.
}
\examples{
\dontrun{
library(magrittr)
# With the `create_random_graph()` function, get
# a simple graph with a node attribute called
# `value`
random_graph <-
create_random_graph(
n = 4,
m = 4,
directed = TRUE,
fully_connected = TRUE,
set_seed = 20)
# Get all of the values from the `value` node
# attribute as a named vector
random_graph \%>\%
get_node_attrs("value")
#> 1 2 3 4
#> 9.0 8.0 3.0 5.5
# To only return node attribute values for specified
# nodes, use the `nodes` argument
random_graph \%>\%
get_node_attrs("value", nodes = c(1, 3))
#> 1 3
#> 9 3
}
}
|
# Title : heuristic
# Objective : Interface with stan model
# Created by: adkinsty
# Created on: 6/25/20
library(rstan)
library(tidyverse)
library(shinystan)
library(ggthemes)
library(shotGroups)
library(bayesplot)
options(mc.cores = parallel::detectCores())
parallel:::setDefaultClusterOptions(setup_strategy = "sequential")
setwd("/Users/adkinsty/Box/LeeLab/Experiments/Exp_files/reach/")
train <- read_csv("data/clean/exp2/train_data.csv") %>%
filter(x > -2 & x < 2 & y > -2 & y < 2)
data <- read_csv("data/clean/exp2/test_data.csv") %>%
filter(x > -2 & x < 2 & y > -2 & y < 2)
# key variables
jj <- data$sub_id
cc <- data$cond_id
J <- length(unique(jj))
N <- nrow(data)
C <- length(unique(cc))
cond <- sort(unique(data$loss))
y <- data$x
aim <- matrix(nrow=3,ncol=J)
for (j in 1:J) {
# calculate win probabilities and get aim with max
for (c in 1:C) {
if (cond[c]==0) {
aim[c,j] = 0
} else {
aim[c,j] = .5
}
}
}
write.csv(aim,"data/clean/exp2/H_aims.csv")
input <- list(N=N,J=J,C=C,
cc=cc,jj=jj,
aim=aim,
y=y,
get_yrep=1,
get_log_lik=1,
prior_only=0)
model <- stan_model(file="modeling/exp2/stan/aim.stan",model_name="H")
map_est = optimizing(object=model,data=input,as_vector=FALSE)
map_est$par$sigma_m; map_est$par$sigma_s; map_est$par$sigma
# mcmc_est <- readRDS("modeling/stan/rds/H.rds")
mcmc_est <- sampling(
object=model,
data=input,
chains=4,
iter=5000,
warmup=2500,
cores=4)
write_rds(mcmc_est,path="modeling/exp2/stan/rds/H.rds")
#
#
# print(mcmc_est,pars = c("sigma_m","sigma_s","sigma"),probs = c(.025,.975))
# stan_plot(mcmc_est,pars = c("sigma_m","sigma_s","sigma"))
#
# yrep <- rstan::extract(mcmc_est, 'yrep')$yrep
# pred <- as_tibble(t(yrep)) %>% cbind(data) %>%
# pivot_longer(cols=starts_with("V"),names_to="draw",values_to="yrep")
# color_scheme_set("red")
# ppc_dens_overlay(y,yrep[sample(nrow(yrep), 25), ]) + theme(legend.position="top")
# ppc_ecdf_overlay(y, yrep[sample(nrow(yrep), 25), ]) + theme(legend.position="top")
# ppc_stat(y,yrep,stat = "mean") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "sd") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "max") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "min") + theme(legend.position = "top")
# ppc_violin_grouped(y,yrep[sample(nrow(yrep), 25), ],group=data$ratio,
# y_draw = "points",y_alpha = .2,y_jitter = .2,y_size = .2)
#
# loo_est <- loo(mcmc_est, save_psis = TRUE, cores = 4)
# psis <- loo_est$psis_object
# lw <- weights(psis)
# ppc_loo_pit_overlay(y, yrep, lw = lw) + theme(legend.position = "top")
# ppc_loo_pit_qq(y, yrep, lw = lw) + theme(legend.position = "top")
#
# keep_obs <- sample(nrow(yrep), 50)
# ppc_loo_intervals(y, yrep, psis_object = psis, subset = keep_obs,order = "median")
# ppc_loo_ribbon(y, yrep, psis_object = psis, subset = keep_obs)
#
# # mu_obs vs mu_rep scatter
# pred %>%
# group_by(draw,id) %>%
# summarise(mu_rep=mean(yrep),
# mu_obs=mean(x)) %>% ungroup() %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="grey",linetype="dashed") +
# stat_bin_hex(aes(x=mu_rep,y=mu_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # sd_obs vs sd_rep scatter
# pred %>%
# group_by(draw,id) %>%
# summarise(sd_rep=sd(yrep),
# sd_obs=sd(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="grey",linetype="dashed") +
# stat_bin_hex(aes(x=sd_rep,y=sd_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # mu_obs vs mu_rep scatter by ratio
# pred %>%
# group_by(draw,id,ratio) %>%
# summarise(mu_rep=mean(yrep),
# mu_obs=mean(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="black",linetype="dashed") +
# stat_bin_hex(aes(x=mu_rep,y=mu_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# facet_wrap(.~ratio,nrow=3) +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # sd_obs vs sd_rep scatter by ratio
# pred %>%
# group_by(draw,id,ratio) %>%
# summarise(sd_rep=sd(yrep),
# sd_obs=sd(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="black",linetype="dashed") +
# stat_bin_hex(aes(x=sd_rep,y=sd_obs),binwidth = .05,alpha=.8) +
# scale_fill_viridis_b(option="A") +
# facet_wrap(.~ratio,nrow=3) +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
| /modeling/exp2/stan/extra/H.R | no_license | adkinsty/reach | R | false | false | 5,093 | r | # Title : heuristic
# Objective : Interface with stan model
# Created by: adkinsty
# Created on: 6/25/20
library(rstan)
library(tidyverse)
library(shinystan)
library(ggthemes)
library(shotGroups)
library(bayesplot)
options(mc.cores = parallel::detectCores())
parallel:::setDefaultClusterOptions(setup_strategy = "sequential")
setwd("/Users/adkinsty/Box/LeeLab/Experiments/Exp_files/reach/")
train <- read_csv("data/clean/exp2/train_data.csv") %>%
filter(x > -2 & x < 2 & y > -2 & y < 2)
data <- read_csv("data/clean/exp2/test_data.csv") %>%
filter(x > -2 & x < 2 & y > -2 & y < 2)
# key variables
jj <- data$sub_id
cc <- data$cond_id
J <- length(unique(jj))
N <- nrow(data)
C <- length(unique(cc))
cond <- sort(unique(data$loss))
y <- data$x
aim <- matrix(nrow=3,ncol=J)
for (j in 1:J) {
# calculate win probabilities and get aim with max
for (c in 1:C) {
if (cond[c]==0) {
aim[c,j] = 0
} else {
aim[c,j] = .5
}
}
}
write.csv(aim,"data/clean/exp2/H_aims.csv")
input <- list(N=N,J=J,C=C,
cc=cc,jj=jj,
aim=aim,
y=y,
get_yrep=1,
get_log_lik=1,
prior_only=0)
model <- stan_model(file="modeling/exp2/stan/aim.stan",model_name="H")
map_est = optimizing(object=model,data=input,as_vector=FALSE)
map_est$par$sigma_m; map_est$par$sigma_s; map_est$par$sigma
# mcmc_est <- readRDS("modeling/stan/rds/H.rds")
mcmc_est <- sampling(
object=model,
data=input,
chains=4,
iter=5000,
warmup=2500,
cores=4)
write_rds(mcmc_est,path="modeling/exp2/stan/rds/H.rds")
#
#
# print(mcmc_est,pars = c("sigma_m","sigma_s","sigma"),probs = c(.025,.975))
# stan_plot(mcmc_est,pars = c("sigma_m","sigma_s","sigma"))
#
# yrep <- rstan::extract(mcmc_est, 'yrep')$yrep
# pred <- as_tibble(t(yrep)) %>% cbind(data) %>%
# pivot_longer(cols=starts_with("V"),names_to="draw",values_to="yrep")
# color_scheme_set("red")
# ppc_dens_overlay(y,yrep[sample(nrow(yrep), 25), ]) + theme(legend.position="top")
# ppc_ecdf_overlay(y, yrep[sample(nrow(yrep), 25), ]) + theme(legend.position="top")
# ppc_stat(y,yrep,stat = "mean") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "sd") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "max") + theme(legend.position = "top")
# ppc_stat(y,yrep,stat = "min") + theme(legend.position = "top")
# ppc_violin_grouped(y,yrep[sample(nrow(yrep), 25), ],group=data$ratio,
# y_draw = "points",y_alpha = .2,y_jitter = .2,y_size = .2)
#
# loo_est <- loo(mcmc_est, save_psis = TRUE, cores = 4)
# psis <- loo_est$psis_object
# lw <- weights(psis)
# ppc_loo_pit_overlay(y, yrep, lw = lw) + theme(legend.position = "top")
# ppc_loo_pit_qq(y, yrep, lw = lw) + theme(legend.position = "top")
#
# keep_obs <- sample(nrow(yrep), 50)
# ppc_loo_intervals(y, yrep, psis_object = psis, subset = keep_obs,order = "median")
# ppc_loo_ribbon(y, yrep, psis_object = psis, subset = keep_obs)
#
# # mu_obs vs mu_rep scatter
# pred %>%
# group_by(draw,id) %>%
# summarise(mu_rep=mean(yrep),
# mu_obs=mean(x)) %>% ungroup() %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="grey",linetype="dashed") +
# stat_bin_hex(aes(x=mu_rep,y=mu_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # sd_obs vs sd_rep scatter
# pred %>%
# group_by(draw,id) %>%
# summarise(sd_rep=sd(yrep),
# sd_obs=sd(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="grey",linetype="dashed") +
# stat_bin_hex(aes(x=sd_rep,y=sd_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # mu_obs vs mu_rep scatter by ratio
# pred %>%
# group_by(draw,id,ratio) %>%
# summarise(mu_rep=mean(yrep),
# mu_obs=mean(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="black",linetype="dashed") +
# stat_bin_hex(aes(x=mu_rep,y=mu_obs),binwidth = .05) +
# scale_fill_viridis_b(option="A") +
# facet_wrap(.~ratio,nrow=3) +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
#
# # sd_obs vs sd_rep scatter by ratio
# pred %>%
# group_by(draw,id,ratio) %>%
# summarise(sd_rep=sd(yrep),
# sd_obs=sd(x)) %>%
# ggplot() +
# coord_cartesian(xlim=c(0,1),ylim=c(0,1)) +
# geom_abline(slope=1,intercept=0,colour="black",linetype="dashed") +
# stat_bin_hex(aes(x=sd_rep,y=sd_obs),binwidth = .05,alpha=.8) +
# scale_fill_viridis_b(option="A") +
# facet_wrap(.~ratio,nrow=3) +
# theme_tufte(base_family = "sans",base_size=15) +
# theme(axis.line = element_line(size=.25),
# legend.position = "none")
|
# Script summary
#
# Quantile Mapping
# Loop through WRF output for stations, quantile map to bias correct
# save CSVs of the adjusted WRF output
# Save figures of ECDF plots
# Do this for:
#
# ERA-Interim
#
# CSM3 (historical and future)
#
# CCSM4 (historical and future)
#
# Convert CSV
# save csv files of "historical" and "future" output (not the same
# as in model runs)
#
# Output files:
# /data/ERA_stations_adj/"stid"_era_adj.Rds
# /data/ERA_stations_adj_csv/"stid"_era_adj.csv
# /data/CM3_stations_adj/"stid"_cm3"h/f"_adj.Rds
# /data/CM3_stations_adj_csv/"stid"_cm3"h/f"_adj.csv
# /data/CCSM4_stations_adj/"stid"_ccsm4"h/f"_adj.Rds
# /data/CCSM4_stations_adj_csv/"stid"_ccsm4"h/f"_adj.csv
# /figures/era_adj_ecdfs/"stid"_era.png
# /figures/cm3_adj_ecdfs/"stid"_cm3"h/f".png
# /figures/ccsm4_adj_ecdfs/"stid"_ccsm4"h/f".png
#-- Setup ---------------------------------------------------------------------
library(dplyr)
library(lubridate)
library(progress)
workdir <- getwd()
datadir <- file.path(workdir, "data")
figdir <- file.path(workdir, "figures")
# adjusted ASOS data
asos_adj_dir <- file.path(datadir, "AK_ASOS_stations_adj")
era_dir <- file.path(datadir, "ERA_stations")
era_adj_dir <- file.path(datadir, "ERA_stations_adj")
era_adj_csv_dir <- file.path(datadir, "ERA_stations_adj_csv")
# helper functions for qmapping
helpers <- file.path(workdir, "code/helpers.R")
source(helpers)
#------------------------------------------------------------------------------
#-- Quantile Map ERA-Interim --------------------------------------------------
# loop through ERA output data files and adjust
era_paths <- list.files(era_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_raw_paths),
format = " Quantile Mapping ERA Speeds [:bar] :percent")
for(i in seq_along(era_paths)){
era <- readRDS(era_paths[i])
stid <- era$stid[1]
asos_path <- file.path(asos_adj_dir, paste0(stid, ".Rds"))
asos <- readRDS(asos_path)
sim <- era$sped
obs <- asos$sped_adj
# quantile mapping
sim_adj <- qMapWind(obs, sim)
sim_adj[sim_adj < 1] <- 0
era$sped_adj <- sim_adj
# save data
era_adj_path <- file.path(era_adj_dir,
paste0(stid, "_era_adj.Rds"))
saveRDS(era, era_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Quantile Map CM3 ----------------------------------------------------------
cm3_dir <- file.path(datadir, "CM3_stations")
cm3_adj_dir <- file.path(datadir, "CM3_stations_adj")
cm3_adj_csv_dir <- file.path(datadir, "CM3_stations_adj_csv")
cm3h_paths <- list.files(cm3_dir, pattern = "cm3h", full.names = TRUE)
cm3f_paths <- list.files(cm3_dir, pattern = "cm3f", full.names = TRUE)
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2005-12-31 23:59:59")
pb <- progress_bar$new(total = length(cm3h_paths),
format = " Quantile Mapping CM3 data [:bar] :percent")
for(i in seq_along(cm3h_paths)){
cm3 <- readRDS(cm3h_paths[i]) %>%
filter(ts >= h_start)
stid <- cm3$stid[1]
era_path <- file.path(era_adj_dir, paste0(stid, "_era_adj.Rds"))
# use years from historical CM3 period
era <- readRDS(era_path) %>%
filter(ts >= h_start & ts <= h_end)
sim <- cm3$sped
obs <- era$sped_adj
# historical quantile mapping
qmap_obj <- qMapWind(obs, sim, ret.deltas = TRUE)
sim_adj <- qmap_obj$sim_adj
sim_adj[sim_adj < 1] <- 0
cm3$sped_adj <- sim_adj
# save data
cm3_adj_path <- file.path(cm3_adj_dir,
paste0(stid, "_cm3h_adj.Rds"))
saveRDS(cm3, cm3_adj_path)
cm3 <- readRDS(cm3f_paths[i])
# just check to make sure same station
stid2 <- cm3$stid[1]
if(stid2 != stid){print("shit stations don't match");break}
sim <- cm3$sped
# future quantile mapping
sim_adj <- qMapWind(sim = sim, use.deltas = qmap_obj$deltas)
sim_adj[sim_adj < 1] <- 0
cm3$sped_adj <- sim_adj
# save data
cm3_adj_path <- file.path(cm3_adj_dir,
paste0(stid, "_cm3f_adj.Rds"))
saveRDS(cm3, cm3_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Quantile Map CCSM4 --------------------------------------------------------
ccsm4_dir <- file.path(datadir, "ccsm4_stations")
ccsm4_adj_dir <- file.path(datadir, "ccsm4_stations_adj")
ccsm4_adj_csv_dir <- file.path(datadir, "ccsm4_stations_adj_csv")
ccsm4h_paths <- list.files(ccsm4_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_paths <- list.files(ccsm4_dir, pattern = "ccsm4f", full.names = TRUE)
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2005-12-31 23:59:59")
pb <- progress_bar$new(total = length(ccsm4h_paths),
format = " Quantile Mapping CCSM4 data [:bar] :percent")
for(i in seq_along(ccsm4h_paths)){
ccsm4 <- readRDS(ccsm4h_paths[i]) %>%
filter(ts >= h_start)
stid <- ccsm4$stid[1]
era_path <- file.path(era_adj_dir, paste0(stid, "_era_adj.Rds"))
# use years from historical ccsm4 period
era <- readRDS(era_path) %>%
filter(ts >= h_start & ts <= h_end)
sim <- ccsm4$sped
obs <- era$sped_adj
# historical quantile mapping
qmap_obj <- qMapWind(obs, sim, ret.deltas = TRUE)
sim_adj <- qmap_obj$sim_adj
sim_adj[sim_adj < 1] <- 0
ccsm4$sped_adj <- sim_adj
# save data
ccsm4_adj_path <- file.path(ccsm4_adj_dir,
paste0(stid, "_ccsm4h_adj.Rds"))
saveRDS(ccsm4, ccsm4_adj_path)
ccsm4 <- readRDS(ccsm4f_paths[i])
# just check to make sure same station
stid2 <- ccsm4$stid[1]
if(stid2 != stid){print("shit stations don't match");break}
sim <- ccsm4$sped
# future quantile mapping
sim_adj <- qMapWind(sim = sim, use.deltas = qmap_obj$deltas)
sim_adj[sim_adj < 1] <- 0
ccsm4$sped_adj <- sim_adj
# save data
ccsm4_adj_path <- file.path(ccsm4_adj_dir,
paste0(stid, "_ccsm4f_adj.Rds"))
saveRDS(ccsm4, ccsm4_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Save CSVs -----------------------------------------------------------------
# ERA dirs
era_adj_dir <- file.path(datadir, "era_stations_adj")
era_adj_csv_dir <- file.path(datadir, "era_stations_adj_csv")
# era paths
era_adj_paths <- list.files(era_adj_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_adj_paths),
format = " Creating ERA CSVs [:bar] :percent")
for(i in seq_along(era_adj_paths)){
# read, filter to target dates, save CSVs
era <- readRDS(era_paths[i]) %>%
filter(ts < ymd("2015-01-02"))
stid <- era$stid[1]
era_path <- file.path(era_adj_csv_dir, paste0(stid, "_era_adj.csv"))
write.csv(era, era_path, row.names = FALSE)
pb$tick()
}
# CM3 dirs
cm3_adj_dir <- file.path(datadir, "CM3_stations_adj")
cm3_adj_csv_dir <- file.path(datadir, "CM3_stations_adj_csv")
# CM3 paths
cm3h_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3h", full.names = TRUE)
cm3f_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3f", full.names = TRUE)
# Loop through CM3 paths and save future/hist CSVs
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2015-01-01 23:59:59")
f_start <- ymd_hms("2065-01-01 00:00:00")
f_end <- ymd_hms("2100-01-01 23:59:59")
pb <- progress_bar$new(total = length(cm3h_adj_paths),
format = " Creating CSVs [:bar] :percent")
for(i in seq_along(cm3h_adj_paths)){
# read, filter to target dates, save CSVs
cm3h <- readRDS(cm3h_adj_paths[i])
cm3f <- readRDS(cm3f_adj_paths[i])
cm3 <- bind_rows(cm3h, cm3f)
cm3h <- cm3 %>% filter(ts >= h_start & ts <= h_end)
cm3f <- cm3 %>% filter(ts >= f_start & ts <= f_end)
stid <- cm3f$stid[1]
cm3h_path <- file.path(cm3_adj_csv_dir, paste0(stid, "_cm3h_adj.csv"))
cm3f_path <- file.path(cm3_adj_csv_dir, paste0(stid, "_cm3f_adj.csv"))
write.csv(cm3h, cm3h_path, row.names = FALSE)
write.csv(cm3f, cm3f_path, row.names = FALSE)
pb$tick()
}
# CCSM4 dirs
ccsm4_adj_dir <- file.path(datadir, "CCSM4_stations_adj")
ccsm4_adj_csv_dir <- file.path(datadir, "CCSM4_stations_adj_csv")
# CCSM4 paths
ccsm4h_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4f", full.names = TRUE)
# Loop through CCSM4 paths and save future/hist CSVs
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2015-01-01 23:59:59")
f_start <- ymd_hms("2065-01-01 00:00:00")
f_end <- ymd_hms("2100-01-01 23:59:59")
pb <- progress_bar$new(total = length(ccsm4h_adj_paths),
format = " Creating CSVs [:bar] :percent")
for(i in seq_along(ccsm4h_adj_paths)){
# read, filter to target dates, save CSVs
ccsm4h <- readRDS(ccsm4h_adj_paths[i])
ccsm4f <- readRDS(ccsm4f_adj_paths[i])
ccsm4 <- bind_rows(ccsm4h, ccsm4f)
ccsm4h <- ccsm4 %>% filter(ts >= h_start & ts <= h_end)
ccsm4f <- ccsm4 %>% filter(ts >= f_start & ts <= f_end)
stid <- ccsm4f$stid[1]
ccsm4h_path <- file.path(ccsm4_adj_csv_dir, paste0(stid, "_ccsm4h_adj.csv"))
ccsm4f_path <- file.path(ccsm4_adj_csv_dir, paste0(stid, "_ccsm4f_adj.csv"))
write.csv(ccsm4h, ccsm4h_path, row.names = FALSE)
write.csv(ccsm4f, ccsm4f_path, row.names = FALSE)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Generate ECDFs ------------------------------------------------------------
# plot and save ECDF comparisons
# ERA-Interim
era_adj_paths <- list.files(era_adj_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_adj_paths),
format = " Plotting ECDFs from ERA Adjustment [:bar] :percent")
for(i in seq_along(era_adj_paths)){
era <- readRDS(era_adj_paths[i])
stid <- era$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- era$sped
sim_adj <- era$sped_adj
ecdf_path <- file.path(figdir, "era_adj_ecdfs", paste0(stid, "_era.png"))
sim_samp <- sample(length(sim), 100000)
n <- length(obs)
if(n > 100000){
obs_samp <- sample(n, 100000)
} else {obs_samp <- 1:n}
p1 <- ggECDF_compare(obs[obs_samp],
sim[sim_samp],
sim_adj[sim_samp], p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
# GFDL CM3 i = 27
cm3h_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3h", full.names = TRUE)
cm3f_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3f", full.names = TRUE)
pb <- progress_bar$new(total = length(cm3h_adj_paths),
format = " Plotting ECDFs from CM3 Adjustment [:bar] :percent")
for(i in seq_along(cm3h_adj_paths)){
# historical
cm3 <- readRDS(cm3h_adj_paths[i])
stid <- cm3$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- cm3$sped
sim_adj <- cm3$sped_adj
ecdf_path <- file.path(figdir, "cm3_adj_ecdfs", paste0(stid, "_cm3h.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
# future
cm3 <- readRDS(cm3f_adj_paths[i])
obs <- asos$sped_adj
sim <- cm3$sped
sim_adj <- cm3$sped_adj
ecdf_path <- file.path(figdir, "cm3_adj_ecdfs", paste0(stid, "_cm3f.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
# NCAR CCSM4
ccsm4h_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4f", full.names = TRUE)
pb <- progress_bar$new(total = length(ccsm4h_adj_paths),
format = " Plotting ECDFs from ccsm4 Adjustment [:bar] :percent")
for(i in seq_along(ccsm4h_adj_paths)){
# historical
ccsm4 <- readRDS(ccsm4h_adj_paths[i])
stid <- ccsm4$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- ccsm4$sped
sim_adj <- ccsm4$sped_adj
ecdf_path <- file.path(figdir, "ccsm4_adj_ecdfs", paste0(stid, "_ccsm4h.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
# future
ccsm4 <- readRDS(ccsm4f_adj_paths[i])
obs <- asos$sped_adj
sim <- ccsm4$sped
sim_adj <- ccsm4$sped_adj
ecdf_path <- file.path(figdir, "ccsm4_adj_ecdfs", paste0(stid, "_ccsm4f.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
#------------------------------------------------------------------------------
| /WRF_code/wrf_adjustment.R | no_license | kyleredilla/AK_Wind_Climatology | R | false | false | 12,615 | r | # Script summary
#
# Quantile Mapping
# Loop through WRF output for stations, quantile map to bias correct
# save CSVs of the adjusted WRF output
# Save figures of ECDF plots
# Do this for:
#
# ERA-Interim
#
# CSM3 (historical and future)
#
# CCSM4 (historical and future)
#
# Convert CSV
# save csv files of "historical" and "future" output (not the same
# as in model runs)
#
# Output files:
# /data/ERA_stations_adj/"stid"_era_adj.Rds
# /data/ERA_stations_adj_csv/"stid"_era_adj.csv
# /data/CM3_stations_adj/"stid"_cm3"h/f"_adj.Rds
# /data/CM3_stations_adj_csv/"stid"_cm3"h/f"_adj.csv
# /data/CCSM4_stations_adj/"stid"_ccsm4"h/f"_adj.Rds
# /data/CCSM4_stations_adj_csv/"stid"_ccsm4"h/f"_adj.csv
# /figures/era_adj_ecdfs/"stid"_era.png
# /figures/cm3_adj_ecdfs/"stid"_cm3"h/f".png
# /figures/ccsm4_adj_ecdfs/"stid"_ccsm4"h/f".png
#-- Setup ---------------------------------------------------------------------
library(dplyr)
library(lubridate)
library(progress)
workdir <- getwd()
datadir <- file.path(workdir, "data")
figdir <- file.path(workdir, "figures")
# adjusted ASOS data
asos_adj_dir <- file.path(datadir, "AK_ASOS_stations_adj")
era_dir <- file.path(datadir, "ERA_stations")
era_adj_dir <- file.path(datadir, "ERA_stations_adj")
era_adj_csv_dir <- file.path(datadir, "ERA_stations_adj_csv")
# helper functions for qmapping
helpers <- file.path(workdir, "code/helpers.R")
source(helpers)
#------------------------------------------------------------------------------
#-- Quantile Map ERA-Interim --------------------------------------------------
# loop through ERA output data files and adjust
era_paths <- list.files(era_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_raw_paths),
format = " Quantile Mapping ERA Speeds [:bar] :percent")
for(i in seq_along(era_paths)){
era <- readRDS(era_paths[i])
stid <- era$stid[1]
asos_path <- file.path(asos_adj_dir, paste0(stid, ".Rds"))
asos <- readRDS(asos_path)
sim <- era$sped
obs <- asos$sped_adj
# quantile mapping
sim_adj <- qMapWind(obs, sim)
sim_adj[sim_adj < 1] <- 0
era$sped_adj <- sim_adj
# save data
era_adj_path <- file.path(era_adj_dir,
paste0(stid, "_era_adj.Rds"))
saveRDS(era, era_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Quantile Map CM3 ----------------------------------------------------------
cm3_dir <- file.path(datadir, "CM3_stations")
cm3_adj_dir <- file.path(datadir, "CM3_stations_adj")
cm3_adj_csv_dir <- file.path(datadir, "CM3_stations_adj_csv")
cm3h_paths <- list.files(cm3_dir, pattern = "cm3h", full.names = TRUE)
cm3f_paths <- list.files(cm3_dir, pattern = "cm3f", full.names = TRUE)
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2005-12-31 23:59:59")
pb <- progress_bar$new(total = length(cm3h_paths),
format = " Quantile Mapping CM3 data [:bar] :percent")
for(i in seq_along(cm3h_paths)){
cm3 <- readRDS(cm3h_paths[i]) %>%
filter(ts >= h_start)
stid <- cm3$stid[1]
era_path <- file.path(era_adj_dir, paste0(stid, "_era_adj.Rds"))
# use years from historical CM3 period
era <- readRDS(era_path) %>%
filter(ts >= h_start & ts <= h_end)
sim <- cm3$sped
obs <- era$sped_adj
# historical quantile mapping
qmap_obj <- qMapWind(obs, sim, ret.deltas = TRUE)
sim_adj <- qmap_obj$sim_adj
sim_adj[sim_adj < 1] <- 0
cm3$sped_adj <- sim_adj
# save data
cm3_adj_path <- file.path(cm3_adj_dir,
paste0(stid, "_cm3h_adj.Rds"))
saveRDS(cm3, cm3_adj_path)
cm3 <- readRDS(cm3f_paths[i])
# just check to make sure same station
stid2 <- cm3$stid[1]
if(stid2 != stid){print("shit stations don't match");break}
sim <- cm3$sped
# future quantile mapping
sim_adj <- qMapWind(sim = sim, use.deltas = qmap_obj$deltas)
sim_adj[sim_adj < 1] <- 0
cm3$sped_adj <- sim_adj
# save data
cm3_adj_path <- file.path(cm3_adj_dir,
paste0(stid, "_cm3f_adj.Rds"))
saveRDS(cm3, cm3_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Quantile Map CCSM4 --------------------------------------------------------
ccsm4_dir <- file.path(datadir, "ccsm4_stations")
ccsm4_adj_dir <- file.path(datadir, "ccsm4_stations_adj")
ccsm4_adj_csv_dir <- file.path(datadir, "ccsm4_stations_adj_csv")
ccsm4h_paths <- list.files(ccsm4_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_paths <- list.files(ccsm4_dir, pattern = "ccsm4f", full.names = TRUE)
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2005-12-31 23:59:59")
pb <- progress_bar$new(total = length(ccsm4h_paths),
format = " Quantile Mapping CCSM4 data [:bar] :percent")
for(i in seq_along(ccsm4h_paths)){
ccsm4 <- readRDS(ccsm4h_paths[i]) %>%
filter(ts >= h_start)
stid <- ccsm4$stid[1]
era_path <- file.path(era_adj_dir, paste0(stid, "_era_adj.Rds"))
# use years from historical ccsm4 period
era <- readRDS(era_path) %>%
filter(ts >= h_start & ts <= h_end)
sim <- ccsm4$sped
obs <- era$sped_adj
# historical quantile mapping
qmap_obj <- qMapWind(obs, sim, ret.deltas = TRUE)
sim_adj <- qmap_obj$sim_adj
sim_adj[sim_adj < 1] <- 0
ccsm4$sped_adj <- sim_adj
# save data
ccsm4_adj_path <- file.path(ccsm4_adj_dir,
paste0(stid, "_ccsm4h_adj.Rds"))
saveRDS(ccsm4, ccsm4_adj_path)
ccsm4 <- readRDS(ccsm4f_paths[i])
# just check to make sure same station
stid2 <- ccsm4$stid[1]
if(stid2 != stid){print("shit stations don't match");break}
sim <- ccsm4$sped
# future quantile mapping
sim_adj <- qMapWind(sim = sim, use.deltas = qmap_obj$deltas)
sim_adj[sim_adj < 1] <- 0
ccsm4$sped_adj <- sim_adj
# save data
ccsm4_adj_path <- file.path(ccsm4_adj_dir,
paste0(stid, "_ccsm4f_adj.Rds"))
saveRDS(ccsm4, ccsm4_adj_path)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Save CSVs -----------------------------------------------------------------
# ERA dirs
era_adj_dir <- file.path(datadir, "era_stations_adj")
era_adj_csv_dir <- file.path(datadir, "era_stations_adj_csv")
# era paths
era_adj_paths <- list.files(era_adj_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_adj_paths),
format = " Creating ERA CSVs [:bar] :percent")
for(i in seq_along(era_adj_paths)){
# read, filter to target dates, save CSVs
era <- readRDS(era_paths[i]) %>%
filter(ts < ymd("2015-01-02"))
stid <- era$stid[1]
era_path <- file.path(era_adj_csv_dir, paste0(stid, "_era_adj.csv"))
write.csv(era, era_path, row.names = FALSE)
pb$tick()
}
# CM3 dirs
cm3_adj_dir <- file.path(datadir, "CM3_stations_adj")
cm3_adj_csv_dir <- file.path(datadir, "CM3_stations_adj_csv")
# CM3 paths
cm3h_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3h", full.names = TRUE)
cm3f_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3f", full.names = TRUE)
# Loop through CM3 paths and save future/hist CSVs
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2015-01-01 23:59:59")
f_start <- ymd_hms("2065-01-01 00:00:00")
f_end <- ymd_hms("2100-01-01 23:59:59")
pb <- progress_bar$new(total = length(cm3h_adj_paths),
format = " Creating CSVs [:bar] :percent")
for(i in seq_along(cm3h_adj_paths)){
# read, filter to target dates, save CSVs
cm3h <- readRDS(cm3h_adj_paths[i])
cm3f <- readRDS(cm3f_adj_paths[i])
cm3 <- bind_rows(cm3h, cm3f)
cm3h <- cm3 %>% filter(ts >= h_start & ts <= h_end)
cm3f <- cm3 %>% filter(ts >= f_start & ts <= f_end)
stid <- cm3f$stid[1]
cm3h_path <- file.path(cm3_adj_csv_dir, paste0(stid, "_cm3h_adj.csv"))
cm3f_path <- file.path(cm3_adj_csv_dir, paste0(stid, "_cm3f_adj.csv"))
write.csv(cm3h, cm3h_path, row.names = FALSE)
write.csv(cm3f, cm3f_path, row.names = FALSE)
pb$tick()
}
# CCSM4 dirs
ccsm4_adj_dir <- file.path(datadir, "CCSM4_stations_adj")
ccsm4_adj_csv_dir <- file.path(datadir, "CCSM4_stations_adj_csv")
# CCSM4 paths
ccsm4h_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4f", full.names = TRUE)
# Loop through CCSM4 paths and save future/hist CSVs
h_start <- ymd_hms("1980-01-01 00:00:00")
h_end <- ymd_hms("2015-01-01 23:59:59")
f_start <- ymd_hms("2065-01-01 00:00:00")
f_end <- ymd_hms("2100-01-01 23:59:59")
pb <- progress_bar$new(total = length(ccsm4h_adj_paths),
format = " Creating CSVs [:bar] :percent")
for(i in seq_along(ccsm4h_adj_paths)){
# read, filter to target dates, save CSVs
ccsm4h <- readRDS(ccsm4h_adj_paths[i])
ccsm4f <- readRDS(ccsm4f_adj_paths[i])
ccsm4 <- bind_rows(ccsm4h, ccsm4f)
ccsm4h <- ccsm4 %>% filter(ts >= h_start & ts <= h_end)
ccsm4f <- ccsm4 %>% filter(ts >= f_start & ts <= f_end)
stid <- ccsm4f$stid[1]
ccsm4h_path <- file.path(ccsm4_adj_csv_dir, paste0(stid, "_ccsm4h_adj.csv"))
ccsm4f_path <- file.path(ccsm4_adj_csv_dir, paste0(stid, "_ccsm4f_adj.csv"))
write.csv(ccsm4h, ccsm4h_path, row.names = FALSE)
write.csv(ccsm4f, ccsm4f_path, row.names = FALSE)
pb$tick()
}
#------------------------------------------------------------------------------
#-- Generate ECDFs ------------------------------------------------------------
# plot and save ECDF comparisons
# ERA-Interim
era_adj_paths <- list.files(era_adj_dir, full.names = TRUE)
pb <- progress_bar$new(total = length(era_adj_paths),
format = " Plotting ECDFs from ERA Adjustment [:bar] :percent")
for(i in seq_along(era_adj_paths)){
era <- readRDS(era_adj_paths[i])
stid <- era$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- era$sped
sim_adj <- era$sped_adj
ecdf_path <- file.path(figdir, "era_adj_ecdfs", paste0(stid, "_era.png"))
sim_samp <- sample(length(sim), 100000)
n <- length(obs)
if(n > 100000){
obs_samp <- sample(n, 100000)
} else {obs_samp <- 1:n}
p1 <- ggECDF_compare(obs[obs_samp],
sim[sim_samp],
sim_adj[sim_samp], p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
# GFDL CM3 i = 27
cm3h_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3h", full.names = TRUE)
cm3f_adj_paths <- list.files(cm3_adj_dir, pattern = "cm3f", full.names = TRUE)
pb <- progress_bar$new(total = length(cm3h_adj_paths),
format = " Plotting ECDFs from CM3 Adjustment [:bar] :percent")
for(i in seq_along(cm3h_adj_paths)){
# historical
cm3 <- readRDS(cm3h_adj_paths[i])
stid <- cm3$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- cm3$sped
sim_adj <- cm3$sped_adj
ecdf_path <- file.path(figdir, "cm3_adj_ecdfs", paste0(stid, "_cm3h.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
# future
cm3 <- readRDS(cm3f_adj_paths[i])
obs <- asos$sped_adj
sim <- cm3$sped
sim_adj <- cm3$sped_adj
ecdf_path <- file.path(figdir, "cm3_adj_ecdfs", paste0(stid, "_cm3f.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
# NCAR CCSM4
ccsm4h_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4h", full.names = TRUE)
ccsm4f_adj_paths <- list.files(ccsm4_adj_dir, pattern = "ccsm4f", full.names = TRUE)
pb <- progress_bar$new(total = length(ccsm4h_adj_paths),
format = " Plotting ECDFs from ccsm4 Adjustment [:bar] :percent")
for(i in seq_along(ccsm4h_adj_paths)){
# historical
ccsm4 <- readRDS(ccsm4h_adj_paths[i])
stid <- ccsm4$stid[1]
asos <- readRDS(file.path(asos_adj_dir, paste0(stid, ".Rds")))
obs <- asos$sped_adj
sim <- ccsm4$sped
sim_adj <- ccsm4$sped_adj
ecdf_path <- file.path(figdir, "ccsm4_adj_ecdfs", paste0(stid, "_ccsm4h.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
# future
ccsm4 <- readRDS(ccsm4f_adj_paths[i])
obs <- asos$sped_adj
sim <- ccsm4$sped
sim_adj <- ccsm4$sped_adj
ecdf_path <- file.path(figdir, "ccsm4_adj_ecdfs", paste0(stid, "_ccsm4f.png"))
p1 <- ggECDF_compare(obs, sim, sim_adj, p_title = stid)
ggsave(ecdf_path, p1, width = 6.82, height = 4.58)
pb$tick()
}
#------------------------------------------------------------------------------
|
# set working directory
#setwd("E:/PhD/Teaching/ENG203/Drone")
# define connection
ARDRONE_NAVDATA_PORT = 5554
ARDRONE_VIDEO_PORT = 5555
ARDRONE_COMMAND_PORT = 5556
droneIP <- "192.168.1.1"
# # options to create the connection
# # using socket write.socket(AR_cmd, AT(Cmd))
# AR_cmd <- make.socket(host = droneIP, ARDRONE_COMMAND_PORT)
# AR_nav <- make.socket(host = hostIP, ARDRONE_NAVDATA_PORT)
#
# # # using ncat
ncat <- function(msg_string){
ncatOpts <- "-u -vv -e"
ncatExec <- sprintf('"printf %s"', msg_string)
ncatArgs <- paste(ncatOpts, ncatExec, droneIP, ARDRONE_COMMAND_PORT )
message(ncatArgs)
# ncat -u -vv --sh-exec 'printf "AT*FTRIM=1\rAT*CONFIG=2,control:altitude_max,1000\rAT*REF=3,290718208\r"' 192.168.1.1 5556
system2("ncat", args=ncatArgs)
}
# initialise drone, set config default values, define constants.
default_speed <- 0.5
cmdCounter <- 0
maxHeight <- 2000
watchdogInterval <- 0.1
cmdInterval <- 0.03
emergencyCode <- "290717952"
landCode <- "290717696"
takeOffCode <- "290718208"
anim_moves <- c("turn_around", "turn_around_go_down", "flip_ahead", "flip_behind","flip_left", "flip_right")
anim_nums <- c(6,7,16:19)
anim_table <- data.frame(anim_moves, anim_nums)
# convert float to signed integer
f2i_table <- read.csv("Float_2_Int.csv")
colnames(f2i_table) <- c("Float_num", "Hex_num", "Signed_int")
f2i <- function(f) {
if (f>=(-1) & f<=1) {
return(f2i_table[f2i_table$Float_num==round(f,1),]$Signed_int)
} else return(f2i_table[f2i_table$Float_num==round(default_speed,1),]$Signed_int)
}
# define general AT command syntax
AT <- function(cmd, params_str){
if (missing(params_str)) msg <- sprintf("AT*%s=%i\\r",cmd, cmdCounter)
else msg <- sprintf("AT*%s=%i,%s\\r",cmd, cmdCounter, params_str)
assign("cmdCounter", cmdCounter+1, envir = .GlobalEnv)
return(msg)
}
# enter emergency mode
drone.emergency <- function() ncat(AT("REF",emergencyCode))
# take off (including horizontal calibration and set maximum height)
drone.take_off <- function(take_off_duration){
msg <- paste0(AT("FTRIM"),
AT("CONFIG", sprintf("control:altitude_max,%i",maxHeight)),
AT("REF",takeOffCode))
ncat(msg)
elapsed <- 0
while (elapsed<take_off_duration) {
# while takeoff is taking place, keep sending watchdog signal
Sys.sleep(watchdogInterval)
ncat(AT("COMWDG"))
# increment elapsed time
elapsed <- elapsed + watchdogInterval
}
message(sprintf("Drone taking off, waiting: %.2f seconds", take_off_duration))
}
# landing
drone.land <- function() {
ncat(AT("REF",landCode))
message("Drone landed safely (hopefuly...)")
}
# define drone movements commands
drone.hover <- function(speed){
return(AT("PCMD", "0,0,0,0,0"))
}
drone.up <- function(speed){
params <- paste(1,0,0,f2i(speed),0, sep=",")
return(AT("PCMD", params))
}
drone.down <- function(speed){
params <- paste(1,0,0,f2i(-speed),0, sep=",")
return(AT("PCMD", params))
}
drone.move_right <- function(speed){
params <- paste(1,f2i(speed),0,0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_left <- function(speed){
params <- paste(1,f2i(-speed),0,0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_forward <- function(speed){
params <- paste(1,0,f2i(-speed),0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_back <- function(speed){
params <- paste(1,0,f2i(speed),0,0, sep=",")
return(AT("PCMD", params))
}
drone.rotate_right <- function(speed){
params <- paste(1,0,0,0,f2i(speed), sep=",")
return(AT("PCMD", params))
}
drone.rotate_left <- function(speed){
params <- paste(1,0,0,0, f2i(-speed),sep=",")
return(AT("PCMD", params))
}
# flight animation
drone.anim <- function(anim, duration){
anim_code <- anim_table[anim_table$anim_moves==anim,]$anim_nums
if (missing(duration)) msg <- AT("ANIM", anim_code)
else msg <- AT("ANIM", paste(anim_code, duration, sep=","))
# message(msg)
ncat(msg)
elapsed <- 0
while (elapsed<duration) {
# while animation is performing, keep sending watchdog signal
Sys.sleep(watchdogInterval)
ncat(AT("COMWDG"))
# message(sprintf("Performing <%s> animation, time elapsed: %.2f", anim, elapsed))
# increment elapsed time
elapsed <- elapsed + watchdogInterval
}
message(sprintf("Performing <%s> animation, duration: %.2f seconds", anim, elapsed))
}
# flight action command
drone.do <- function(action, duration, speed){
if (missing(speed)) speed <- default_speed
elapsed <- 0
drone_function <- paste("drone",action, sep=".")
while (elapsed<duration) {
# using ncat
msg <- get(drone_function)(speed)
ncat(msg)
# wait the defined ms before resending the command
Sys.sleep(cmdInterval)
elapsed <- elapsed + cmdInterval
}
message(sprintf("Drone movement <%s> performed for %.2f seconds", action, duration))
}
# start flight sequence
drone_flight <- function(){
# create the connection
drone.take_off(1)
drone.do("hover",2)
# drone.do("move_forward", 5)
# drone.do("hover",3)
# drone.do("move_right", 3)
# drone.do("hover",3)
# drone.do("rotate_right", 5)
# drone.do("up",2)
# drone.do("down",1)
# drone.anim("turn_around", 3)
drone.land()
}
# run flight
drone_flight()
| /bin/drone_path_ncat.R | no_license | IdoBar/AR.Drone.R.API | R | false | false | 5,228 | r | # set working directory
#setwd("E:/PhD/Teaching/ENG203/Drone")
# define connection
ARDRONE_NAVDATA_PORT = 5554
ARDRONE_VIDEO_PORT = 5555
ARDRONE_COMMAND_PORT = 5556
droneIP <- "192.168.1.1"
# # options to create the connection
# # using socket write.socket(AR_cmd, AT(Cmd))
# AR_cmd <- make.socket(host = droneIP, ARDRONE_COMMAND_PORT)
# AR_nav <- make.socket(host = hostIP, ARDRONE_NAVDATA_PORT)
#
# # # using ncat
ncat <- function(msg_string){
ncatOpts <- "-u -vv -e"
ncatExec <- sprintf('"printf %s"', msg_string)
ncatArgs <- paste(ncatOpts, ncatExec, droneIP, ARDRONE_COMMAND_PORT )
message(ncatArgs)
# ncat -u -vv --sh-exec 'printf "AT*FTRIM=1\rAT*CONFIG=2,control:altitude_max,1000\rAT*REF=3,290718208\r"' 192.168.1.1 5556
system2("ncat", args=ncatArgs)
}
# initialise drone, set config default values, define constants.
default_speed <- 0.5
cmdCounter <- 0
maxHeight <- 2000
watchdogInterval <- 0.1
cmdInterval <- 0.03
emergencyCode <- "290717952"
landCode <- "290717696"
takeOffCode <- "290718208"
anim_moves <- c("turn_around", "turn_around_go_down", "flip_ahead", "flip_behind","flip_left", "flip_right")
anim_nums <- c(6,7,16:19)
anim_table <- data.frame(anim_moves, anim_nums)
# convert float to signed integer
f2i_table <- read.csv("Float_2_Int.csv")
colnames(f2i_table) <- c("Float_num", "Hex_num", "Signed_int")
f2i <- function(f) {
if (f>=(-1) & f<=1) {
return(f2i_table[f2i_table$Float_num==round(f,1),]$Signed_int)
} else return(f2i_table[f2i_table$Float_num==round(default_speed,1),]$Signed_int)
}
# define general AT command syntax
AT <- function(cmd, params_str){
if (missing(params_str)) msg <- sprintf("AT*%s=%i\\r",cmd, cmdCounter)
else msg <- sprintf("AT*%s=%i,%s\\r",cmd, cmdCounter, params_str)
assign("cmdCounter", cmdCounter+1, envir = .GlobalEnv)
return(msg)
}
# enter emergency mode
drone.emergency <- function() ncat(AT("REF",emergencyCode))
# take off (including horizontal calibration and set maximum height)
drone.take_off <- function(take_off_duration){
msg <- paste0(AT("FTRIM"),
AT("CONFIG", sprintf("control:altitude_max,%i",maxHeight)),
AT("REF",takeOffCode))
ncat(msg)
elapsed <- 0
while (elapsed<take_off_duration) {
# while takeoff is taking place, keep sending watchdog signal
Sys.sleep(watchdogInterval)
ncat(AT("COMWDG"))
# increment elapsed time
elapsed <- elapsed + watchdogInterval
}
message(sprintf("Drone taking off, waiting: %.2f seconds", take_off_duration))
}
# landing
drone.land <- function() {
ncat(AT("REF",landCode))
message("Drone landed safely (hopefuly...)")
}
# define drone movements commands
drone.hover <- function(speed){
return(AT("PCMD", "0,0,0,0,0"))
}
drone.up <- function(speed){
params <- paste(1,0,0,f2i(speed),0, sep=",")
return(AT("PCMD", params))
}
drone.down <- function(speed){
params <- paste(1,0,0,f2i(-speed),0, sep=",")
return(AT("PCMD", params))
}
drone.move_right <- function(speed){
params <- paste(1,f2i(speed),0,0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_left <- function(speed){
params <- paste(1,f2i(-speed),0,0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_forward <- function(speed){
params <- paste(1,0,f2i(-speed),0,0, sep=",")
return(AT("PCMD", params))
}
drone.move_back <- function(speed){
params <- paste(1,0,f2i(speed),0,0, sep=",")
return(AT("PCMD", params))
}
drone.rotate_right <- function(speed){
params <- paste(1,0,0,0,f2i(speed), sep=",")
return(AT("PCMD", params))
}
drone.rotate_left <- function(speed){
params <- paste(1,0,0,0, f2i(-speed),sep=",")
return(AT("PCMD", params))
}
# flight animation
drone.anim <- function(anim, duration){
anim_code <- anim_table[anim_table$anim_moves==anim,]$anim_nums
if (missing(duration)) msg <- AT("ANIM", anim_code)
else msg <- AT("ANIM", paste(anim_code, duration, sep=","))
# message(msg)
ncat(msg)
elapsed <- 0
while (elapsed<duration) {
# while animation is performing, keep sending watchdog signal
Sys.sleep(watchdogInterval)
ncat(AT("COMWDG"))
# message(sprintf("Performing <%s> animation, time elapsed: %.2f", anim, elapsed))
# increment elapsed time
elapsed <- elapsed + watchdogInterval
}
message(sprintf("Performing <%s> animation, duration: %.2f seconds", anim, elapsed))
}
# flight action command
drone.do <- function(action, duration, speed){
if (missing(speed)) speed <- default_speed
elapsed <- 0
drone_function <- paste("drone",action, sep=".")
while (elapsed<duration) {
# using ncat
msg <- get(drone_function)(speed)
ncat(msg)
# wait the defined ms before resending the command
Sys.sleep(cmdInterval)
elapsed <- elapsed + cmdInterval
}
message(sprintf("Drone movement <%s> performed for %.2f seconds", action, duration))
}
# start flight sequence
drone_flight <- function(){
# create the connection
drone.take_off(1)
drone.do("hover",2)
# drone.do("move_forward", 5)
# drone.do("hover",3)
# drone.do("move_right", 3)
# drone.do("hover",3)
# drone.do("rotate_right", 5)
# drone.do("up",2)
# drone.do("down",1)
# drone.anim("turn_around", 3)
drone.land()
}
# run flight
drone_flight()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getExpressionLevel.R
\name{getExpressionLevel}
\alias{getExpressionLevel}
\title{getExpressionLevel function}
\usage{
getExpressionLevel(host, expressionLevelId)
}
\arguments{
\item{host}{URL of GA4GH API data server.}
\item{expressionLevelId}{ID of the expression level.}
}
\value{
\code{\link{DataFrame}} object.
}
\description{
Get an expression level by its ID.
}
\details{
This function requests \code{GET host/expressionlevels/expressionLevelId}.
}
\examples{
host <- "http://1kgenomes.ga4gh.org/"
\dontrun{
datasetId <- searchDatasets(host, nrows = 1)$id
rnaQuantificationSetId <- searchRnaQuantificationSets(host, datasetId, nrow = 1)$id
rnaQuantificationId <- searchRnaQuantifications(host, rnaQuantificationSetId, nrows = 1)$id
expressionLevelId <- searchExpressionLevels(host, rnaQuantificationId, nrows = 1)$id
getExpressionLevel(host, expressionLevelId)
}
}
\references{
\href{https://ga4gh-schemas.readthedocs.io/en/latest/schemas/rna_quantification_service.proto.html#GetExpressionLevel}{Official documentation}.
}
\seealso{
\code{\link{DataFrame}}, \code{\link{searchExpressionLevels}}
}
| /man/getExpressionLevel.Rd | no_license | labbcb/GA4GHclient | R | false | true | 1,183 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getExpressionLevel.R
\name{getExpressionLevel}
\alias{getExpressionLevel}
\title{getExpressionLevel function}
\usage{
getExpressionLevel(host, expressionLevelId)
}
\arguments{
\item{host}{URL of GA4GH API data server.}
\item{expressionLevelId}{ID of the expression level.}
}
\value{
\code{\link{DataFrame}} object.
}
\description{
Get an expression level by its ID.
}
\details{
This function requests \code{GET host/expressionlevels/expressionLevelId}.
}
\examples{
host <- "http://1kgenomes.ga4gh.org/"
\dontrun{
datasetId <- searchDatasets(host, nrows = 1)$id
rnaQuantificationSetId <- searchRnaQuantificationSets(host, datasetId, nrow = 1)$id
rnaQuantificationId <- searchRnaQuantifications(host, rnaQuantificationSetId, nrows = 1)$id
expressionLevelId <- searchExpressionLevels(host, rnaQuantificationId, nrows = 1)$id
getExpressionLevel(host, expressionLevelId)
}
}
\references{
\href{https://ga4gh-schemas.readthedocs.io/en/latest/schemas/rna_quantification_service.proto.html#GetExpressionLevel}{Official documentation}.
}
\seealso{
\code{\link{DataFrame}}, \code{\link{searchExpressionLevels}}
}
|
###################################################################
# UNIVERSIDADE FEDERAL DE MINAS GERAIS
# BACHARELADO EM ENGENHARIA DE SISTEMAS
# DISCIPLINA: ELE088 Teoria da Decisao
# PROFESSOR: Lucas de Souza Batista
# ALUNOs: Ariel Domingues, Hernane Braga e Nikolas Fantoni
# DATA: Outubro/2019
# TC2 - Otimizacao multi-objetivo do PCV
# Estruturas de vizinhanca para serem aplicadas no algoritmo Simulated Annealing (SA).
# Considerando os dados de custo como duas matrizes quadradas nxn e a solucao como um data.frame nx3
# onde as tres colunas se referem a 'destino', 'custotempo' e 'custodistancia', respectivamente.
# Ou seja, a linha x do data.frame se refere a cidade x, sendo a primeira coluna a cidade destino,
# a segunda o tempo para de ir ate ela e a terceira a distancia.
#############################################################################################
# Funcao de nivel de perturbacao 1 ou 4, dependendo do numero de trocas (num_trocas).
# As letras 'SD' equivalem a 'Simples' e 'Dupla'. A(s) cidade(s) eh(sao) escolhida(s)
# aleatoriamente e troca(m) de lugar com seu vizinho da frente. Ou seja, se a ordem do caminho
# for A > B > C > D > E, e B eh a cidade escolhida, entao B troca com C e a nova ordem passa a
# ser A > C > B > D > E. O numero de trocas definira quantas trocas desse tipo serao feitas
# de uma vez (1 ou 2).
TrocaVizinhaSD <- function(solucao_atual, dados_tempo, dados_distancia, num_trocas){
nova_solucao <- solucao_atual
for (i in 1:num_trocas) {
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# As trocas necessarias sao feitas com os novos custos extraidos das matrizes de custos
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1,
dados_tempo[vizinho_anterior, prox_vizinho1],
dados_distancia[vizinho_anterior, prox_vizinho1])
solucao_atual <- nova_solucao
}
return(nova_solucao)
}
##################################################################################################
# Funcao de nivel de perturbacao 2 ou 6, dependendo do numero de deslocamentos (num_deslocs).
# As letras 'SD' referem-se a 'Simples' e 'Duplo', assim como a funcao anterior. A(s) cidade(s)
# eh(sao) escolhida(s) aleatoriamente e sofrem um deslocamento para frente de 2 a 5 cidades
# (distribuicao uniforme). Ou seja, se a ordem do caminho for A > B > C > D > E > F, e B
# eh a cidade escolhida, entao B eh deslocada e a ordem passa a ser A > C > D > E > F > B,
# se o deslocamento for de 4 cidades por exemplo. O numero de deslocamentos definira quantos serao
# feitos de uma vez (1 ou 2).
DeslocamentoSD <- function(solucao_atual, dados_tempo, dados_distancia, num_deslocs){
nova_solucao <- solucao_atual
for (i in 1:num_deslocs) {
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
# A cidade escolhida eh retirada do caminho.
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1, dados_tempo[vizinho_anterior, prox_vizinho1], dados_distancia[vizinho_anterior, prox_vizinho1])
# Ela sera deslocada 2 a 5 posicoes para frente. O 'for' percorre o caminho para isso.
delta_desloc <- sample(2:5, 1)
for (j in 1:(delta_desloc-1)) {
prox_vizinho1 <- solucao_atual$destino[prox_vizinho1]
}
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# A cidade eh inserida apos o deslocamento.
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
solucao_atual <- nova_solucao
}
return(nova_solucao)
}
###################################################################################################
# Funcao de nivel de perturbacao 3. Uma cidade eh escolhida aleatoriamente e tem o trecho subsequente
# de 2 a 15 cidades invertido. Ou seja, de o caminho for A > B > C > D > E > F, e B eh a cidade
# escolhida, entao o caminho seguinte a B eh invertido de forma a se tornar A > E > D > C > B > F,
# se o trecho for de 4 cidades por exemplo.
Inversao <- function(solucao_atual, dados_tempo, dados_distancia){
nova_solucao <- solucao_atual
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# As cidades do trecho a ser invertido passam a apontar para as cidades anteriores a elas.
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
# O for realiza o percorrimento do trecho.
delta_intervalo <- sample(2:15, 1)
for (j in 1:(delta_intervalo-2)) {
nova_solucao[prox_vizinho2,] <- c(prox_vizinho1,
dados_tempo[prox_vizinho2, prox_vizinho1],
dados_distancia[prox_vizinho2, prox_vizinho1])
prox_vizinho1 <- prox_vizinho2
prox_vizinho2 <- solucao_atual$destino[prox_vizinho2]
}
# As arestas das extremidades do trecho sao unidas para se fechar o caminho novamente.
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1,
dados_tempo[vizinho_anterior, prox_vizinho1],
dados_distancia[vizinho_anterior, prox_vizinho1])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
return(nova_solucao)
}
###################################################################################################
# Funcao de nivel de perturbacao 5. Uma cidade eh escolhida aleatoriamente e eh trocada de lugar
# com outra cidade a sua frente com um intervalo de 2 a 7 cidades entre elas. Ou seja, se o caminho
# for A > B > C > D > E > F > G, e B eh a cidade escolhida, entao ocorre a troca e o novo caminho passa
# a ser A > G > C > D > E > F > B, se o intervalo for de 4 cidades por exemplo.
TrocaIntervalada <- function(solucao_atual, dados_tempo, dados_distancia){
nova_solucao <- solucao_atual
cidade1 <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente.
# Armazena-se os vizinhos da cidade 1 para receberem a cidade 2 trocada posteriormente.
vizinho_anterior_cidade1 <- which(solucao_atual$destino == cidade1)
prox_vizinho_cidade1 <- solucao_atual$destino[cidade1]
vizinho_anterior_cidade2 <- prox_vizinho_cidade1
# O intervalo eh percorrido pelo for.
delta_intervalo <- sample(2:7, 1)
for (j in 1:(delta_intervalo-1)) {
vizinho_anterior_cidade2 <- solucao_atual$destino[vizinho_anterior_cidade2]
}
cidade2 <- solucao_atual$destino[vizinho_anterior_cidade2]
prox_vizinho_cidade2 <- solucao_atual$destino[cidade2]
# Os vizinhos da cidade 2 foram capturados e portanto ocorre a troca de fato das cidades no caminho.
nova_solucao[vizinho_anterior_cidade1,] <- c(cidade2,
dados_tempo[vizinho_anterior_cidade1, cidade2],
dados_distancia[vizinho_anterior_cidade1, cidade2])
nova_solucao[cidade2,] <- c(prox_vizinho_cidade1,
dados_tempo[cidade2, prox_vizinho_cidade1],
dados_distancia[cidade2, prox_vizinho_cidade1])
nova_solucao[vizinho_anterior_cidade2,] <- c(cidade1,
dados_tempo[vizinho_anterior_cidade2, cidade1],
dados_distancia[vizinho_anterior_cidade2, cidade1])
nova_solucao[cidade1,] <- c(prox_vizinho_cidade2,
dados_tempo[cidade1, prox_vizinho_cidade2],
dados_distancia[cidade1, prox_vizinho_cidade2])
return(nova_solucao)
}
###########################################################################################################
# Funcao que escolhera qual nivel de perturbacao utilizar. Para chama-la, o parametro nivel deve ser passado
# de 1 a 6, em ordem crescente de perturbacao. Passa-se uma solucao e os dados de custo de tempo e distancia
# como parametros e, dado o nivel, ela chama uma das estruturas de vizinhanca, que obtem uma nova solucao.
Vizinhanca <- function(solucao_atual, dados_tempo, dados_distancia, nivel){
switch (nivel,
TrocaVizinhaSD(solucao_atual, dados_tempo, dados_distancia, 1), # nivel 1 - Troca vizinha simples
DeslocamentoSD(solucao_atual, dados_tempo, dados_distancia, 1), # nivel 2 - Deslocamento simples
Inversao(solucao_atual, dados_tempo, dados_distancia), # nivel 3
TrocaVizinhaSD(solucao_atual, dados_tempo, dados_distancia, 2), # nivel 4 - Troca vizinha dupla
TrocaIntervalada(solucao_atual, dados_tempo, dados_distancia), # nivel 5
DeslocamentoSD(solucao_atual, dados_tempo, dados_distancia, 2) # nivel 6 - Deslocamento duplo
)
}
| /TC2/vizinhanca2.R | no_license | nikolasfantoni/TD | R | false | false | 10,722 | r | ###################################################################
# UNIVERSIDADE FEDERAL DE MINAS GERAIS
# BACHARELADO EM ENGENHARIA DE SISTEMAS
# DISCIPLINA: ELE088 Teoria da Decisao
# PROFESSOR: Lucas de Souza Batista
# ALUNOs: Ariel Domingues, Hernane Braga e Nikolas Fantoni
# DATA: Outubro/2019
# TC2 - Otimizacao multi-objetivo do PCV
# Estruturas de vizinhanca para serem aplicadas no algoritmo Simulated Annealing (SA).
# Considerando os dados de custo como duas matrizes quadradas nxn e a solucao como um data.frame nx3
# onde as tres colunas se referem a 'destino', 'custotempo' e 'custodistancia', respectivamente.
# Ou seja, a linha x do data.frame se refere a cidade x, sendo a primeira coluna a cidade destino,
# a segunda o tempo para de ir ate ela e a terceira a distancia.
#############################################################################################
# Funcao de nivel de perturbacao 1 ou 4, dependendo do numero de trocas (num_trocas).
# As letras 'SD' equivalem a 'Simples' e 'Dupla'. A(s) cidade(s) eh(sao) escolhida(s)
# aleatoriamente e troca(m) de lugar com seu vizinho da frente. Ou seja, se a ordem do caminho
# for A > B > C > D > E, e B eh a cidade escolhida, entao B troca com C e a nova ordem passa a
# ser A > C > B > D > E. O numero de trocas definira quantas trocas desse tipo serao feitas
# de uma vez (1 ou 2).
TrocaVizinhaSD <- function(solucao_atual, dados_tempo, dados_distancia, num_trocas){
nova_solucao <- solucao_atual
for (i in 1:num_trocas) {
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# As trocas necessarias sao feitas com os novos custos extraidos das matrizes de custos
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1,
dados_tempo[vizinho_anterior, prox_vizinho1],
dados_distancia[vizinho_anterior, prox_vizinho1])
solucao_atual <- nova_solucao
}
return(nova_solucao)
}
##################################################################################################
# Funcao de nivel de perturbacao 2 ou 6, dependendo do numero de deslocamentos (num_deslocs).
# As letras 'SD' referem-se a 'Simples' e 'Duplo', assim como a funcao anterior. A(s) cidade(s)
# eh(sao) escolhida(s) aleatoriamente e sofrem um deslocamento para frente de 2 a 5 cidades
# (distribuicao uniforme). Ou seja, se a ordem do caminho for A > B > C > D > E > F, e B
# eh a cidade escolhida, entao B eh deslocada e a ordem passa a ser A > C > D > E > F > B,
# se o deslocamento for de 4 cidades por exemplo. O numero de deslocamentos definira quantos serao
# feitos de uma vez (1 ou 2).
DeslocamentoSD <- function(solucao_atual, dados_tempo, dados_distancia, num_deslocs){
nova_solucao <- solucao_atual
for (i in 1:num_deslocs) {
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
# A cidade escolhida eh retirada do caminho.
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1, dados_tempo[vizinho_anterior, prox_vizinho1], dados_distancia[vizinho_anterior, prox_vizinho1])
# Ela sera deslocada 2 a 5 posicoes para frente. O 'for' percorre o caminho para isso.
delta_desloc <- sample(2:5, 1)
for (j in 1:(delta_desloc-1)) {
prox_vizinho1 <- solucao_atual$destino[prox_vizinho1]
}
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# A cidade eh inserida apos o deslocamento.
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
solucao_atual <- nova_solucao
}
return(nova_solucao)
}
###################################################################################################
# Funcao de nivel de perturbacao 3. Uma cidade eh escolhida aleatoriamente e tem o trecho subsequente
# de 2 a 15 cidades invertido. Ou seja, de o caminho for A > B > C > D > E > F, e B eh a cidade
# escolhida, entao o caminho seguinte a B eh invertido de forma a se tornar A > E > D > C > B > F,
# se o trecho for de 4 cidades por exemplo.
Inversao <- function(solucao_atual, dados_tempo, dados_distancia){
nova_solucao <- solucao_atual
cidade <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente
vizinho_anterior <- which(solucao_atual$destino == cidade)
prox_vizinho1 <- solucao_atual$destino[cidade]
prox_vizinho2 <- solucao_atual$destino[prox_vizinho1]
# As cidades do trecho a ser invertido passam a apontar para as cidades anteriores a elas.
nova_solucao[prox_vizinho1,] <- c(cidade,
dados_tempo[prox_vizinho1, cidade],
dados_distancia[prox_vizinho1, cidade])
# O for realiza o percorrimento do trecho.
delta_intervalo <- sample(2:15, 1)
for (j in 1:(delta_intervalo-2)) {
nova_solucao[prox_vizinho2,] <- c(prox_vizinho1,
dados_tempo[prox_vizinho2, prox_vizinho1],
dados_distancia[prox_vizinho2, prox_vizinho1])
prox_vizinho1 <- prox_vizinho2
prox_vizinho2 <- solucao_atual$destino[prox_vizinho2]
}
# As arestas das extremidades do trecho sao unidas para se fechar o caminho novamente.
nova_solucao[vizinho_anterior,] <- c(prox_vizinho1,
dados_tempo[vizinho_anterior, prox_vizinho1],
dados_distancia[vizinho_anterior, prox_vizinho1])
nova_solucao[cidade,] <- c(prox_vizinho2,
dados_tempo[cidade, prox_vizinho2],
dados_distancia[cidade, prox_vizinho2])
return(nova_solucao)
}
###################################################################################################
# Funcao de nivel de perturbacao 5. Uma cidade eh escolhida aleatoriamente e eh trocada de lugar
# com outra cidade a sua frente com um intervalo de 2 a 7 cidades entre elas. Ou seja, se o caminho
# for A > B > C > D > E > F > G, e B eh a cidade escolhida, entao ocorre a troca e o novo caminho passa
# a ser A > G > C > D > E > F > B, se o intervalo for de 4 cidades por exemplo.
TrocaIntervalada <- function(solucao_atual, dados_tempo, dados_distancia){
nova_solucao <- solucao_atual
cidade1 <- sample(dim(solucao_atual)[1], 1) # Escolhe-se uma cidade aleatoriamente.
# Armazena-se os vizinhos da cidade 1 para receberem a cidade 2 trocada posteriormente.
vizinho_anterior_cidade1 <- which(solucao_atual$destino == cidade1)
prox_vizinho_cidade1 <- solucao_atual$destino[cidade1]
vizinho_anterior_cidade2 <- prox_vizinho_cidade1
# O intervalo eh percorrido pelo for.
delta_intervalo <- sample(2:7, 1)
for (j in 1:(delta_intervalo-1)) {
vizinho_anterior_cidade2 <- solucao_atual$destino[vizinho_anterior_cidade2]
}
cidade2 <- solucao_atual$destino[vizinho_anterior_cidade2]
prox_vizinho_cidade2 <- solucao_atual$destino[cidade2]
# Os vizinhos da cidade 2 foram capturados e portanto ocorre a troca de fato das cidades no caminho.
nova_solucao[vizinho_anterior_cidade1,] <- c(cidade2,
dados_tempo[vizinho_anterior_cidade1, cidade2],
dados_distancia[vizinho_anterior_cidade1, cidade2])
nova_solucao[cidade2,] <- c(prox_vizinho_cidade1,
dados_tempo[cidade2, prox_vizinho_cidade1],
dados_distancia[cidade2, prox_vizinho_cidade1])
nova_solucao[vizinho_anterior_cidade2,] <- c(cidade1,
dados_tempo[vizinho_anterior_cidade2, cidade1],
dados_distancia[vizinho_anterior_cidade2, cidade1])
nova_solucao[cidade1,] <- c(prox_vizinho_cidade2,
dados_tempo[cidade1, prox_vizinho_cidade2],
dados_distancia[cidade1, prox_vizinho_cidade2])
return(nova_solucao)
}
###########################################################################################################
# Funcao que escolhera qual nivel de perturbacao utilizar. Para chama-la, o parametro nivel deve ser passado
# de 1 a 6, em ordem crescente de perturbacao. Passa-se uma solucao e os dados de custo de tempo e distancia
# como parametros e, dado o nivel, ela chama uma das estruturas de vizinhanca, que obtem uma nova solucao.
Vizinhanca <- function(solucao_atual, dados_tempo, dados_distancia, nivel){
switch (nivel,
TrocaVizinhaSD(solucao_atual, dados_tempo, dados_distancia, 1), # nivel 1 - Troca vizinha simples
DeslocamentoSD(solucao_atual, dados_tempo, dados_distancia, 1), # nivel 2 - Deslocamento simples
Inversao(solucao_atual, dados_tempo, dados_distancia), # nivel 3
TrocaVizinhaSD(solucao_atual, dados_tempo, dados_distancia, 2), # nivel 4 - Troca vizinha dupla
TrocaIntervalada(solucao_atual, dados_tempo, dados_distancia), # nivel 5
DeslocamentoSD(solucao_atual, dados_tempo, dados_distancia, 2) # nivel 6 - Deslocamento duplo
)
}
|
# This script processes dataset of protein-protein inteacrtions related to brain ageing (PBA)
## Create the folder where current results will be written
resdir <- paste("~/absb/results", "pba", sep = "/")
dir.create(file.path(resdir), showWarnings = FALSE, recursive = TRUE)
# Set created directory as working dirrectory
setwd(resdir)
# Read in the data
pba_ppi.hs <- read.table(file = "~/absb/data/pba/PBA_PPI_HS.txt", sep = "\t", header = T, stringsAsFactors = F)
# Data size
dim(pba_ppi.hs ) #2032 5
# Convert pba_ppi.hs protein names to ENSG and bing them to the dataframe.
length(unique(c(pba_ppi.hs[,1], pba_ppi.hs[,2])))#1250
pba_pr <- unique(c(pba_ppi.hs[,1], pba_ppi.hs[,2]))
# Convert entrezgene IDs to ENSG IDs
library(biomaRt)
mart.pr <- useMart("ENSEMBL_MART_ENSEMBL", "hsapiens_gene_ensembl", host = "ensembl.org")
pba_entrez2ensg <- getBM(attributes = c("entrezgene","ensembl_gene_id"),filters=c("entrezgene"), values = pba_pr, mart = mart.pr)
dim(pba_entrez2ensg )#[1] 1265 2
colnames(pba_entrez2ensg)[]<-c(".id", "Target")
# Merge for the first interactor
dim(merge(pba_ppi.hs, pba2ensg, by.x = "entrez.p1", by.y = ".id", all = F))
pba_ppi.hs.p1 = merge(pba_ppi.hs, pba_entrez2ensg, by.x = "entrez.p1", by.y = ".id", all = F)
pba_ppi.hs.p1p2 <- merge(pba_ppi.hs.p1, pba_entrez2ensg, by.x = "entrez.p2", by.y = ".id", all = F)
pba_ppi.hs.ensg <- pba_ppi.hs.p1p2[, c(6,7,5)]
save(pba_ppi.hs.p1p2, file = "pba_ppi.hs.p1p2.RData")#file describes interactions where both partners are proteins
# Bind additional columns
pba_ppi.hs_int <- cbind(pba_ppi.hs.ensg, interaction_type = "PPI")
pba_ppi.hs_int <- cbind(pba_ppi.hs_int, data_source = "PBA")# evidence code for Hybrigenics experimental interactions
colnames(pba_ppi.hs_int)[c(1,2,3)] <- c("ensg1","ensg2","score")
pba_int<- pba_ppi.hs_int
# Remove duplicates
pba_int <- pba_int[!duplicated(pba_int),]
dim(pba_int)
df2string<-function(df){
i <- sapply(df, is.factor)
df[i] <- lapply(df[i], as.character)
df[,3]<-as.numeric(df[,3])
return (df)}
# PBA
pba_int <- df2string(pba_int)
# Structure
str(pba_int)
# Initial size
dim(pba_int) #1836 5
# Remove the duplicated undirrescted edges with the same score.
# For example ENSG1-ENSG2 0.5 and ENSG2-ENSG1 0.5
pba_int <- pba_int[!duplicated(data.frame(t(apply(pba_int[1:2], 1, sort)), pba_int$score)),]
# New size
dim(pba_int)# 1834 5
# Save the part of the integrated dataset related to interactions in HS.
save(pba_int, file = "pba_int.RData")
write.table(pba_int, file = "pba_int.txt", sep = "\t", quote = F, row.names = F)
| /scripts/pba/pba_int.R | no_license | esugis/absb | R | false | false | 2,576 | r | # This script processes dataset of protein-protein inteacrtions related to brain ageing (PBA)
## Create the folder where current results will be written
resdir <- paste("~/absb/results", "pba", sep = "/")
dir.create(file.path(resdir), showWarnings = FALSE, recursive = TRUE)
# Set created directory as working dirrectory
setwd(resdir)
# Read in the data
pba_ppi.hs <- read.table(file = "~/absb/data/pba/PBA_PPI_HS.txt", sep = "\t", header = T, stringsAsFactors = F)
# Data size
dim(pba_ppi.hs ) #2032 5
# Convert pba_ppi.hs protein names to ENSG and bing them to the dataframe.
length(unique(c(pba_ppi.hs[,1], pba_ppi.hs[,2])))#1250
pba_pr <- unique(c(pba_ppi.hs[,1], pba_ppi.hs[,2]))
# Convert entrezgene IDs to ENSG IDs
library(biomaRt)
mart.pr <- useMart("ENSEMBL_MART_ENSEMBL", "hsapiens_gene_ensembl", host = "ensembl.org")
pba_entrez2ensg <- getBM(attributes = c("entrezgene","ensembl_gene_id"),filters=c("entrezgene"), values = pba_pr, mart = mart.pr)
dim(pba_entrez2ensg )#[1] 1265 2
colnames(pba_entrez2ensg)[]<-c(".id", "Target")
# Merge for the first interactor
dim(merge(pba_ppi.hs, pba2ensg, by.x = "entrez.p1", by.y = ".id", all = F))
pba_ppi.hs.p1 = merge(pba_ppi.hs, pba_entrez2ensg, by.x = "entrez.p1", by.y = ".id", all = F)
pba_ppi.hs.p1p2 <- merge(pba_ppi.hs.p1, pba_entrez2ensg, by.x = "entrez.p2", by.y = ".id", all = F)
pba_ppi.hs.ensg <- pba_ppi.hs.p1p2[, c(6,7,5)]
save(pba_ppi.hs.p1p2, file = "pba_ppi.hs.p1p2.RData")#file describes interactions where both partners are proteins
# Bind additional columns
pba_ppi.hs_int <- cbind(pba_ppi.hs.ensg, interaction_type = "PPI")
pba_ppi.hs_int <- cbind(pba_ppi.hs_int, data_source = "PBA")# evidence code for Hybrigenics experimental interactions
colnames(pba_ppi.hs_int)[c(1,2,3)] <- c("ensg1","ensg2","score")
pba_int<- pba_ppi.hs_int
# Remove duplicates
pba_int <- pba_int[!duplicated(pba_int),]
dim(pba_int)
df2string<-function(df){
i <- sapply(df, is.factor)
df[i] <- lapply(df[i], as.character)
df[,3]<-as.numeric(df[,3])
return (df)}
# PBA
pba_int <- df2string(pba_int)
# Structure
str(pba_int)
# Initial size
dim(pba_int) #1836 5
# Remove the duplicated undirrescted edges with the same score.
# For example ENSG1-ENSG2 0.5 and ENSG2-ENSG1 0.5
pba_int <- pba_int[!duplicated(data.frame(t(apply(pba_int[1:2], 1, sort)), pba_int$score)),]
# New size
dim(pba_int)# 1834 5
# Save the part of the integrated dataset related to interactions in HS.
save(pba_int, file = "pba_int.RData")
write.table(pba_int, file = "pba_int.txt", sep = "\t", quote = F, row.names = F)
|
library(spdep)
### Name: aple.mc
### Title: Approximate profile-likelihood estimator (APLE) permutation test
### Aliases: aple.mc
### Keywords: spatial
### ** Examples
## No test:
if (require(rgdal, quietly=TRUE)) {
example(aple)
oldRNG <- RNGkind()
RNGkind("L'Ecuyer-CMRG")
set.seed(1L)
boot_out_ser <- aple.mc(as.vector(scale(wheat$yield_detrend, scale=FALSE)),
nb2listw(nbr12, style="W"), nsim=500)
plot(boot_out_ser)
boot_out_ser
library(parallel)
oldCores <- set.coresOption(NULL)
nc <- detectCores(logical=FALSE)
# set nc to 1L here
if (nc > 1L) nc <- 1L
invisible(set.coresOption(nc))
set.seed(1L)
if (!get.mcOption()) {
cl <- makeCluster(nc)
set.ClusterOption(cl)
} else{
mc.reset.stream()
}
boot_out_par <- aple.mc(as.vector(scale(wheat$yield_detrend, scale=FALSE)),
nb2listw(nbr12, style="W"), nsim=500)
if (!get.mcOption()) {
set.ClusterOption(NULL)
stopCluster(cl)
}
boot_out_par
invisible(set.coresOption(oldCores))
RNGkind(oldRNG[1], oldRNG[2])
}
## End(No test)
| /data/genthat_extracted_code/spdep/examples/aple.mc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,000 | r | library(spdep)
### Name: aple.mc
### Title: Approximate profile-likelihood estimator (APLE) permutation test
### Aliases: aple.mc
### Keywords: spatial
### ** Examples
## No test:
if (require(rgdal, quietly=TRUE)) {
example(aple)
oldRNG <- RNGkind()
RNGkind("L'Ecuyer-CMRG")
set.seed(1L)
boot_out_ser <- aple.mc(as.vector(scale(wheat$yield_detrend, scale=FALSE)),
nb2listw(nbr12, style="W"), nsim=500)
plot(boot_out_ser)
boot_out_ser
library(parallel)
oldCores <- set.coresOption(NULL)
nc <- detectCores(logical=FALSE)
# set nc to 1L here
if (nc > 1L) nc <- 1L
invisible(set.coresOption(nc))
set.seed(1L)
if (!get.mcOption()) {
cl <- makeCluster(nc)
set.ClusterOption(cl)
} else{
mc.reset.stream()
}
boot_out_par <- aple.mc(as.vector(scale(wheat$yield_detrend, scale=FALSE)),
nb2listw(nbr12, style="W"), nsim=500)
if (!get.mcOption()) {
set.ClusterOption(NULL)
stopCluster(cl)
}
boot_out_par
invisible(set.coresOption(oldCores))
RNGkind(oldRNG[1], oldRNG[2])
}
## End(No test)
|
#'Estimate animal abundance from actual data set
#'
#'Code as of 2 August 2013 is incomplete and not in a functional state.
#'
#'This function estimates animal abundance within the study area (grid)
#'by calculating density \eqn{\pi (z(x,y))} as a function of covariate for each grid cell.
#'
#'Calls to appropriate distribution (normal, lognormal, beta, uniform,
#'mixture of normals) in association with the parameters estimated by
#'the likelihood routine (\code{nupoint.env.fit}) are summed to produce estimate.
#'
#'@param fit.obj fitted object
#'@param truncation distance proportion (default 0.9) such that sightings beyond 0.9*max.r are deleted
#'
#'@return list containing abundance estimate within covered region and
#'abundance estimate for entire study area (assuming grid cells are unit square in area)
#'
#'@details Should your grid cell sizes not be unit square, then multiply the
#'value returned by this function by the grid cell size to produce
#'abundance estimate in the units appropriate for your study.
#'@author Eric Rexstad
#'
#'@references M.J. Cox, D.L. Borchers, D.A. Demer, G.R. Cutter, and A.S. Brierley. 2011. Estimating the density of Antarctic krill (Euphausia superba) from multi-beam echo-sounder observations using distance sampling methods. Journal of the Royal Statistical Society: Series C (Applied Statistics), 60(2):301-316.
#'
#'M.J. Cox, D.L. Borchers and N. Kelly. 2013. nupoint: An R package for density estimation from point transects in the presence of non-uniform animal density Methods in Ecology and Evolution 4(6):589-594
#'
#'Marques, T.A. , Buckland, S.T. , Borchers, D.L. , Tosh, D. and McDonald, R.A.
#'2010. Point transect sampling along linear features. Biometrics 66(4):1247-1255.
#'
#'@export
est.abundance.whales <- function(environ.sim.dat, trunc.prop=0.9) {
# nsim <-200
# popn <- numeric(nsim)
# for (k in 1:nsim) {
# environ.sim.dat<-nupoint.env.simulator(pars=c(60,10,50),
# z.mat=NULL,
# xlim=c(0,200),ylim=c(0,100),
# grid.resolution=1,grad.type='NORM',det.type='HNORM',
# observer.coords=c(100,0),nbr.targets=1000,
# environment.simulator.control=
# list(c(X=50,Y=10,sd=60),c(X=90,Y=0,sd=30)),
# mask.mat=NULL,mask.ang=0,plot=FALSE,
# perp.lines=NULL,n=NULL)
test <- truncate(trunc.prop=trunc.prop, sightings=environ.sim.dat$sighting.mat)
trunc.dist <- test$trunc.radius
# replace sightings inside fitted object with truncated sightings
environ.sim.dat[[1]] <- test$sightings
# parameter estimation
browser()
sim.norm.fit<-nupoint.env.fit(pars=environ.sim.dat$settings$pars,
z=environ.sim.dat$sighting.mat$z,
rd=environ.sim.dat$sighting.mat$r, # is it r or d (data or simulation)
dzdy=environ.sim.dat$sighting.mat$dzdy,
z.mat=environ.sim.dat$z.mat,
dzdy.mat=environ.sim.dat$zGradmat,
rd.mat=environ.sim.dat$rd.mat,
minz=min(environ.sim.dat$z.mat, na.rm=TRUE),
wx=environ.sim.dat$wx, #environ.sim.dat$settings$xlim[2],
wy=environ.sim.dat$wy, #environ.sim.dat$settings$ylim[2],
wz=environ.sim.dat$wz, #max(environ.sim.dat$z.mat),
grad.type=environ.sim.dat$settings$grad.type,
det.type=environ.sim.dat$settings$det.type,
n=NULL,lower.b=rep(1,length(environ.sim.dat$settings$pars))
,upper.b=rep(100,length(environ.sim.dat$settings$pars)))
# estimate P for HT
# truncate the grid at the truncation distance
new.rdmat <- environ.sim.dat$rd.mat
new.zmat <- environ.sim.dat$z.mat
new.zgrad <- environ.sim.dat$zGradmat
for (i in seq(1:dim(environ.sim.dat$rd.mat)[1])) {
for (j in seq(1:dim(environ.sim.dat$rd.mat)[2])) {
if (new.rdmat[i,j]>trunc.dist) {
new.rdmat[i,j] <- NA
new.zmat[i,j] <- NA
new.zgrad[i,j] <- NA
}
}
}
gradient.model <- environ.sim.dat$settings$grad.type
detection.model <- environ.sim.dat$settings$det.type
browser()
# following two lines need fixing for non-norm,hnorm combination
mat.g <- detectF(new.rdmat[!is.na(new.rdmat)], detection.model, sim.norm.fit$par[3])
mat.pi <- pi.z.f(gradient.model, pars=sim.norm.fit$par[1:2], z=new.zmat[!is.na(new.zmat)],
z.lim=c(min(new.zmat, na.rm=TRUE), max(new.zmat, na.rm=TRUE)))
# Abundance within truncation zone
Nhat.a <- dim(environ.sim.dat$sightings)[1]/sum(mat.g*mat.pi*abs(new.zgrad[!is.na(new.zgrad)])/(1*environ.sim.dat$settings$xlim[2]))
# Scale Nhat.a to entire study area by dividing by integral pi(x,y) in region a
divisor <- sum(mat.pi*abs(new.zgrad[!is.na(new.zgrad)])/(1*environ.sim.dat$settings$xlim[2]))
print(divisor)
Nhat.region <- Nhat.a / divisor
return(list(Nhat.covered=Nhat.a, Nhat.region=Nhat.region))
# popn[k] <- Nhat.region
}
| /R/est.abundance.whales.R | no_license | martinjamescox/nupoint | R | false | false | 5,271 | r | #'Estimate animal abundance from actual data set
#'
#'Code as of 2 August 2013 is incomplete and not in a functional state.
#'
#'This function estimates animal abundance within the study area (grid)
#'by calculating density \eqn{\pi (z(x,y))} as a function of covariate for each grid cell.
#'
#'Calls to appropriate distribution (normal, lognormal, beta, uniform,
#'mixture of normals) in association with the parameters estimated by
#'the likelihood routine (\code{nupoint.env.fit}) are summed to produce estimate.
#'
#'@param fit.obj fitted object
#'@param truncation distance proportion (default 0.9) such that sightings beyond 0.9*max.r are deleted
#'
#'@return list containing abundance estimate within covered region and
#'abundance estimate for entire study area (assuming grid cells are unit square in area)
#'
#'@details Should your grid cell sizes not be unit square, then multiply the
#'value returned by this function by the grid cell size to produce
#'abundance estimate in the units appropriate for your study.
#'@author Eric Rexstad
#'
#'@references M.J. Cox, D.L. Borchers, D.A. Demer, G.R. Cutter, and A.S. Brierley. 2011. Estimating the density of Antarctic krill (Euphausia superba) from multi-beam echo-sounder observations using distance sampling methods. Journal of the Royal Statistical Society: Series C (Applied Statistics), 60(2):301-316.
#'
#'M.J. Cox, D.L. Borchers and N. Kelly. 2013. nupoint: An R package for density estimation from point transects in the presence of non-uniform animal density Methods in Ecology and Evolution 4(6):589-594
#'
#'Marques, T.A. , Buckland, S.T. , Borchers, D.L. , Tosh, D. and McDonald, R.A.
#'2010. Point transect sampling along linear features. Biometrics 66(4):1247-1255.
#'
#'@export
est.abundance.whales <- function(environ.sim.dat, trunc.prop=0.9) {
# nsim <-200
# popn <- numeric(nsim)
# for (k in 1:nsim) {
# environ.sim.dat<-nupoint.env.simulator(pars=c(60,10,50),
# z.mat=NULL,
# xlim=c(0,200),ylim=c(0,100),
# grid.resolution=1,grad.type='NORM',det.type='HNORM',
# observer.coords=c(100,0),nbr.targets=1000,
# environment.simulator.control=
# list(c(X=50,Y=10,sd=60),c(X=90,Y=0,sd=30)),
# mask.mat=NULL,mask.ang=0,plot=FALSE,
# perp.lines=NULL,n=NULL)
test <- truncate(trunc.prop=trunc.prop, sightings=environ.sim.dat$sighting.mat)
trunc.dist <- test$trunc.radius
# replace sightings inside fitted object with truncated sightings
environ.sim.dat[[1]] <- test$sightings
# parameter estimation
browser()
sim.norm.fit<-nupoint.env.fit(pars=environ.sim.dat$settings$pars,
z=environ.sim.dat$sighting.mat$z,
rd=environ.sim.dat$sighting.mat$r, # is it r or d (data or simulation)
dzdy=environ.sim.dat$sighting.mat$dzdy,
z.mat=environ.sim.dat$z.mat,
dzdy.mat=environ.sim.dat$zGradmat,
rd.mat=environ.sim.dat$rd.mat,
minz=min(environ.sim.dat$z.mat, na.rm=TRUE),
wx=environ.sim.dat$wx, #environ.sim.dat$settings$xlim[2],
wy=environ.sim.dat$wy, #environ.sim.dat$settings$ylim[2],
wz=environ.sim.dat$wz, #max(environ.sim.dat$z.mat),
grad.type=environ.sim.dat$settings$grad.type,
det.type=environ.sim.dat$settings$det.type,
n=NULL,lower.b=rep(1,length(environ.sim.dat$settings$pars))
,upper.b=rep(100,length(environ.sim.dat$settings$pars)))
# estimate P for HT
# truncate the grid at the truncation distance
new.rdmat <- environ.sim.dat$rd.mat
new.zmat <- environ.sim.dat$z.mat
new.zgrad <- environ.sim.dat$zGradmat
for (i in seq(1:dim(environ.sim.dat$rd.mat)[1])) {
for (j in seq(1:dim(environ.sim.dat$rd.mat)[2])) {
if (new.rdmat[i,j]>trunc.dist) {
new.rdmat[i,j] <- NA
new.zmat[i,j] <- NA
new.zgrad[i,j] <- NA
}
}
}
gradient.model <- environ.sim.dat$settings$grad.type
detection.model <- environ.sim.dat$settings$det.type
browser()
# following two lines need fixing for non-norm,hnorm combination
mat.g <- detectF(new.rdmat[!is.na(new.rdmat)], detection.model, sim.norm.fit$par[3])
mat.pi <- pi.z.f(gradient.model, pars=sim.norm.fit$par[1:2], z=new.zmat[!is.na(new.zmat)],
z.lim=c(min(new.zmat, na.rm=TRUE), max(new.zmat, na.rm=TRUE)))
# Abundance within truncation zone
Nhat.a <- dim(environ.sim.dat$sightings)[1]/sum(mat.g*mat.pi*abs(new.zgrad[!is.na(new.zgrad)])/(1*environ.sim.dat$settings$xlim[2]))
# Scale Nhat.a to entire study area by dividing by integral pi(x,y) in region a
divisor <- sum(mat.pi*abs(new.zgrad[!is.na(new.zgrad)])/(1*environ.sim.dat$settings$xlim[2]))
print(divisor)
Nhat.region <- Nhat.a / divisor
return(list(Nhat.covered=Nhat.a, Nhat.region=Nhat.region))
# popn[k] <- Nhat.region
}
|
#' Return each team's worst losses
#'
#' @param df df
#' @param teamname teamname
#' @param type type
#' @param N N
#'
#' @examples
#' df <- engsoccerdata2
#' worstlosses(df,"Everton")
#' worstlosses(df,"Aston Villa", type="H")
#' worstlosses(df,"York City", type="A")
#' worstlosses(df,"Port Vale", N=20)
#' worstlosses(df,"Hull City", type="A", N=7)
#'
#' @export
worstlosses<-function(df, teamname, type=NULL, N=NULL){
N<- if(is.null(N)) 10 else {N}
if(is.null(type))
df %>%
filter(home==teamname & result=="A" | visitor==teamname & result=="H") %>%
mutate(maxgoal=pmax(hgoal, vgoal), mingoal=pmin(hgoal,vgoal), absgoaldif=abs(goaldif)) %>%
arrange(desc(absgoaldif),desc(maxgoal)) %>%
select (Season, home, visitor, FT, division) %>%
head(N)
else
{
df %>%
filter(home==teamname & result=="A" | visitor==teamname & result=="H") %>%
mutate(maxgoal=pmax(hgoal, vgoal), mingoal=pmin(hgoal,vgoal), absgoaldif=abs(goaldif)) %>%
arrange(desc(absgoaldif),desc(maxgoal)) %>%
filter (result==type) %>%
select (Season, home, visitor, FT, division) %>%
head(N)
}
}
| /R/worstlosses.R | no_license | amunnelly/engsoccerdata | R | false | false | 1,141 | r | #' Return each team's worst losses
#'
#' @param df df
#' @param teamname teamname
#' @param type type
#' @param N N
#'
#' @examples
#' df <- engsoccerdata2
#' worstlosses(df,"Everton")
#' worstlosses(df,"Aston Villa", type="H")
#' worstlosses(df,"York City", type="A")
#' worstlosses(df,"Port Vale", N=20)
#' worstlosses(df,"Hull City", type="A", N=7)
#'
#' @export
worstlosses<-function(df, teamname, type=NULL, N=NULL){
N<- if(is.null(N)) 10 else {N}
if(is.null(type))
df %>%
filter(home==teamname & result=="A" | visitor==teamname & result=="H") %>%
mutate(maxgoal=pmax(hgoal, vgoal), mingoal=pmin(hgoal,vgoal), absgoaldif=abs(goaldif)) %>%
arrange(desc(absgoaldif),desc(maxgoal)) %>%
select (Season, home, visitor, FT, division) %>%
head(N)
else
{
df %>%
filter(home==teamname & result=="A" | visitor==teamname & result=="H") %>%
mutate(maxgoal=pmax(hgoal, vgoal), mingoal=pmin(hgoal,vgoal), absgoaldif=abs(goaldif)) %>%
arrange(desc(absgoaldif),desc(maxgoal)) %>%
filter (result==type) %>%
select (Season, home, visitor, FT, division) %>%
head(N)
}
}
|
## load helpers ------------------
source("helpers.R")
## load packages -----------------
library(rvest)
library(tidyverse)
library(pdftools)
library(magrittr)
## parse HTML --------------------
iso_codes <- readxl::read_xlsx("../data/iso_codes.xlsx")
# parse with read_html
parsed_doc <- read_html("https://www.unglobalcompact.org/participation/report/cop/create-and-submit/active?page=1&per_page=10") # usually the first step in R when scraping web pages
parsed_doc
# number of active COPs
n_entries <- rvest::html_nodes(parsed_doc, xpath = "/html/body/main/section/div/header/h2") %>%
rvest::html_text("") %>%
stringr::str_extract_all("\\d+") %>%
as.numeric
paste0("Total number of GC Active COPs received: ", n_entries)
## extract information ------------------
# number of entries to calculate pages
n_pages <- 1:ceiling(n_entries/5000)
# urls holding tables
urls_to_parse <- paste0("https://www.unglobalcompact.org/participation/report/cop/create-and-submit/active?page=", n_pages,"&per_page=5000")
# extraction of table information
csr_cop_submissions <- lapply(urls_to_parse, submission_table) %>% dplyr::bind_rows()
# fixing of country names for iso code matching
csr_cop_submissions <- csr_cop_submissions %>% dplyr::mutate(Country = case_when(Country == "Bosnia-Herze..." ~ "Bosnia and Herzegovina",
Country == "Central Afri..." ~ "Central African Republic",
Country == "Congo, Democ..." ~ "Congo, the Democratic Republic of the",
Country == "Dominican Re..." ~ "Dominican Republic",
Country == "Iran, Islami..." ~ "Iran, Islamic Republic of",
Country == "Korea, Repub..." ~ "Korea, Republic of",
Country == "Kosovo as pe..." ~ "Kosovo",
Country == "Moldova, Rep..." ~ "Moldova, Republic of",
Country == "Palestine, S..." ~ "Palestine, State of",
Country == "Papua New Gu..." ~ "Papua New Guinea",
Country == "Russian Fede..." ~ "Russian Federation",
Country == "Sao Tome And..." ~ "Sao Tome and Principe",
Country == "Syrian Arab ..." ~ "Syrian Arab Republic",
Country == "Tanzania, Un..." ~ "Tanzania, United Republic of",
Country == "Trinidad And..." ~ "Trinidad and Tobago",
Country == "United Arab ..." ~ "United Arab Emirates",
Country == "United State..." ~ "United States",
T ~ Country
))
# iso code matching - exclusion of 2020+
csr_table <- left_join(csr_cop_submissions, iso_codes, by = c("Country")) %>%
dplyr::filter(Year != "2021")
# english and number of document availability (time-consuming//load csr_table.Rdata)
#csr_table <- csr_table %>% dplyr::rowwise() %>%
# dplyr::mutate(English = submit_language(Link))
# save table -----
#save(csr_table, file = "../data/csr_table.Rdata")
| /report2021/COP_CSR/code/01-table-extraction.R | no_license | sjankin/lancet | R | false | false | 3,717 | r | ## load helpers ------------------
source("helpers.R")
## load packages -----------------
library(rvest)
library(tidyverse)
library(pdftools)
library(magrittr)
## parse HTML --------------------
iso_codes <- readxl::read_xlsx("../data/iso_codes.xlsx")
# parse with read_html
parsed_doc <- read_html("https://www.unglobalcompact.org/participation/report/cop/create-and-submit/active?page=1&per_page=10") # usually the first step in R when scraping web pages
parsed_doc
# number of active COPs
n_entries <- rvest::html_nodes(parsed_doc, xpath = "/html/body/main/section/div/header/h2") %>%
rvest::html_text("") %>%
stringr::str_extract_all("\\d+") %>%
as.numeric
paste0("Total number of GC Active COPs received: ", n_entries)
## extract information ------------------
# number of entries to calculate pages
n_pages <- 1:ceiling(n_entries/5000)
# urls holding tables
urls_to_parse <- paste0("https://www.unglobalcompact.org/participation/report/cop/create-and-submit/active?page=", n_pages,"&per_page=5000")
# extraction of table information
csr_cop_submissions <- lapply(urls_to_parse, submission_table) %>% dplyr::bind_rows()
# fixing of country names for iso code matching
csr_cop_submissions <- csr_cop_submissions %>% dplyr::mutate(Country = case_when(Country == "Bosnia-Herze..." ~ "Bosnia and Herzegovina",
Country == "Central Afri..." ~ "Central African Republic",
Country == "Congo, Democ..." ~ "Congo, the Democratic Republic of the",
Country == "Dominican Re..." ~ "Dominican Republic",
Country == "Iran, Islami..." ~ "Iran, Islamic Republic of",
Country == "Korea, Repub..." ~ "Korea, Republic of",
Country == "Kosovo as pe..." ~ "Kosovo",
Country == "Moldova, Rep..." ~ "Moldova, Republic of",
Country == "Palestine, S..." ~ "Palestine, State of",
Country == "Papua New Gu..." ~ "Papua New Guinea",
Country == "Russian Fede..." ~ "Russian Federation",
Country == "Sao Tome And..." ~ "Sao Tome and Principe",
Country == "Syrian Arab ..." ~ "Syrian Arab Republic",
Country == "Tanzania, Un..." ~ "Tanzania, United Republic of",
Country == "Trinidad And..." ~ "Trinidad and Tobago",
Country == "United Arab ..." ~ "United Arab Emirates",
Country == "United State..." ~ "United States",
T ~ Country
))
# iso code matching - exclusion of 2020+
csr_table <- left_join(csr_cop_submissions, iso_codes, by = c("Country")) %>%
dplyr::filter(Year != "2021")
# english and number of document availability (time-consuming//load csr_table.Rdata)
#csr_table <- csr_table %>% dplyr::rowwise() %>%
# dplyr::mutate(English = submit_language(Link))
# save table -----
#save(csr_table, file = "../data/csr_table.Rdata")
|
#funções especiais
#unlist()
#Produz um vetor com os elementos da lista
?unlist
lst1 <-list(6, "b", 15)
lst1
class(lst1)
unlist(lst1)
vec1 <- unlist(lst1) # transforma uma lista em vetor
class(vec1)
lst2 <- list(v1 = 6, v2 = list(381, 2190), v3 = c(30, 217))
lst2
unlist(lst2)
mean(unlist(lst2))
round(mean(unlist(lst2)))
#do.call()
#executa uma função em um objeto
#***ATENÇÂO ***
#as funções da família apply aplicam uma função a cada elemento de um objeto (substitui um loop)
#a função do.call aplica uma função ao objeto inteiro e não a cada elemento individualmente
?do.call
data<-list()
N <- 100
for (n in 1:N) {
data[[n]] = data.frame(index = n, char = sample(letters, 1), z = rnorm(1))
}
head(data)
#rbind pode unir vetores. baseado em alguma regra
do.call(rbind, data)
class(do.call(rbind, data))
#lapply() x do.call()
y <- list(1:3, 4:6, 7:9)
y
lapply(y, sum) # aplica a operação em todos os elementos da lista
do.call(sum, y) #aplica a operação ao objeto
#o resulta da função do.call pode ser obtido de outras
# pacote plyr
install.packages('plyr')
library(plyr)
Y <- list(1:3, 4:6, 7:9)
Y
ldply(y, sum)
#benchmark
#comparada o tempo de execução de dois métodos
install.packages('rbenchmark')
library(rbenchmark)
benchmark(do.call(sum, y), ldply(y, sum))
N <- list(as.numeric(1:30000), as.numeric(4:60000), as.numeric(7:90000))
benchmark(do.call(sum, N), ldply(N, sum))
#strsplit()
#divide uma string ou vetor de caracteres
texto <- "Esta é uma string"
strsplit(texto, " ")
texto <- "Esta é uma string"
strsplit(texto, "")
dates <- c("1999-05-23", "2001-12-30", "2004-12-17", "2018-11-11")
temp <- strsplit(dates, "-")
temp
class(temp)
matrix(unlist(temp), ncol = 3, byrow = TRUE)
Names <- c("Brinm Sergey", "Page, Larry", "Dorsey, Jack",
"Glass, Noah", "Williams, Evan", "Stone, Biz")
Cofounded <- rep(c("Google", "Twitter"), c(2,4))
temp <- strsplit(Names, ", ")
temp
frase <- "Muitas vezes temos que repetir algo diversas vezes e essas diversas vezes parecem algo estranho"
palavras <- strsplit(frase, " ")[[1]]
palavras
unique(tolower(palavras)) #unique() para retirar repetições
antes = data.frame(attr = c(1, 30, 4, 6), tipo = c('pao_e_agua', 'pao_e_agua_2'))
antes
strsplit(as.character(antes$tipo), '_e_') #separa os valores e transforma em lista
library(stringr)
str_split_fixed(antes$tipo, "_e_", 2) #sepera os valores e transforma em matriz
antes = data.frame(attr = c(1, 30, 4, 6), tipo = c('pao_e_agua', 'pao_e_agua_2'))
antes
depois <- strsplit(as.character(antes$tipo), '_e_')
do.call(rbind, depois)
library(dplyr)
library(tidyr)
antes <- data.frame(
attr < c(1, 30, 4, 6),
tipo <- c('pao_e_agua', 'pao_e_agua_2')
)
antes %>%
separate(tipo, c("pao", "agua"), "_e_")
#para encerrar
#operadores de atribuição
vec1 = 1:4
vec2 <- 1:4
class(vec1)
class(vec2)
typeof(vec1)
typeof(vec2)
# em funções, quando utilizado '=' o objeto tem escopo local
mean(x = 1:10)
x
# em funções, quando utilizado '<-' o objeto tem escopo global
mean(x <- 1:10)
x
#criação de objetos
vetor1 = 1:4
vetor2 = c(1:4)
vetor3 = c(1, 2, 3, 4)
class(vetor1)
class(vetor2)
class(vetor3)
typeof(vetor1)
typeof(vetor2)
typeof(vetor3)
matriz1 = matrix(1:4, nr = 2)
matriz2 = matrix(c(1:4), nr = 2)
matriz3 = matrix(c(1, 2, 3, 4), nr = 2)
class(matriz1)
class(matriz2)
class(matriz3)
typeof(matriz1)
typeof(matriz2)
typeof(matriz3) | /Parte2/06-Funcoes_Especiais.R | no_license | Kotayz/RFundamentos | R | false | false | 3,425 | r | #funções especiais
#unlist()
#Produz um vetor com os elementos da lista
?unlist
lst1 <-list(6, "b", 15)
lst1
class(lst1)
unlist(lst1)
vec1 <- unlist(lst1) # transforma uma lista em vetor
class(vec1)
lst2 <- list(v1 = 6, v2 = list(381, 2190), v3 = c(30, 217))
lst2
unlist(lst2)
mean(unlist(lst2))
round(mean(unlist(lst2)))
#do.call()
#executa uma função em um objeto
#***ATENÇÂO ***
#as funções da família apply aplicam uma função a cada elemento de um objeto (substitui um loop)
#a função do.call aplica uma função ao objeto inteiro e não a cada elemento individualmente
?do.call
data<-list()
N <- 100
for (n in 1:N) {
data[[n]] = data.frame(index = n, char = sample(letters, 1), z = rnorm(1))
}
head(data)
#rbind pode unir vetores. baseado em alguma regra
do.call(rbind, data)
class(do.call(rbind, data))
#lapply() x do.call()
y <- list(1:3, 4:6, 7:9)
y
lapply(y, sum) # aplica a operação em todos os elementos da lista
do.call(sum, y) #aplica a operação ao objeto
#o resulta da função do.call pode ser obtido de outras
# pacote plyr
install.packages('plyr')
library(plyr)
Y <- list(1:3, 4:6, 7:9)
Y
ldply(y, sum)
#benchmark
#comparada o tempo de execução de dois métodos
install.packages('rbenchmark')
library(rbenchmark)
benchmark(do.call(sum, y), ldply(y, sum))
N <- list(as.numeric(1:30000), as.numeric(4:60000), as.numeric(7:90000))
benchmark(do.call(sum, N), ldply(N, sum))
#strsplit()
#divide uma string ou vetor de caracteres
texto <- "Esta é uma string"
strsplit(texto, " ")
texto <- "Esta é uma string"
strsplit(texto, "")
dates <- c("1999-05-23", "2001-12-30", "2004-12-17", "2018-11-11")
temp <- strsplit(dates, "-")
temp
class(temp)
matrix(unlist(temp), ncol = 3, byrow = TRUE)
Names <- c("Brinm Sergey", "Page, Larry", "Dorsey, Jack",
"Glass, Noah", "Williams, Evan", "Stone, Biz")
Cofounded <- rep(c("Google", "Twitter"), c(2,4))
temp <- strsplit(Names, ", ")
temp
frase <- "Muitas vezes temos que repetir algo diversas vezes e essas diversas vezes parecem algo estranho"
palavras <- strsplit(frase, " ")[[1]]
palavras
unique(tolower(palavras)) #unique() para retirar repetições
antes = data.frame(attr = c(1, 30, 4, 6), tipo = c('pao_e_agua', 'pao_e_agua_2'))
antes
strsplit(as.character(antes$tipo), '_e_') #separa os valores e transforma em lista
library(stringr)
str_split_fixed(antes$tipo, "_e_", 2) #sepera os valores e transforma em matriz
antes = data.frame(attr = c(1, 30, 4, 6), tipo = c('pao_e_agua', 'pao_e_agua_2'))
antes
depois <- strsplit(as.character(antes$tipo), '_e_')
do.call(rbind, depois)
library(dplyr)
library(tidyr)
antes <- data.frame(
attr < c(1, 30, 4, 6),
tipo <- c('pao_e_agua', 'pao_e_agua_2')
)
antes %>%
separate(tipo, c("pao", "agua"), "_e_")
#para encerrar
#operadores de atribuição
vec1 = 1:4
vec2 <- 1:4
class(vec1)
class(vec2)
typeof(vec1)
typeof(vec2)
# em funções, quando utilizado '=' o objeto tem escopo local
mean(x = 1:10)
x
# em funções, quando utilizado '<-' o objeto tem escopo global
mean(x <- 1:10)
x
#criação de objetos
vetor1 = 1:4
vetor2 = c(1:4)
vetor3 = c(1, 2, 3, 4)
class(vetor1)
class(vetor2)
class(vetor3)
typeof(vetor1)
typeof(vetor2)
typeof(vetor3)
matriz1 = matrix(1:4, nr = 2)
matriz2 = matrix(c(1:4), nr = 2)
matriz3 = matrix(c(1, 2, 3, 4), nr = 2)
class(matriz1)
class(matriz2)
class(matriz3)
typeof(matriz1)
typeof(matriz2)
typeof(matriz3) |
library(testthat)
library(dynutils)
library(dyntoy)
library(dplyr)
library(tibble)
Sys.setenv("R_TESTS" = "")
test_check("dyntoy")
| /tests/testthat.R | no_license | dynverse/dyntoy | R | false | false | 134 | r | library(testthat)
library(dynutils)
library(dyntoy)
library(dplyr)
library(tibble)
Sys.setenv("R_TESTS" = "")
test_check("dyntoy")
|
library(xtable)
xtable <- function(x, file = "", ..., rownames = FALSE){
table <- xtable::xtable(x, ...)
print(table, floating = F, hline.after = NULL,
add.to.row = list(pos = list(-1,0, nrow(x)),
command = c('\\toprule\n ','\\midrule\n ','\\bottomrule\n')),
include.rownames = rownames, NA.string = "---",
file = file,
comment = FALSE, timestamp = FALSE
)
} | /xtable.R | no_license | tmh741/FieldCropTests | R | false | false | 427 | r | library(xtable)
xtable <- function(x, file = "", ..., rownames = FALSE){
table <- xtable::xtable(x, ...)
print(table, floating = F, hline.after = NULL,
add.to.row = list(pos = list(-1,0, nrow(x)),
command = c('\\toprule\n ','\\midrule\n ','\\bottomrule\n')),
include.rownames = rownames, NA.string = "---",
file = file,
comment = FALSE, timestamp = FALSE
)
} |
#' Obtain hierarchical color palettes (Tree Colors)
#'
#' Obtain hierarchical color palettes, either the so-called Tree Colors from the HCL color space model, or by using an existing color palette. The former method, which is recommended, is used by default in \code{\link{treemap}} (type \code{"index"}) and \code{\link{treegraph}}. Use \code{\link{treecolors}} to experiment with this method.
#'
#' @param dtf a data.frame or data.table. Required.
#' @param index the index variables of dtf
#' @param method used method: either \code{"HCL"} (recommended), which is based on the HCL color space model, or \code{"HSV"}, which uses the argument \code{palette}.
#' @param palette color palette, which is only used for the HSV method
#' @param palette.HCL.options list of options to obtain Tree Colors from the HCL space (when \code{palette="HCL"}). This list contains:
#' \describe{
#' \item{\code{hue_start}:}{number between 0 and 360 that determines the starting hue value (default: 30)}
#' \item{\code{hue_end}:}{number between \code{hue_start} and \code{hue_start + 360} that determines the ending hue value (default: 390)}
#' \item{\code{hue_perm}:}{boolean that determines whether the colors are permuted such that adjacent levels get more distinguishable colors. If \code{FALSE}, then the colors are equally distributed from \code{hue_start} to \code{hue_end} (default: TRUE)}
#' \item{\code{hue_rev}:}{boolean that determines whether the colors of even-numbered branched are reversed (to increase discrimination among branches)}
#' \item{\code{hue_fraction}:}{number between 0 and 1 that determines the fraction of the hue circle that is used for recursive color picking: if 1 then the full hue circle is used, which means that the hue of the colors of lower-level nodes are spread maximally. If 0, then the hue of the colors of lower-level nodes are identical of the hue of their parents. (default: .5)}
#' \item{\code{chroma}:}{chroma value of colors of the first-level nodes, that are determined by the first index variable (default: 60)}
#' \item{\code{luminance}:}{luminance value of colors of the first-level nodes, i.e. determined by the first index variable (default: 70)}
#' \item{\code{chroma_slope}:}{slope value for chroma of the non-first-level nodes. The chroma values for the second-level nodes are \code{chroma+chroma_slope}, for the third-level nodes \code{chroma+2*chroma_slope}, etc. (default: 5)}
#' \item{\code{luminance_slope}:}{slope value for luminance of the non-first-level nodes (default: -10)}} For "depth" and "categorical" types, only the first two items are used. Use \code{\link{treecolors}} to experiment with these parameters.
#' @param return.parameters should a data.frame with color values and parameter options be returned (\code{TRUE}), or just the vector of color values (\code{FALSE})?
#' @param prepare.dat data is by default preprocessed, except for interal use
#' @return Either a vector of colors, or a data.frame is return (see \code{return.parameters}).
#' @import data.table
#' @import grid
#' @import colorspace
#' @export
treepalette <- function(dtf, index=names(dtf), method="HCL", palette=NULL, palette.HCL.options, return.parameters=TRUE, prepare.dat=TRUE) {
.SD <- NULL #for CMD check
palette.HCL.options <- tmSetHCLoptions(palette.HCL.options)
k <- length(index)
dat <- as.data.table(dtf)
othercols <- setdiff(names(dat), index)
if (length(othercols)) dat[, eval(othercols):=NULL]
setcolorder(dat, index)
dat[, names(dat):=lapply(.SD,as.factor)]
if (prepare.dat) {
if (k>1) {
dats <- list()
for (i in 1:(k-1)) {
dats[[i]] <- dat[!duplicated(dat[,1:i, with=FALSE]), ]
for (j in (i+1):k) dats[[i]][[j]] <- factor(NA, levels=levels(dats[[i]][[j]]))
}
dat <- rbindlist(c(list(dat), dats))
}
dat <- dat[!duplicated(dat), ]
# sort dat to be consistent with tmAggregate
dep <- treedepth(dat)
unikey <- do.call("paste", c(as.list(dat), list(dep, sep="__")))
dat <- dat[order(unikey), ]
}
if (method=="HCL") {
res <- treeapply(dat, list(lb=as.integer(palette.HCL.options$hue_start),
ub=as.integer(palette.HCL.options$hue_end),
rev=FALSE),
fun="addRange", frc=palette.HCL.options$hue_fraction,
hue_perm=palette.HCL.options$hue_perm, hue_rev=palette.HCL.options$hue_rev)
point <- with(res, (lb+ub)/2)
chr <- palette.HCL.options$chroma +
palette.HCL.options$chroma_slope * (res$l-1)
#75 - (k-res$l) * 10
lum <- palette.HCL.options$luminance +
palette.HCL.options$luminance_slope * (res$l-1)
#lum <- 95 - res$l * 10 #90
color <- hcl(point,c=chr, l=lum)
if (return.parameters) {
return(cbind(as.data.frame(dat), data.table(HCL.color=color,
HCL.H=point,
HCL.C=chr,
HCL.L=lum,
HCL.hue_lb=res$lb,
HCL.hue_ub=res$ub)))
} else {
return(color)
}
} else if (method=="HSV") {
nl <- nlevels(dat[[1]])
palette <- substr(palette, 1, 7) # remove alpha number
palette <- rep(palette, length.out=nl)
co <- coords(as(hex2RGB(palette), "HSV"))
value <- as.list(as.data.frame(co))
res <- treeapply(dat, value, fun="hsvs")
color <- with(res, hex(HSV(H, S, V)))
if (return.parameters) {
return(cbind(as.data.frame(dat), data.frame(HSV.color=color,
HSV.H=res$H,
HSV.S=res$S,
HSV.V=res$V)))
} else {
return(color)
}
}
} | /treemap/R/treepalette.R | no_license | ingted/R-Examples | R | false | false | 6,241 | r | #' Obtain hierarchical color palettes (Tree Colors)
#'
#' Obtain hierarchical color palettes, either the so-called Tree Colors from the HCL color space model, or by using an existing color palette. The former method, which is recommended, is used by default in \code{\link{treemap}} (type \code{"index"}) and \code{\link{treegraph}}. Use \code{\link{treecolors}} to experiment with this method.
#'
#' @param dtf a data.frame or data.table. Required.
#' @param index the index variables of dtf
#' @param method used method: either \code{"HCL"} (recommended), which is based on the HCL color space model, or \code{"HSV"}, which uses the argument \code{palette}.
#' @param palette color palette, which is only used for the HSV method
#' @param palette.HCL.options list of options to obtain Tree Colors from the HCL space (when \code{palette="HCL"}). This list contains:
#' \describe{
#' \item{\code{hue_start}:}{number between 0 and 360 that determines the starting hue value (default: 30)}
#' \item{\code{hue_end}:}{number between \code{hue_start} and \code{hue_start + 360} that determines the ending hue value (default: 390)}
#' \item{\code{hue_perm}:}{boolean that determines whether the colors are permuted such that adjacent levels get more distinguishable colors. If \code{FALSE}, then the colors are equally distributed from \code{hue_start} to \code{hue_end} (default: TRUE)}
#' \item{\code{hue_rev}:}{boolean that determines whether the colors of even-numbered branched are reversed (to increase discrimination among branches)}
#' \item{\code{hue_fraction}:}{number between 0 and 1 that determines the fraction of the hue circle that is used for recursive color picking: if 1 then the full hue circle is used, which means that the hue of the colors of lower-level nodes are spread maximally. If 0, then the hue of the colors of lower-level nodes are identical of the hue of their parents. (default: .5)}
#' \item{\code{chroma}:}{chroma value of colors of the first-level nodes, that are determined by the first index variable (default: 60)}
#' \item{\code{luminance}:}{luminance value of colors of the first-level nodes, i.e. determined by the first index variable (default: 70)}
#' \item{\code{chroma_slope}:}{slope value for chroma of the non-first-level nodes. The chroma values for the second-level nodes are \code{chroma+chroma_slope}, for the third-level nodes \code{chroma+2*chroma_slope}, etc. (default: 5)}
#' \item{\code{luminance_slope}:}{slope value for luminance of the non-first-level nodes (default: -10)}} For "depth" and "categorical" types, only the first two items are used. Use \code{\link{treecolors}} to experiment with these parameters.
#' @param return.parameters should a data.frame with color values and parameter options be returned (\code{TRUE}), or just the vector of color values (\code{FALSE})?
#' @param prepare.dat data is by default preprocessed, except for interal use
#' @return Either a vector of colors, or a data.frame is return (see \code{return.parameters}).
#' @import data.table
#' @import grid
#' @import colorspace
#' @export
treepalette <- function(dtf, index=names(dtf), method="HCL", palette=NULL, palette.HCL.options, return.parameters=TRUE, prepare.dat=TRUE) {
.SD <- NULL #for CMD check
palette.HCL.options <- tmSetHCLoptions(palette.HCL.options)
k <- length(index)
dat <- as.data.table(dtf)
othercols <- setdiff(names(dat), index)
if (length(othercols)) dat[, eval(othercols):=NULL]
setcolorder(dat, index)
dat[, names(dat):=lapply(.SD,as.factor)]
if (prepare.dat) {
if (k>1) {
dats <- list()
for (i in 1:(k-1)) {
dats[[i]] <- dat[!duplicated(dat[,1:i, with=FALSE]), ]
for (j in (i+1):k) dats[[i]][[j]] <- factor(NA, levels=levels(dats[[i]][[j]]))
}
dat <- rbindlist(c(list(dat), dats))
}
dat <- dat[!duplicated(dat), ]
# sort dat to be consistent with tmAggregate
dep <- treedepth(dat)
unikey <- do.call("paste", c(as.list(dat), list(dep, sep="__")))
dat <- dat[order(unikey), ]
}
if (method=="HCL") {
res <- treeapply(dat, list(lb=as.integer(palette.HCL.options$hue_start),
ub=as.integer(palette.HCL.options$hue_end),
rev=FALSE),
fun="addRange", frc=palette.HCL.options$hue_fraction,
hue_perm=palette.HCL.options$hue_perm, hue_rev=palette.HCL.options$hue_rev)
point <- with(res, (lb+ub)/2)
chr <- palette.HCL.options$chroma +
palette.HCL.options$chroma_slope * (res$l-1)
#75 - (k-res$l) * 10
lum <- palette.HCL.options$luminance +
palette.HCL.options$luminance_slope * (res$l-1)
#lum <- 95 - res$l * 10 #90
color <- hcl(point,c=chr, l=lum)
if (return.parameters) {
return(cbind(as.data.frame(dat), data.table(HCL.color=color,
HCL.H=point,
HCL.C=chr,
HCL.L=lum,
HCL.hue_lb=res$lb,
HCL.hue_ub=res$ub)))
} else {
return(color)
}
} else if (method=="HSV") {
nl <- nlevels(dat[[1]])
palette <- substr(palette, 1, 7) # remove alpha number
palette <- rep(palette, length.out=nl)
co <- coords(as(hex2RGB(palette), "HSV"))
value <- as.list(as.data.frame(co))
res <- treeapply(dat, value, fun="hsvs")
color <- with(res, hex(HSV(H, S, V)))
if (return.parameters) {
return(cbind(as.data.frame(dat), data.frame(HSV.color=color,
HSV.H=res$H,
HSV.S=res$S,
HSV.V=res$V)))
} else {
return(color)
}
}
} |
library(ape)
testtree <- read.tree("1577_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1577_0_unrooted.txt") | /codeml_files/newick_trees_processed/1577_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("1577_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1577_0_unrooted.txt") |
library(parallel)
StoC.Sample <- function(str, cluster, clusterHierarchy, clusterTaxonomy, distanceMatrix) {
cluster <- clusterTaxonomy[clusterTaxonomy$cluster == as.character(cluster), ]$string
clusterLength <- length(cluster)
if(clusterLength > 30) {
clusterLength = 30
}
cluster <- sample(cluster, clusterLength, replace = FALSE)
dedicatedCores <- detectCores() - 1
parallelWorker <- makeCluster(dedicatedCores)
clusterExport(parallelWorker, varlist = c("distanceMatrix", "str"), envir = environment())
distances <- parSapply(
parallelWorker,
cluster,
function(stringA) {
distanceMatrix[ str, stringA ]
}
)
stopCluster(parallelWorker)
median(distances)
}
# StoC.Sample("Bodenwischer", 69, clusterResult[["hierarchy"]], clusterResult[["taxonomy"]], distanceMatrix)
| /source/distance/StringToCluster/Sample.R | no_license | DasenB/CluString | R | false | false | 827 | r | library(parallel)
StoC.Sample <- function(str, cluster, clusterHierarchy, clusterTaxonomy, distanceMatrix) {
cluster <- clusterTaxonomy[clusterTaxonomy$cluster == as.character(cluster), ]$string
clusterLength <- length(cluster)
if(clusterLength > 30) {
clusterLength = 30
}
cluster <- sample(cluster, clusterLength, replace = FALSE)
dedicatedCores <- detectCores() - 1
parallelWorker <- makeCluster(dedicatedCores)
clusterExport(parallelWorker, varlist = c("distanceMatrix", "str"), envir = environment())
distances <- parSapply(
parallelWorker,
cluster,
function(stringA) {
distanceMatrix[ str, stringA ]
}
)
stopCluster(parallelWorker)
median(distances)
}
# StoC.Sample("Bodenwischer", 69, clusterResult[["hierarchy"]], clusterResult[["taxonomy"]], distanceMatrix)
|
#' A styling Function
#'
#' This function allows you to styling your html output.
#' @param
#' @keywords style html
#' @export
styling <- function(...
, full_width = FALSE
, bootstrap_options = c("striped", "hover", "condensed", "responsive")
, position = "left"
, fixed_thead = TRUE
){
kableExtra::kable_styling(...
, full_width = full_width
, bootstrap_options = bootstrap_options
, position = position
, fixed_thead = fixed_thead
)
}
| /R/styling.R | no_license | ghowoo/Wu | R | false | false | 605 | r | #' A styling Function
#'
#' This function allows you to styling your html output.
#' @param
#' @keywords style html
#' @export
styling <- function(...
, full_width = FALSE
, bootstrap_options = c("striped", "hover", "condensed", "responsive")
, position = "left"
, fixed_thead = TRUE
){
kableExtra::kable_styling(...
, full_width = full_width
, bootstrap_options = bootstrap_options
, position = position
, fixed_thead = fixed_thead
)
}
|
mvt = read.csv("mvt.csv", stringsAsFactors = FALSE)
str(mvt)
mvt$Date = strptime(mvt$Date, format = "%m/%d/%y %H:%M")
mvt$Weekday = weekdays(mvt$Date)
mvt$Hour = mvt$Date$hour
str(mvt)
WeekdayCounts = as.data.frame(table(mvt$Weekday))
str(WeekdayCounts)
library(ggplot2)
ggplot(WeekdayCounts, aes(x = Var1, y = Freq)) + geom_line(aes(group = 1))
WeekdayCounts$Var1 = factor(WeekdayCounts$Var1, ordered = TRUE, levels = c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"))
ggplot(WeekdayCounts, aes(x = Var1, y = Freq)) + geom_line(aes(group = 1), alpha = 0.3) + xlab("Day of the week") + ylab("Total Motor Vehicle Thefts")
table(mvt$Weekday, mvt$Hour)
DayHourCounts = as.data.frame(table(mvt$Weekday, mvt$Hour))
str(DayHourCounts)
DayHourCounts$Hour = as.numeric(DayHourCounts$Var2)
str(DayHourCounts)
ggplot(DayHourCounts, aes(x = Hour, y = Freq)) + geom_line(aes(group = Var1, color = Var1), size = 2)
DayHourCounts$Var1 = factor(DayHourCounts$Var1, ordered = TRUE, levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
ggplot(DayHourCounts, aes(x = Hour, y = Var1)) + geom_tile(aes(fill = Freq))
ggplot(DayHourCounts, aes(x = Hour, y = Var1)) + geom_tile(aes(fill = Freq)) + scale_fill_gradient(name = "Total MV Thefts", low = "white", high = "red") + theme(axis.title.y = element_blank())
install.packages("maps")
install.packages("ggmap")
library(maps)
library(ggmap)
chicago = get_map(location = "chicago", zoom = 11)
ggmap(chicago)
ggmap(chicago) + geom_point(data = mvt[1:100,], aes(x = Longitude, y = Latitude))
LatLonCounts = as.data.frame(table(round(mvt$Longitude, 2), round(mvt$Latitude, 2)))
str(LatLonCounts)
LatLonCounts$Lon = as.numeric(as.character(LatLonCounts$Var1))
LatLonCounts$Lat = as.numeric(as.character(LatLonCounts$Var2))
ggmap(chicago) + geom_point(data = LatLonCounts, aes(x = Lon, y = Lat, color = Freq, size = Freq)) + scale_color_gradient(low = "yellow", high = "red")
ggmap(chicago) + geom_tile(data = LatLonCounts, aes(x = Lon, y = Lat, alpha = Freq), fill = "red")
LatLonCounts = subset(LatLonCounts, Freq > 0)
| /Unit-7/mvt.R | no_license | praveenvvstgy/15.071x-The-Analytics-Edge | R | false | false | 2,132 | r | mvt = read.csv("mvt.csv", stringsAsFactors = FALSE)
str(mvt)
mvt$Date = strptime(mvt$Date, format = "%m/%d/%y %H:%M")
mvt$Weekday = weekdays(mvt$Date)
mvt$Hour = mvt$Date$hour
str(mvt)
WeekdayCounts = as.data.frame(table(mvt$Weekday))
str(WeekdayCounts)
library(ggplot2)
ggplot(WeekdayCounts, aes(x = Var1, y = Freq)) + geom_line(aes(group = 1))
WeekdayCounts$Var1 = factor(WeekdayCounts$Var1, ordered = TRUE, levels = c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"))
ggplot(WeekdayCounts, aes(x = Var1, y = Freq)) + geom_line(aes(group = 1), alpha = 0.3) + xlab("Day of the week") + ylab("Total Motor Vehicle Thefts")
table(mvt$Weekday, mvt$Hour)
DayHourCounts = as.data.frame(table(mvt$Weekday, mvt$Hour))
str(DayHourCounts)
DayHourCounts$Hour = as.numeric(DayHourCounts$Var2)
str(DayHourCounts)
ggplot(DayHourCounts, aes(x = Hour, y = Freq)) + geom_line(aes(group = Var1, color = Var1), size = 2)
DayHourCounts$Var1 = factor(DayHourCounts$Var1, ordered = TRUE, levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
ggplot(DayHourCounts, aes(x = Hour, y = Var1)) + geom_tile(aes(fill = Freq))
ggplot(DayHourCounts, aes(x = Hour, y = Var1)) + geom_tile(aes(fill = Freq)) + scale_fill_gradient(name = "Total MV Thefts", low = "white", high = "red") + theme(axis.title.y = element_blank())
install.packages("maps")
install.packages("ggmap")
library(maps)
library(ggmap)
chicago = get_map(location = "chicago", zoom = 11)
ggmap(chicago)
ggmap(chicago) + geom_point(data = mvt[1:100,], aes(x = Longitude, y = Latitude))
LatLonCounts = as.data.frame(table(round(mvt$Longitude, 2), round(mvt$Latitude, 2)))
str(LatLonCounts)
LatLonCounts$Lon = as.numeric(as.character(LatLonCounts$Var1))
LatLonCounts$Lat = as.numeric(as.character(LatLonCounts$Var2))
ggmap(chicago) + geom_point(data = LatLonCounts, aes(x = Lon, y = Lat, color = Freq, size = Freq)) + scale_color_gradient(low = "yellow", high = "red")
ggmap(chicago) + geom_tile(data = LatLonCounts, aes(x = Lon, y = Lat, alpha = Freq), fill = "red")
LatLonCounts = subset(LatLonCounts, Freq > 0)
|
#' Get all common neighbors between two or more nodes
#' @description With two or more nodes, get the set of
#' common neighboring nodes.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param nodes a vector of node ID values of length
#' at least 2.
#' @return a vector of node ID values.
#' @examples
#' # Create a directed graph with 5 nodes
#' graph <-
#' create_graph() %>%
#' add_path(n = 5)
#'
#' # Find all common neighbor nodes
#' # for nodes `1` and `2` (there are no
#' # common neighbors amongst them)
#' graph %>%
#' get_common_nbrs(
#' nodes = c(1, 2))
#'
#' # Find all common neighbor nodes for
#' # nodes `1` and `3`
#' graph %>%
#' get_common_nbrs(
#' nodes = c(1, 3))
#' @export get_common_nbrs
get_common_nbrs <- function(graph,
nodes) {
# Get the name of the function
fcn_name <- get_calling_fcn()
# Validation: Graph object is valid
if (graph_object_valid(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph object is not valid")
}
# Get predecessors and successors for all nodes
# in `nodes`
for (i in 1:length(nodes)) {
if (i == 1) {
nbrs <- list()
}
nbrs[[i]] <-
c(sort(get_predecessors(graph, node = nodes[i])),
sort(get_successors(graph, node = nodes[i])))
}
common_nbrs <- nbrs[[1]]
for (i in nbrs[-1]) {
common_nbrs <- intersect(common_nbrs, i)
}
if (length(common_nbrs) == 0) {
return(NA)
} else {
return(sort(as.integer(common_nbrs)))
}
}
| /R/get_common_nbrs.R | permissive | akkalbist55/DiagrammeR | R | false | false | 1,549 | r | #' Get all common neighbors between two or more nodes
#' @description With two or more nodes, get the set of
#' common neighboring nodes.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param nodes a vector of node ID values of length
#' at least 2.
#' @return a vector of node ID values.
#' @examples
#' # Create a directed graph with 5 nodes
#' graph <-
#' create_graph() %>%
#' add_path(n = 5)
#'
#' # Find all common neighbor nodes
#' # for nodes `1` and `2` (there are no
#' # common neighbors amongst them)
#' graph %>%
#' get_common_nbrs(
#' nodes = c(1, 2))
#'
#' # Find all common neighbor nodes for
#' # nodes `1` and `3`
#' graph %>%
#' get_common_nbrs(
#' nodes = c(1, 3))
#' @export get_common_nbrs
get_common_nbrs <- function(graph,
nodes) {
# Get the name of the function
fcn_name <- get_calling_fcn()
# Validation: Graph object is valid
if (graph_object_valid(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph object is not valid")
}
# Get predecessors and successors for all nodes
# in `nodes`
for (i in 1:length(nodes)) {
if (i == 1) {
nbrs <- list()
}
nbrs[[i]] <-
c(sort(get_predecessors(graph, node = nodes[i])),
sort(get_successors(graph, node = nodes[i])))
}
common_nbrs <- nbrs[[1]]
for (i in nbrs[-1]) {
common_nbrs <- intersect(common_nbrs, i)
}
if (length(common_nbrs) == 0) {
return(NA)
} else {
return(sort(as.integer(common_nbrs)))
}
}
|
plot4 <- function()
{
# Read in entire dataset, get rid of NAs
dataset <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?")
dataset$Date <- as.Date(dataset$Date, format="%d/%m/%Y")
# Subset the data by date between 2007-02-01 and 2007-02-02
data <- subset(dataset, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# Convert dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
# plot 4
# open PNG graphics device
png("plot4.png")
# set up the four regions
par(mfrow=c(2,2))
# create plot 4a (top left)
plot(data$Datetime,data$Global_active_power,type="n",xlab="",ylab="Global Active Power")
lines(data$Datetime,data$Global_active_power)
# create plot 4b (top right)
plot(data$Datetime,data$Voltage,type="n",xlab="datetime",ylab="Voltage")
lines(data$Datetime,data$Voltage)
# create plot 4c (bottom left)
plot(data$Datetime,data$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"))
lines(data$Datetime,data$Sub_metering_1)
lines(data$Datetime,data$Sub_metering_2,col="red")
lines(data$Datetime,data$Sub_metering_3,col="blue")
# create plot 4d (bottom right)
plot(data$Datetime,data$Global_reactive_power,type="n",xlab="datetime",ylab="Global_reactive_power")
lines(data$Datetime,data$Global_reactive_power)
# close connection
dev.off()
} | /plot4.R | no_license | diyaaang/ExData_Plotting1 | R | false | false | 1,444 | r | plot4 <- function()
{
# Read in entire dataset, get rid of NAs
dataset <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?")
dataset$Date <- as.Date(dataset$Date, format="%d/%m/%Y")
# Subset the data by date between 2007-02-01 and 2007-02-02
data <- subset(dataset, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# Convert dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
# plot 4
# open PNG graphics device
png("plot4.png")
# set up the four regions
par(mfrow=c(2,2))
# create plot 4a (top left)
plot(data$Datetime,data$Global_active_power,type="n",xlab="",ylab="Global Active Power")
lines(data$Datetime,data$Global_active_power)
# create plot 4b (top right)
plot(data$Datetime,data$Voltage,type="n",xlab="datetime",ylab="Voltage")
lines(data$Datetime,data$Voltage)
# create plot 4c (bottom left)
plot(data$Datetime,data$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"))
lines(data$Datetime,data$Sub_metering_1)
lines(data$Datetime,data$Sub_metering_2,col="red")
lines(data$Datetime,data$Sub_metering_3,col="blue")
# create plot 4d (bottom right)
plot(data$Datetime,data$Global_reactive_power,type="n",xlab="datetime",ylab="Global_reactive_power")
lines(data$Datetime,data$Global_reactive_power)
# close connection
dev.off()
} |
################################################################################
# Run Multivar decision tree model #
# OUTPUTS USED BY GAVI AND IN PAPER FOR BURDEN ESTIMATES #
# run ICER scenarios (no discount) i.e. improved PEP, RIG & dog vax
# save outputs to folder: countryLTs_nodiscount
################################################################################
#' * Life Tables - Country specific *
#' * Discounting - 0 *
#' * PEP cost - $5 (default) *
#' * RIG cost - $45 (default) *
#' * Intro grant - $100k *
#' * Scenarios - a3_1, a4, a2, a5_1, a5_2 *
#' * Run count - 1000 *
rm(list=ls())
# Load in packages
library(gdata)
library(rlang)
library(reshape2)
library(ggplot2)
library(tools)
library(triangle)
library(plyr)
library(dplyr)
library(Hmisc)
library(tidyr)
# Load in functions
source("R/YLL.R") # Calculate YLL given life tables and rabies age distribution
source("R/PEP.R") # Vial use under different regimens and throughput
source("R/prob_rabies.R") # Probability of developing rabies - sensitivity analysis
source("R/decision_tree_sensitivity_by_year.R") # Sensitivity analysis
source("R/decision_tree_multivariate_analysis_by_year_v2.R") # Multivariate sensitivity analysis
source("R/scenario_params.R") # Parameters and functions for gavi support and phasing
source("R/multivar_output_summary_Github.R") # MEAN
source("R/multivariate_plot_summarise_data_Github.R")
# Set folder name for output
folder_name <- "countryLTs_nodiscount"
######################
# 1. Setup variables #
######################
rabies = read.csv("data/baseline_incidence_Gavi_final.csv")
data <- read.csv("output/gavi_output_data.csv") # Load gavi-prepped data (scripts 1-7)
params <- read.csv("output/bio_data.csv") # parameters i.e. rabies transmission, prevention given incomplete PEP
vacc <- read.csv("data/vaccine_use.csv") # PEP scenarios - clinic throughput, regimen, completeness, vials, clinic visits:
dogs <- read.csv(file="output/dogs_pop_traj.csv", stringsAsFactors = FALSE) # dog pop 2018-2070 created in 6.elimination_traj.R
elimination_traj <- read.csv(file="output/rabies_traj.csv") # 100 elimination trajectories under dog vax 2020-2070 by year of GBP
y1 = "2020"; yN = "2035"
pop = data[,grep(y1, names(data)):grep(yN, names(data))] # needs this format to combine with elimination trajectories!
hrz=length(2020:2035) # time horizon: from 2020 to 2035
# DALYs - disability weightings & lifetables
DALYrabies_input <- read.csv("data/DALY_params_rabies.csv") # Knobel et al. 2005
# SPECIFIC PARAMETERS
# Life table
country_LE <- read.csv("data/lifetables_bycountry.csv")
country_LE <- country_LE[-which(country_LE$age_from == 100),]
LE2020 <- country_LE[which(country_LE$year == 2020),] # Use 2020 age distributions throughout!
# Set discounting rate
discount = 0
# Set prices (USD)
gavi_intro_grant <- 100000 # Intro grant
gavi_vaccine_price <- 5 # vaccine cost per vial
gavi_RIG_price <- 45 # ERIG cost per vial
################
# 2. Run model #
################
# Set number of runs
n = 1000 # 2 hrs per scenario, ~10 hrs
# Improved PEP access - Paper SC2
scenario_a3_1 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=F, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Improved PEP access + RIG - Paper SC3
scenario_a4 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=F, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="high risk", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc SQ - Paper SC4a
scenario_a2 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="none", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc + PEP access - Paper SC4b
scenario_a5_1 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc + PEP access + IBCM - Paper SC4c
scenario_a5_2 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=TRUE)
###########################################
# 3. Bind outputs into a single dataframe #
###########################################
# Append all results into a dataframe
out <- rbind.data.frame(
cbind.data.frame(scenario_a3_1, scenario="a3_1"), # scenario 2 - improved PEP access
cbind.data.frame(scenario_a4, scenario="a4"), # scenario 3 - RIG
cbind.data.frame(scenario_a2, scenario="a2"), # scenario 4a - dog vax
cbind.data.frame(scenario_a5_1, scenario="a5_1"), # scenario 4b - dog vax + improved PEP access (no RIG)
cbind.data.frame(scenario_a5_2, scenario="a5_2")) # scenario 4c - dog vax + improved PEP access (no RIG) + IBCM
dim(out)
table(out$scenario)
countries <- unique(out$country)
scenarios <- unique(out$scenario)
yrs <- unique(out$year)
# INCLUDE GAVI ELIGIBILITY
gavi_info <- read.csv("output/gavi_output_data.csv", stringsAsFactors=FALSE)
out <- merge(out, data.frame(country=gavi_info$country, gavi_2018=gavi_info$gavi_2018), by="country", all.x=TRUE)
# CE outputs
out$cost_per_death_averted <- out$total_cost/out$total_deaths_averted
out$cost_per_YLL_averted <- out$total_cost/out$total_YLL_averted
out$deaths_averted_per_100k_vaccinated <- out$total_deaths_averted/out$vaccinated/100000
# Summarize by iteration over time horizon
out_horizon = country_horizon_iter(out)
######################################
# 4a. Create summary outputs #
######################################
# Country, cluster, & global by year
country_summary_yr = multivar_country_summary(out, year = TRUE)
cluster_summary_yr = multivar_summary(country_summary_yr, year=TRUE, setting ="cluster")
global_summary_yr = multivar_summary(country_summary_yr, year=TRUE, setting="global")
gavi2018_summary_yr = multivar_summary(country_summary_yr[which(country_summary_yr$gavi_2018==TRUE),], year=TRUE, setting="global")
write.csv(country_summary_yr, paste("output/", folder_name, "/country_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(cluster_summary_yr, paste("output/", folder_name, "/cluster_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(global_summary_yr, paste("output/", folder_name, "/global_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(gavi2018_summary_yr, paste("output/", folder_name, "/gavi2018_stats_ICER.csv", sep=""), row.names=FALSE)
################################################
# 4b. Create summary outputs over time horizon #
################################################
# Country, cluster, & global over time horizon
country_summary_horizon = multivar_country_summary(out_horizon, year = FALSE)
cluster_summary_horizon = multivar_summary(country_summary_horizon, year=FALSE, setting ="cluster")
global_summary_horizon = multivar_summary(country_summary_horizon, year=FALSE, setting="global")
gavi2018_summary_horizon = multivar_summary(country_summary_horizon[which(country_summary_horizon$gavi_2018==TRUE),], year=FALSE, setting="global")
write.csv(country_summary_horizon, paste("output/", folder_name, "/country_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(cluster_summary_horizon, paste("output/", folder_name, "/cluster_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(global_summary_horizon, paste("output/", folder_name, "/global_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(gavi2018_summary_horizon, paste("output/", folder_name, "/gavi2018_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
| /ms7.1.1.multivar_countryLTs_nodiscount_RIG_ICER.R | no_license | katiehampson1978/rabies_PEP_access | R | false | false | 8,035 | r | ################################################################################
# Run Multivar decision tree model #
# OUTPUTS USED BY GAVI AND IN PAPER FOR BURDEN ESTIMATES #
# run ICER scenarios (no discount) i.e. improved PEP, RIG & dog vax
# save outputs to folder: countryLTs_nodiscount
################################################################################
#' * Life Tables - Country specific *
#' * Discounting - 0 *
#' * PEP cost - $5 (default) *
#' * RIG cost - $45 (default) *
#' * Intro grant - $100k *
#' * Scenarios - a3_1, a4, a2, a5_1, a5_2 *
#' * Run count - 1000 *
rm(list=ls())
# Load in packages
library(gdata)
library(rlang)
library(reshape2)
library(ggplot2)
library(tools)
library(triangle)
library(plyr)
library(dplyr)
library(Hmisc)
library(tidyr)
# Load in functions
source("R/YLL.R") # Calculate YLL given life tables and rabies age distribution
source("R/PEP.R") # Vial use under different regimens and throughput
source("R/prob_rabies.R") # Probability of developing rabies - sensitivity analysis
source("R/decision_tree_sensitivity_by_year.R") # Sensitivity analysis
source("R/decision_tree_multivariate_analysis_by_year_v2.R") # Multivariate sensitivity analysis
source("R/scenario_params.R") # Parameters and functions for gavi support and phasing
source("R/multivar_output_summary_Github.R") # MEAN
source("R/multivariate_plot_summarise_data_Github.R")
# Set folder name for output
folder_name <- "countryLTs_nodiscount"
######################
# 1. Setup variables #
######################
rabies = read.csv("data/baseline_incidence_Gavi_final.csv")
data <- read.csv("output/gavi_output_data.csv") # Load gavi-prepped data (scripts 1-7)
params <- read.csv("output/bio_data.csv") # parameters i.e. rabies transmission, prevention given incomplete PEP
vacc <- read.csv("data/vaccine_use.csv") # PEP scenarios - clinic throughput, regimen, completeness, vials, clinic visits:
dogs <- read.csv(file="output/dogs_pop_traj.csv", stringsAsFactors = FALSE) # dog pop 2018-2070 created in 6.elimination_traj.R
elimination_traj <- read.csv(file="output/rabies_traj.csv") # 100 elimination trajectories under dog vax 2020-2070 by year of GBP
y1 = "2020"; yN = "2035"
pop = data[,grep(y1, names(data)):grep(yN, names(data))] # needs this format to combine with elimination trajectories!
hrz=length(2020:2035) # time horizon: from 2020 to 2035
# DALYs - disability weightings & lifetables
DALYrabies_input <- read.csv("data/DALY_params_rabies.csv") # Knobel et al. 2005
# SPECIFIC PARAMETERS
# Life table
country_LE <- read.csv("data/lifetables_bycountry.csv")
country_LE <- country_LE[-which(country_LE$age_from == 100),]
LE2020 <- country_LE[which(country_LE$year == 2020),] # Use 2020 age distributions throughout!
# Set discounting rate
discount = 0
# Set prices (USD)
gavi_intro_grant <- 100000 # Intro grant
gavi_vaccine_price <- 5 # vaccine cost per vial
gavi_RIG_price <- 45 # ERIG cost per vial
################
# 2. Run model #
################
# Set number of runs
n = 1000 # 2 hrs per scenario, ~10 hrs
# Improved PEP access - Paper SC2
scenario_a3_1 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=F, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Improved PEP access + RIG - Paper SC3
scenario_a4 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=F, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="high risk", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc SQ - Paper SC4a
scenario_a2 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="none", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc + PEP access - Paper SC4b
scenario_a5_1 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=FALSE)
# Dog vacc + PEP access + IBCM - Paper SC4c
scenario_a5_2 <- multivariate_analysis(ndraw=n, horizon=hrz, GAVI_status="base", DogVax_TF=T, VaxRegimen="Updated TRC",
DALYrabies=DALYrabies_input, LE=LE2020, RIG_status="none", discount=discount, breaks="5yr", IBCM=TRUE)
###########################################
# 3. Bind outputs into a single dataframe #
###########################################
# Append all results into a dataframe
out <- rbind.data.frame(
cbind.data.frame(scenario_a3_1, scenario="a3_1"), # scenario 2 - improved PEP access
cbind.data.frame(scenario_a4, scenario="a4"), # scenario 3 - RIG
cbind.data.frame(scenario_a2, scenario="a2"), # scenario 4a - dog vax
cbind.data.frame(scenario_a5_1, scenario="a5_1"), # scenario 4b - dog vax + improved PEP access (no RIG)
cbind.data.frame(scenario_a5_2, scenario="a5_2")) # scenario 4c - dog vax + improved PEP access (no RIG) + IBCM
dim(out)
table(out$scenario)
countries <- unique(out$country)
scenarios <- unique(out$scenario)
yrs <- unique(out$year)
# INCLUDE GAVI ELIGIBILITY
gavi_info <- read.csv("output/gavi_output_data.csv", stringsAsFactors=FALSE)
out <- merge(out, data.frame(country=gavi_info$country, gavi_2018=gavi_info$gavi_2018), by="country", all.x=TRUE)
# CE outputs
out$cost_per_death_averted <- out$total_cost/out$total_deaths_averted
out$cost_per_YLL_averted <- out$total_cost/out$total_YLL_averted
out$deaths_averted_per_100k_vaccinated <- out$total_deaths_averted/out$vaccinated/100000
# Summarize by iteration over time horizon
out_horizon = country_horizon_iter(out)
######################################
# 4a. Create summary outputs #
######################################
# Country, cluster, & global by year
country_summary_yr = multivar_country_summary(out, year = TRUE)
cluster_summary_yr = multivar_summary(country_summary_yr, year=TRUE, setting ="cluster")
global_summary_yr = multivar_summary(country_summary_yr, year=TRUE, setting="global")
gavi2018_summary_yr = multivar_summary(country_summary_yr[which(country_summary_yr$gavi_2018==TRUE),], year=TRUE, setting="global")
write.csv(country_summary_yr, paste("output/", folder_name, "/country_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(cluster_summary_yr, paste("output/", folder_name, "/cluster_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(global_summary_yr, paste("output/", folder_name, "/global_stats_ICER.csv", sep=""), row.names=FALSE)
write.csv(gavi2018_summary_yr, paste("output/", folder_name, "/gavi2018_stats_ICER.csv", sep=""), row.names=FALSE)
################################################
# 4b. Create summary outputs over time horizon #
################################################
# Country, cluster, & global over time horizon
country_summary_horizon = multivar_country_summary(out_horizon, year = FALSE)
cluster_summary_horizon = multivar_summary(country_summary_horizon, year=FALSE, setting ="cluster")
global_summary_horizon = multivar_summary(country_summary_horizon, year=FALSE, setting="global")
gavi2018_summary_horizon = multivar_summary(country_summary_horizon[which(country_summary_horizon$gavi_2018==TRUE),], year=FALSE, setting="global")
write.csv(country_summary_horizon, paste("output/", folder_name, "/country_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(cluster_summary_horizon, paste("output/", folder_name, "/cluster_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(global_summary_horizon, paste("output/", folder_name, "/global_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
write.csv(gavi2018_summary_horizon, paste("output/", folder_name, "/gavi2018_stats_horizon_ICER.csv", sep=""), row.names=FALSE)
|
### Concatenate all runs in a study
library(rtracklayer)
library(GenomicFeatures)
library(GenomicRanges)
library(S4Vectors)
options(echo=TRUE)
args <- commandArgs(TRUE)
studyIDs <- args[1]
positive_experiments <- read.delim("/SAN/Plasmo_compare/SRAdb/Input/positive_experiments.txt", sep = ",", header = F)
allHPexp <- read.delim("/SAN/Plasmo_compare/SRAdb/Output/allHPexp.txt", header = T)
################# Step 1: Bring all runs together ###################
ConcatRunsToStudy <- data.frame()
for(j in 1:length(studyIDs))
{
print(studyIDs[j])
runIDs <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/runs_",studyIDs[j],".txt", collapse = ''), header = F, sep = ',')
number_of_runs <- nrow(runIDs)
if(number_of_runs == 1)
{
firstRun <- runIDs[1,1]
FirstCountfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse=''), header = T, sep = '\t')
write.table(FirstCountfile, paste0("/SAN/Plasmo_compare/SRAdb/Output/_", studyIDs[j],"/ConcatRunsToStudy_", studyIDs[j],".txt"), sep = '\t', row.names=F)
}
if(number_of_runs > 1)
{
firstRun <- runIDs[1,1]
if(file.exists(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse='')))
{
FirstCountfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse=''), header = T, sep = '\t')
ConcatRunsToStudy <- FirstCountfile
colnames(ConcatRunsToStudy)[6] <- paste0(as.character(firstRun), "_",as.character(studyIDs[j]),collapse='')
}
a = 2
for(i in 2:number_of_runs)
{
runID <- runIDs[i,1]
# get runID
if(file.exists(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",runID,".txt",collapse='')))
{
countfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",runID,".txt",collapse=''), header = T, sep = '\t')
ConcatRunsToStudy[1:nrow(FirstCountfile),(a+5)] <- countfile[,6]
#ConcatRunsToStudy <- merge(ConcatRunsToStudy, countfile, by = c("seqnames", "start", "end", "width", "strand"))
colnames(ConcatRunsToStudy)[(a+5)] <- paste0(as.character(runID), "_",as.character(studyIDs[j]),collapse='')
a = a+1
}
}
write.table(ConcatRunsToStudy, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/ConcatRunsToStudy_", studyIDs[j],".txt", collapse=''), sep = '\t', row.names=F)
}
}
################################# Step 2: Get gene names for all reads #################
for( i in 1:length(studyIDs))
{
print(studyIDs[i])
study <- read.csv2(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i], "/ConcatRunsToStudy_", studyIDs[i],".txt", collapse=''), sep = '\t', header=T)
library(rtracklayer, quietly = TRUE)
#get host and parasite
host <- as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),2])
para <- as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),3])
genes <- import(paste0("/SAN/Plasmo_compare/Genomes/annotation/",host,para,".gtf", collapse=''), format = "gtf")
genes <- genes[genes$type%in%"exon"]
#genes <- genes[which(genes[,"type"] == "exon"),]
genes.df <- as.data.frame(genes)
genes.df.gene_name <- genes.df[,c("seqnames", "start", "end", "width", "strand", "gene_id")]
mergeStudy.genes.df.gene_name <- merge(study, genes.df.gene_name, by = c("seqnames", "start", "end", "width", "strand"))
mergeStudy.genes.df.gene_name <- mergeStudy.genes.df.gene_name[,6:ncol(mergeStudy.genes.df.gene_name)]
mergeStudy.genes.df.gene_name.combineGenes <- data.frame()
mergeStudy.genes.df.gene_name.combineGenes <- aggregate(mergeStudy.genes.df.gene_name[,1] ~ gene_id, data = mergeStudy.genes.df.gene_name, sum)
colnames(mergeStudy.genes.df.gene_name.combineGenes)[2] <- colnames(mergeStudy.genes.df.gene_name)[1]
if(ncol(study) > 6)
{
for(k in 2:(ncol(mergeStudy.genes.df.gene_name)-1))
{
agg <- aggregate(mergeStudy.genes.df.gene_name[,k] ~ gene_id, data = mergeStudy.genes.df.gene_name, sum)
mergeStudy.genes.df.gene_name.combineGenes <- merge(mergeStudy.genes.df.gene_name.combineGenes, agg, by = c("gene_id"))
colnames(mergeStudy.genes.df.gene_name.combineGenes)[k+1] <- colnames(mergeStudy.genes.df.gene_name)[k]
}
}
t.study <- t(mergeStudy.genes.df.gene_name.combineGenes)
colnames(t.study) <- mergeStudy.genes.df.gene_name.combineGenes$gene_id
t.study <- t.study[-1,]
class(t.study) <- "numeric"
write.table(t.study, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i], "/", studyIDs[i],".txt", collapse=''), sep = '\t', row.names=T)
}
#################################### Step 3: Only keep coding genes ########################################
################ host-parasite pairs ###############
require(rtracklayer)
require(dplyr)
for(i in 1:length(studyIDs))
{
print(i)
# get study.txt including all runs
study <- as.data.frame(t(read.delim(paste0("Output/", studyIDs[i], "/", studyIDs[i], ".txt", collapse = ''), sep = '\t', header = T)))
# get host parasite from allHPexp
#hp <- as.character(unique(allHPexp[allHPexp$Study==studyIDs[i],"HostParasite"]))
hp <- paste(as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),2]), as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),3]), sep="")
if(hp == "humanPfalciparum")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPfalciparum.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "humanPberghei")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPberghei.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "humanPvivax")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPvivax.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePberghei")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePberghei.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePyoelii")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePyoelii.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePchabaudi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePchabaudi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPcoatneyi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPcoatneyi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPcynomolgi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPcynomolgi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPknowlesi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPknowlesi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
# filter study to keep only protein-coding genes
study_coding_genes <- study %>%
tibble::rownames_to_column('gene') %>%
filter(rownames(study)%in%coding$gene_id) %>%
tibble::column_to_rownames('gene')
# write the table out
write.table(study_coding_genes, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i],"/", studyIDs[i], "_coding_genes.txt", collapse = ''), sep ='\t', row.names = T)
}
############################## Step 4: Get orthologous groups for each study ########################
parasite_orthogroups <- read.delim("/SAN/Plasmo_compare/OrthoFinder/parasite_orthogroups.txt", stringsAsFactors=FALSE)
host_orthogroups <- read.delim("/SAN/Plasmo_compare/OrthoFinder/host_orthogroups.txt", stringsAsFactors=FALSE)
for(i in 1:length(studyIDs))
{ print(i)
#if the study_coding_genes exists, merge with orthogroups (join functions)
filepath = paste0("/SAN/Plasmo_compare/SRAdb/Output/",studyIDs[i],"/",studyIDs[i],"_coding_genes.txt", collapse = "")
if(file.exists(filepath))
{
# find out what host and parasite the study is
host <- as.character(positive_experiments[grep(pattern = studyIDs[i], positive_experiments[,1]),2])
para <- as.character(positive_experiments[grep(pattern = studyIDs[i], positive_experiments[,1]),3])
# take the host and para orthogroups and make a df -> orthogroup | gene name
h.df <- data.frame(Orthogroup = host_orthogroups[,1], Org = host_orthogroups[,grep(pattern = host, colnames(host_orthogroups))])
p.df <- data.frame(Orthogroup = parasite_orthogroups[,1], Org = parasite_orthogroups[,grep(pattern = para, colnames(parasite_orthogroups))])
hp.df <- rbind(h.df, p.df)
# read table
file = read.delim(filepath, header = T) %>% tibble::rownames_to_column("Gene")
ortho.table = merge(file, hp.df, by.x = "Gene", by.y = "Org")
ortho.table <- data.frame(Gene = ortho.table$Gene, Orthogroup = ortho.table$Orthogroup, ortho.table[,2:(ncol(ortho.table)-1)])
write.table(ortho.table, paste0("/SAN/Plasmo_compare/SRAdb/Output/",studyIDs[i],"/",studyIDs[i],"_orthogroups.txt", collapse = ""), sep = '\t', row.names = F)
}
}
| /malariaHPinteractions/R/post_process_countfiles.R | no_license | parnika91/malariaHPinteractions | R | false | false | 9,845 | r | ### Concatenate all runs in a study
library(rtracklayer)
library(GenomicFeatures)
library(GenomicRanges)
library(S4Vectors)
options(echo=TRUE)
args <- commandArgs(TRUE)
studyIDs <- args[1]
positive_experiments <- read.delim("/SAN/Plasmo_compare/SRAdb/Input/positive_experiments.txt", sep = ",", header = F)
allHPexp <- read.delim("/SAN/Plasmo_compare/SRAdb/Output/allHPexp.txt", header = T)
################# Step 1: Bring all runs together ###################
ConcatRunsToStudy <- data.frame()
for(j in 1:length(studyIDs))
{
print(studyIDs[j])
runIDs <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/runs_",studyIDs[j],".txt", collapse = ''), header = F, sep = ',')
number_of_runs <- nrow(runIDs)
if(number_of_runs == 1)
{
firstRun <- runIDs[1,1]
FirstCountfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse=''), header = T, sep = '\t')
write.table(FirstCountfile, paste0("/SAN/Plasmo_compare/SRAdb/Output/_", studyIDs[j],"/ConcatRunsToStudy_", studyIDs[j],".txt"), sep = '\t', row.names=F)
}
if(number_of_runs > 1)
{
firstRun <- runIDs[1,1]
if(file.exists(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse='')))
{
FirstCountfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",firstRun,".txt",collapse=''), header = T, sep = '\t')
ConcatRunsToStudy <- FirstCountfile
colnames(ConcatRunsToStudy)[6] <- paste0(as.character(firstRun), "_",as.character(studyIDs[j]),collapse='')
}
a = 2
for(i in 2:number_of_runs)
{
runID <- runIDs[i,1]
# get runID
if(file.exists(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",runID,".txt",collapse='')))
{
countfile <- read.table(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/countWithGFF3_",runID,".txt",collapse=''), header = T, sep = '\t')
ConcatRunsToStudy[1:nrow(FirstCountfile),(a+5)] <- countfile[,6]
#ConcatRunsToStudy <- merge(ConcatRunsToStudy, countfile, by = c("seqnames", "start", "end", "width", "strand"))
colnames(ConcatRunsToStudy)[(a+5)] <- paste0(as.character(runID), "_",as.character(studyIDs[j]),collapse='')
a = a+1
}
}
write.table(ConcatRunsToStudy, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[j], "/ConcatRunsToStudy_", studyIDs[j],".txt", collapse=''), sep = '\t', row.names=F)
}
}
################################# Step 2: Get gene names for all reads #################
for( i in 1:length(studyIDs))
{
print(studyIDs[i])
study <- read.csv2(paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i], "/ConcatRunsToStudy_", studyIDs[i],".txt", collapse=''), sep = '\t', header=T)
library(rtracklayer, quietly = TRUE)
#get host and parasite
host <- as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),2])
para <- as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),3])
genes <- import(paste0("/SAN/Plasmo_compare/Genomes/annotation/",host,para,".gtf", collapse=''), format = "gtf")
genes <- genes[genes$type%in%"exon"]
#genes <- genes[which(genes[,"type"] == "exon"),]
genes.df <- as.data.frame(genes)
genes.df.gene_name <- genes.df[,c("seqnames", "start", "end", "width", "strand", "gene_id")]
mergeStudy.genes.df.gene_name <- merge(study, genes.df.gene_name, by = c("seqnames", "start", "end", "width", "strand"))
mergeStudy.genes.df.gene_name <- mergeStudy.genes.df.gene_name[,6:ncol(mergeStudy.genes.df.gene_name)]
mergeStudy.genes.df.gene_name.combineGenes <- data.frame()
mergeStudy.genes.df.gene_name.combineGenes <- aggregate(mergeStudy.genes.df.gene_name[,1] ~ gene_id, data = mergeStudy.genes.df.gene_name, sum)
colnames(mergeStudy.genes.df.gene_name.combineGenes)[2] <- colnames(mergeStudy.genes.df.gene_name)[1]
if(ncol(study) > 6)
{
for(k in 2:(ncol(mergeStudy.genes.df.gene_name)-1))
{
agg <- aggregate(mergeStudy.genes.df.gene_name[,k] ~ gene_id, data = mergeStudy.genes.df.gene_name, sum)
mergeStudy.genes.df.gene_name.combineGenes <- merge(mergeStudy.genes.df.gene_name.combineGenes, agg, by = c("gene_id"))
colnames(mergeStudy.genes.df.gene_name.combineGenes)[k+1] <- colnames(mergeStudy.genes.df.gene_name)[k]
}
}
t.study <- t(mergeStudy.genes.df.gene_name.combineGenes)
colnames(t.study) <- mergeStudy.genes.df.gene_name.combineGenes$gene_id
t.study <- t.study[-1,]
class(t.study) <- "numeric"
write.table(t.study, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i], "/", studyIDs[i],".txt", collapse=''), sep = '\t', row.names=T)
}
#################################### Step 3: Only keep coding genes ########################################
################ host-parasite pairs ###############
require(rtracklayer)
require(dplyr)
for(i in 1:length(studyIDs))
{
print(i)
# get study.txt including all runs
study <- as.data.frame(t(read.delim(paste0("Output/", studyIDs[i], "/", studyIDs[i], ".txt", collapse = ''), sep = '\t', header = T)))
# get host parasite from allHPexp
#hp <- as.character(unique(allHPexp[allHPexp$Study==studyIDs[i],"HostParasite"]))
hp <- paste(as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),2]), as.character(positive_experiments[grep(studyIDs[i],positive_experiments[,1]),3]), sep="")
if(hp == "humanPfalciparum")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPfalciparum.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "humanPberghei")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPberghei.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "humanPvivax")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/humanPvivax.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePberghei")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePberghei.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePyoelii")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePyoelii.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "mousePchabaudi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/mousePchabaudi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPcoatneyi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPcoatneyi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPcynomolgi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPcynomolgi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
if(hp == "monkeyPknowlesi")
{
coding = as.data.frame(import("/SAN/Plasmo_compare/Genomes/annotation/monkeyPknowlesi.gtf")) %>%
filter(type%in%"exon") %>%
filter(gene_biotype%in%"protein_coding") %>%
distinct(gene_id)
}
# filter study to keep only protein-coding genes
study_coding_genes <- study %>%
tibble::rownames_to_column('gene') %>%
filter(rownames(study)%in%coding$gene_id) %>%
tibble::column_to_rownames('gene')
# write the table out
write.table(study_coding_genes, paste0("/SAN/Plasmo_compare/SRAdb/Output/", studyIDs[i],"/", studyIDs[i], "_coding_genes.txt", collapse = ''), sep ='\t', row.names = T)
}
############################## Step 4: Get orthologous groups for each study ########################
parasite_orthogroups <- read.delim("/SAN/Plasmo_compare/OrthoFinder/parasite_orthogroups.txt", stringsAsFactors=FALSE)
host_orthogroups <- read.delim("/SAN/Plasmo_compare/OrthoFinder/host_orthogroups.txt", stringsAsFactors=FALSE)
for(i in 1:length(studyIDs))
{ print(i)
#if the study_coding_genes exists, merge with orthogroups (join functions)
filepath = paste0("/SAN/Plasmo_compare/SRAdb/Output/",studyIDs[i],"/",studyIDs[i],"_coding_genes.txt", collapse = "")
if(file.exists(filepath))
{
# find out what host and parasite the study is
host <- as.character(positive_experiments[grep(pattern = studyIDs[i], positive_experiments[,1]),2])
para <- as.character(positive_experiments[grep(pattern = studyIDs[i], positive_experiments[,1]),3])
# take the host and para orthogroups and make a df -> orthogroup | gene name
h.df <- data.frame(Orthogroup = host_orthogroups[,1], Org = host_orthogroups[,grep(pattern = host, colnames(host_orthogroups))])
p.df <- data.frame(Orthogroup = parasite_orthogroups[,1], Org = parasite_orthogroups[,grep(pattern = para, colnames(parasite_orthogroups))])
hp.df <- rbind(h.df, p.df)
# read table
file = read.delim(filepath, header = T) %>% tibble::rownames_to_column("Gene")
ortho.table = merge(file, hp.df, by.x = "Gene", by.y = "Org")
ortho.table <- data.frame(Gene = ortho.table$Gene, Orthogroup = ortho.table$Orthogroup, ortho.table[,2:(ncol(ortho.table)-1)])
write.table(ortho.table, paste0("/SAN/Plasmo_compare/SRAdb/Output/",studyIDs[i],"/",studyIDs[i],"_orthogroups.txt", collapse = ""), sep = '\t', row.names = F)
}
}
|
context("Test hash/hmac functions")
test_that("Hash functions match openssl command line tool", {
# COMPARE: echo -n "foo" | openssl dgst -md4
expect_that(unclass(md4("foo")), equals("0ac6700c491d70fb8650940b1ca1e4b2"))
expect_that(unclass(md5("foo")), equals("acbd18db4cc2f85cedef654fccc4a4d8"))
expect_that(unclass(ripemd160("foo")), equals("42cfa211018ea492fdee45ac637b7972a0ad6873"))
expect_that(unclass(sha1("foo")), equals("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"))
expect_that(unclass(sha256("foo")), equals("2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"))
expect_that(unclass(sha512("foo")), equals("f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7"))
})
test_that("HMAC functions match openssl command line tool", {
# #COMPARE: echo -n "foo" | openssl dgst -md4 -hmac "secret"
expect_that(unclass(md4("foo", key = "secret")), equals("93e81ded7aec4ec0d73a97bb4792742a"))
expect_that(unclass(md5("foo", key = "secret")), equals("ba19fbc606a960051b60244e9a5ed3d2"))
expect_that(unclass(ripemd160("foo", key = "secret")), equals("a87093c26e44fdfa04e142e59710daa94556a5ed"))
expect_that(unclass(sha1("foo", key = "secret")), equals("9baed91be7f58b57c824b60da7cb262b2ecafbd2"))
expect_that(unclass(sha256("foo", key = "secret")), equals("773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4"))
expect_that(unclass(sha512("foo", key = "secret")), equals("82df7103de8d82de45e01c45fe642b5d13c6c2b47decafebc009431c665c6fa5f3d1af4e978ea1bde91426622073ebeac61a3461efd467e0971c788bc8ebdbbe"))
})
test_that("Connection interface matches raw interface", {
mydata <- serialize(iris, NULL)
saveRDS(iris, tmp <- tempfile())
expect_equal(md5(mydata), md5(file(tmp)))
expect_equal(sha1(mydata), sha1(file(tmp)))
expect_equal(sha256(mydata), sha256(file(tmp)))
expect_equal(md5(mydata, key = "secret"), md5(file(tmp), key = "secret"))
expect_equal(sha1(mydata, key = "secret"), sha1(file(tmp), key = "secret"))
expect_equal(sha256(mydata, key = "secret"), sha256(file(tmp), key = "secret"))
})
test_that("Connection interface matches string interface", {
expect_equal(md5(charToRaw("foo")), md5(textConnection("foo")))
expect_equal(sha1(charToRaw("foo")), sha1(textConnection("foo")))
expect_equal(sha256(charToRaw("foo")), sha256(textConnection("foo")))
expect_equal(md5(charToRaw("foo"), key = "secret"), md5(textConnection("foo"), key = "secret"))
expect_equal(sha1(charToRaw("foo"), key = "secret"), sha1(textConnection("foo"), key = "secret"))
expect_equal(sha256(charToRaw("foo"), key = "secret"), sha256(textConnection("foo"), key = "secret"))
})
| /packrat/lib/x86_64-apple-darwin15.6.0/3.4.2/openssl/tests/testthat/test_hash_output_value.R | permissive | danielg7/FireWeatherExplorer | R | false | false | 2,718 | r | context("Test hash/hmac functions")
test_that("Hash functions match openssl command line tool", {
# COMPARE: echo -n "foo" | openssl dgst -md4
expect_that(unclass(md4("foo")), equals("0ac6700c491d70fb8650940b1ca1e4b2"))
expect_that(unclass(md5("foo")), equals("acbd18db4cc2f85cedef654fccc4a4d8"))
expect_that(unclass(ripemd160("foo")), equals("42cfa211018ea492fdee45ac637b7972a0ad6873"))
expect_that(unclass(sha1("foo")), equals("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"))
expect_that(unclass(sha256("foo")), equals("2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"))
expect_that(unclass(sha512("foo")), equals("f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7"))
})
test_that("HMAC functions match openssl command line tool", {
# #COMPARE: echo -n "foo" | openssl dgst -md4 -hmac "secret"
expect_that(unclass(md4("foo", key = "secret")), equals("93e81ded7aec4ec0d73a97bb4792742a"))
expect_that(unclass(md5("foo", key = "secret")), equals("ba19fbc606a960051b60244e9a5ed3d2"))
expect_that(unclass(ripemd160("foo", key = "secret")), equals("a87093c26e44fdfa04e142e59710daa94556a5ed"))
expect_that(unclass(sha1("foo", key = "secret")), equals("9baed91be7f58b57c824b60da7cb262b2ecafbd2"))
expect_that(unclass(sha256("foo", key = "secret")), equals("773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4"))
expect_that(unclass(sha512("foo", key = "secret")), equals("82df7103de8d82de45e01c45fe642b5d13c6c2b47decafebc009431c665c6fa5f3d1af4e978ea1bde91426622073ebeac61a3461efd467e0971c788bc8ebdbbe"))
})
test_that("Connection interface matches raw interface", {
mydata <- serialize(iris, NULL)
saveRDS(iris, tmp <- tempfile())
expect_equal(md5(mydata), md5(file(tmp)))
expect_equal(sha1(mydata), sha1(file(tmp)))
expect_equal(sha256(mydata), sha256(file(tmp)))
expect_equal(md5(mydata, key = "secret"), md5(file(tmp), key = "secret"))
expect_equal(sha1(mydata, key = "secret"), sha1(file(tmp), key = "secret"))
expect_equal(sha256(mydata, key = "secret"), sha256(file(tmp), key = "secret"))
})
test_that("Connection interface matches string interface", {
expect_equal(md5(charToRaw("foo")), md5(textConnection("foo")))
expect_equal(sha1(charToRaw("foo")), sha1(textConnection("foo")))
expect_equal(sha256(charToRaw("foo")), sha256(textConnection("foo")))
expect_equal(md5(charToRaw("foo"), key = "secret"), md5(textConnection("foo"), key = "secret"))
expect_equal(sha1(charToRaw("foo"), key = "secret"), sha1(textConnection("foo"), key = "secret"))
expect_equal(sha256(charToRaw("foo"), key = "secret"), sha256(textConnection("foo"), key = "secret"))
})
|
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="datetime")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="datetime")
})
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() | /plot4.R | no_license | Abinav-M/Coursera-Exploratory | R | false | false | 1,167 | r | data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="datetime")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="datetime")
})
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() |
#' Throw a Condition
#'
#' Throws a condition of class c("error", "{{{ package }}}", "condition").
#'
#' We use this condition as an error dedicated to \pkg{ {{{ package}}}.}
#'
#' @param message_string The message to be thrown.
#' @param system_call The call to be thrown.
#' @param ... Arguments to be passed to
#' \code{\link[base:structure]{base::structure}}.
#' @return The function does never return anything, it stops with a
#' condition of class c("error", "{{{ package }}}", "condition").
#' @keywords internal
throw <- function(message_string, system_call = sys.call(-1), ...) {
condition <- structure(class = c("error", "{{{ package }}}", "condition"),
list(message = message_string, call = system_call),
...)
stop(condition)
}
| /inst/templates/throw.R | no_license | cran/packager | R | false | false | 801 | r | #' Throw a Condition
#'
#' Throws a condition of class c("error", "{{{ package }}}", "condition").
#'
#' We use this condition as an error dedicated to \pkg{ {{{ package}}}.}
#'
#' @param message_string The message to be thrown.
#' @param system_call The call to be thrown.
#' @param ... Arguments to be passed to
#' \code{\link[base:structure]{base::structure}}.
#' @return The function does never return anything, it stops with a
#' condition of class c("error", "{{{ package }}}", "condition").
#' @keywords internal
throw <- function(message_string, system_call = sys.call(-1), ...) {
condition <- structure(class = c("error", "{{{ package }}}", "condition"),
list(message = message_string, call = system_call),
...)
stop(condition)
}
|
#modifying System Locale to english
Sys.setlocale("LC_TIME", "English")
#setting up the download & reading the data
temp <- tempfile()
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = temp)
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";",na.strings = "?",header=TRUE)
unlink(temp)
#Creating a new with Date/Time based on the Date & Time columns of the data set
data["datetime"]<-NA
data$datetime <- strptime(paste(as.character(data[,1]),data[,2]),"%d/%m/%Y %H:%M:%S")
dat<-data[data$datetime>=as.POSIXlt("2007-02-01") & data$datetime<as.POSIXlt("2007-02-03") & !is.na(data$datetime),]
#ploting the values into a PNG file
png(file = "plot1.png", width = 480, height = 480, units = "px", pointsize = 12, bg = "white")
#Creating a table to plot 4 plots on a 2x2 scheme
par(mfcol = c(2, 2))
#Top left plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Global_active_power, type="l",ylab = "Global Active Power (Killowatts)"))
#Lower left plot
par(mar=c(2,4,4,4))
with(dat, plot(datetime,Sub_metering_1, type = "n",ylab = "Energy sub metering"))
with(dat, points(datetime, Sub_metering_1,type="l", col = "black"))
with(dat, points(datetime, Sub_metering_2, type="l",col = "blue"))
with(dat, points(datetime, Sub_metering_3, type="l",col = "red"))
legend("topright", lty=1, col = c("black","blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
#Top right Plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Voltage, type="l",ylab = "Voltage"))
#Lower right plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Global_reactive_power, type="l"))
dev.off() | /plot4.R | no_license | cbouthelier/ExData_Plotting1 | R | false | false | 1,674 | r | #modifying System Locale to english
Sys.setlocale("LC_TIME", "English")
#setting up the download & reading the data
temp <- tempfile()
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = temp)
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";",na.strings = "?",header=TRUE)
unlink(temp)
#Creating a new with Date/Time based on the Date & Time columns of the data set
data["datetime"]<-NA
data$datetime <- strptime(paste(as.character(data[,1]),data[,2]),"%d/%m/%Y %H:%M:%S")
dat<-data[data$datetime>=as.POSIXlt("2007-02-01") & data$datetime<as.POSIXlt("2007-02-03") & !is.na(data$datetime),]
#ploting the values into a PNG file
png(file = "plot1.png", width = 480, height = 480, units = "px", pointsize = 12, bg = "white")
#Creating a table to plot 4 plots on a 2x2 scheme
par(mfcol = c(2, 2))
#Top left plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Global_active_power, type="l",ylab = "Global Active Power (Killowatts)"))
#Lower left plot
par(mar=c(2,4,4,4))
with(dat, plot(datetime,Sub_metering_1, type = "n",ylab = "Energy sub metering"))
with(dat, points(datetime, Sub_metering_1,type="l", col = "black"))
with(dat, points(datetime, Sub_metering_2, type="l",col = "blue"))
with(dat, points(datetime, Sub_metering_3, type="l",col = "red"))
legend("topright", lty=1, col = c("black","blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
#Top right Plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Voltage, type="l",ylab = "Voltage"))
#Lower right plot
par(mar=c(4,4,4,4))
with(dat, plot(datetime,Global_reactive_power, type="l"))
dev.off() |
# Speed comparisons and looping examples.
# === preliminaries === #
#clear workspace
rm(list = ls())
#set your working directory
setwd("~/Dropbox/RA_and_Consulting_Work/ICPSR_Summer_14/HPC_Workshop_Materials")
# sum over a vector of length 10,000,000 using a loop in R
system.time({
vect <- c(1:10000000)
total <- 0
for(i in 1:length(as.numeric(vect))){
total <- total + vect[i]
}
print(total)
})
# sum over the same vector using the built in su function in R whihc is coded in C
system.time({
vect <- c(1:10000000)
total <- sum(as.numeric(vect))
print(total)
})
# generate a very sparse two column dataset
#number of observations
numobs <- 100000000
#observations we want to check
vec <- rep(0,numobs)
#only select 100 to check
vec[sample(1:numobs,100)] <- 1
#combine data
data <- cbind(c(1:numobs),vec)
# sum only over the entries in the first column where the second column is equal to 1 using an R loop
system.time({
total <- 0
for(i in 1:numobs){
if(data[i,2] == 1)
total <- total + data[i,1]
}
print(total)
})
#sum over the subset of observations where the second column is equal to 1 using the subset function (coded in C)
system.time({
dat <- subset(data, data[,2] ==1)
total <- sum(dat[,1])
print(total)
})
# an example of paralellization using the foreach package in R
#create some toy data
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
#define a function that we are going to run in parallel
my_function <- function(col_number){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
return(col_sum)
}
# Packages:
require(doMC)
require(foreach)
# Register number of cores on your computer
nCores <- 8
registerDoMC(nCores)
# iterations
N <- 100
# Run analysis in serial
system.time({
serial_results <- rep(0,N)
for(i in 1:N){
serial_results[i] <- my_function(i)
}
})
# Run analysis in parallel
system.time({
parallel_results <- foreach(i=1:N,.combine=rbind) %dopar% {
cur_result <- my_function(i)
}
})
# example using snowfall parallelization in R
data <- matrix(rnorm(1000000),nrow= 100000,ncol = 100)
#define a function that we are going to run in parallel
my_function <- function(col_number){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
return(col_sum)
}
# Package:
library(snowfall)
# Register cores
numcpus <- 2
sfInit(parallel=TRUE, cpus=numcpus )
# Check initialization
if(sfParallel()){
cat( "Parallel on", sfCpus(), "nodes.\n" )
}else{
cat( "Sequential mode.\n" )
}
# Export all packages
for (i in 1:length(.packages())){
eval(call("sfLibrary", (.packages()[i]),
character.only=TRUE))
}
# Export a list of R data objects
sfExport("data")
# Apply a function across the cluster
indexes <- c(1:100)
result <- sfClusterApplyLB(indexes,my_function)
# Stop the cluster
sfStop()
sum(unlist(result))
# run jobs in parallel using mclapply (only works on a Mac or Linux Machine)
# Packages:
library(parallel)
num_cpus <- 4
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
#additional argument
vect <- rep(c(1:4),25)
#define a function with two arguments that we are going to run in parallel
my_function <- function(col_number,multiply_by){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
col_sum <- col_sum*multiply_by
return(col_sum)
}
# Wrapper Function
run_on_cluster <- function(i){
temp <- my_function(i, vect[i])
return(temp)
}
# Run analysis
indexes <- 1:100
Result <- mclapply(indexes, run_on_cluster, mc.cores = num_cpus)
# run analysis of a large dataset using biglm package
# load package:
library(biglm)
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
data <- cbind(round(runif(1000000),0),data)
# Data must be of data.frame type
data <- as.data.frame(data)
# Use variable names in formula
str <- "V1 ~ V2 + V3 + V4 + V5 + V6"
# run model using bigglm function
model<- bigglm(as.formula(str),
data = data,
family=binomial(),
maxit = 20)
# run the same model using the standard glm package
model2<- glm(as.formula(str),
data = data,
family=binomial(),
maxit = 20)
| /Scripts/HPC_Programming_Example.R | no_license | duthedd/ISSR_Data_Science_Summer_Summit_15 | R | false | false | 4,286 | r | # Speed comparisons and looping examples.
# === preliminaries === #
#clear workspace
rm(list = ls())
#set your working directory
setwd("~/Dropbox/RA_and_Consulting_Work/ICPSR_Summer_14/HPC_Workshop_Materials")
# sum over a vector of length 10,000,000 using a loop in R
system.time({
vect <- c(1:10000000)
total <- 0
for(i in 1:length(as.numeric(vect))){
total <- total + vect[i]
}
print(total)
})
# sum over the same vector using the built in su function in R whihc is coded in C
system.time({
vect <- c(1:10000000)
total <- sum(as.numeric(vect))
print(total)
})
# generate a very sparse two column dataset
#number of observations
numobs <- 100000000
#observations we want to check
vec <- rep(0,numobs)
#only select 100 to check
vec[sample(1:numobs,100)] <- 1
#combine data
data <- cbind(c(1:numobs),vec)
# sum only over the entries in the first column where the second column is equal to 1 using an R loop
system.time({
total <- 0
for(i in 1:numobs){
if(data[i,2] == 1)
total <- total + data[i,1]
}
print(total)
})
#sum over the subset of observations where the second column is equal to 1 using the subset function (coded in C)
system.time({
dat <- subset(data, data[,2] ==1)
total <- sum(dat[,1])
print(total)
})
# an example of paralellization using the foreach package in R
#create some toy data
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
#define a function that we are going to run in parallel
my_function <- function(col_number){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
return(col_sum)
}
# Packages:
require(doMC)
require(foreach)
# Register number of cores on your computer
nCores <- 8
registerDoMC(nCores)
# iterations
N <- 100
# Run analysis in serial
system.time({
serial_results <- rep(0,N)
for(i in 1:N){
serial_results[i] <- my_function(i)
}
})
# Run analysis in parallel
system.time({
parallel_results <- foreach(i=1:N,.combine=rbind) %dopar% {
cur_result <- my_function(i)
}
})
# example using snowfall parallelization in R
data <- matrix(rnorm(1000000),nrow= 100000,ncol = 100)
#define a function that we are going to run in parallel
my_function <- function(col_number){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
return(col_sum)
}
# Package:
library(snowfall)
# Register cores
numcpus <- 2
sfInit(parallel=TRUE, cpus=numcpus )
# Check initialization
if(sfParallel()){
cat( "Parallel on", sfCpus(), "nodes.\n" )
}else{
cat( "Sequential mode.\n" )
}
# Export all packages
for (i in 1:length(.packages())){
eval(call("sfLibrary", (.packages()[i]),
character.only=TRUE))
}
# Export a list of R data objects
sfExport("data")
# Apply a function across the cluster
indexes <- c(1:100)
result <- sfClusterApplyLB(indexes,my_function)
# Stop the cluster
sfStop()
sum(unlist(result))
# run jobs in parallel using mclapply (only works on a Mac or Linux Machine)
# Packages:
library(parallel)
num_cpus <- 4
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
#additional argument
vect <- rep(c(1:4),25)
#define a function with two arguments that we are going to run in parallel
my_function <- function(col_number,multiply_by){
#take the column sum of the matrix
col_sum <- sum(data[,col_number])
col_sum <- col_sum*multiply_by
return(col_sum)
}
# Wrapper Function
run_on_cluster <- function(i){
temp <- my_function(i, vect[i])
return(temp)
}
# Run analysis
indexes <- 1:100
Result <- mclapply(indexes, run_on_cluster, mc.cores = num_cpus)
# run analysis of a large dataset using biglm package
# load package:
library(biglm)
data <- matrix(rnorm(10000000),nrow= 1000000,ncol = 100)
data <- cbind(round(runif(1000000),0),data)
# Data must be of data.frame type
data <- as.data.frame(data)
# Use variable names in formula
str <- "V1 ~ V2 + V3 + V4 + V5 + V6"
# run model using bigglm function
model<- bigglm(as.formula(str),
data = data,
family=binomial(),
maxit = 20)
# run the same model using the standard glm package
model2<- glm(as.formula(str),
data = data,
family=binomial(),
maxit = 20)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.