blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbfb9b2f636c99b40f937965c0f3a4c539a58e45 | ee08d388ecd2cd85456f43fc858fa49e2527f82a | /man/refFraction.Rd | 424f993601cc3bb3aff8b9ed78b4b66f733754fb | [] | no_license | jimhester/AllelicImbalance | 0842cef996ec85753ca55e2cbb1b082f5280c898 | fb58d9542aeb23ecef87ea39a48ed62eee1456a8 | refs/heads/master | 2021-01-15T20:43:33.519374 | 2014-12-02T00:18:16 | 2014-12-02T00:18:16 | 31,621,700 | 1 | 0 | null | 2015-03-03T21:14:50 | 2015-03-03T21:14:50 | null | UTF-8 | R | false | false | 624 | rd | refFraction.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{methods}
\name{refFraction}
\alias{refFraction}
\alias{refFraction,ASEset-method}
\title{reference fraction}
\usage{
refFraction(x, ...)
}
\arguments{
\item{x}{\code{ASEset} object}
\item{...}{arguments to forward to internal functions}
}
\description{
The fractions for all heterozygote reference alleles
}
\details{
Neccessary to measure the effect of mapbias over heterozygous SNPs
}
\examples{
#load example data
data(ASEset)
a <- ASEset
rf <- refFraction(a, strand="*")
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{fraction}
\keyword{reference}
|
64168d322390397ede374a9adaf288add4db4b09 | a3c78700a65f10714471a0d307ab984e8a71644d | /modules/assim.batch/tests/testthat/test.autoburnin.R | 8e81769f3caee1952bebdc9ccac394b09ca4504c | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PecanProject/pecan | e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f | ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c | refs/heads/develop | 2023-08-31T23:30:32.388665 | 2023-08-28T13:53:32 | 2023-08-28T13:53:32 | 6,857,384 | 187 | 217 | NOASSERTION | 2023-09-14T01:40:24 | 2012-11-25T23:48:26 | R | UTF-8 | R | false | false | 1,292 | r | test.autoburnin.R | library(PEcAn.assim.batch)
library(testthat)
context("Autoburnin functions")
# Generate some simple data for testing convergence check
# Two chains start at opposite means (+5 / -5) for the first n1 draws, then
# both settle on mu_common for the final n2 draws, so true convergence can
# only be detected after iteration n1.
n1 <- 7200
n2 <- 800
mu_common <- 0
chain1 <- coda::mcmc(cbind("a" = c(rnorm(n1, 5), rnorm(n2, mu_common)),
                           "b" = c(rnorm(n1, 5), rnorm(n2, mu_common))))
chain2 <- coda::mcmc(cbind("a" = c(rnorm(n1, -5), rnorm(n2, mu_common)),
                           "b" = c(rnorm(n1, -5), rnorm(n2, mu_common))))
test_mcmc <- coda::mcmc.list(chain1, chain2)
# getBurnin() reports the iteration at which the chains are judged converged
# (Gelman-Rubin threshold 1.1); autoburnin() returns the chains with the
# pre-convergence iterations discarded.
burnin <- getBurnin(test_mcmc, threshold = 1.1)
burned <- autoburnin(test_mcmc)
test_that("Burnin value is a number and within the dimensions of `test_mcmc`", {
  expect_is(burnin, "numeric")
  expect_is(test_mcmc[burnin,], "list")
  expect_is(unlist(test_mcmc[burnin,]), "numeric")
})
test_that("Number of chains hasn't changed", {
  expect_equal(length(test_mcmc), length(burned))
})
test_that("Burned-in chains have same dimensions", {
  expect_equal(dim(burned[[1]]), dim(burned[[2]]))
})
test_that("Burned-in chains are shorter than original", {
  expect_true(coda::niter(test_mcmc) > coda::niter(burned))
})
test_that("Burnin value is where chains actually converge", {
  # The artificial non-stationary segment lasts n1 iterations, so the
  # detected burn-in must lie beyond it.
  expect_true(burnin > n1)
})
|
9d62df5fd6cd850ff2433916b27703790c9b6e3f | 03da12705dd437ca9032ba1da0c8f91e8f802c9a | /man/learnSignatures.Rd | 47d9081b204fce48772abf93cc8cd621f522b764 | [
"MIT"
] | permissive | kevinrue/hancock | 8513986fa0cc04c845a48aaa7ff52cb55efcef84 | cd901dfe4a21786efe81feb7cde78ac00d2a013e | refs/heads/master | 2021-07-14T18:06:47.404423 | 2020-05-12T17:38:34 | 2020-05-12T17:38:34 | 158,458,183 | 9 | 2 | NOASSERTION | 2020-05-12T17:38:35 | 2018-11-20T22:16:05 | R | UTF-8 | R | false | true | 2,000 | rd | learnSignatures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learn-methods.R
\name{learnSignatures}
\alias{learnSignatures}
\title{Method to Learn Signatures from SummarizedExperiment}
\usage{
learnSignatures(se, assay.type = "counts",
method = c("PositiveProportionDifference", "PPD"), ...)
}
\arguments{
\item{se}{An object of class inheriting from "\code{\link{SummarizedExperiment}}".}
\item{assay.type}{A string specifying which assay values to use, e.g., \code{"counts"} or \code{"logcounts"}.}
\item{method}{Learning method. See section "Learning methods".}
\item{...}{Additional arguments affecting the learning method.}
}
\value{
A \code{\link{Sets}} object.
}
\description{
These method signatures learn gene set signatures optionally augmented with
(semi-)quantitative information for the prediction of sample and cell identities
in \code{SummarizedExperiment} objects.
}
\section{Learning methods}{
\describe{
\item{PositiveProportionDifference, PPD}{
\emph{Requires prior cluster membership information.}
This method computes the proportion of samples positive for each feature in each cluster,
and subsequently identifies for each cluster the features showing the maximal difference
between the detection rate in the cluster of interest and the detection rate in all other clusters.}
}
}
\examples{
# Example data ----
library(SummarizedExperiment)
nsamples <- 100
u <- matrix(rpois(20000, 2), ncol=nsamples)
rownames(u) <- paste0("Gene", sprintf("\%03d", seq_len(nrow(u))))
colnames(u) <- paste0("Cell", sprintf("\%03d", seq_len(ncol(u))))
se <- SummarizedExperiment(assays=list(counts=u))
# Example usage ----
se1 <- se
colData(se1)[, "cluster"] <- factor(sample(head(LETTERS, 3), ncol(se1), replace=TRUE))
gs <- learnSignatures(se1, method="PositiveProportionDifference", cluster.col="cluster")
relations(gs)
}
\seealso{
\code{\link[=learnMarkersByPositiveProportionDifference]{learnMarkersByPositiveProportionDifference()}}
}
\author{
Kevin Rue-Albrecht
}
|
4122cdddf458437eba1ad89b21dea184b715dac1 | b49fb76ade4a6bfcc163436857833b2fe9bc29c3 | /R/effects_apc.R | 133787a0e90cb8d866297d21ba75513f6d1dabbc | [] | no_license | volkerschmid/bamp | 8659109d5f399e8609cea05475261871a6bfd249 | f89888f9874f83a8fe6046d4ca4fbd8624d6f07f | refs/heads/master | 2023-02-23T18:11:51.839627 | 2023-02-15T09:39:17 | 2023-02-15T09:39:17 | 116,521,258 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,508 | r | effects_apc.R | #' Effects from Fitted APC Model
#'
#' @param object an apc object
#' @param mean logical. If TRUE, mean effects are computed
#' @param quantiles Scalar or vector of quantiles to compute (only if mean=FALSE)
#' @param update logical. If TRUE, the apc object including the effects is returned
#' @param ... Additional arguments will be ignored
#' @return List of age, period, cohort effects or apc object including effects (if update=TRUE)
#' @export
#' @examples
#' \dontrun{
#' data(apc)
#' model <- bamp(cases, population, age="rw1", period="rw1", cohort="rw1", periods_per_agegroup = 5)
#' effects(model)
#' }
effects.apc <- function(object, mean = FALSE, quantiles = 0.5, update = FALSE, ...)
{
  x <- object

  # Return the cached effects only when they were computed with the same
  # settings. (Bug fix: the original tested 'x$effect' although update=TRUE
  # stores under 'x$effects', and compared against 'attr(effects, ...)'
  # before any local 'effects' object existed — that resolved to base R's
  # 'effects' generic, whose "settings" attribute is NULL, making the check
  # vacuously TRUE and returning a stale cache regardless of settings.)
  if (!is.null(x$effects) &&
      identical(attr(x$effects, "settings"), c(mean, quantiles))) {
    return(x$effects)
  }

  # Summarise one set of MCMC samples: posterior means if mean=TRUE,
  # otherwise the requested posterior quantiles.
  summarise_samples <- function(samples) {
    smry <- summary(samples, quantiles = quantiles)
    if (mean) {
      smry$statistics[, 1]
    } else {
      smry$quantiles
    }
  }

  effects <- list("age"    = summarise_samples(x$samples$age),
                  "period" = summarise_samples(x$samples$period),
                  "cohort" = summarise_samples(x$samples$cohort))
  # Remember the settings used, so a later call can reuse the cached result.
  attr(effects, "settings") <- c(mean, quantiles)

  # update=TRUE returns the whole apc object with the effects attached.
  if (update) {
    x$effects <- effects
    return(x)
  }
  effects
}
d16b8f752a9b7892fcb38a101dc54b37598344e5 | 0af4345398b6672b4d64f92851815988b9d07437 | /R/bayes_prop_test.R | 006d1d3c7dbaca6e8cde47a0519978f7c4aed323 | [] | no_license | dpastoor/bayesian_first_aid | 66e7fdbe97e341d72293d93510b808bbb4f9b35d | 5a60873268fd003c37bd7c4c2fcf0fd6e1cd3efa | refs/heads/master | 2020-12-31T04:17:15.609733 | 2014-11-09T21:06:17 | 2014-11-09T21:06:17 | 27,091,680 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,972 | r | bayes_prop_test.R | #'Bayesian First Aid Alternative to a Test of Proportions
#'
#'\code{bayes.prop.test} estimates the relative frequency of success for two or
#'more groups using Bayesian estimation and is intended as a replacement for
#'\code{\link{prop.test}}.
#'
#'Given data on the number of successes and failures \code{bayes.prop.test}
#'estimates \ifelse{latex}{\eqn{\theta_{1...m}}}{\eqn{\theta}₁…ₘ}, the relative
#'frequencies of success for each of the \eqn{m} groups. The following model is
#'assumed for each group:
#'
#'\deqn{x \sim \mathrm{Binom}(\theta, n)}{x ~ Binomial(\theta, n)} \deqn{\theta
#'\sim \mathrm{Beta}(1, 1)}{\theta ~ Beta(1, 1)}
#'
#'
#'Here the prior on the \eqn{\theta}s is a non-informative \eqn{\mathrm{Beta}(1,
#'1)}{Beta(1, 1)} distribution which is identical to a \eqn{\mathrm{Uniform}(0,
#'1)}{Uniform(0, 1)} distribution. By \code{plot}ing and looking at a
#'\code{summary} of the object returned by \code{bayes.prop.test} you can get
#'more information about the shape of the posterior and the posterior predictive
#'distribution. \code{\link{model.code}} prints out the corresponding R code
#'underlying \code{bayes.prop.test} which can be copy-n-pasted into an R script
#'and modified, for example, changing the prior on \eqn{\theta}.
#'
#'The \code{print} and \code{plot} function will only work well with a small
#'number of groups (2 to 6). If you have more groups you might want to run the
#'model with a small number of groups and then print the model code using
#'\code{\link{model.code}} and fit the model using that code. The current model does
#'not assume any dependency between the groups, if this is an unreasonable assumption
#'you might want to modify the model code (from \code{\link{model.code}}) to
#'include a dependency between the groups (see
#'\href{http://lingpipe-blog.com/2009/09/23/bayesian-estimators-for-the-beta-binomial-model-of-batting-ability/}{here}
#'for an example).
#'
#'@param x a vector of counts of successes, a one-dimensional table with two
#' entries, or a two-dimensional table (or matrix) with 2 columns, giving the
#' counts of successes and failures, respectively.
#'@param n a vector of counts of trials; ignored if x is a matrix or a table.
#'@param comp.theta a vector of fixed relative frequencies of success to compare
#' with the estimated relative frequency of success. The length of
#' \code{comp.theta} must be the same as the number of groups specified by
#' \code{x}, and its elements must be greater than 0 and less than 1. This
#' argument fills a similar role as \code{p} in \code{\link{prop.test}}.
#'@param alternative ignored and is only retained in order to maintain
#' compatibility with \code{\link{prop.test}}.
#'@param cred.mass the amount of probability mass that will be contained in
#' reported credible intervals. This argument fills a similar role as
#' \code{conf.level} in \code{\link{prop.test}}.
#'@param correct ignored and is only retained in order to maintain compatibility
#' with \code{\link{prop.test}}
#'@param n.iter The number of iterations to run the MCMC sampling.
#'@param progress.bar The type of progress bar. Possible values are "text",
#' "gui", and "none".
#'@param p same as \code{comp.theta} and is only retained in order to maintain
#' compatibility with \code{\link{prop.test}}.
#'@param conf.level same as \code{cred.mass} and is only retained in order to
#' maintain compatibility with \code{\link{prop.test}}.
#'
#'
#'@return A list of class \code{bayes_prop_test} that contains information about
#' the analysis. It can be further inspected using the functions
#' \code{summary}, \code{plot}, \code{\link{diagnostics}} and
#' \code{\link{model.code}}.
#'
#' @examples
#'
#'
#' # Data from Muller, F. H., Tobakmissbrauch und Lungencarcinom,
#' # Zeit. f. Krebsforsch. 49, 57-85, 1939. One of the early papers
#' # investigating the relation between smoking and lung cancer.
#'
#' # Number of heavy smokers in one group of 86 lung cancer patients
#' # and one group of 86 healthy individuals.
#' no_heavy_smokers <- c(56, 31)
#' no_cases <- c(86, 86)
#'
#' bayes.prop.test(no_heavy_smokers, no_cases)
#'
#' # Save the return value in order to inspect the model result further.
#' fit <- bayes.prop.test(no_heavy_smokers, no_cases)
#' summary(fit)
#' plot(fit)
#'
#' # MCMC diagnostics (should not be necessary for such a simple model)
#' diagnostics(fit)
#'
#' # Print out the R code to run the model. This can be copy'n'pasted into
#' # an R-script and further modified.
#' model.code(fit)
#'
#'
#'@seealso \code{\link{bayes.binom.test}} for when you want to estimate the
#' relative frequency for only one group.
#'
#'@export
bayes.prop.test <- function (x, n, comp.theta = NULL, alternative = NULL, cred.mass = 0.95, correct = NULL, n.iter=15000, progress.bar="none", p, conf.level) {
  # 'alternative' and 'correct' exist only for call compatibility with
  # stats::prop.test(); they have no effect here.
  if(! missing(alternative)) {
    warning("The argument 'alternative' is ignored by bayes.prop.test")
  }
  if(! missing(correct)) {
    warning("The argument 'correct' is ignored by bayes.prop.test")
  }
  # 'p' and 'conf.level' are prop.test()-style aliases that, when given,
  # override 'comp.theta' and 'cred.mass' respectively.
  if(! missing(p)) {
    comp.theta <- p
  }
  if(! missing(conf.level)) {
    cred.mass <- conf.level
  }
  # Capture the caller's expressions for use in printed output.
  x_name <- deparse(substitute(x))
  n_name <- deparse(substitute(n))
  ### Begin slightly modified code from prop.test
  # Normalises the three accepted input forms to parallel vectors x (successes)
  # and n (trials); 'l' is the number of groups before NA filtering.
  DNAME <- deparse(substitute(x))
  if (is.table(x) && length(dim(x)) == 1L) {
    # One-dimensional table: entries are (successes, failures) of one group.
    if (dim(x) != 2L)
      stop("table 'x' should have 2 entries")
    l <- 1
    n <- sum(x)
    x <- x[1L]
  }
  else if (is.matrix(x)) {
    # Matrix/2-d table: one row per group, columns are successes and failures.
    if (ncol(x) != 2L)
      stop("'x' must have 2 columns")
    l <- nrow(x)
    n <- rowSums(x)
    x <- x[, 1L]
  }
  else {
    # Plain vectors of successes and trials.
    DNAME <- paste(DNAME, "out of", deparse(substitute(n)))
    if ((l <- length(x)) != length(n))
      stop("'x' and 'n' must have the same length")
  }
  # Drop groups with missing counts, then validate the remaining data.
  OK <- complete.cases(x, n)
  x <- x[OK]
  n <- n[OK]
  if ((k <- length(x)) < 1L)
    stop("not enough data")
  if (any(n <= 0))
    stop("elements of 'n' must be positive")
  if (any(x < 0))
    stop("elements of 'x' must be nonnegative")
  if (any(x > n))
    stop("elements of 'x' must not be greater than those of 'n'")
  # A single comparison value is recycled to all groups.
  if(length(comp.theta) == 1) {
    comp.theta <- rep(comp.theta, length(x))
  }
  if (is.null(comp.theta) && (k == 1))
    comp.theta <- 0.5
  if (!is.null(comp.theta)) {
    if (length(comp.theta) != l)
      stop("'comp.theta' must have the same length as 'x' and 'n' or be a single number")
    comp.theta <- comp.theta[OK]
    if (any((comp.theta <= 0) | (comp.theta >= 1)))
      stop("elements of 'comp.theta' must be in (0,1)")
  }
  if ((length(cred.mass) != 1L) || is.na(cred.mass) ||
        (cred.mass <= 0) || (cred.mass >= 1))
    stop("'cred.mass' must be a single number between 0 and 1")
  ### END code from prop.test

  # A single group is the one-sample case: delegate to bayes.binom.test.
  if(length(x) == 1) {
    return(bayes.binom.test(x, n, comp.theta, cred.mass = ifelse(is.null(cred.mass), 0.5, cred.mass),
                            n.iter = n.iter, progress.bar = progress.bar))
  }

  # Run the JAGS model; n.iter is the total, split over three chains.
  mcmc_samples <- jags_prop_test(x, n, n.chains = 3, n.iter = ceiling(n.iter / 3) , progress.bar=progress.bar)
  # With no comparison value given, 0.5 is used for the "%</>comp" summaries.
  if(is.null(comp.theta)) {
    temp_comp_val <- 0.5
  } else {
    temp_comp_val <- comp.theta
  }
  stats <- mcmc_stats(mcmc_samples, cred_mass = cred.mass, comp_val = temp_comp_val)
  # Append summaries for all pairwise theta differences.
  diff_stats <- mcmc_stats(create_theta_diff_matrix(as.matrix(mcmc_samples)))
  stats <- rbind(stats, diff_stats)
  # Assemble the result object consumed by print/plot/summary/diagnostics.
  bfa_object <- list(x = x, n = n, comp_theta = comp.theta, cred_mass = cred.mass,
                     x_name = x_name, n_name = n_name, data_name = DNAME,
                     mcmc_samples = mcmc_samples, stats = stats)
  class(bfa_object) <- c("bayes_prop_test", "bayesian_first_aid")
  bfa_object
}
# Builds a matrix of posterior differences theta[i] - theta[j] for every
# unordered pair of groups found in an MCMC sample matrix. One column per
# pair, named "theta_diff[i,j]"; one row per MCMC iteration (row names are
# deliberately not carried over from the input).
create_theta_diff_matrix <- function(samples_mat) {
  # Number of groups = number of "theta[" columns in the sample matrix.
  n_groups <- sum(str_count(colnames(samples_mat), "theta\\["))
  pairs <- combn(n_groups, 2)
  n_pairs <- ncol(pairs)
  diffs <- matrix(NA_real_, nrow = nrow(samples_mat), ncol = n_pairs)
  pair_names <- character(n_pairs)
  for (k in seq_len(n_pairs)) {
    g1 <- pairs[1, k]
    g2 <- pairs[2, k]
    diffs[, k] <- samples_mat[, paste0("theta[", g1, "]")] -
      samples_mat[, paste0("theta[", g2, "]")]
    pair_names[k] <- paste0("theta_diff[", g1, ",", g2, "]")
  }
  colnames(diffs) <- pair_names
  diffs
}
# JAGS model used by jags_prop_test(): for each group i, the success count is
# binomial with group-specific frequency theta[i] under a flat Beta(1, 1)
# prior, and x_pred[i] is a posterior predictive replicate of the count.
# (Runtime data — this string is passed verbatim to JAGS; do not edit lightly.)
prop_model_string <- "model {
  for(i in 1:length(x)) {
    x[i] ~ dbinom(theta[i], n[i])
    theta[i] ~ dbeta(1, 1)
    x_pred[i] ~ dbinom(theta[i], n[i])
  }
}"
# Fits the binomial model in prop_model_string by MCMC and returns the coda
# samples for the group frequencies ("theta") and the posterior predictive
# counts ("x_pred").
jags_prop_test <- function(x, n, n.chains=3, n.iter=5000, progress.bar="none") {
  # Initial theta of (x + 1) / (n + 2) is strictly inside (0, 1) even when
  # x equals 0 or n, which keeps the sampler away from the boundary.
  mcmc_samples <- run_jags(prop_model_string, data = list(x = x, n = n), inits = list(theta = (x + 1) / (n + 2)),
                           params = c("theta", "x_pred"), n.chains = n.chains, n.adapt = 0,
                           n.update = 0, n.iter = n.iter, thin = 1, progress.bar=progress.bar)
  mcmc_samples
}
# Builds the printable character matrix of pairwise group differences used by
# print.bayes_prop_test(). Each group occupies two display rows: the posterior
# median of theta[row] - theta[col] and, below it, the HDI as "[lo, up]".
format_group_diffs <- function(bfa_object) {
  s <- bfa_object$stats
  n_groups <- length(bfa_object$x)
  med_diff_mat <- matrix("", nrow = n_groups, ncol = n_groups)
  hdi_diff_mat <- matrix("", nrow = n_groups, ncol = n_groups)
  # The difference summaries are stored under row names "theta_diff[i,j]".
  diff_names <- rownames(s)[ str_detect(rownames(s), "theta_diff\\[")]
  for(diff_name in diff_names) {
    # Recover the group indices i and j from the row name.
    indices_match <- str_match(diff_name, "\\[(\\d+),(\\d+)\\]$")
    i <- as.numeric(indices_match[1,2])
    j <- as.numeric(indices_match[1,3])
    med_diff_mat[i, j] <- as.character(round(s[diff_name, "median"], 2) )
    hdi_diff_mat[i, j] <- paste("[", signif(s[diff_name, "HDIlo"], 2), ", ", signif(s[diff_name, "HDIup"], 2), "]", sep="")
  }
  # Interleave the median rows and HDI rows: rows 1,3,5,... are medians,
  # rows 2,4,6,... the matching HDIs.
  diff_mat <- matrix("", nrow = n_groups * 2, ncol = n_groups)
  for(i in seq_len(n_groups)) {
    diff_mat[1 + (i - 1) * 2,] <- med_diff_mat[i,]
    diff_mat[2 + (i - 1) * 2,] <- hdi_diff_mat[i,]
  }
  # Label only the median rows with the group number; HDI rows stay blank.
  rownames(diff_mat) <- rep(seq_len(n_groups), each = 2)
  rownames(diff_mat)[1:nrow(diff_mat) %% 2 == 0] <- ""
  rownames(diff_mat) <- paste0(" ", rownames(diff_mat))
  # Pad every cell (and the column headers) to a common centred width.
  diff_mat <- format(diff_mat, width = max(nchar(diff_mat)), justify = "centre")
  colnames(diff_mat) <- format(as.character(1:ncol(diff_mat)), width = max(nchar(diff_mat)), justify = "centre")
  # Drop the last group's (empty) row pair and the empty first column, since
  # only the upper triangle i < j is filled.
  diff_mat <- diff_mat[-c(nrow(diff_mat) - 1, nrow(diff_mat)), -1, drop=FALSE]
  diff_mat
}
# TODO
#' @method plot bayes_prop_test
#' @export
plot.bayes_prop_test <- function(x, ...) {
  samples <- as.matrix(x$mcmc_samples)
  # Throw away everything except the what we want to plot, the theta samples.
  samples <- samples[,str_detect(colnames(samples), "^theta\\[")]
  n_groups <- length(x$x)
  diff_samples <- create_theta_diff_matrix(as.matrix(x$mcmc_samples))
  # Build an n_groups x n_groups layout: panel i for group i on the diagonal,
  # and each pairwise difference (plus its mirrored sign) in the two
  # off-diagonal cells (i, j) and (j, i).
  layout_mat <- matrix( 0 , nrow=n_groups, ncol=n_groups)
  #layout_mat[,1] <- seq_len(n_groups)
  diag(layout_mat) <- seq_len(n_groups)
  old_par <- par(no.readonly = TRUE)
  # Fill the two triangles with consecutive odd/even panel numbers so that
  # each difference and its mirror end up opposite each other.
  layout_mat <- t(layout_mat)
  layout_mat[lower.tri(layout_mat)] <- seq(n_groups + 1, by = 2,length.out = (ncol(diff_samples)))
  layout_mat <- t(layout_mat)
  layout_mat[lower.tri(layout_mat)] <- seq(n_groups + 2, by = 2,length.out = (ncol(diff_samples)))
  layout(layout_mat)
  par( mar=c(3.5,2,2,2) , mgp=c(2.25,0.7,0) )
  # Common x-range for all theta histograms, based on the 0.1%/99.9% quantiles.
  post_xlim <- range(apply(samples, 2, quantile, probs = c(0.001, 0.999)))
  # Some rules for making the post_xlim nice, with a preference for showing endpoints of the scale
  xlim_length <- abs(diff(post_xlim))
  if( post_xlim[1] - xlim_length < 0) {
    post_xlim[1] <- 0
  }
  if(post_xlim[2] + xlim_length > 1) {
    post_xlim[2] <- 1
  }
  # Posterior of theta for each group (labels shown only on the first panel).
  plotPost(samples[,"theta[1]"], cex.lab = 1.5, xlab=bquote(theta[1]), main=paste("Rel. Freq. Group 1"),
           cred_mass= x$cred_mass, col="#5DE293" , show_median=TRUE, comp_val=x$comp_theta[1], xlim=post_xlim)
  for(i in 2:n_groups) {
    plotPost(samples[,paste0("theta[",i, "]")], cex.lab = 1.5, xlab=bquote(theta[.(i)]), main=paste("Group", i),
             cred_mass= x$cred_mass, col="#5DE293" , show_median=TRUE, comp_val=x$comp_theta[i], xlim=post_xlim, show_labels = FALSE)
  }
  # Common x-range for the difference panels, anchored at 0 when all the
  # mass lies on one side of it.
  diff_xlim <- range(apply(diff_samples, 2, quantile, probs = c(0.001, 0.999)))
  if(all(diff_xlim < 0)) {
    diff_xlim[2] <- 0
  } else if(all(diff_xlim > 0)) {
    diff_xlim[1] <- 0
  }
  # One pair of mirrored panels per group difference theta[i] - theta[j].
  for(i in 1:ncol(diff_samples)) {
    diff_name <- colnames(diff_samples)[i]
    # Recover the two group indices from the column name "theta_diff[i,j]".
    indices_match <- str_match(diff_name, "\\[(\\d+),(\\d+)\\]$")
    group_i <- as.numeric(indices_match[1,2])
    group_j <- as.numeric(indices_match[1,3])
    plotPost(diff_samples[,i], cex.lab = 1.5, xlab=bquote(theta[.(group_i)] - theta[.(group_j)]),
             main="", cred_mass= x$cred_mass, col="skyblue" , show_median=TRUE,
             comp_val=0, xlim=diff_xlim, show_labels = FALSE)
    plotPost(-diff_samples[,i], cex.lab = 1.5, xlab=bquote(theta[.(group_j)] - theta[.(group_i)]),
             main="", cred_mass= x$cred_mass, col="skyblue" , show_median=TRUE,
             comp_val=0, xlim=sort(-diff_xlim), show_labels = FALSE)
  }
  # Restore the caller's graphics state.
  par(old_par)
  invisible(NULL)
}
#' @export
# Prints a human-readable report of the fitted proportion test: the data,
# per-group frequency estimates with HDIs, pairwise group differences and,
# when a comparison value was supplied, the tail probabilities around it.
# (Fix: the printed title read "propotion"; corrected to "proportion".)
print.bayes_prop_test <- function(x, ...) {
  s <- format_stats(x$stats)
  cat("\n")
  cat("\tBayesian First Aid proportion test\n")
  cat("\n")
  cat("data: ", x$data_name, "\n", sep="")
  # Pad counts to a common width so successes and trials line up.
  pad_width <- max(nchar(as.character(c(x$x, x$n)))) + 1
  cat("number of successes: ", paste(str_pad(x$x, pad_width), collapse = ","), "\n", sep="")
  cat("number of trials: ", paste(str_pad(x$n, pad_width), collapse = ","), "\n", sep="")
  cat("Estimated relative frequency of success [", s[1, "HDI%"] ,"% credible interval]:\n", sep="")
  # One line per group; relies on the theta[i] rows coming first in 's' so
  # their positions coincide with the group indices.
  for(param_i in which(str_detect(rownames(s), "theta\\["))) {
    param <- paste("theta[", param_i, "]", sep="")
    cat(" Group ", param_i,": " ,s[param, "median"], " [", paste(s[param, c("HDIlo", "HDIup")], collapse = ", "),"]\n", sep = "")
  }
  group_diffs <- format_group_diffs(x)
  if(ncol(group_diffs) > 1) {
    # Three or more groups: show the full pairwise-difference table.
    cat("Estimated pairwise group differences (row - column) with", s[1, "HDI%"] ,"% cred. intervals:\n")
    cat(format("Group", width = 2 + nchar(rownames(group_diffs)[1]) * 2 + sum(nchar(colnames(group_diffs))),
               justify = "centre"), "\n", sep="")
    print(format_group_diffs(x), quote=FALSE)
  } else {
    # Exactly two groups: a single difference with its HDI.
    cat("Estimated group difference (Group 1 - Group 2):\n")
    cat(" ", str_trim(group_diffs[1,1]), " ",group_diffs[2,1], "\n", sep="")
  }
  if(! is.null(x$comp_theta)) {
    # Tail probabilities of each theta relative to its comparison value.
    cat("The prob. that the relative frequency of success is less/more than comp. val:\n")
    comp_table <- s[str_detect(rownames(s), "theta\\["), c("comp", "%<comp", "%>comp")]
    rownames(comp_table) <- paste(" Group ", 1:nrow(comp_table), ":", sep="")
    colnames(comp_table) <- c("comp. val.", " <", " >")
    print(format(comp_table, justify="centre"), quote=FALSE)
  }
  if(ncol(group_diffs) == 1) {
    # Two-group case: probability that each group has the larger frequency.
    cat("The relative frequency of success is larger for Group 1 by a probability\n")
    cat("of", s["theta_diff[1,2]", "%>comp"], "and larger for Group 2 by a probability of", s["theta_diff[1,2]", "%<comp"], ".\n")
  }
  cat("\n")
  invisible(NULL)
}
#' @export
# Prints a summary of the fitted object: the input data, a legend for the
# model parameters, the main posterior measures, and posterior quantiles.
summary.bayes_prop_test <- function(object, ...) {
  s <- round(object$stats, 3)
  cat("  Data\n")
  # Pad counts to a common width so successes and trials line up.
  pad_width <- max(nchar(as.character(c(object$x, object$n)))) + 1
  cat("number of successes: ", paste(str_pad(object$x, pad_width), collapse = ","), "\n", sep="")
  cat("number of trials: ", paste(str_pad(object$n, pad_width), collapse = ","), "\n", sep="")
  cat("\n")
  cat("  Model parameters and generated quantities\n")
  cat("theta[i]: the relative frequency of success for Group i\n")
  cat("x_pred[i]: predicted number of successes in a replication for Group i\n")
  cat("theta_diff[i,j]: the difference between two groups (theta[i] - theta[j])\n")
  cat("\n")
  cat("  Measures\n" )
  print(s[, c("mean", "sd", "HDIlo", "HDIup", "%<comp", "%>comp")])
  cat("\n")
  cat("'HDIlo' and 'HDIup' are the limits of a ", s[1, "HDI%"] ,"% HDI credible interval.\n", sep="")
  cat("'%<comp' and '%>comp' are the probabilities of the respective parameter being\n")
  cat("smaller or larger than ", s[1, "comp"] ,".\n", sep="")
  cat("\n")
  cat("  Quantiles\n" )
  print(s[, c("q2.5%", "q25%", "median","q75%", "q97.5%")] )
  invisible(NULL)
}
#' @export
# Prints MCMC convergence information and diagnostic measures for the fit,
# then draws the standard coda trace/density plots of the samples.
diagnostics.bayes_prop_test <- function(fit) {
  print_mcmc_info(fit$mcmc_samples)
  cat("\n")
  print_diagnostics_measures(round(fit$stats, 3))
  cat("\n")
  cat("  Model parameters and generated quantities\n")
  cat("theta: The relative frequency of success\n")
  cat("x_pred: Predicted number of successes in a replication\n")
  cat("theta_diff[i,j]: the difference between two groups (theta[i] - theta[j])\n")
  # Tighten the margins for the trace plots, restoring them afterwards.
  old_par <- par( mar=c(3.5,2.5,2.5,0.6) , mgp=c(2.25,0.7,0) )
  plot(fit$mcmc_samples)
  par(old_par)
  invisible(NULL)
}
# Model code for the Bayesian First Aid alternative to the test of proportions #
#' @export
# Prints runnable, self-contained R/JAGS code reproducing the analysis in
# 'fit', with the fit's own data baked in, so users can copy-paste and
# modify it. (Fix: removed a stray empty trailing argument in the
# deparse(fit$x) call.)
model.code.bayes_prop_test <- function(fit) {
  cat("### Model code for the Bayesian First Aid ###\n### alternative to the test of proportions ###\n")
  cat("require(rjags)\n\n")
  # Emit the data vectors as R literals.
  cat("# Setting up the data\n")
  cat("x <-", deparse(fit$x), "\n")
  cat("n <-", deparse(fit$n), "\n")
  cat("\n")
  # Print the body of the template function prop_model_code below.
  pretty_print_function_body(prop_model_code)
  invisible(NULL)
}
# Not to be run, just to be printed: model.code.bayes_prop_test() prints this
# function's body via pretty_print_function_body(), so the comments inside it
# are user-visible output — keep them as they should appear to the user. The
# placeholder call below is replaced with prop_model_string by
# inject_model_string() (see the assignment following this definition).
prop_model_code <- function(x, n) {
  # The model string written in the JAGS language
  BayesianFirstAid::replace_this_with_model_string
  # Running the model
  model <- jags.model(textConnection(model_string), data = list(x = x, n = n),
                      n.chains = 3, n.adapt=1000)
  samples <- coda.samples(model, c("theta", "x_pred"), n.iter=5000)
  # Inspecting the posterior
  plot(samples)
  summary(samples)
  # You can extract the mcmc samples as a matrix and compare the thetas
  # of the groups. For example, the following shows the median and 95%
  # credible interval for the difference between Group 1 and Group 2.
  samp_mat <- as.matrix(samples)
  quantile(samp_mat[, "theta[1]"] - samp_mat[, "theta[2]"], c(0.025, 0.5, 0.975))
}
prop_model_code <- inject_model_string(prop_model_code, prop_model_string) |
00fac2402c6e82f1c455fe96be0d2ae5d42ea209 | 31a4ce9df583305af3aa93432947259e1e245ebd | /Plot1.R | 811c2404f668bef84cdf1489ca6bb39ad9a4adc9 | [] | no_license | schornipatrick/ExData_Plotting1 | 4f9f2c9c6f20d4a294825d416f957653c7675719 | 47245b76bede6e8dee7f29edae65878182522eab | refs/heads/master | 2021-06-27T01:44:18.945420 | 2017-09-15T08:53:24 | 2017-09-15T08:53:24 | 103,351,752 | 0 | 0 | null | 2017-09-13T04:05:28 | 2017-09-13T04:05:28 | null | UTF-8 | R | false | false | 861 | r | Plot1.R | # Reading in complete Data
# Read the full dataset; fields are semicolon-separated.
AllData <- read.csv(file = "household_power_consumption.txt", sep = ";")
# Subsetting Data, using the rows
# Keep only the rows for 1-2 Feb 2007 (dates stored as d/m/Y strings).
startRow <- min(which(AllData$Date == "1/2/2007"))
endRow <- max(which(AllData$Date == "2/2/2007"))
MyData <- AllData[startRow:endRow, ] # Use of the dplyr package
# NOTE(review): the line above is plain base-R indexing, not dplyr.
# Convert 'Date' to Date format
MyData$Date <- as.Date(strptime(MyData$Date, format = "%d/%m/%Y"))
# Check for NAs, inscripted as "?"
# NOTE(review): '"?" %in% MyData' tests whether the string "?" equals one of
# the data frame's columns (as list elements), not whether any individual
# cell contains "?" — this check is presumably always 0; confirm.
sum("?" %in% MyData)
# Since sum is zero, we conclude there aren't any NAs.
## STARTING PLOT 1
# Making Data numeric
# NOTE(review): if this column was read as character/factor (e.g. because of
# "?" entries), as.numeric() may return factor codes rather than the measured
# values — verify the column's type after read.csv.
MyData$Global_active_power <- as.numeric(MyData$Global_active_power)
# Constructing Histogram
par(mfrow = c(1,1))
hist(MyData$Global_active_power, main = "Global Active Power", xlab = "Global active Power (kilowatts)", col = 'red')
# Save histogram
dev.print(png, 'Plot1.png', width = 480, height = 480)
|
9737779bd6a8d8954cbf387b8852d7b95cece1e0 | e972107b76ddaee9f9a9d07205edbad5c29d4fe7 | /man/opls_for_projection.Rd | e095c62cf016f5abd994c15f0495480eaf00e3b4 | [] | no_license | cran/resemble | 3d2fef81b344b7928b32eaf1cc11c37258970786 | 3fea782f3c3985eb53a3b867d04f2e9f4fe425a2 | refs/heads/master | 2023-04-30T13:40:39.686133 | 2023-04-20T20:30:02 | 2023-04-20T20:30:02 | 17,919,896 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,861 | rd | opls_for_projection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{opls_for_projection}
\alias{opls_for_projection}
\title{orthogonal scores algorithm of partial least squares (opls) projection}
\usage{
opls_for_projection(X, Y, ncomp, scale,
maxiter, tol,
pcSelmethod = "var",
pcSelvalue = 0.01,
algorithm = "pls",
xls_min_w = 3,
xls_max_w = 15)
}
\arguments{
\item{X}{a matrix of predictor variables.}
\item{Y}{a matrix of either a single or multiple response variables.}
\item{ncomp}{the number of pls components.}
\item{scale}{logical indicating whether \code{X} must be scaled.}
\item{maxiter}{maximum number of iterations.}
\item{tol}{limit for convergence of the algorithm in the nipals algorithm.}
\item{pcSelmethod}{if \code{regression = TRUE}, the method for selecting the
number of components.
Options are: \code{'manual'}, \code{'cumvar'} (for selecting the number of
principal components based on a given cumulative amount of explained
variance) and \code{'var'} (for selecting the number of principal components
based on a given amount of explained variance). Default is \code{'cumvar'}.}
\item{pcSelvalue}{a numerical value that complements the selected method
(\code{pcSelmethod}).
If \code{'cumvar'} is chosen (default), \code{pcSelvalue} must be a value
(larger than 0 and below 1) indicating the maximum amount of cumulative
variance that the retained components should explain. Default is 0.99.
If \code{'var'} is chosen, \code{pcSelvalue} must be a value (larger than 0
and below 1) indicating that components that explain (individually)
a variance lower than this threshold must be excluded. If \code{'manual'}
is chosen, \code{pcSelvalue} has no effect and the number of components
retrieved are the one specified in \code{ncomp}.}
\item{algorithm}{(for weights computation) a character string indicating
what method to use. Options are:
\code{'pls'} for pls (using covariance between X and Y),
\code{'mpls'} for modified pls (using correlation between X and Y) or
\code{'xls'} for extended pls (as implemented in BUCHI NIRWise PLUS software).}
\item{xls_min_w}{(for weights computation) an integer indicating the minimum window size for the "xls"
method. Only used if \code{algorithm = 'xls'}. Default is 3 (as in BUCHI NIRWise PLUS software).}
\item{xls_max_w}{(for weights computation) an integer indicating the maximum window size for the "xls"
method. Only used if \code{algorithm = 'xls'}. Default is 15 (as in BUCHI NIRWise PLUS software).}
}
\value{
a list containing the following elements:
\itemize{
\item{\code{coefficients}}{ the matrix of regression coefficients.}
\item{\code{bo}}{ a matrix of one row containing the intercepts for
each component.}
\item{\code{scores}}{ the matrix of scores.}
\item{\code{X_loadings}}{ the matrix of X loadings.}
\item{\code{Y_loadings}}{ the matrix of Y loadings.}
\item{\code{projection_mat}}{ the projection matrix.}
\item{\code{Y}}{ the \code{Y} input.}
\item{\code{variance}}{ a \code{list} containing two objects: \code{x_var}
and \code{y_var}.
These objects contain information on the explained variance for the \code{X}
and \code{Y} matrices respectively.}
\item{\code{transf}}{ a \code{list} containing two objects: \code{Xcenter}
and \code{Xscale}.}
\item{\code{weights}}{ the matrix of weights.}
}
}
\description{
Computes orthogonal socres partial least squares (opls)
projection with the NIPALS algorithm. It allows multiple response variables.
Although the main use of the function is for projection, it also retrieves
regression coefficients. NOTE: For internal use only!
}
\author{
Leonardo Ramirez-Lopez
}
\keyword{internal}
|
58900ea3d9f782ac415a213f4cbddd40cf9342e9 | 29585dff702209dd446c0ab52ceea046c58e384e | /hwriterPlus/R/latex.R | f51fc219c687d1faf7b0e7d787bebf16ad407da2 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,501 | r | latex.R | as.latex <- function(x, label = NULL,
inline = ifelse(is.null(label), TRUE, FALSE),
count = ifelse(is.null(label), FALSE, TRUE))
{
out <- list(alt = x, inline = inline, count = count, label = label)
class(out) <- "latex"
return(out)
}
## Writes the LaTeX fragment in 'ltx' to 'page' as HTML markup.
## Inline fragments are emitted as \( ... \); display fragments are wrapped
## in a centering <table>, optionally with a running equation number and an
## HTML anchor id derived from the label. The equation counter and label list
## live in .hwriterGlobalEnv.
hwriteLatex <- function(ltx, page = NULL,
                        table.attributes = NULL,
                        tr.attributes = NULL,
                        td.attributes = NULL, ...)
{
  ## cat() handles both files and standard output; "" means write to stdout.
  if (is.null(page)) page = ""
  ## Coerce plain input to a "latex" object (inline by default).
  if (! is(ltx, "latex")) ltx <- as.latex(ltx)
  if (ltx$inline){
    ## inline: directly there in the text (can't be counted or labeled)
    cat(paste("\\(", ltx$alt,"\\)", sep = ""),
        file = page, append = TRUE, sep = " ")
  } else {
    ## not inline: own living space (will be within a table to center it)
    if (ltx$count) {
      ## Increment the global equation counter stored in .hwriterGlobalEnv.
      eqnNum <- get("hwriterEquation", .hwriterGlobalEnv)
      eqnNum <- eqnNum + 1
      assign("hwriterEquation", eqnNum, .hwriterGlobalEnv)
      ## Anchor label: "eq:<label>" when a label was given, "eq:<number>"
      ## otherwise.
      if (is.null(ltx$label)){
        eqnLabel <- paste("eq:", eqnNum, sep = "")
      } else {
        eqnLabel <- paste("eq:", ltx$label, sep = "")
      }
      ## Record the label in the global equation list at position eqnNum.
      eqnList <- get("hwriterEquationList", .hwriterGlobalEnv)
      eqnList[eqnNum] <- eqnLabel
      assign("hwriterEquationList", eqnList, .hwriterGlobalEnv)
      ## write out equation as table with equation number:
      ## three cells — spacer | the equation | "(number)" with the anchor id.
      if (is.null(table.attributes)){
        table.attributes <- "border = '0' width = '90%'"
      }
      if (is.null(td.attributes)){
        td.attributes <- c("width = '50'",
                           "align = 'center'",
                           "align = 'right' width = '50'")
      }
      cat(paste("\n<br /><center><table ",
                table.attributes, "><tr ",
                tr.attributes, "><td ",
                td.attributes[1], "> </td><td ",
                td.attributes[2], ">\\[",
                ltx$alt, "\\]</td><td ",
                td.attributes[3], " id = '",
                eqnLabel,
                "'>(", eqnNum,
                ")</td></tr></table></center><br />",
                sep = ""),
          file = page, append = TRUE)
    } else {
      ## Uncounted display equation: a single centered cell, no number.
      if (is.null(table.attributes)){
        table.attributes <- "border = '0'"
      }
      cat(paste("<br /><center><table ",
                table.attributes, "><tr ",
                tr.attributes, "><td align = 'center'>\\[",
                ltx$alt, "\\]</td></tr></table></center><br />",
                sep = ""),
          file = page, append = TRUE)
    }
  }
}
|
45a8267aa83702caa9775a1ad852a83c443c4476 | 6e9b4b5b43bb146d1aef1ec47745a94cb1b91141 | /cachematrix.R | dba303b8c6e23c5ff763c003fec7fad1307f3da3 | [] | no_license | DasarathanSampath/ProgrammingAssignment2 | b5a7e96138e3f6931d8c4da520a03d948434e8fd | e3041b40d2b8fc54760bb43c3a3b25c1e4d9854f | refs/heads/master | 2020-08-18T06:35:37.591701 | 2019-10-17T10:07:18 | 2019-10-17T10:07:18 | 215,758,968 | 0 | 0 | null | 2019-10-17T09:52:42 | 2019-10-17T09:52:41 | null | UTF-8 | R | false | false | 1,606 | r | cachematrix.R | ## Coursera R-Programming assignment 2 - week 3
## Caching the Inverse of Matrix
## Matrix supplied to this function should be always invertible
## "makeCacheMatrix" function creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(mat = matrix()) {
#function variable "mat" sets to an empty matrix
# Assigns inverse value to NULL
inv <- NULL
# set function sets matrix to a new matrix(y) & reset inverse matrix to NULL, if any pre-calculated value
set <- function(y) {
mat <<- y
inv <<- NULL
}
#returns the matrix "mat"
get <- function() mat
#Sets the inverse matrix to inv
setinverse <- function(solve) inv <<- solve
#returns the inverse matrix inv
getinverse <- function() inv
#returns the list of above defined values
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## "cachesolve" function computes the inverse of special matrix returned by "makeCacheMatrix
## Matrix supplied to this function should be always invertible
## If the matrix is same & inverse has been already calculated, this will retrive from cache,
## else it will calculate a new inverse & cache it
## Return the inverse of the matrix held by a makeCacheMatrix() object.
##
## On a cache hit the stored inverse is returned immediately (after an
## informational message); on a miss the inverse is computed with solve(),
## written back into the cache, and returned. Extra arguments in ... are
## forwarded to solve(). The wrapped matrix is assumed to be invertible.
cacheSolve <- function(mat, ...) {
  cached <- mat$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute, memoize, and return a fresh inverse.
    fresh <- solve(mat$get(), ...)
    mat$setinverse(fresh)
    fresh
  } else {
    ## Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    cached
  }
}
91abb14db757caa5699017a9ee561b4622b50c42 | 54bf9bc76aaa7e1fec5961efb12bfb636fa90a2e | /inst/shiny/skeleSimShinyGUI/runSim.R | 95e154353bc80bbc3571492eb5d761ad76aa2cc4 | [] | no_license | christianparobek/skeleSim | 25d63dc3eeee6d8218d19e0f011229cfb843d053 | 0d61409497283ac1db129379b479639261695f83 | refs/heads/master | 2020-03-28T05:36:45.448623 | 2020-02-26T21:55:51 | 2020-02-26T21:55:51 | 32,469,895 | 3 | 9 | null | 2017-11-22T16:30:16 | 2015-03-18T16:16:29 | HTML | UTF-8 | R | false | false | 4,737 | r | runSim.R | output$runButton <- renderUI({
  ## Only offer the "Run simulation" button once the user has chosen a
  ## simulation root directory; req() cancels rendering while
  ## supportValues$simroot is still NULL.
  if (!is.null(req(supportValues$simroot)))
    actionButton("btnRun","Run simulation")
})
## "Save example inputs" button, gated the same way: it appears only
## after a simulation root directory has been chosen.
output$saveButton <- renderUI({
  if (!is.null(req(supportValues$simroot)))
    actionButton("btnSave","Save example inputs for each scenario")
})
output$rootText <- renderText({paste("Simulation inputs, outputs and work directory will be written in:", supportValues$simroot)})
## Main "Run simulation" handler. Saves the current ssClass parameter
## object to <label>.params.rdata, generates a standalone R batch script
## that loads it, runs runSim(), and saves the result, then launches that
## script in a detached background R session (nohup on unix, `start` on
## Windows) so the Shiny app stays responsive.
observeEvent(input$btnRun, {
  req(supportValues$simroot)
  # print("in run")
  if(!is.null(supportValues$objLabel)) {
    # print("past first null test")
    # print("supportValues$objLabel")
    ## create 'fnameLabel' and parameter filename (paramsFname);
    ## fnameLabel = <objLabel>.<timestamp>.<random int> -- the random
    ## suffix keeps runs started within the same minute distinct.
    supportValues$fnameLabel <- paste(supportValues$objLabel, format(Sys.time(), "%Y%m%d.%H%M"),round(runif(1,min=0,max=10000)), sep = ".")
    paramsFname <- paste(supportValues$fnameLabel, "params.rdata", sep = ".")
    paramsFname <- paste0(ifelse(is.null(supportValues$simroot),".",supportValues$simroot),"/",paramsFname)
    if (debug()) print(paramsFname)
    ## create parameter object based on user-defined 'objLabel' and save to file
    assign("ssClass", rValues$ssClass)
    save(list = "ssClass" , file = paramsFname) #this needs updating to be resiliant
    ## Report whether the parameter file actually landed on disk.
    output$txtSaveStatus <- renderText({
      if(file.exists(paramsFname)) {
        paste("Wrote skeleSim parameter file: '", paramsFname, "'", sep = "")
      } else {
        "Error writing files."
      }
    })
    #### do the running now
    if(!is.null(supportValues$fnameLabel)) {
      ## Filenames below are relative to simroot; the generated script
      ## setwd()s into simroot before loading them, so only scriptFname
      ## needs the full path.
      scriptFname <- paste(supportValues$fnameLabel, ".script.R", sep = "")
      paramFname <- paste(supportValues$fnameLabel, ".params.rdata", sep = "")
      outFname <- paste(supportValues$fnameLabel, ".skeleSim.out.rdata", sep = "")
      logFname <- paste(supportValues$fnameLabel, ".log", sep = "")
      scriptFname <- paste(supportValues$simroot,scriptFname,sep="/")
      ## NOTE(review): these two write()s use append=TRUE, but the
      ## write() of "library(skeleSim)" below omits append and therefore
      ## truncates the file, discarding them -- confirm intent.
      write("rm(list = ls())", file = scriptFname, append=TRUE)
      write("library(methods)", file = scriptFname, append = TRUE)
      ### Right now the way I'm going about things is to switch to the skeleSim root directory
      #### source all the relevant files, and then change to the directory where the simulation
      #### should be run (supportValues$simroot)
      write(paste0("library(skeleSim)"), file = scriptFname)
      write(paste0("library(adegenet)"),file = scriptFname, append=TRUE)
      write("getwd()",file = scriptFname, append=TRUE)
      cdcmd <- chartr("\\","/",paste0("setwd('",supportValues$simroot,"')")) #make this work on windows also
      write(cdcmd, file = scriptFname, append=TRUE)
      write("getwd()",file = scriptFname, append=TRUE)
      #### end of the sourceing
      ## Body of the generated script: load params, run the simulation,
      ## save the resulting ssClass object.
      line <- paste("load('", paramFname, "')", sep = "")
      write(line, file = scriptFname, append = TRUE)
      write("ls()", file = scriptFname, append = TRUE)
      write("ssClass <- runSim(ssClass)", file = scriptFname, append = TRUE)
      # write("ssClass@timing <- NULL", file = scriptFname, append = TRUE)
      line <- paste("save(ssClass, file = '", outFname, "')", sep = "")
      write(line, file = scriptFname, append = TRUE)
      cond <- TRUE
      if (debug()) print("about to run new R session")
      if (debug()) print(ifelse(supportValues$OS=="unix","Unix-based system","Windows-based system"))
      ## Launch the batch run without blocking the Shiny session.
      if (supportValues$OS=="unix") #
      {
        cmd <- paste("cd",supportValues$simroot,"; nohup R CMD BATCH --no-restore ", scriptFname)
        system(cmd, wait = FALSE)
      } else { #windows output put in shiny directory
        cmd <- paste("start R CMD BATCH --no-restore", scriptFname)
        shell(cmd,wait=F)
      }
      if (debug()) print("about to run system")
      #### output$txtRunStatus <- renderText({
      # if(cond) {
      # cmd <- paste("nohup R CMD BATCH --no-restore", scriptFname)
      # print("about to run system")
      # system(cmd, wait = FALSE)
      # "Simulation run commenced."
      # supportValues$fnameLabel <- NULL
      # # stopApp()
      # } else {
      # "Error starting simulations."
      # }
      # })
    }
  }
})
|
31de46fcc907d2756c114446ee3711ec8eef8d47 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ggvis/examples/shiny-ggvis.Rd.R | 8833bd4ab48ab19fb79020a4786a691d8ef587ce | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 565 | r | shiny-ggvis.Rd.R | library(ggvis)
### Name: shiny-ggvis
### Title: Connect a ggvis graphic to a shiny app.
### Aliases: shiny-ggvis bind_shiny bind_shiny_ui ggvisOutput
### ** Examples
## Run these examples only in interactive R sessions
if (interactive()) {
# Simplest possible app:
# ggvisOutput("p") reserves the plot slot in the UI and uiOutput("p_ui")
# holds the generated controls (here the span slider); bind_shiny("p",
# "p_ui") on the server side connects the ggvis pipeline to those two
# placeholders by id.
library(shiny)
runApp(list(
  ui = bootstrapPage(
    ggvisOutput("p"),
    uiOutput("p_ui")
  ),
  server = function(..., session) {
    mtcars %>%
      ggvis(~wt, ~mpg) %>%
      layer_points() %>%
      layer_smooths(span = input_slider(0, 1)) %>%
      bind_shiny("p", "p_ui")
  }
))
}
|
2ecaa077069c0cfd925f3414e39e18746213afb2 | 740d1e0f1c064e59f07c239593e8cca8bc5112f0 | /man/check_guru_key.Rd | 16bd41b5d9353fef7cfe5482b1b538ad6aa7a8e6 | [] | no_license | JovingeLabSoftware/BioradConfig | 189a4a3c320d0d0bf156c3c67237c1e39f35b273 | 6377c4553abab921027717d78995b2b02841f8cc | refs/heads/master | 2021-01-18T23:30:40.972700 | 2017-03-17T11:52:49 | 2017-03-17T11:52:49 | 40,320,343 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 402 | rd | check_guru_key.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guru-utils.R
\name{check_guru_key}
\alias{check_guru_key}
\title{Check if an existing API key works}
\usage{
check_guru_key(token)
}
\arguments{
\item{token}{The API token to test}
}
\description{
This function will test an API token from LabGuru - returning \code{TRUE} if
the token is valid and \code{FALSE} otherwise.
}
|
506a011f62beb623a4548b604c00b3d322f3eef3 | def539ffd56e59f2322a106be0b446d6c11cfd53 | /R/5. 군집분석_범주형.r | e5ebe6014b9fed23898cceffe5a414a34707cc35 | [] | no_license | hyejeongjang/playstore_analysis | a1d73ebdf09db3e38d6d8323d9fb5cd0087c1763 | c45a82699287adcad328c0b3d77963a1ea74ed43 | refs/heads/master | 2022-11-23T08:10:16.305992 | 2020-07-23T21:52:28 | 2020-07-23T21:52:28 | 282,060,248 | 0 | 0 | null | null | null | null | UHC | R | false | false | 1,826 | r | 5. 군집분석_범주형.r | library(tidyverse)
library(ca)
library(factoextra)
library(cluster)
library(igraph)
# Read the preprocessed Google Play data.
# NOTE(review): the file is read into `data`, but all later code uses
# `app` -- presumably this should assign to `app`; confirm.
data=read.csv("C:/Users/JANG/Documents/DUKSUNG/전공/정보통계학과/SENIOR/다변량 및 빅데이터분석/논문/prep_googleapp_use.csv", sep=",")
app %>% head
########### Categories covering the top 70% of apps (by cumulative share)
# (`cat` shadows base::cat; harmless here, but rename if cat() is needed later)
cat=app %>%
  group_by(Category) %>%
  summarise(n=n()) %>%
  arrange(desc(n)) %>%
  mutate(per=round(cumsum(n)/sum(n), 2)) %>%
  mutate(pper=round(n/sum(n), 2)) %>%
  filter(per<0.7)
############# Category x content-rating data frame (wide counts)
# NOTE(review): `Category == cat$Category` compares element-wise with
# recycling; `Category %in% cat$Category` is probably the intended
# filter -- confirm.
cat.rat=app %>%
  filter(Category == cat$Category) %>%
  select(Category, Content.Rating) %>%
  mutate(n=1) %>%
  group_by(Category, Content.Rating) %>%
  summarise(n=n()) %>%
  spread(Content.Rating, n)
############ Replace missing counts with zero
cat.rat[is.na(cat.rat)]=0
# Convert to a plain data frame
cat.rat=data.frame(cat.rat)
# Use the category as the row name
rownames(cat.rat)=cat.rat$Category
# Drop the first (Category) column
cat.rat=cat.rat[,-1]
cat.rat
################# Choose the optimal number of clusters
set.seed(123)
fviz_nbclust(cat.rat, FUN = kmeans, method = "wss")
fviz_nbclust(cat.rat, FUN = kmeans, method = "silhouette")
############## k-means cluster analysis
set.seed(1234)
km.cr=kmeans(cat.rat, centers = 3, nstart = 25)
p2 <- fviz_cluster(km.cr, geom = c("point", "text"), data = cat.rat) + ggtitle("Category and Content.Rating made 3 cluster")
p2
############## Hierarchical clustering dendrogram
res.dist <- dist(cat.rat, method = "euclidean")
res.hc <- hclust( d = res.dist, method = "ward.D2")
fviz_dend( res.hc, k = 3, # Cut into three groups
           cex = 0.5, # label size
           k_colors = c("#2E9FDF", "#00AFBB", "#E7B800"),
           color_labels_by_k = TRUE, # color labels by groups
           rect = TRUE # Add rectangle around groups
)
|
50dc095504fe00ee9b91d492ede7f588eac54e74 | 8105d46b2ae06b7bb76d3c0ab0fc195b687bd750 | /R/extract93.R | cb3596293e7f9551a30f22c8b46e7d56f1ee8e88 | [] | no_license | tnkocis/stReamflowstats | c8f0d8b905afccd40fc5a280f17378de4ba800bf | 0fc1c7ff1eb024e8434ee5898884e02e95fa7b51 | refs/heads/master | 2020-04-12T02:25:09.302694 | 2017-07-01T01:43:56 | 2017-07-01T01:43:56 | 34,279,048 | 0 | 3 | null | 2015-04-24T21:38:07 | 2015-04-20T18:37:04 | R | UTF-8 | R | false | false | 786 | r | extract93.R | # TODO: Add comment
#
# Author: tiffn_000
###############################################################################
## Collect the 1993-cohort gauges from seven batch workspaces.
## Each full_record_spbatch_<i>.RData supplies a list `spbatch` (one
## element per gauge); REDO_base.RData supplies `datetable`, whose
## `gauge` column defines which gauges to keep.
spbatch_93 <- vector("list",7)
for(i in 1:7){
  ## load() restores every object saved in the workspace and can clobber
  ## the loop index, so stash i in testnum2 and restore it afterwards.
  testnum2 <-i
  load(paste("C:\\Users\\tiffn_000\\Documents\\workspaces\\full_record_spbatch_",i,".RData",sep=""))
  load(paste("C:\\Users\\tiffn_000\\Documents\\workspaces\\REDO_base",".RData",sep=""))
  i <- testnum2
  ## NOTE(review): this preallocation is immediately overwritten by the
  ## next line, so it has no effect.
  spbatch_93[[i]] <- vector("list",length(which(datetable$gauge%in%names(spbatch))))
  spbatch_93[[i]] <- spbatch[which(names(spbatch)%in%datetable$gauge)]
}
## Flatten the seven per-batch lists into a single list and save it.
spbatch_93_unlist <- spbatch_93[[1]]
for(i in 2:7){
  spbatch_93_unlist <- append(spbatch_93_unlist,spbatch_93[[i]])
}
save("spbatch_93_unlist", file= "C:\\Users\\tiffn_000\\Documents\\workspaces\\REDO_93_spbatch.RData")
ad64c472e2de8f5a2efcd4a2fd5afe0f5837bc26 | 7307b6f375af3dd7e7afa7e1c8ac0b92872f9297 | /Code/Code_fragments.R | 4081f81ff09984cab7f02d60c2ac4ea089843c14 | [] | no_license | ploeckl/project_telephone | 317d3330c76217a4b8069cb7b1df7aed38410d71 | 80de7d334d85c69e65563f0bd53da22d1d76bbe3 | refs/heads/main | 2023-05-07T17:58:51.738733 | 2021-05-31T04:01:17 | 2021-05-31T04:01:17 | 314,726,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,419 | r | Code_fragments.R |
##Analysis 1896###############################################
## Spatial lag model (spdep::lagsarlm) of 1896 telephone line counts.
## Every regressor is interacted with town size (Y1896); the model is
## fitted on main-network towns that already have lines, with a listw
## weights object built from the corresponding submatrix of MatInvDistSq.
SpatModel1896<-as.formula(Lines1896~-1+Y1896+I(Y1896*InstallTime)+I(Y1896*MarketAccess1880)+I(Y1896*MarketSize1880)+I(Y1896^2)+I(Y1896*City)+I(Y1896*PopShare1896)+I(Y1896*Fringe)+I(Y1896*Border)+I(Y1896*Gov1896)+I(Y1896*Pub1896)+I(Y1896*Agriculture)+I(Y1896*EmpRatio95)+I(Y1896*IndexDisSim95)+I(Y1896*StateTax)+I(Y1896*LocalTax)+I(Y1896*RailStation)+I(Y1896*RailRevenues)+I(Y1896*RailWeight)+I(Y1896*PostRevenues)+I(Y1896*Participation)+I(Y1896*Zentrum)+I(Y1896*(Catholics-Zentrum))+I(Y1896*Liberal)+I(Y1896*Socialist))
SpatMatrix1896<-mat2listw(MatInvDistSq[Main==TRUE & Towns$Lines1896>0,Main==TRUE & Towns$Lines1896>0]) #make sure it fits
Estimation1896<-lagsarlm(SpatModel1896,data=Towns[Main==TRUE & Towns$Lines1896>0,],SpatMatrix1896,tol.solve=1.0e-24)
#SpatModel1905<-as.formula(Lines1905~-1+Y1905+I(Y1905*InstallTime)+I(Y1905*MarketAccess1880)+I(Y1905*MarketSize1880)+I(Y1905^2)+I(Y1905*City)+I(Y1905*PopShare1905)+I(Y1905*Fringe)+I(Y1905*Border)+I(Y1905*Gov1905)+I(Y1905*Pub1905)+I(Y1905*Agriculture)+I(Y1905*EmpRatio07)+I(Y1905*IndexDisSim07)+I(Y1905*StateTax)+I(Y1905*LocalTax)+I(Y1905*RailStation)+I(Y1905*RailRevenues)+I(Y1905*RailWeight)+I(Y1905*PostRevenues)+I(Y1905*Participation)+I(Y1905*Zentrum)+I(Y1905*(Catholics-Zentrum))+I(Y1905*Liberal)+I(Y1905*Socialist))
#SpatModel1900<-as.formula(Lines1900~-1+Y1900+I(Y1900*InstallTime)+I(Y1900*MarketAccess1880)+I(Y1900*MarketSize1880)+I(Y1900^2)+I(Y1900*City)+I(Y1900*PopShare1900)+I(Y1900*Fringe)+I(Y1900*Border)+I(Y1900*Gov1900)+I(Y1900*Pub1900)+I(Y1900*Agriculture)+I(Y1900*EmpRatio95)+I(Y1900*IndexDisSim95)+I(Y1900*StateTax)+I(Y1900*LocalTax)+I(Y1900*RailStation)+I(Y1900*RailRevenues)+I(Y1900*RailWeight)+I(Y1900*PostRevenues)+I(Y1900*Participation)+I(Y1900*Zentrum)+I(Y1900*(Catholics-Zentrum))+I(Y1900*Liberal)+I(Y1900*Socialist))
#SpatModel1905small<-as.formula(Lines1905~-1+Y1905+I(Y1905*InstallTime)+I(Y1905*MarketAccess1880)+I(Y1905*MarketSize1880)+I(Y1905^2)+I(Y1905*City)+I(Y1905*PopShare1905)+I(Y1905*Fringe)+I(Y1905*Border)+I(Y1905*Gov1905)+I(Y1905*Pub1905)+I(Y1905*Agriculture)+I(Y1905*EmpRatio07)+I(Y1905*IndexDisSim07)+I(Y1905*StateTax)+I(Y1905*LocalTax)+I(Y1905*RailStation)+I(Y1905*RailRevenues)+I(Y1905*RailWeight)+I(Y1905*PostRevenues)+I(Y1905*Participation)+I(Y1905*Zentrum)+I(Y1905*(Catholics-Zentrum))+I(Y1905*Liberal)+I(Y1905*Socialist))
#SpatMatrix1905<-mat2listw(MatInvDistSq[Main==TRUE,Main==TRUE]) #make sure it fits
#SpatMatrix1900<-mat2listw(MatInvDistSq[Main==TRUE & Towns$Lines1900>0,Main==TRUE & Towns$Lines1900>0]) #make sure it fits
#SpatMatrix1905small<-mat2listw(MatInvDistSq[Main==TRUE & Towns$Lines1900>0,Main==TRUE & Towns$Lines1900>0]) #make sure it fits
#City PopShare1905 Fringe Border
#log(InstallTime) Gov1905 Pub1905
#EmpRatio07 IndesDisSim07 Agriculture
#StateTax LocalTax
#PostRevenues RailStation RailWeight RailRevenues Nachnahme
#Participation Socialist Liberal Zentrum (Catholics-Zentrum)
######################################################
## Spatial market measures from the weight matrix MatInvDistSq:
##  - MarketAccess*Both: matrix product, i.e. each town's distance-
##    weighted sum of all towns' population (Y*) or line counts.
##  - MarketSize*Both: row sums of the element-wise product.
##    NOTE(review): `MatInvDistSq*Towns$Y...` recycles the vector
##    column-major, so entry (i,j) is weighted by Y[i], not Y[j] --
##    confirm that this asymmetry with the %*% version is intended.
Towns$MarketAccess1880Both<-MatInvDistSq%*%Towns$Y1880
Towns$MarketAccess1896Both<-MatInvDistSq%*%Towns$Y1896
Towns$MarketAccess1900Both<-MatInvDistSq%*%Towns$Y1900
Towns$MarketAccess1905Both<-MatInvDistSq%*%Towns$Y1905
Towns$MarketAccessLines1896Both<-MatInvDistSq%*%Towns$Lines1896
Towns$MarketAccessLines1900Both<-MatInvDistSq%*%Towns$Lines1900
Towns$MarketAccessLines1905Both<-MatInvDistSq%*%Towns$Lines1905
Towns$MarketSize1880Both<-rowSums(MatInvDistSq*Towns$Y1880)
Towns$MarketSize1896Both<-rowSums(MatInvDistSq*Towns$Y1896)
Towns$MarketSize1900Both<-rowSums(MatInvDistSq*Towns$Y1900)
Towns$MarketSize1905Both<-rowSums(MatInvDistSq*Towns$Y1905)
##################################################################
## Build the 1896 town-pair call data: read the call matrix, attach each
## end point's coordinates and 1896 telephone covariates from Towns, and
## expand to an all-pairs table (FullPairs1896) with pairwise distances
## and (floored) call counts.
Pairs1896<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Input\\Matrix1896.csv", header=TRUE)
colnames(Pairs1896)<-c("Town1","Town2","Calls","Connection")
Pairs1896<-as.data.frame(Pairs1896)
Pairs1896$Town1<-as.character(Pairs1896$Town1)
Pairs1896$Town2<-as.character(Pairs1896$Town2)
## Coordinates start at 0 so pairs whose towns are not found in Towns
## can be dropped later via the Lat > 0 filter.
Pairs1896$Lat1<-0
Pairs1896$Long1<-0
Pairs1896$Lat2<-0
Pairs1896$Long2<-0
## Look up both towns of each pair in Towns and copy their attributes.
for (i in 1:dim(Pairs1896)[1]){
  if (sum(Towns$Town==Pairs1896$Town1[i])>0){
    Pairs1896$Lat1[i]<-Towns$Latitude[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$Long1[i]<-Towns$Longitude[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$Participants1[i]<-Towns$Participants1896[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$Lines1[i]<-Towns$Lines1896[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$MainLines1[i]<-Towns$MainLines1896[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$Privat1[i]<-Towns$Privat1896[Towns$Town==Pairs1896$Town1[i]]
    ## NOTE(review): duplicate of the Lines1 assignment above.
    Pairs1896$Lines1[i]<-Towns$Lines1896[Towns$Town==Pairs1896$Town1[i]]
    Pairs1896$Size1[i]<-Towns$Y1896[Towns$Town==Pairs1896$Town1[i]]
  }
  if (sum(Towns$Town==Pairs1896$Town2[i])>0){
    Pairs1896$Lat2[i]<-Towns$Latitude[Towns$Town==Pairs1896$Town2[i]]
    Pairs1896$Long2[i]<-Towns$Longitude[Towns$Town==Pairs1896$Town2[i]]
    Pairs1896$Participants2[i]<-Towns$Participants1896[Towns$Town==Pairs1896$Town2[i]]
    Pairs1896$Lines2[i]<-Towns$Lines1896[Towns$Town==Pairs1896$Town2[i]]
    Pairs1896$MainLines2[i]<-Towns$MainLines1896[Towns$Town==Pairs1896$Town2[i]]
    Pairs1896$Privat2[i]<-Towns$Privat1896[Towns$Town==Pairs1896$Town2[i]]
    ## NOTE(review): duplicate of the Lines2 assignment above.
    Pairs1896$Lines2[i]<-Towns$Lines1896[Towns$Town==Pairs1896$Town2[i]]
    ## NOTE(review): Size1 uses Y1896 but Size2 uses Y1895 -- confirm
    ## which population year is intended.
    Pairs1896$Size2[i]<-Towns$Y1895[Towns$Town==Pairs1896$Town2[i]]
  }
}
## Keep only pairs where both towns were matched.
Pairs1896<-Pairs1896[Pairs1896$Lat1>0 & Pairs1896$Lat2>0,]
Pairs1896$Distance<-distaz(Pairs1896$Lat1,Pairs1896$Long1,Pairs1896$Lat2,Pairs1896$Long2)[[1]]$dist
## All-pairs matrices over Towns1896: line counts for each side,
## pairwise distance, and symmetric observed call counts.
matLines1<-matrix(0,dim(Towns1896)[1],dim(Towns1896)[1])
matLines2<-matrix(0,dim(Towns1896)[1],dim(Towns1896)[1])
matDistance<-matrix(0,dim(Towns1896)[1],dim(Towns1896)[1])
matCalls<-matrix(0,dim(Towns1896)[1],dim(Towns1896)[1])
for (i in 1:dim(Towns1896)[1]){
  matLines1[i,]<-Towns1896$Lines[i]
  matLines2[,i]<-Towns1896$Lines[i]
  for (j in 1:dim(Towns1896)[1]){
    matDistance[i,j]<-distaz(Towns1896$Latitude[i],Towns1896$Longitude[i],Towns1896$Latitude[j],Towns1896$Longitude[j])$dist
    if (sum(Pairs1896$Town1==Towns1896$Town[i] & Pairs1896$Town2==Towns1896$Town[j])>0){
      matCalls[i,j]<-Pairs1896$Calls[Pairs1896$Town1==Towns1896$Town[i] & Pairs1896$Town2==Towns1896$Town[j]]
      matCalls[j,i]<-Pairs1896$Calls[Pairs1896$Town1==Towns1896$Town[i] & Pairs1896$Town2==Towns1896$Town[j]]
    }
  }
}
## Floor every call count (including unobserved pairs, still 0) at 1000.
matCalls[matCalls<1000]<-1000
## Flatten the upper triangles (each unordered pair once) into a table.
FullPairs1896<-cbind(as.vector(matLines1[upper.tri(matLines1)==TRUE]),as.vector(matLines2[upper.tri(matLines2)==TRUE]),as.vector(matDistance[upper.tri(matDistance)==TRUE]),as.vector(matCalls[upper.tri(matCalls)==TRUE]))
FullPairs1896<-data.frame(FullPairs1896)
names(FullPairs1896)<-c("Lines1","Lines2","Distance","Calls")
###############################################################################################################
## Build the 1900 town-pair call data -- same construction as the 1896
## version: read the call matrix, attach town covariates, then expand
## to the all-pairs table FullPairs1900.
Pairs1900<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Input\\Matrix1900.csv", header=TRUE)
colnames(Pairs1900)<-c("Town1","Town2","Calls","Connection")
Pairs1900<-as.data.frame(Pairs1900)
Pairs1900$Town1<-as.character(Pairs1900$Town1)
Pairs1900$Town2<-as.character(Pairs1900$Town2)
## Coordinates start at 0 so unmatched towns can be filtered out below.
Pairs1900$Lat1<-0
Pairs1900$Long1<-0
Pairs1900$Lat2<-0
Pairs1900$Long2<-0
for (i in 1:dim(Pairs1900)[1]){
  if (sum(Towns$Town==Pairs1900$Town1[i])>0){
    Pairs1900$Lat1[i]<-Towns$Latitude[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$Long1[i]<-Towns$Longitude[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$Participants1[i]<-Towns$Participants1900[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$Lines1[i]<-Towns$Lines1900[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$MainLines1[i]<-Towns$MainLines1900[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$Privat1[i]<-Towns$Privat1900[Towns$Town==Pairs1900$Town1[i]]
    ## NOTE(review): duplicate of the Lines1 assignment above.
    Pairs1900$Lines1[i]<-Towns$Lines1900[Towns$Town==Pairs1900$Town1[i]]
    Pairs1900$Size1[i]<-Towns$Y1900[Towns$Town==Pairs1900$Town1[i]]
  }
  if (sum(Towns$Town==Pairs1900$Town2[i])>0){
    Pairs1900$Lat2[i]<-Towns$Latitude[Towns$Town==Pairs1900$Town2[i]]
    Pairs1900$Long2[i]<-Towns$Longitude[Towns$Town==Pairs1900$Town2[i]]
    Pairs1900$Participants2[i]<-Towns$Participants1900[Towns$Town==Pairs1900$Town2[i]]
    Pairs1900$Lines2[i]<-Towns$Lines1900[Towns$Town==Pairs1900$Town2[i]]
    Pairs1900$MainLines2[i]<-Towns$MainLines1900[Towns$Town==Pairs1900$Town2[i]]
    Pairs1900$Privat2[i]<-Towns$Privat1900[Towns$Town==Pairs1900$Town2[i]]
    ## NOTE(review): duplicate of the Lines2 assignment above.
    Pairs1900$Lines2[i]<-Towns$Lines1900[Towns$Town==Pairs1900$Town2[i]]
    ## NOTE(review): Size1 uses Y1900 but Size2 uses Y1895 -- confirm
    ## which population year is intended.
    Pairs1900$Size2[i]<-Towns$Y1895[Towns$Town==Pairs1900$Town2[i]]
  }
}
## Keep only pairs where both towns were matched.
Pairs1900<-Pairs1900[Pairs1900$Lat1>0 & Pairs1900$Lat2>0,]
Pairs1900$Distance<-distaz(Pairs1900$Lat1,Pairs1900$Long1,Pairs1900$Lat2,Pairs1900$Long2)[[1]]$dist
## All-pairs matrices over Towns1900.
matLines1<-matrix(0,dim(Towns1900)[1],dim(Towns1900)[1])
matLines2<-matrix(0,dim(Towns1900)[1],dim(Towns1900)[1])
matDistance<-matrix(0,dim(Towns1900)[1],dim(Towns1900)[1])
matCalls<-matrix(0,dim(Towns1900)[1],dim(Towns1900)[1])
for (i in 1:dim(Towns1900)[1]){
  matLines1[i,]<-Towns1900$Lines[i]
  matLines2[,i]<-Towns1900$Lines[i]
  for (j in 1:dim(Towns1900)[1]){
    matDistance[i,j]<-distaz(Towns1900$Latitude[i],Towns1900$Longitude[i],Towns1900$Latitude[j],Towns1900$Longitude[j])$dist
    if (sum(Pairs1900$Town1==Towns1900$Town[i] & Pairs1900$Town2==Towns1900$Town[j])>0){
      matCalls[i,j]<-Pairs1900$Calls[Pairs1900$Town1==Towns1900$Town[i] & Pairs1900$Town2==Towns1900$Town[j]]
      matCalls[j,i]<-Pairs1900$Calls[Pairs1900$Town1==Towns1900$Town[i] & Pairs1900$Town2==Towns1900$Town[j]]
    }
  }
}
## Floor every call count at 1000, as in the 1896 construction.
matCalls[matCalls<1000]<-1000
## Flatten the upper triangles into the final pair table.
FullPairs1900<-cbind(as.vector(matLines1[upper.tri(matLines1)==TRUE]),as.vector(matLines2[upper.tri(matLines2)==TRUE]),as.vector(matDistance[upper.tri(matDistance)==TRUE]),as.vector(matCalls[upper.tri(matCalls)==TRUE]))
FullPairs1900<-data.frame(FullPairs1900)
names(FullPairs1900)<-c("Lines1","Lines2","Distance","Calls")
############################################################3
## Commented-out scratch code for Pfalz-region market access/size
## measures (kept for reference).
#MatMAPfalz<-MatInvDist*PfalzTowns
#MarketAccessPfalz<-matrix(0,nrow=max(Towns$InstallTime),ncol=dim(Towns)[1])
#for (i in 1:(dim(MarketAccessPfalz)[1]-1)){MarketAccessPfalz[i+1,]<-colSums(MatMAPfalz*TeleMonth[i,])}
#MatUsedPfalz<-(t(t(MatInvDist)*PfalzTowns))
#MarketSizePfalz<-matrix(0,nrow=max(Towns$InstallTime),ncol=dim(Towns)[1])
#for (i in 1:dim(MarketSizePfalz)[1]){MarketSizePfalz[i,]<-colSums(t(MatUsedPfalz*PopMonth[i+(301-dim(MarketSizePfalz)[1]),]) *(TeleMonth[i,]>0))} #Consider replacing telemonth indicator with telemonth amount
## NOTE(review): the next two lines are orphaned loop-body fragments --
## they reference i, j and Row, which are not defined at this point, so
## they will error if this file is run top to bottom.
TownsHazardMA[j,2]<-MarketAccessPfalz[j-Row[i,1]+1,i]
TownsHazardMA[j,4]<-MarketSizePfalz[j-Row[i,1]+1,i]
#########################################
#for (i in 1:dim(MarketSizeMatrix)[1]){MarketPopMatrix[i,]<-colSums(t(MatInvDistTel*PopMonth[i+(301-dim(MarketSizeMatrix)[1]),]) *(TeleMonth[i,]>0))}
## Covariate columns kept for the TownsHazard* data set.
TownsHazardCons<-Towns[,c("Town","Bezirk","Region","PostOffice","Bahnbezirk","PostBahn","PostRevenues","RailStation","RailRevenues","RailWeight","EmpRatio82","IndexDisSim82","Y1880","MarketAccess1880Both","MarketAccess1880","MarketSize1880","MarketDistance1880","PopShare1880","City", "Fringe","Border","StateTax", "LocalTax","Agriculture","Participation","Zentrum", "Socialist","Liberal","Catholics","DifCatholicsZentrum","InstallTime","InstallMonth") ]
|
438264a178adcd608e01bdb1bcc7bc2f60638978 | ed665b38ab00b9e7aeb84db4300d4dabdb05f6eb | /virtualenv/man/ve_deactivate.Rd | 0bcbe55e75928af732b588fc4b6225c43b210eaf | [] | no_license | beringresearch/ABC | b13e899812a4813d7fc831320d7518092e687de1 | 5ccde87da481f7a116e418cb1dc8c56c3a5a9ee0 | refs/heads/master | 2021-01-20T14:35:24.114781 | 2018-02-09T08:00:49 | 2018-02-09T08:00:49 | 82,760,779 | 2 | 1 | null | 2018-02-09T08:00:50 | 2017-02-22T04:31:28 | HTML | UTF-8 | R | false | true | 333 | rd | ve_deactivate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ve_deactivate.R
\name{ve_deactivate}
\alias{ve_deactivate}
\title{Deactivate current virtual environment}
\usage{
ve_deactivate()
}
\description{
Upon deactivation, the R environment is set to the default package environment.
All attached packages are unloaded.
}
|
ffc6a00479a597c5edc023a8901c420c91b5fb16 | 8675b30482bb05ef64e8bc1935319dc9a6928e89 | /generalized_linear_models.R | 9545f5de26a81a5ea5de235c2b66e2bc1e7c4429 | [] | no_license | ncanna2/linear_statistical_models | e9f29b569bae0a8fc52fc54b351f78f442ee97ab | cee144319c49494fdfd20a3887953aacec68b96d | refs/heads/main | 2023-05-01T04:27:46.582984 | 2021-05-20T00:48:37 | 2021-05-20T00:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,683 | r | generalized_linear_models.R | #********************************************************************************
#
# General Linear Models
#
#********************************************************************************
#************************************
# Reading in data
#************************************
library(ggplot2)
library(ggpubr)
library(ggbiplot)
require(dplyr)
library(ade4)
library(lattice)
library(ggfortify)
library(ggResidpanel)
library(MASS)
library("GGally")
# Set working directory
datadir <- "~/Desktop/Fall 2020 Classes/SYS4021/Data/Spam"
sourcedir <-"~/Desktop/Fall 2020 Classes/SYS4021/Source"
spam <- read.table(paste(datadir,"/Spam.txt", sep=""), sep = " ", header = F)
#*****************************************
# Source code
#*****************************************
setwd(sourcedir)
source("AccidentInput.R")
source("SPM_Panel.R")
source("PCAplots.R")
source("FactorPlots.R")
source("ROC.R")
source("pc.glm.R")
source("TestSet.R")
#**************************************************************
#
# Graphical Analysis
#
#**************************************************************
# Look at the data: dimensions, per-column summaries, and the class
# split. V58 is the response; the proportion calculation below treats
# it as a 0/1 indicator.
dim(spam)
summary(spam)
# Which variable is the response variable?
table(spam$V58)
# What proportion is spam?
sum(spam[,58])/length(spam[,58])
sum(spam[,58])
length(spam[,58])
#***************************************************************
#
# Log of Predictors
#
#***************************************************************
# Repeat the above graphical analysis with a log transform of the
# predictor variables.
# The 0.1 offset keeps zero counts finite under the log.
Lspam <- log(spam[,-58] + .1)
# Carry the untransformed response back into column 58.
Lspam[,58] <- spam[,58]
# Obtain boxplots with variables 1-9 vs. the response.
# Which variables are more discriminatory?
# assign() materialises plots V1..V9 in the calling environment so that
# ggarrange() below can reference them by name.
# NOTE(review): aes_string() is deprecated in current ggplot2.
for(i in 1:9)
{
  assign(paste0("V", i), ggplot(data = Lspam, aes_string(x=as.factor(Lspam$V58),y=Lspam[,i])) +
           geom_boxplot(fill= "steelblue") +
           ggtitle(paste("V", i, sep = "")))
}
ggarrange(V1,V2,V3,V4,V5,V6,V7,V8,V9,ncol=3,nrow=3)
#1. Obtain box plots for log transforms of variables 20-28 with variable 58.
for(i in 20:28)
{
  assign(paste0("V", i), ggplot(data = Lspam, aes_string(x=as.factor(Lspam$V58),y=Lspam[,i])) +
           geom_boxplot(fill= "steelblue") +
           ggtitle(paste("V", i, sep = "")))
}
ggarrange(V20,V21,V22,V23,V24,V25,V26,V27,V28,ncol=3,nrow=3)
dev.off()
#***************************************************************
#
# Scatterplot matrix
#
#***************************************************************
# Scatterplot matrix of the response (V58) against predictors V49-V57.
uva.pairs(Lspam[,c("V58", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56", "V57")])
#****************************************************
#
# Principal Components
#
#****************************************************
# Obtain the principal components for variables 1-57.
# Look at the biplot and explain what you see.
# cor = T performs the decomposition on the correlation matrix
# (i.e. on standardized variables).
spam.pca = princomp(Lspam[,1:57], cor = T)
biplot(spam.pca)
ggbiplot(spam.pca, varname.size = 5, labels=row(spam)[,1])
cumplot(spam.pca, col = "grey")
## Scatter the observations on two principal components, coloured by a
## binary outcome: res == 0 in blue (pch1), res == 1 in red (pch2).
## Works with both princomp ($scores) and prcomp ($x) objects; axis
## limits are taken from the full score range so both groups fit.
biplot.fact <- function(pc.obj, res, comp = c(1,2), pch1 = 19, pch2 = 18)
{
  if ("scores" %in% names(pc.obj)){ # princomp object
    plot(pc.obj$scores[res == 0,comp[1]], pc.obj$scores[res == 0,comp[2]], col = "blue", xlab = paste("Comp", comp[1]), ylab = paste("Comp", comp[2]), ylim = c(min(pc.obj$scores[,comp[2]]), max(pc.obj$scores[,comp[2]])),
         xlim = c(min(pc.obj$scores[,comp[1]]), max(pc.obj$scores[,comp[1]])), pch = pch1)
    points(pc.obj$scores[res == 1,comp[1]], pc.obj$scores[res == 1,comp[2]], col = "red", pch = pch2)
  }else{ # prcomp object
    plot(pc.obj$x[res == 0,comp[1]], pc.obj$x[res == 0,comp[2]], col = "blue", xlab = paste("Comp", comp[1]), ylab = paste("Comp", comp[2]), ylim = c(min(pc.obj$x[,comp[2]]), max(pc.obj$x[,comp[2]])),
         xlim = c(min(pc.obj$x[,comp[1]]), max(pc.obj$x[,comp[1]])), pch = pch1)
    points(pc.obj$x[res == 1,comp[1]], pc.obj$x[res == 1,comp[2]], col = "red", pch = pch2)
  }
}
## ggplot2 version of biplot.fact: scatter two principal components with
## points coloured by factor(res) and legend labels supplied by the
## caller. Handles both princomp ($scores) and prcomp ($x) objects and
## returns the ggplot object (so it can be printed or further modified).
biplot.fact.gg <- function(pc.obj, res, labels, comp = c(1,2))
{
  if ("scores" %in% names(pc.obj)){ # princomp object
    ggplot(data = data.frame(pc.obj$scores), aes(x=pc.obj$scores[,comp[1]], y=pc.obj$scores[,comp[2]])) +
      geom_point(aes(color = factor(res))) + xlab("Comp 1") + ylab("Comp 2") +
      theme(legend.title = element_blank()) + scale_color_discrete(labels=labels)
  }else{ # prcomp object
    ggplot(data = data.frame(pc.obj$x), aes(x=pc.obj$x[,comp[1]], y=pc.obj$x[,comp[2]])) +
      geom_point(aes(color = factor(res))) + xlab("Comp 1") + ylab("Comp 2") +
      theme(legend.title = element_blank()) + scale_color_discrete(labels=labels)
  }
}
# PCA scatter of spam vs. ham, with a manual legend.
biplot.fact(spam.pca, Lspam[,58])
legend(-30, 10, legend = c("Spam", "Ham"), pch = c(18, 19), col = c("red", "blue"))
#****************************************************
#
# General Linear Models
#
#****************************************************
#*
# Reduced predictor set: columns 1-10 and 47-57 plus the response V58.
Lspam.short = Lspam[,c(1:10,47:58)]
#Creating main effects model with just the first 10 and last 10 (48:57) predictor variables each of which has been log transformed with a 0.1 offset
# NOTE(review): with the response supplied as the vector Lspam[,58], the
# "." on the right-hand side expands to every column of Lspam.short --
# including V58 itself -- so the response also appears as a predictor.
# The refit below with V58~. avoids this; confirm which is intended.
Lspam.glm.main <- glm(Lspam[,58]~., data = Lspam.short, family = binomial)
summary(Lspam.glm.main)
# Intercept-only model for the model utility (chi-square) test.
Lspam.null <- glm(Lspam[,58]~1, data = Lspam, family = binomial)
anova(Lspam.null, Lspam.glm.main, test = "Chi")
#Using drop1() function from the MASS library to compute significance tests for each term in the main effects model using just the log transformed first and last 10 predictors
Lspam.glm.main <- glm(V58~., data = Lspam.short, family = binomial)
summary(Lspam.glm.main)
# NOTE(review): `dspam` is not defined anywhere in this file --
# presumably Lspam.short was meant; confirm.
drop1(Lspam.glm.main, response~., test = "Chi", data = dspam)
# Partial likelihood test of dropping V4.
spam.no4 <- update(Lspam.glm.main, .~.-V4, data = Lspam.short)
anova(spam.no4, Lspam.glm.main, test = "Chi")
# Percent change in the odds per unit change in the predictor behind the
# 5th coefficient: (exp(beta) - 1) * 100.
(exp(Lspam.glm.main$coefficients[5])-1)*100
Lspam.short[1,]
# Log-odds, odds, and odds with V4 set to 1 for the first observation.
predict(Lspam.glm.main, newdata = Lspam.short[1,])
exp(predict(Lspam.glm.main, newdata = Lspam.short[1,]))
exp(predict(Lspam.glm.main, newdata = data.frame(Lspam.short[1,], V4 = 1)))
#Predictions in the original dataset for main effects
spam.log.predict <- predict(Lspam.glm.main, type = "response")
score.table(spam.log.predict, spam[,58], 0.5)
#Stepwise
Lspam.glm.main <- glm(V58~., data = Lspam.short, family = binomial)
step.Lspam.glm.main <- step(Lspam.glm.main, data = Lspam.short, family = binomial)
#Partial likelihood test
anova(step.Lspam.glm.main, Lspam.glm.main, test = "Chi")
#Predictions in the original dataset for stepwise
spam.log.predict.step <- predict(step.Lspam.glm.main, type = "response")
score.table(spam.log.predict.step, spam[,58], 0.5)
#**************************************************
#
# Evaluate performance with AIC
#
#**************************************************
# Lower AIC indicates the preferred model.
AIC(step.Lspam.glm.main)
AIC(Lspam.glm.main)
#*****************************
#
# ROC Curves
#
#*****************************
# plot.roc.gg / lines.roc.gg come from the sourced helper scripts
# (ROC.R); overlay the stepwise model's curve on the main-effects curve.
roc.plot.gg <- plot.roc.gg(spam.log.predict, spam[,58], "Main Effects")
roc.plot.gg <- lines.roc.gg(roc.plot.gg, spam.log.predict.step, spam[,58], "Step")
roc.plot.gg
#****************************************************
#
# GLM Principal Components Regression
#
#****************************************************
# Principal components of the reduced predictor set.
# NOTE(review): Lspam.short includes the response column V58, so the PCA
# is computed over the response as well -- confirm intent.
glm.spam.pca = princomp(Lspam.short, cor = T)
cumplot(glm.spam.pca, col = "grey")
# Logistic regression on the PCs explaining 98% of the variance
# (pc.glm / pc.null come from the sourced pc.glm.R helpers).
spam.pca.glm98 <- pc.glm(glm.spam.pca, 98, spam[,58])
summary(spam.pca.glm98)
# Do a model utility test for model that uses
# PCs that account for 98% of the variance
spampca.null <- pc.null(glm.spam.pca, 98, spam[,58])
anova(spampca.null, spam.pca.glm98, test = "Chi")
# Refit (repeats the two lines above) and score in-sample predictions.
glm.spam.pca = princomp(Lspam.short, cor = T)
spam.pca.glm98 <- pc.glm(glm.spam.pca, 98, spam[,58])
glm.spam.pca.predict <- predict(spam.pca.glm98, type = "response")
score.table(glm.spam.pca.predict, spam[,58], 0.5)
5f638892e326fc1baedddf439176a06c02d9c6bd | 6eb75f8d342ca6b6b40c37147829cd1f8fb48989 | /man/softnum.Rd | fbb9f4caaedaaa6c3fe3fce3d4106beb18e2ae90 | [] | no_license | xanoob/crabrawlR | b820c94746a80086ef678e551e0c724f2448fa22 | 9af05b5c26740a03ae7ccb15e4c1bf48429ac09a | refs/heads/master | 2020-04-03T13:24:49.838067 | 2018-11-12T04:16:19 | 2018-11-12T04:16:19 | 155,283,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 869 | rd | softnum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crabrawlR_fun.R
\name{softnum}
\alias{softnum}
\title{Soft core taxa value threshold--95\% from Kaas et al 2012
Up to what \% of taxa can have this gene absent to qualify for 'soft-core' definition?.
Returned int is \# of taxa that can have zeros in column (gene family) to qualify as 'soft-core'.}
\usage{
softnum(taxct)
}
\arguments{
\item{taxct}{numeric vector of per-taxon gene-family counts (e.g. one column of a pangenome matrix)}
}
\value{
numeric int, max # of taxa that can have genes missing
}
\description{
Soft core taxa value threshold--95\% from Kaas et al 2012
Up to what \% of taxa can have this gene absent to qualify for 'soft-core' definition?.
Returned int is \# of taxa that can have zeros in column (gene family) to qualify as 'soft-core'.
}
\examples{
softnum(PangenomeMatrix[,1])
}
\keyword{matrix}
\keyword{pangenome,}
|
0dee5019240585b953e87c0d2104b0c64d308f8e | 547879970637764007739b3abf0000c7075f835d | /scripts/R/Heatmap.R | d760ab357d2b63a694f7193fd41f2913101b32da | [] | no_license | natalytack/Intoshia_variabili_genome | 5a79014d30e2cd45224700447e31cb2253236d9d | 3d1cf9ff2f401b1eec79dbbd10cfca08dd91f522 | refs/heads/master | 2022-09-18T13:43:20.278496 | 2020-06-03T22:19:54 | 2020-06-03T22:19:54 | 269,199,612 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 735 | r | Heatmap.R | library(readxl)
library(reshape2)
# Read sheet 2 of the metabolic-pathway workbook.
# NOTE(review): "*.xls" is passed as a literal path here -- read_xls()
# does not glob; replace with the actual file name.
all_metabol <- read_xls("*.xls",sheet = 2)
data <- as.data.frame(all_metabol)
# Reshape to wide: one row per Pathway, one column per species, cell
# values taken from the `genes` column.
data<-(dcast(data, Pathway ~ species, value.var="genes"))
rownames(data)<-data$Pathway
# Drop the Pathway column now that it is the row name.
data<-data[,-1]
# Flag pathways where at least one species has a zero count ...
data$zero<-rowSums(data!=0)<ncol(data)
# ... and keep only pathways present in every species.
data<-data[data$zero==FALSE,]
# Drop the helper column (assumes four species columns, so `zero` is
# column 5 -- confirm if the species count changes).
data<-data[,-5]
data<-as.matrix(data)
library(pheatmap)
# Heatmap 1: row-scaled colours, raw counts as cell labels.
pheatmap(data,cluster_cols = FALSE,cluster_rows = FALSE,scale = "row",cellwidth = 25,display_numbers = data)
# Heatmap 2: unscaled colours.
pheatmap(data,cluster_cols = FALSE,cluster_rows = FALSE,scale = "none",cellwidth = 25,display_numbers = data)
# Heatmap 3: log-transformed colours, still labelled with raw counts.
logdata<-log(data)
pheatmap(logdata,cluster_cols = FALSE,cluster_rows = FALSE,scale = "none",cellwidth = 25,display_numbers = data)
|
77957ab6ac8497c716a749e00dd8ed8ac40879b8 | 74c6ddfd01a4d5819349ac47d4ed6dafeec65bf4 | /crit_think_mod_5.R | 0faa6eb87e9d3c2f084a71763759b462b5b0db9d | [] | no_license | DCCP80/Foundation-of-Data-Analytics | 5d26c432b9288691e1e9f02aa68822450c98f036 | 813c8eab175fc110d32f7eac05096c3d23360e5a | refs/heads/master | 2020-05-24T14:58:29.595503 | 2019-08-05T16:16:39 | 2019-08-05T16:16:39 | 187,320,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 580 | r | crit_think_mod_5.R | #Hypothesis testing
# Read the county sample data in as a data frame.
samplesbycounty= as.data.frame(
  read.csv("C:/Users/damrine/Desktop/STUFF/DATA/Samples_by_County.csv")
)
# Inspect the first ten rows.
samplesbycounty[1:10,]
# Work with the third column only: percent of samples by county.
prcnt<-samplesbycounty[,3]
# Strip the "%" sign (the column is read as a factor) and convert to numeric.
# NOTE(review): dividing by 100 rescales the values to 0-1, but the t-test
# below uses mu = 50, which assumes a 0-100 scale -- confirm which is intended.
prcnt_test<-as.numeric(sub("%","",prcnt))/100
# Check the converted data.
prcnt_test
# One-sample t-test: H0 mean >= 50 vs H1 mean < 50 (alternative = "less").
t.test(prcnt_test,mu=50, alternative = "less") |
3c9afabc6d98d34567a0ed7f23c8453cf6f62539 | 815b653a737474b62b6288da8dff2844430417bb | /R/data.R | 2bca0e6b7832bea9cb10b656d220864bef76f4e9 | [] | no_license | cran/opentripplanner | 6c7d7ab5d5f8248d43607a33fc920652437df2ea | bd9469eb61b88d2638ca48cc59bfe30a1561dcd1 | refs/heads/master | 2023-02-13T21:33:46.252919 | 2023-02-02T16:30:02 | 2023-02-02T16:30:02 | 236,634,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 517 | r | data.R | #' Example JSON for driving
#'
#' Example JSON response from OTP
#' This is used for internal testing and has no use
#'
#' @format json
"json_example_drive"
#' Example JSON for transit
#'
#' Example JSON response from OTP
#' This is used for internal testing and has no use
#'
#' @format json
"json_example_transit"
#' Example JSON for driving long distance
#'
#' Example JSON response from OTP
#' This is used for internal testing and has no use
#'
#' @format json
"json_example_long_drive"
|
7a35af59c20c7b8561cf61b6b5080ddab71c2fff | 597afb6139f2f41f197e180efbe177811f4f6ab4 | /Plot1.R | b9dfe8c2074aa09d6d18107ca3bd2db56854c9b2 | [] | no_license | djodjen/ExData_Plotting1 | 03897ce8641c6ae0333656dd9519fd92590cd874 | fedee10f0a85d2816f403a4be28e03f062daeccc | refs/heads/master | 2020-12-25T16:36:08.037804 | 2014-11-09T15:13:36 | 2014-11-09T15:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 668 | r | Plot1.R | setwd("c:/coursera")
## Path to the raw household power consumption data.
input_path <- "c:/coursera/household_power_consumption.txt"

## Read the semicolon-separated file; "?" marks missing values.
power_data <- read.table(input_path, header = TRUE, sep = ';', na.strings="?")

## Parse the Date column so it can be compared against Date objects.
power_data$Date <- as.Date(power_data$Date, format='%d/%m/%Y')

## Keep only the two days of interest (1-2 Feb 2007).
target_days <- as.Date(c('2007-02-01', '2007-02-02'), format='%Y-%m-%d')
feb_subset <- power_data[power_data$Date %in% target_days, ]

## Draw the histogram on screen, then copy it to a PNG device.
hist(feb_subset$Global_active_power, col = "red", xlab = "Global Active Power (kilowats)", main = "Global Active Power")
dev.copy(device = png, "Plot1.png")
dev.off()
7caac5044fae59f9c3b3c071b3369d75486a8d11 | 7ae64966a18056434545eec13a6fe6c563a8fb50 | /dirichlet.R | 290222302998616175f2965cb7301dc60f77c4fb | [] | no_license | Squiercg/recologia | 522812932fc26ae6f0f60df911365ef06b00d916 | d873b3fa08bac3fe633506c8c3591b884526c728 | refs/heads/master | 2020-05-21T22:11:43.879102 | 2019-11-07T14:23:35 | 2019-11-07T14:23:35 | 9,555,220 | 7 | 6 | null | null | null | null | UTF-8 | R | false | false | 1,336 | r | dirichlet.R | ##http://recologia.com.br/2018/05/distribuicao-de-dirichlet/
## Exploring the Dirichlet distribution (post: recologia.com.br).
library(Compositional)
library(MCMCpack)
## Draw and inspect a small sample.
parametros <- c(1,1,1)
amostra <- rdirichlet(10,parametros)
amostra
# Each Dirichlet draw is a composition: rows sum to 1.
rowSums(amostra)
ddirichlet(amostra[1,], c(1,2,3))
## Exploring the distribution: ternary contour plots for increasing
## concentration parameters (mass moves toward the center as they grow).
amostra <- rdirichlet(200, c(.1,.1,.1) )
bivt.contour(amostra)
amostra <- rdirichlet(200, c(1,1,1) )
bivt.contour(amostra)
amostra <- rdirichlet(200, c(10,10,10) )
bivt.contour(amostra)
amostra <- rdirichlet(200, c(100,100,100) )
bivt.contour(amostra)
amostra <- rdirichlet(200, c(1000,1000,1000) )
bivt.contour(amostra)
## Recalling the iris data.
head(iris)
## PCA of the iris data (first two components, colored by species).
coordenadas <- princomp(iris[,1:4])$scores[,1:2]
plot(coordenadas[,1],coordenadas[,2],pch=19,col=as.numeric(iris$Species),frame=F,xlab="PCA 1",ylab="PCA 2")
## Plot in three dimensions: normalize three measurements to a composition.
setosa <- iris[iris$Species=="setosa",c(3,4,1)]
setosa <- as.matrix(setosa/rowSums(setosa))
bivt.contour(setosa)
## Same normalization for the other two species.
versicolor <- iris[iris$Species=="versicolor",c(3,4,1)]
versicolor <- as.matrix(versicolor/rowSums(versicolor))
virginica <- iris[iris$Species=="virginica",c(3,4,1)]
virginica <- as.matrix(virginica/rowSums(virginica))
## Plot of the 3 species side by side.
par(mfrow=c(1,3))
bivt.contour(setosa)
title("setosa")
bivt.contour(versicolor)
title("versicolor")
bivt.contour(virginica)
title("virginica")
a02d235f54556fc3dcb8fbf63873da7cd5c0b6c1 | a317fe63879cf1e9a2cbc4d54af2bdf3e3d3ac3e | /plot2.R | 1857710ee28722baad01bc5b6c26c50913c36034 | [] | no_license | siavash9000/ExData_Plotting1 | a9323f2df5ee3ebc9c5a674490e4913c6c418a0e | 688dd2d0c6284557c25f55559a8f07b7ac1502a1 | refs/heads/master | 2021-01-18T06:05:16.550743 | 2014-06-17T19:49:04 | 2014-06-17T19:49:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 920 | r | plot2.R | tryCatch({
data<-read.csv("household_power_consumption.txt",na.strings='?',sep=";")
},warning = function(w) {
  print("The data file is missing. Please download and extract the file household_power_consumption.txt from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
}, error = function(e) {
  print("The data file is missing. Please download and extract the file household_power_consumption.txt from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
})
# NOTE(review): if the read above fails, `data` is never created (the handlers
# only print a hint), so the lines below will still error.
# Combine date and time into a single timestamp, then parse the columns.
data$Time <- paste(data$Date, data$Time)
data$Time <- strptime(data$Time,"%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date,"%d/%m/%Y")
# Keep only the first two days of February 2007.
february_begin<-subset(data,data$Date>=as.Date("2007-02-01")&data$Date<=as.Date("2007-02-02"))
# Write the line plot of global active power over time to plot2.png.
png(file = "plot2.png")
plot(x=february_begin$Time,y=february_begin$Global_active_power, ylab="Global Active Power(kilowatts)",type='l',xlab="")
dev.off()
dc436c4c4440f07116835161cbe9d9e46384f27e | 22892c3c784ce87cc596071538d1824314febe46 | /R/apply_graph_theme.R | 913872871c6eb3cb99073279c10aed53ccc5f2a6 | [] | no_license | cran/justifier | 295beb2d72576c7147791a45828a391b96299140 | 2d65657d6b127cbda621c43ea9847d1d03687363 | refs/heads/master | 2023-03-20T23:58:57.792413 | 2023-03-05T10:00:02 | 2023-03-05T10:00:02 | 236,617,694 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,195 | r | apply_graph_theme.R | #' Apply multiple DiagrammeR global graph attributes
#'
#' @param graph The [DiagrammeR::DiagrammeR] graph to apply the attributes to.
#' @param ... One or more character vectors of length three, where the first element is
#' the attribute, the second the value, and the third, the attribute type (`graph`,
#' `node`, or `edge`).
#'
#' @return The [DiagrammeR::DiagrammeR] graph.
#' @examples exampleJustifier <- '
#' ---
#' assertion:
#' -
#' id: assertion_id
#' label: "An assertion"
#' decision:
#' -
#' id: decision_id
#' label: "A decision"
#' justification:
#' -
#' id: justification_id
#' label: "A justification"
#' assertion:
#' -
#' id: assertion_id
#' description: "A description of an assertion"
#' source:
#' -
#' id: source1_id
#' label: "First source"
#' -
#' id: source2_id
#' label: "second source"
#' ---
#' ';
#' justifications <-
#' justifier::load_justifications(text=exampleJustifier);
#' miniGraph_original <-
#' justifications$decisionGraphs[[1]];
#' miniGraph <-
#' justifier::apply_graph_theme(
#' miniGraph_original,
#' c("color", "#0000AA", "node"),
#' c("shape", "triangle", "node"),
#' c("fontcolor", "#FF0000", "node")
#' );
#' ### This line shouldn't be run when executing this example as test,
#' ### because rendering a DiagrammeR graph takes quite long
#' \dontrun{
#' DiagrammeR::render_graph(miniGraph);
#' }
#' @export
apply_graph_theme <- function(graph,
                              ...) {
  # Each theme entry in `...` must be a character vector of length three:
  # c(attribute, value, attribute type), e.g. c("color", "#0000AA", "node").
  for (currentSetting in list(...)) {
    # BUGFIX: the original condition `(length != 3) && is.character(...)` only
    # rejected *character* vectors of the wrong length, so non-character
    # settings (e.g. numeric vectors) slipped through unvalidated. Reject
    # anything that is not a character vector of exactly length 3.
    if (length(currentSetting) != 3 || !is.character(currentSetting)) {
      stop("Only provide character vectors of length 3 in the dots (...) argument!");
    } else {
      # Apply the attribute to the graph, accumulating the result.
      graph <-
        DiagrammeR::add_global_graph_attrs(graph,
                                           currentSetting[1],
                                           currentSetting[2],
                                           currentSetting[3]);
    }
  }
  return(graph);
}
|
14b6c9f08654ad604dad036c4ea64cd2c72d67c0 | 24e6e67c0bdfdb1eb5633efafe12d3719469eca5 | /R/genomewidePlots.R | d5e482ee676960927233e01d83169d5899dd3871 | [] | no_license | IOstrovnaya/Clonality | 247a6bd4fc8b95076eb7c6fed0a417ec045b0187 | b968228ef94527a729ee7ec9f798cf532f022a94 | refs/heads/master | 2023-07-21T15:26:00.866463 | 2023-07-07T21:19:58 | 2023-07-07T21:19:58 | 172,739,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,395 | r | genomewidePlots.R | genomewidePlots <-
function(data.seg1,classall,ptlist,ptpair,ptLR,plot.as.in.analysis=TRUE)
{
  # Genome-wide copy-number plot for one pair of tumors (ptpair) from one
  # patient, annotating the title with the clonality likelihood-ratio odds.
  #
  # Args:
  #   data.seg1           - segmented data object with $data (chrom column) and,
  #                         after subset(), a $output segment table
  #   classall            - per-chromosome Gain/Loss/Normal calls, one column per
  #                         sample, aligned with names(data.seg1$data)[-c(1,2)]
  #   ptlist              - patient label per sample
  #   ptpair              - names of the two samples to plot
  #   ptLR                - table of pairwise likelihood ratios (or NULL)
  #   plot.as.in.analysis - if TRUE only segments that classify chromosomes are
  #                         colored; if FALSE the one-step CBS results are plotted

  # Helper: format a likelihood ratio for display (1 decimal below 100,
  # 2 significant digits otherwise).
  nicen<-function(x)
  {
    if (x<100) round(x,1) else formatC(x,2)
  }

  # Validate chromosome labels: 'chr'/'Chr' + two digits + arm letter 'p'/'q'.
  # BUGFIX: the length test was written nchar(chrom==6) (nchar of a logical),
  # which could never fail; it now correctly checks nchar(chrom)==6.
  if ((!is.character(data.seg1$data$chrom) | !all(nchar(data.seg1$data$chrom)==6) | ! all(substr(data.seg1$data$chrom,1,3) %in% c("Chr","chr")) | ! all(substr(data.seg1$data$chrom,6,6) %in% c("p","q"))))
    stop("Chromosome should have format 'chr' + two digits + 'p or q', like in 'chr01p','Chr11q' etc")

  samnms<-names(data.seg1$data)[-c(1,2)]
  chrlist<-unique(data.seg1$data$chrom)
  nchr<-length(chrlist)

  # Keep only the two samples of interest.
  datseg<-subset(data.seg1,sample=ptpair)
  dsamnms<-ptpair

  if (plot.as.in.analysis)
  {
    # Per-chromosome Gain/Loss calls for the two tumors.
    tum1<-classall[,samnms==ptpair[1]]
    tum2<-classall[,samnms==ptpair[2]]
    # seq_len() (rather than 1:nchr) is safe when nchr happens to be 0.
    for (ch in seq_len(nchr))
    {
      # Blank out (set to "Normal") any segment whose call disagrees with the
      # chromosome-level classification.
      datseg$output[datseg$output$ID==dsamnms[1] & datseg$output$chrom==chrlist[ch] &
                      datseg$output[,7]!=tum1[ch],7]<-"Normal"
      datseg$output[datseg$output$ID==dsamnms[2] & datseg$output$chrom==chrlist[ch] &
                      datseg$output[,7]!=tum2[ch],7]<-"Normal"

      # If every remaining segment on this chromosome of sample 2 carries the
      # same aberration, collapse them into one segment whose mean is the
      # marker-count-weighted average.
      qq<-datseg$output[datseg$output$ID==dsamnms[2] & datseg$output$chrom==chrlist[ch],]
      nq<-nrow(qq)
      if (nq>1 & (all(qq[,7]=="Gain") | all(qq[,7]=="Loss")))
      {
        w<-which(datseg$output$ID==dsamnms[2] & datseg$output$chrom==chrlist[ch])
        ll<-qq[1,]
        ll$num.mark<-sum(qq$num.mark)
        ll$seg.mean<-sum(qq$num.mark*qq$seg.mean)/ sum(qq$num.mark)
        datseg$output[w[1],]<-ll
        datseg$output<-datseg$output[-c(w[2]:w[nq]),]
      }

      # Same collapse for sample 1.
      qq<-datseg$output[datseg$output$ID==dsamnms[1] & datseg$output$chrom==chrlist[ch],]
      nq<-nrow(qq)
      if (nq>1 & (all(qq[,7]=="Gain") | all(qq[,7]=="Loss")))
      {
        w<-which(datseg$output$ID==dsamnms[1] & datseg$output$chrom==chrlist[ch])
        ll<-qq[1,]
        ll$num.mark<-sum(qq$num.mark)
        ll$seg.mean<-sum(qq$num.mark*qq$seg.mean)/ sum(qq$num.mark)
        datseg$output[w[1],]<-ll
        datseg$output<-datseg$output[-c(w[2]:w[nq]),]
      }
    }
  }

  # Build the plot title: patient label plus the odds from the LR table, if any.
  line<-paste("Patient ",ptlist[samnms==ptpair[1]],sep="")
  if (!is.null( ptLR))
  {
    numb<-as.numeric(ptLR[ptLR[,1]==dsamnms[1] & ptLR[,2]==dsamnms[2],4])
    if (numb<=1) line<-paste(line,", odds in favor of independence = ",nicen(1/numb),sep="") else
      line<-paste(line,", odds in favor of clonality (metastasis) = ",nicen(numb),sep="")
  }

  prettyplot(datseg,path="",lab.general=line,nm="",t1lab=paste("Sample",dsamnms[1]),
             t2lab=paste("Sample",dsamnms[2]))
}
|
7e2bde3f5117edcd898b948366b9585fc9c632cb | c34ac14ac0766e7448056389aba4ec964ea8e5c1 | /man/dissimilarity_disputedness.Rd | 7c1ca59f4b6f0c14edfd326baf9011f226492a59 | [
"MIT"
] | permissive | simdiversity/RNcEDGPS | a2479b6aade5ae562ea7142af168557e7011e364 | 98079140963419ce83948cdef654af95992760b1 | refs/heads/master | 2023-03-05T20:37:53.447085 | 2021-02-01T07:32:02 | 2021-02-01T07:32:02 | 259,299,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 472 | rd | dissimilarity_disputedness.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bavaud.R
\name{dissimilarity_disputedness}
\alias{dissimilarity_disputedness}
\title{dissimilarity_disputedness}
\usage{
dissimilarity_disputedness(M, f, disp)
}
\arguments{
\item{M}{a matrix containing NAs}
\item{f}{a named weight array}
\item{disp}{an array containing column disputedness}
}
\value{
a dissimilarity renormalised by disputedness
}
\description{
dissimilarity_disputedness
}
|
25d233e5183a79f29d0e9322fc9d06592dfed211 | bf9f77e17111b590fe44905ebd9391009a2a1390 | /data-raw/scripts/diplome.R | e118b40a37051b13a3812c32f8f99b3982cf164d | [
"MIT"
] | permissive | ove-ut3/apogee | 5cd9fed8e1cb4fc359b824fdb16ff269952d6320 | c08ff84497bbaab4af90a0eeb779a338ff158b87 | refs/heads/master | 2021-06-02T09:03:41.344113 | 2020-05-19T13:22:59 | 2020-05-19T13:22:59 | 115,185,672 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,602 | r | diplome.R | #### Diplôme ####
diplome <- readxl::read_excel("data-raw/data/Diplome.xlsx", skip = 1) %>%
patchr::rename(impexp::access_import("_rename", access_base_path))
usethis::use_data(diplome, overwrite = TRUE)
#### Diplôme - type ####
diplome_type <- readxl::read_excel("data-raw/data/Etape.xlsx", "Etape_diplome_type") %>%
patchr::rename(impexp::access_import("_rename", access_base_path)) %>%
dplyr::select(code_type_diplome) %>%
unique() %>%
dplyr::full_join(impexp::access_import("diplome_type", "data-raw/data/Tables_ref.accdb"), by = "code_type_diplome") %>%
dplyr::arrange(code_type_diplome) %>%
dplyr::mutate(acronyme_type_diplome = dplyr::if_else(is.na(acronyme_type_diplome), code_type_diplome, acronyme_type_diplome))
usethis::use_data(diplome_type, overwrite = TRUE)
#### Diplôme origine - type ####
diplome_origine_type <- readxl::read_excel("data-raw/data/Diplome.xlsx", "Diplome_anterieur_origine", skip = 1) %>%
patchr::rename(impexp::access_import("_rename", access_base_path)) %>%
dplyr::rename(code_type_diplome_origine = code_type_diplome_anterieur) %>%
dplyr::full_join(impexp::access_import("diplome_origine_type", "data-raw/data/Tables_ref.accdb"),
by = "code_type_diplome_origine"
) %>%
dplyr::mutate(lib_type_diplome_origine = dplyr::if_else(is.na(lib_type_diplome_origine), lib_type_diplome_anterieur, lib_type_diplome_origine)) %>%
dplyr::select(-lib_type_diplome_anterieur) %>%
dplyr::add_row(code_type_diplome_origine = NA_character_, lib_type_diplome_origine = "Non-ventilé")
usethis::use_data(diplome_origine_type, overwrite = TRUE)
|
2d2270edfbf4d45cfbcd5cf345421ea9e43b0980 | 20dc05ee5ca66121017f7382ee4097c4ff2c37a2 | /data/children/pre-processing/neets.R | ab3c45f632f7dee9822db99cf96741c86a7520ac | [
"MIT"
] | permissive | traffordDataLab/dashboard | 6d885f26213a1177b8f2e3a411a0f914c5c92beb | 83e9bc0cd14d1fac19c743c1e5eece54e0145458 | refs/heads/master | 2021-07-10T20:23:26.488619 | 2020-08-20T15:57:45 | 2020-08-20T15:57:45 | 182,110,692 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,370 | r | neets.R | # 16-17 year olds not in education, employment or training (NEET) or whose activity is not known #
# Source: PHE Fingertips (PHOF 1.05)
# URL: https://fingertips.phe.org.uk/profile/public-health-outcomes-framework
# Licence: Open Government Licence v3.0
library(tidyverse) ; library(fingertipsR)
# retrieve indicators
# NOTE(review): select_indicators() opens an interactive gadget and its result
# is discarded here -- it is exploratory only and not needed for the output.
select_indicators()
# retrieve corresponding metadata for indicator 93203 (NEET, 16-17 year olds)
indicator_metadata(IndicatorID = 93203) %>% formattable::formattable()
# Greater Manchester combined-authority value, relabelled for display.
gm <- fingertips_data(IndicatorID = 93203, AreaTypeID = 102, ParentAreaTypeID = 126, rank = TRUE) %>%
  filter(AreaCode == "E47000001",
         Sex == "Persons") %>%
  mutate(AreaName = str_replace(AreaName, "CA-Greater Manchester", "Greater Manchester"))
# England plus county / unitary-authority values for comparison.
counties <- fingertips_data(IndicatorID = 93203, AreaTypeID = 102, rank = TRUE) %>%
  filter(AreaType %in% c("England", "County & UA (pre 4/19)"),
         Sex == "Persons")
# Combine, keep the columns the dashboard expects, and round the value.
df <- bind_rows(gm, counties) %>%
  select(area_code = AreaCode,
         area_name = AreaName,
         period = Timeperiod,
         value = Value,
         significance = ComparedtoEnglandvalueorpercentiles) %>%
  mutate(indicator = "Proportion of 16-17 year olds not in education, employment or training (NEET)",
         measure = "Percentage",
         unit = "Persons",
         value = round(value, 1)) %>%
  select(-significance, everything())
write_csv(df, "../neets.csv")
723f10e7f20b49c45627dbd6fe70a7eed3ded5fd | 8a4ccd2abbfc215eada83c3e091e4685728bbcce | /R/rs_enclose_selection_with.R | 37679fb0b24f5b44f73607738e17af0c4bbb9376 | [
"MIT"
] | permissive | cran/spAddins | 5799227f85dc5acf4c09da936d8e826284a33d35 | 694a0caf8b75774d120393e67479b5fa8cbac591 | refs/heads/master | 2021-01-20T22:15:17.449385 | 2017-12-14T17:50:02 | 2017-12-14T17:50:02 | 63,228,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,121 | r | rs_enclose_selection_with.R | # 'Enclose selected symbols' addins ---------------------------------------
#' Enclose selection of text with indicated symbols
#'
#' @param symbol (character) A sequence of symbols to add on both sides of selection.
#' @param symbol_before (character) A sequence of symbols to add before the selection
#' (overrides value of \code{symbol}).
#' @param symbol_after (character) A sequence of symbols to add after the selection
#' (overrides value of \code{symbol}).
#' @export
rs_enclose_selection_with <- function(symbol = "",
                                      symbol_before = symbol,
                                      symbol_after = symbol) {
    # Grab the document that currently has focus in RStudio.
    doc_context <- rstudioapi::getActiveDocumentContext()

    # Only the first selection is modified.
    first_sel <- doc_context$selection[[1]]

    selected_text <- first_sel$text
    # Mark the text as UTF-8 so multibyte symbols survive the round trip.
    Encoding(selected_text) <- "UTF-8"

    # Wrap the selection with the requested symbols and write it back in place.
    wrapped_text <- paste0(symbol_before, selected_text, symbol_after)
    rstudioapi::insertText(location = first_sel$range,
                           text = as.character(wrapped_text),
                           id = doc_context$id)
}
|
8ccd620bcc0afae02d59d458d8591fcdd6a6e658 | d464e33f2e317ac395da5335eacbe80d0446497d | /Aula5/scriptAula5.R | 485d3726d1e85fa1ca79f2e93a11f54508bc299a | [] | no_license | lago1970/Inferencia2018 | 3bc6559e384ee83942e5f1c34c45cf94f52858db | 04a62688f1bbdc030fa8efb4d91d5267a40ccae3 | refs/heads/master | 2020-04-02T17:47:13.342622 | 2018-10-17T15:02:55 | 2018-10-17T15:02:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | scriptAula5.R | bsb <- read.csv("https://raw.githubusercontent.com/cursoRunb/Inferencia2018/master/Aula4/climaBsb.csv")
names(bsb)
rquadrado <- c()
for( i in 8:21){
mod <- lm(bsb$temp_inst ~ bsb[,i])
rquadrado <- c(rquadrado,summary(mod)$r.squared)
}
round(rquadrado,3)
names(rquadrado) <- names(bsb)[8:21]
round(rquadrado,4)
plot(bsb$umid_inst,bsb$temp_inst)
mod1 <- lm(bsb$temp_inst ~ bsb$umid_inst)
abline(mod1$coefficients)
summary(mod1)
mod1.2 <- lm(temp_inst ~radiacao,bsb)
summary(mod1.2)
mod2 <- lm(temp_inst ~ radiacao + umid_inst,bsb)
summary(mod2)
shapiro.test(mod2$residuals)
bptest(mod2)
names(mod2)
plot(bsb$umid_inst,bsb$radiacao)
plot(mod2)
mod3 <- lm(temp_inst ~ temp_max + temp_min,bsb)
summary(mod3)
par(mfrow=c(2,2))
erros <- mod1$residuals
plot(erros)
hist(erros,breaks = 10)
qqnorm(erros)
qqline(erros)
plot(bsb$umid_inst,erros)
shapiro.test(erros)
ks.test(erros,"pnorm")
install.packages("lmtest")
require(lmtest)
bptest(mod1)
plot(mod1)
plot(bsb[,5:10])
?glm
|
259ba99110d826bb757a789d655a818054812f0b | 9fc42f797640e101922990bb3462f4318204b222 | /run_analysis.r | a994ed408f45fbf83a9b7319d878107a887fa103 | [] | no_license | ayubumer/Coursera-DS-Getting_and_Cleaning_Data_Project | 5ae9909cab8b7e641e6c006626f2ef3e07da4902 | 91688c14cf447c8d6d938cf52ab6f92e62e309ac | refs/heads/master | 2021-01-02T22:35:04.330101 | 2017-08-04T14:07:15 | 2017-08-04T14:07:15 | 99,346,689 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,359 | r | run_analysis.r |
## Getting & Cleaning Data course project: build a tidy summary of the UCI HAR
## dataset (mean/std features averaged per activity and subject).
## Adding library
library("dplyr")
library("reshape2")
library("plyr")
## Read X_train Data
x_train<-read.table("./R/proj2/UCIHARDataset/train/X_train.txt")
## Read X_test Data
x_test<-read.table("./R/proj2/UCIHARDataset/test/X_test.txt")
## Step 1: merge the training and the test sets to create one data set
mergedstep1<-rbind(x_train,x_test)
## Read features Data (second column holds the feature names, one per X column)
features<-read.table("./R/proj2/UCIHARDataset/features.txt")[,2]
## Show the indexes of the features mentioning mean or standard deviation
grep("mean|std",tolower(features))
## Step 2: extract only the mean and standard deviation measurements
mergedstep2<-mergedstep1[ ,grepl("mean|std",tolower(features))]
## Read Y_train / Y_test (activity codes, same row order as X) and stack them
y_train<-read.table("./R/proj2/UCIHARDataset/train/Y_train.txt")
names(y_train)<-"Activity"
y_test<-read.table("./R/proj2/UCIHARDataset/test/Y_test.txt")
names(y_test)<-"Activity"
mergedact<-rbind(y_train,y_test)
## Loading Activity Labels (code -> descriptive name lookup)
actlabels<-read.table("./R/proj2/UCIHARDataset/activity_labels.txt")
## Step 3: use descriptive activity names.
## BUGFIX: this previously used merge(), which re-sorts rows by the join key
## and therefore misaligned the activity labels with the measurement rows
## before the cbind() below. match() preserves the original row order.
mergedactDesc<-mergedact
mergedactDesc$V2<-actlabels$V2[match(mergedact$Activity, actlabels$V1)]
## Attach the descriptive activity names to the measurements (rows aligned)
mergedstep3<-cbind(mergedstep2,mergedactDesc$V2)
## Step 4: appropriately label the data set with descriptive variable names
names(mergedstep3)<-features[grepl("mean|std",tolower(features))]
names(mergedstep3)[87]<-"Activity"   # 86 mean/std features + the label column
## Attach Subject identifiers (same train-then-test row order as X and Y)
subject_train<-read.table("./R/proj2/UCIHARDataset/train/subject_train.txt")
subject_test<-read.table("./R/proj2/UCIHARDataset/test/subject_test.txt")
## binding both data set
mergedSubject<-rbind(subject_train,subject_test)
### binding both column to final data set
mergedstep4<-cbind(mergedstep3,mergedSubject)
names(mergedstep4)[88]<-"Subject"
names(mergedstep4)
## Step 5: tidy data set with the average of each variable per activity/subject
TidyData<-ddply(mergedstep4,.(Activity,Subject),function(x)colMeans(x[,1:86],na.rm = TRUE))
## Writing data set to File
write.table(TidyData,"./R/proj2/UCIHARDataset/tidydata.txt",row.names = FALSE)
f69558c50045ccc815e0c86aaf11f60bd4dcca7d | 66d62e491e0e6338b4d844650416541e847a93e5 | /code/tests/testPerceptron.R | a256c8bb24c626b244d103b3a96ebc6fac09a835 | [] | no_license | rgmantovani/ia2018uel | 8e87f14d8e3094e16050820f4e9144e2195305d4 | 4a7a503cad6ee196c40976e4184c783542cf4336 | refs/heads/master | 2020-03-30T08:45:25.768978 | 2018-12-14T12:06:47 | 2018-12-14T12:06:47 | 151,037,170 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,912 | r | testPerceptron.R | # -----------------------------------------------------------------
# -----------------------------------------------------------------
# Train and visualize a simple perceptron (implementation sourced from
# ../perceptron.R) on the logical AND dataset, then classify two points.
# -----------------------------------------------------------------
source("../perceptron.R")
library("ggplot2")
# -----------------------------------------------------------------
# Fix the RNG seed so the run (and the output file names) are reproducible.
# -----------------------------------------------------------------
seed.value = 42
set.seed(seed.value)
# -----------------------------------------------------------------
# creating dataset
# -----------------------------------------------------------------
# artificial dataset (alternative, kept for reference)
#x1 = c(4,2,5,3,1.5,2.5,4,5,1.5,3,5,4)
#x2 = c(5,4.5,4.5,4,3,3,3,3,1.5,2,1.5,1)
#bias = 1
#class = c(1,-1,1,1,-1,-1,1,1,-1,-1,1,-1)
#dataset = data.frame(bias, x1, x2, class)
# dataset AND: class is +1 only when both inputs are 1 (labels in {-1, +1})
x1 = c(0,0,1,1)
x2 = c(0,1,0,1)
bias = 1
class = c(-1,-1,-1,1)
dataset = data.frame(bias, x1, x2, class)
# -----------------------------------------------------------------
# training perceptron (initial weights 0.5, learning rate 0.5)
# -----------------------------------------------------------------
w = c(0.5, 0.5, 0.5)
obj = perceptron.train(dataset = dataset, weights = w,
	lrn.rate = 0.5)
# -----------------------------------------------------------------
# plotting training error convergence (average error per epoch)
# -----------------------------------------------------------------
df = data.frame(1:obj$epochs, obj$avgErrorVec)
colnames(df) = c("epoch", "avgError")
# Avg training error
g = ggplot(df, mapping = aes(x = epoch, y = avgError))
g = g + geom_line() + geom_point()
g = g + scale_x_continuous(limit = c(1, nrow(df)))
ggsave(g, file = paste0("perceptron_convergence_",
	seed.value,".jpg"), width = 7.95, height = 3.02, dpi = 480)
# -----------------------------------------------------------------
# ploting the obtained hyperplane over the training points
# -----------------------------------------------------------------
dataset$class = as.factor(dataset$class)
g2 = ggplot(dataset, mapping = aes(x = x1, y = x2, colour = class,
	shape = class))
g2 = g2 + scale_x_continuous(limit = c(0, 7))
g2 = g2 + scale_y_continuous(limit = c(0, 6))
g2 = g2 + geom_point(size = 3) + theme_bw()
# hyper-plane: w1 + w2*x + w3*y = 0 rewritten as y = intercept + slope*x
# (the slope expression simplifies to -w2/w3).
slope = -(obj$weights[1]/obj$weights[3])/(obj$weights[1]/obj$weights[2])
intercept = -obj$weights[1]/obj$weights[3]
g2 = g2 + geom_abline(intercept = intercept, slope = slope)
ggsave(g2, file = paste0("perceptron_hyperplane_",
	seed.value,".jpg"), width = 6, height = 6, dpi = 480)
# -----------------------------------------------------------------
# testing data: each example is c(bias, x1, x2)
# -----------------------------------------------------------------
test1 = c(1,2,2)
res1 = perceptron.predict(example = test1, weights = obj$weights)
print(test1)
print(res1)
test2 = c(1,4,4)
res2 = perceptron.predict(example = test2, weights = obj$weights)
print(test2)
print(res2)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
916419fc3b947f96e12fb0edf767222490c8a5a9 | f5f88098782c560256684ff0d86ca8bc9da13880 | /Titanic Analysis.R | 940dca3187ccd5baa1428fe2307e5bc0b6fe6afb | [] | no_license | juanitotaveras/DV_RProject2 | ba56e0366accb2ff8fc2e568ccd40e7c6a5c47da | fa999aa83a781a9394af8abced5a66fd56d5dfe6 | refs/heads/master | 2021-01-20T04:30:17.826175 | 2015-10-03T00:44:29 | 2015-10-03T00:44:29 | 42,971,453 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,710 | r | Titanic Analysis.R | require("jsonlite")
require("RCurl")
require("ggplot2")
# Change the USER and PASS below to be your UTEid
df <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from RESPIRATORY_INFECTIONS"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_jmt3686', PASS='orcl_jmt3686', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE), ))
df
summary(df)
head(df)
require(extrafont)
ggplot() +
coord_cartesian() +
scale_x_continuous() +
scale_y_continuous() +
#facet_wrap(~SURVIVED) +
facet_grid(DEATHS_1_TO_59_MONTHS), labeller=label_both) + # Same as facet_wrap but with a label.
#facet_grid(PCLASS~SURVIVED, labeller=label_both) +
labs(title='Death 1 to 59 months') +
labs(x="Deaths", y=paste("Percentage")) +
layer(data=df,
mapping=aes(x=as.numeric(as.character(AGE)), y=as.numeric(as.character(FARE)), color=SEX),
stat="identity",
stat_params=list(),
geom="point",
geom_params=list(),
#position=position_identity()
position=position_jitter(width=0.3, height=0)
)
ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous() +
#facet_grid(PCLASS~SURVIVED, labeller=label_both) +
labs(title='Titanic') +
labs(x="SURVIVED", y=paste("FARE")) +
layer(data=df,
mapping=aes(x=SEX, y=as.numeric(as.character(FARE)), color=as.character(SURVIVED)),
stat="identity",
stat_params=list(),
geom="point",
geom_params=list(),
#position=position_identity()
position=position_jitter(width=0.3, height=0)
)
|
bc301abea54fd78efbadec989f64c793adae3aa5 | eaf731fdef1b584cadeaa0df7b1e1ad10d4cbc2b | /machinelearning/datapreparation/waRRior.machinelearning.datapreparation.dynamic_data_evaluation.R | 12b626523312d942e28301c8eb3ae236230827d9 | [
"MIT"
] | permissive | joelgsponer/waRRior | ad0efb0214c7830097dfa459a50e9c151fb69654 | 9c879271626d6619c6596a79a0ccefec8d9e0d16 | refs/heads/master | 2021-01-18T23:38:11.971296 | 2018-08-24T20:45:34 | 2018-08-24T20:45:34 | 34,519,160 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,089 | r | waRRior.machinelearning.datapreparation.dynamic_data_evaluation.R |
waRRior.machinelearning.datapreparation.dynamic_data_evaluation <- function(df, as.dplyr = TRUE){
  # Interactive, column-by-column data-preparation loop: for every column of
  # `df` the user can inspect it (print / table / summary / plots), modify it
  # (recode class, log-transform, z-scale) and classify it (output variable,
  # high/low-importance input, or excluded).
  #
  # Args:
  #   df       - data frame (or object coercible to one) to evaluate
  #   as.dplyr - if TRUE, the returned data is wrapped as a dplyr tbl_df
  #
  # Returns: list with the (possibly modified) data and the bookkeeping
  # vectors of column names per category (output, input, high/low importance,
  # scaled, log-transformed, excluded). Pressing 'e' returns just `df` early.
  #
  # Relies on helpers defined elsewhere in the package: waRRior.snippets.verbose
  # and the google.colors palette.

  #Style
  # NOTE(review): par() here opens a default device which is immediately
  # closed again -- presumably just to reset graphics state; confirm intent.
  par(lwd = 3)
  dev.off()

  #Functions
  # Prompt the user to optionally save `df` to a file of their choosing.
  save.df <- function(){
    b <- readline("do you want to save the df? (y/n)")
    if(b == "y"){
      p <- readline("please indicate path:")
      # BUGFIX: was tryCatc(), an undefined function, so saving always failed
      # with an unrelated error instead of the friendly message below.
      tryCatch(save(df, file = p), error = function(e){cat("Please check path - file not saved")})
    }
  }

  #Variables - bookkeeping of how each column was classified/transformed.
  variables.output <- c()
  variables.input <- c()
  variables.input.high.importance <- c()
  variables.input.low.importance <- c()
  variables.scaled <- c()
  variables.logtransformed <- c()
  variables.exclude <- c()

  df <- as.data.frame(df)
  waRRior.snippets.verbose("dimensions:")
  print(dim(df))
  waRRior.snippets.verbose("press key to cycle through collumns.")
  readline()

  # Main loop: one interactive sub-loop per column; 'n' advances to the next.
  for(col in names(df)){
    user.input <- ""
    while(user.input != "n"){
      cat("---\n\n")
      waRRior.snippets.verbose(col)
      user.input <- readline("action ('H' = help):")
      # Wrap each action so a bad command (e.g. log of a factor) does not
      # abort the whole session.
      tryCatch({
        switch(user.input
               #Inspect
               ,p = print(df[,col])
               ,t = print(table(df[,col]))
               ,s = print(summary(df[,col]))
               ,b = {
                 par(mar = c(4,6,4,4))
                 barplot(table(df[,col]), col = google.colors$Indigo$accent[1], border = google.colors$Indigo$accent[4], lwd = 3, las = 2, cex.names = 0.7)
               }
               ,h = {
                 hist(df[,col], col = google.colors$Indigo$accent[1], border = google.colors$Indigo$accent[4], lwd = 3, main = NA, xlab = col)
               }
               # 'a': compare raw, log, z-scaled and log+z-scaled versions of
               # the column side by side (histogram + normal Q-Q plot each).
               ,a = {
                 tmp <- df[,col]
                 tmp.log <- log(tmp)
                 tmp.z <- scale(tmp)
                 # BUGFIX: was scale(tmp), which duplicated the plain z-scaled
                 # panel instead of showing the log-transformed-then-scaled one.
                 tmp.log.z <- scale(tmp.log)
                 dev.new(width = 12, height = 6)
                 par(mfrow = c(2,4))
                 tmp.plot <- function(x, x.title){
                   hist(x, col = google.colors$Indigo$accent[1], border = google.colors$Indigo$accent[4], lwd = 3, main = col, xlab = col)
                   qqnorm(x,pch = 20,col = google.colors$Indigo$accent[4], main = x.title)
                   qqline(x)
                 }
                 tmp.plot(tmp, "Vanilla")
                 tmp.plot(tmp.log, "Log transformed")
                 tmp.plot(tmp.z, "Z scaled")
                 tmp.plot(tmp.log.z, "Log transformed and z scaled")
               }
               ,c = print(class(unlist(df[1,col])))
               ,q = {
                 qqnorm(df[,col],pch = 20,col = google.colors$Indigo$accent[4])
                 qqline(df[,col])
               }
               #Modify
               ,r = {
                 waRRior.snippets.verbose("convert class to?\n('c' = character,'n' = numeric,'f' = factor,'l' = logical, 'd' = date or 'a' = abort")
                 as.what <- readline()
                 df[,col] <- switch(as.what
                                    ,c = as.character(df[,col])
                                    ,n = as.numeric(df[,col])
                                    ,f = as.factor(df[,col])
                                    ,l = as.logical(df[,col])
                                    ,d = as.Date(df[,col])
                                    ,a = df[,col]
                 )
               }
               ,x = {
                 variables.exclude <- c(variables.exclude, col)
                 waRRior.snippets.verbose("excluded variables:")
                 print(variables.exclude)
                 user.input <- 'n'
               }
               ,l = {
                 variables.logtransformed <- c(variables.logtransformed, col)
                 df[,col] <- log(df[,col])
                 waRRior.snippets.verbose("log transformed variables:")
                 print(variables.logtransformed)
               }
               ,z = {
                 variables.scaled <- c(variables.scaled, col)
                 df[,col] <- scale(df[,col])
                 waRRior.snippets.verbose("scaled input variables:")
                 print(variables.scaled)
               }
               ,low = {
                 variables.input <- c(variables.input, col)
                 variables.input.low.importance <- c(variables.input.low.importance, col)
                 waRRior.snippets.verbose("low importance input variables:")
                 print(variables.input.low.importance)
                 user.input <- 'n'
               }
               ,high = {
                 variables.input <- c(variables.input, col)
                 variables.input.high.importance <- c(variables.input.high.importance, col)
                 waRRior.snippets.verbose("high importance input variables:")
                 print(variables.input.high.importance)
                 user.input <- 'n'
               }
               ,out = {
                 variables.output <- c(variables.output, col)
                 waRRior.snippets.verbose("output variables:")
                 print(variables.output)
                 user.input <- 'n'
               }
               #Controls
               ,n = waRRior.snippets.verbose("...next...")
               # 'e' exits immediately, returning only the data (no bookkeeping).
               ,e = return(df)
               ,S = save.df()
               ,m = dev.off()
               ,H = {waRRior.snippets.verbose("
                                              HELP
                                              Inspection:
                                              'p' = print
                                              't' = print table
                                              's' = print summary
                                              'c' = print class
                                              'b' = barplot
                                              'q' = qq plot normal distribution
                                              Modify:
                                              'r' = recode class
                                              'z' = z transform
                                              'l' = log transoform
                                              Classification:
                                              'x' = mark for exclusion
                                              'out' = output variable
                                              'low' = low importance
                                              'high' = high importance
                                              Controls:
                                              'n' = next collumn
                                              'm' = close plot
                                              'S' = save
                                              'e' = exit and return df
                                              "
               )}
        )
      }, error = function(e){print(e)})
    }
  }

  # Final chance to persist the (modified) data before returning.
  save.df()
  if(as.dplyr){
    require(dplyr)
    df <- tbl_df(df)
  }
  return(list(
    data = df
    ,variables.output = unique(variables.output)
    ,variables.input = unique(variables.input)
    ,variables.input.high.importance = unique(variables.input.high.importance)
    ,variables.input.low.importance = unique(variables.input.low.importance)
    ,variables.scaled = unique(variables.scaled)
    ,variables.logtransformed = unique(variables.logtransformed)
    ,variables.exclude = unique(variables.exclude)
  ))
}
|
c602e66538442c86bedca1099a32eac842e88bb1 | d6c5fdcb5200fc002a2c94863267642c8531deb4 | /plot2.R | 9cc90c1129ff09cee6724f150914dbecb9479a47 | [] | no_license | dpelcyger/exploratory_data_analysis_courseproject1 | e562dddf4e62f9906ce7ff648dbf48bce934c174 | f200910b6f16820e37bb6f3e7f38afa78ebffc46 | refs/heads/master | 2020-07-06T08:02:52.090993 | 2016-11-19T11:35:57 | 2016-11-19T11:35:57 | 74,052,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | plot2.R | #Read in data
# Location of the raw household power consumption extract.
source_path <- "C:/Users/david/Desktop/exploratorydata/household_power_consumption.txt"

# Load the semicolon-delimited file as plain character columns.
raw_power <- read.csv(source_path, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")

# Restrict to the two target days (1 and 2 February 2007).
target_dates <- c("1/2/2007","2/2/2007")
feb_power <- subset(raw_power, Date %in% target_dates)

# Global active power arrives as text; coerce it to numeric.
feb_power$Global_active_power <- as.numeric(as.character(feb_power$Global_active_power))

# Build a combined date-time string for the time axis.
feb_power$Timestamp <- paste(feb_power$Date, feb_power$Time)

# Render the time series straight into a 480x480 PNG, then close the device.
png(file= "plot2.png", width=480, height=480)
plot(strptime(feb_power$Timestamp, "%d/%m/%Y %H:%M:%S"), feb_power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power(kilowatts)")
dev.off()
be938686652b5a8dd5cab1a3f1b8e686d06a5940 | ffa7bdef38e5aef63804e41005e2fe1897c69817 | /R/BTDR-package.R | aa28e47084fd66262a62f21ca9070b2d07561053 | [] | no_license | heckendorfc/BTDR | 08922a7c603c76966c5559d3ac1ce9539196e284 | f13e1d28b4603fbee64a6b8efe953ff45f4dc915 | refs/heads/master | 2021-01-24T03:37:18.817487 | 2018-04-25T18:09:54 | 2018-04-25T18:09:54 | 36,127,036 | 2 | 3 | null | 2017-06-15T17:06:10 | 2015-05-23T14:33:46 | R | UTF-8 | R | false | false | 904 | r | BTDR-package.R | #' Functions to access BUPID Top-Down results
#'
#' This package provides convenient functions to work with results files
#' generated with BUPID Top-Down. These include conversions to other formats
#' and common visualizations.
#'
#' \tabular{ll}{
#' Package: \tab BTDR\cr
#' Type: \tab Package\cr
#' Version: \tab 1.0\cr
#' Date: \tab 2014-08-18\cr
#' License: \tab BSD 2-clause License\cr
#' }
#'
#' @aliases BTDR-package BTDR
#'
#' @import methods
#' @import ggplot2
#' @importFrom xml2 xml_add_child xml_dtd xml_new_root write_xml
#' @importFrom magrittr %>%
#' @importFrom yaml yaml.load
#' @importFrom stats lm
#'
#' @useDynLib BTDR
#'
#' @docType package
#' @author Christian Heckendorf
#'
#' Maintainer: Christian Heckendorf <heckend@@bu.edu>
#' @references ~~ Literature or other references for background information ~~
#' @keywords package
#'
#' @name BTDR-package
NULL
|
1925462dc22d72622a8cf56239aa55afe8f42baa | 17d78cb3b0e8c2cafcd9b75681439e146bede3ab | /analysis/uk-cases-incidence-early.R | d2e2e98043cc6d67550fcbeb45b9a0a0188e860a | [
"MIT"
] | permissive | paulmaunders/coronavirus-graphs | b03de0cdb5792554571d4fdd2d815bf86326cc75 | ea0e4807d2533cce4a8f1cd63753ec779de9f1f4 | refs/heads/main | 2023-02-10T13:22:56.036926 | 2021-01-04T16:47:46 | 2021-01-04T16:47:46 | 302,459,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,335 | r | uk-cases-incidence-early.R | # Incidence plots
# https://rviews.rstudio.com/2020/03/05/covid-19-epidemiology-with-r/
rm(list=ls()); # Clear environment
library('jsonlite')
library('incidence')
library('ggplot2')
govapi <- fromJSON("https://api.coronavirus.data.gov.uk/v1/data?filters=areaName=United%2520Kingdom;areaType=overview&structure=%7B%22areaType%22:%22areaType%22,%22areaName%22:%22areaName%22,%22areaCode%22:%22areaCode%22,%22date%22:%22date%22,%22newCasesByPublishDate%22:%22newCasesByPublishDate%22,%22cumCasesByPublishDate%22:%22cumCasesByPublishDate%22%7D&format=json")
# Original code to convert daily case count to a large list of repeating dates, 1 per case
#df <- data.frame(date_of_onset = as.Date(govapi$data$date), cases = govapi$data$newCasesByPublishDate)
#df <- as.data.frame(lapply(df, rep, df$cases))
#dat <- df$date
#i <- incidence(dat)
#print(plot(i))
# Found a simpler way to import case data into incidence!
i.7 <- as.incidence(govapi$data$newCasesByPublishDate, dates = as.Date(govapi$data$date), interval=7)
# First 20 weeks
fit.both <- fit(i.7[1:20], split=as.Date("2020-04-01"))
before_label_text <- paste("Daily growth rate: ", round(fit.both$before$info$r*100, digits = 2), "%\n",
"Doubling time: ", round(fit.both$before$info$doubling, digits = 2), " days",
sep="")
after_label_text <- paste("Daily decline rate: ", round(fit.both$after$info$r*100, digits = 2), "%\n",
"Halving time: ", round(fit.both$after$info$halving, digits = 2), " days",
sep="")
p <- plot(i.7[1:20], fit=fit.both, color="blue", border = "white") +
labs(title="UK Coronavirus Cases - February to June 2020") +
theme_light(base_size = 14) +
geom_label(
label=before_label_text,
x=as.Date("2020-02-01"),
y=60000,
hjust="left",
label.padding = unit(0.55, "lines"), # Rectangle size around label
label.size = 0.35,
size=5,
color = "black",
fill="#ffffff"
) +
geom_label(
label=after_label_text,
x=as.Date("2020-05-01"),
y=60000,
hjust="left",
label.padding = unit(0.55, "lines"), # Rectangle size around label
label.size = 0.35,
size=5,
color = "black",
fill="#ffffff"
)
print (p)
#mtext(side=3, line=2, at=-0.1, adj=0, cex=1.3, "UK Coronavirus Cases")
fit.both |
b884df750a72a9140b405dfb0da7e81436b65335 | 0a7c1e94041bcd14d9ee74598bd232a8446143b6 | /run_analysis.R | d9e2e1869baef8e3f6a5df59976fd295fe4ffa37 | [] | no_license | hoyah702/g-cdata | 4b0c00c986a35061d31610c50efe133763aaa7c4 | 87d450e34c16bf314d1bc81fed4f26b47e743bf9 | refs/heads/master | 2016-09-06T11:16:21.776919 | 2014-08-24T14:01:30 | 2014-08-24T14:01:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,811 | r | run_analysis.R | run_analysis <- function(){
## Load all the files required
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
labels <- read.table("UCI HAR Dataset/activity_labels.txt",colClasses="character")
features <- read.table("UCI HAR Dataset/features.txt", colClasses="character")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
## Label the activities: "y_train" and "y_test"
y_train <- as.matrix(y_train)
y_test <- as.matrix(y_test)
for(i in seq_along(y_train)){
y_train[i,1] <- labels[y_train[i,1],2]
}
for(i in seq_along(y_test)){
y_test[i,1] <- labels[y_test[i,1],2]
}
## column bind "X_train", "y_train" and "subject_train"
## then label the column names.
traindata <- data.frame(subject_train,y_train,X_train)
colnames(traindata) <- c("Subject","Activity",features[,2])
## Column bind "X_test", "y_test" and "subject_test",
## then coerce its column names into that of "traindata"
testdata <- data.frame(subject_test,y_test,X_test)
colnames(testdata) <- colnames(traindata)
## Row bind the "train" and "test" sets
data <- rbind(traindata, testdata)
## Extract the measurements on the mean and sd columns
## along with subject/activity labels
meanindex <- grep("mean",colnames(data))
stdindex <- grep("std",colnames(data))
cindex <- c(meanindex, stdindex)
cindex <- sort(cindex)
data <- data[,c(1,2,cindex)]
## Sort and order
data <- data[order(data$Subject,data$Activity),]
## Constructing the first part of the tidy data set: "submean"
## the average of each variable for each subject
submean <- data.frame(1:30)
colnames(submean) <- "Subject"
for(i in 3:ncol(data)){ ## Feature variables are arranged from the 3rd column onwards
varmean <- as.data.frame(tapply(data[,i],data$Subject,mean))
colnames(varmean) <- colnames(data)[i]
submean <- data.frame(submean,varmean)
}
## Constructing the second part of the tidy data set: "actmean"
## the average of each variable for each activity
actmean <- data.frame(1:6)
colnames(actmean) <- "Activity"
for(i in 3:ncol(data)){
varmean <- as.data.frame(tapply(data[,i],data$Activity,mean))
colnames(varmean) <- colnames(data)[i]
actmean <- data.frame(actmean,varmean)
}
actmean[,1] <- labels[,2]
## Save the result as a tab-delimited file format
write.table(submean,"tidydata-subject.txt",sep="\t",row.names=FALSE)
write.table(actmean,"tidydata-activity.txt",sep="\t",row.names=FALSE)
}
|
49022aa597386f49be904c6e74a4ccb3ab8a3abc | dea46b6b755043ef14abf13b02a740d2c58be5ff | /R/CytoQCView.R | 3f7e27d159a6f1936589b67806c4fd40b74f357e | [] | no_license | isglobal-brge/affy2sv | 9fa521369f30ceafb8c2e7a2fc1d8725801a862d | 14894cae0d029264acf3ff0439633b632f827662 | refs/heads/main | 2023-05-31T07:58:16.476295 | 2021-04-20T12:18:33 | 2021-04-20T12:18:33 | 375,614,166 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 872 | r | CytoQCView.R | #' Create an object for CytoScan visualization
#'
#' This function creates an object of class \code{CytoQCViewer} that allows
#' to perform a visual quality control. Three plots are avaialble: snp,
#' calling and sc.
#'
#' @param path Location of the .cychp files.
#' @param visualization "snp", "int" or "sc".
#' @param individual name of the invividual for calling and sc visualizations.
#' @param verbose If set to \code{TRUE} usefull information is shown.
#'
#' @export CytoQCView
CytoQCView <- function(path, visualization, individual, verbose = FALSE) {
visualization <- value(visualization, c("snp", "int", "sc"))
if (is.na(visualization)) {
stop("Wrong type of 'visualization'. Available are: 'snp', 'calling' and 'sc'.")
}
new("CytoQCViewer", location = path, type = visualization,
individual = individual, verbose = verbose)
} |
f8d2b6f5069688894aa74ae8578eb85fa9d3040b | 104c4e0e8d9b239e93a3aecd238a74110aa7103d | /tests/testthat/test-install-packages-addin.R | d5ffa0a29d8dbbf1f58a7657337aa358e5bd6b1c | [
"MIT"
] | permissive | OuhscBbmc/OuhscMunge | ac1eaad44b093a6d59282a31f35a9f5f39bba4b4 | dfe4ff23215d03f21b65d42ada7869034c6bad40 | refs/heads/main | 2023-08-03T04:02:00.184327 | 2023-07-23T04:08:15 | 2023-07-23T04:08:15 | 31,865,906 | 1 | 3 | NOASSERTION | 2023-04-17T23:31:47 | 2015-03-08T21:16:47 | R | UTF-8 | R | false | false | 295 | r | test-install-packages-addin.R | library(testthat)
# ---- install_packages_addin --------------------------------------------------------------
test_that("install_packages_addin", {
testthat::skip("This function is too invasive to run on other people's machines.")
install_packages_addin()
testthat::expect_true(TRUE)
})
|
332068515b4b17d7f030592b54ec3ce299bb6c3d | f697cbbb0da988fd43c07652d5955fbb82e25e38 | /David/profileViewer/R/parseGeneNames.r | 74d9156e69e934165d5295cfcb01a8af8db4a984 | [] | no_license | aidanmacnamara/epiView | eec75c81b8c7d6b38c8b41aece3e67ae3053fd1c | b3356f6361fcda6d43bf3acce16b2436840d1047 | refs/heads/master | 2021-05-07T21:22:27.885143 | 2020-06-22T13:08:51 | 2020-06-22T13:08:51 | 109,008,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | parseGeneNames.r | #' parses out the gene names from a deliminated string
#'
#' Newline, tab, colon and semicolon and comma delimited lists are supported
#'
#' @param genes an input string to be parsed
#'
#' @return a character vector with one element for every gene
#' @export
#'
#' @examples
#' parseGeneNames("a b c")
#' parseGeneNames("a, b, c")
#' # etc.
parseGeneNames=function(genes){
genes=gsub("\\n"," ",genes)
genes=gsub("\\t"," ",genes)
genes=gsub(","," ",genes)
genes=gsub(";"," ",genes)
genes=gsub(":"," ",genes)
genes=gsub("\\s+"," ",genes)
strsplit(genes," ")[[1]]
}
|
5c212736acf737d26689dfca2a0e63607b3b3ee4 | 0698e6bc118d5c62498ad03981d92e2b0a1be17f | /Unidad6/Prac_Uni6/bin/magrittr_ej.R | a8d3e6633d4509c2a993ffe94fed3ea10e0329e5 | [] | no_license | maryjomvz/BioinfInvRepro2017-II | 0e62ee11b4b0df4e9fb41c85f0642411e038e02f | d571391a76cd601c1db7e85ac2c26fc4eefafc1c | refs/heads/master | 2021-01-24T18:06:43.476244 | 2017-03-09T03:43:02 | 2017-03-09T03:43:02 | 84,396,473 | 1 | 0 | null | 2017-03-09T04:00:42 | 2017-03-09T04:00:41 | null | UTF-8 | R | false | false | 437 | r | magrittr_ej.R | # A partir de sólo con las muestras de los estados Puebla, Jalisco, Yucatan crea una df que contenga las columnas NSiembra, Raza y Altitud de las muestras de Puebla ordenadas de menor a mayor altitud.
library(magrittr)
fullmat<- read.delim("../meta/maizteocintle_SNP50k_meta_extended.txt")
x<-fullmat$Estado %in% c("Puebla", "Jalisco", "Chiapas") %>%
fullmat[., ] %>%
.[.$Estado=="Puebla", c(2,5,16)] %>%
.[order(.$Altitud), ]
|
00011c0c6b5bf8b9f7f25f1754bae756e44470d9 | fd6f271a68c5bf575c3db49a828be1e40bfc8856 | /man/get_extended_data_from_NASIS_db.Rd | 38073c0aa819203592fe8e68cf01e39545225340 | [] | no_license | cran/soilDB | 57df11e3b59c4d575fe796de0abfaec1f7ac34b6 | 7e0bd1a50c268319003d47376c4525da51ac7b0d | refs/heads/master | 2023-09-06T04:30:47.133326 | 2023-08-29T06:20:02 | 2023-08-29T08:31:36 | 17,699,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,278 | rd | get_extended_data_from_NASIS_db.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_extended_data_from_NASIS_db.R
\name{get_extended_data_from_NASIS_db}
\alias{get_extended_data_from_NASIS_db}
\title{Get accessory tables and summaries from a local NASIS Database}
\usage{
get_extended_data_from_NASIS_db(
SS = TRUE,
nullFragsAreZero = TRUE,
stringsAsFactors = NULL,
dsn = NULL
)
}
\arguments{
\item{SS}{get data from the currently loaded Selected Set in NASIS or from
the entire local database (default: \code{TRUE})}
\item{nullFragsAreZero}{should fragment volumes of NULL be interpreted as 0?
(default: TRUE), see details}
\item{stringsAsFactors}{deprecated}
\item{dsn}{Optional: path to local SQLite database containing NASIS
table structure; default: \code{NULL}}
}
\value{
A list with the results.
}
\description{
Get accessory tables and summaries from a local NASIS Database
}
\examples{
\donttest{
if(local_NASIS_defined()) {
# query extended data
e <- try(get_extended_data_from_NASIS_db())
# show contents of extended data
str(e)
}
}
}
\seealso{
\code{\link{get_hz_data_from_NASIS_db}},
\code{\link{get_site_data_from_NASIS_db}}
}
\author{
Jay M. Skovlin and Dylan E. Beaudette
}
\keyword{manip}
|
3ec72d8f04628ccecdcbaa4b027ee2a8d46f5a96 | 19340266c615642bac59b9cf503dde4909e0518b | /R/compare.r | 9df94f188b36b9bd139627faf050818088981671 | [] | no_license | cran/MSCMT | 9b85db50c8c75f74e0f672a3603cc3dadd338ee0 | 6f2b439226bf346c55abd9e78c33bd57ae5bd40e | refs/heads/master | 2023-04-29T01:07:59.193527 | 2023-04-17T17:20:06 | 2023-04-17T17:20:06 | 64,135,868 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,965 | r | compare.r | #' Compare MSCMT estimation results
#'
#' \code{compare} collects estimation results from \code{\link{mscmt}} for
#' comparison purposes.
#'
#' \code{compare} collects (potentially many) estimation results from
#' \code{\link{mscmt}} in a special object of class \code{"mscmt"}, which
#' includes a component \code{"comparison"} where the different estimation
#' results are aggregated.
#' This aggregated information is used by the \code{\link{ggplot.mscmt}} and
#' \code{\link{print.mscmt}} methods to present summaries of the different
#' results.
#'
#' @param ... Objects of class \code{"mscmt"} or (a) list(s) containing objects
#' of class \code{"mscmt"}.
#' @param auto.name.prefix A character string (default: "") internally used to
#' facilitate automatic naming in nested lists of unnamed estimation results.
#'
#' @return An object of class \code{"mscmt"}, which itself contains the
#' individual estimation results as well as a component \code{"comparison"}
#' with aggregated information.
#' @importFrom stats sd
#' @export compare
compare <- function(...,auto.name.prefix="") {
arg <- list(`...`)
argnames <- names(arg)
if (is.null(argnames)) argnames <- rep("",length(arg))
res <- list()
agg <- NULL
scalars <- c("loss.v","rmspe","loss.w","solution.type","treated.unit","std.v",
"model.type")
XwN <- function(nam) {
res <- rep("x",length(nam))
names(res) <- nam
res
}
myjoin <- function(a,b) {
newrn <- unique(c(rownames(a),rownames(b)))
res <- matrix(NA,nrow=length(newrn),ncol=ncol(a)+ncol(b))
colnames(res) <- c(colnames(a),colnames(b))
rownames(res) <- newrn
res[rownames(a),colnames(a)] <- a
res[rownames(b),colnames(b)] <- b
res
}
add <- function(a,b) {
if (is.null(a)) return(b)
if (length(unique(c(a$names,b$names)))!=length(a$names)+length(b$names))
stop("names are not unique")
res <- a
res$names <- c(a$names,b$names) # merge names
for (le in names(res$variables)) { # merge variables
for (i in 1:3) {
res$variables[[le]][[i]] <-
cbind(res$variables[[le]][[i]],b$variables[[le]][[i]])
if (length(res$names)>1) colnames(res$variables[[le]][[i]]) <- res$names
}
}
for (le in scalars) # merge results
res$results[[le]] <- c(a$results[[le]],b$results[[le]])
for (le in setdiff(names(res$results),scalars))
res$results[[le]] <- myjoin(a$results[[le]],b$results[[le]])
res
}
if (("comparison" %in% names(arg))&&(!inherits(arg,"mscmt")))
stop("'comparison' must not be one of the results' names")
if (("placebo" %in% names(arg))&&(!inherits(arg,"mscmt")))
stop("'placebo' must not be one of the results' names")
for (i in seq_along(arg)) {
if (inherits(arg[[i]],"mscmt")&&(!is.null(arg[[i]]$comparison))) { # current argument already is a comparison
res <- c(res,arg[[i]][setdiff(names(arg[[i]]),"comparison")])
agg <- add(agg,arg[[i]]$comparison)
} else
if (((!inherits(arg[[i]],"mscmt"))&&is.list(arg[[i]]))|| # current argument is a list, but not an object of class "mscmt" -> 'recursion'
(inherits(arg[[i]],"mscmt")&&(is.null(arg[[i]]$placebo)&&
is.null(arg[[i]]$combined)))) { # current argument is a result of univariate "mscmt" estimation -> 'recursion'
tmpres <- do.call("compare",c(arg[[i]],
list(auto.name.prefix=paste0(auto.name.prefix,
LETTERS[i]))))
res <- c(res,tmpres[setdiff(names(tmpres),"comparison")])
agg <- add(agg,tmpres$comparison)
} else
if (inherits(arg[[i]],"mscmt")&&(is.null(arg[[i]]$comparison))) { # current is object of class "mscmt", but not a comparison
if (!is.null(arg[[i]]$placebo)) arg[[i]] <- arg[[i]][[1]] # placebo study? -> take first component!
if (argnames[i]=="") argnames[i] <-
paste0("Result",auto.name.prefix,LETTERS[i])
tmpR <- list(arg[[i]])
names(tmpR) <- argnames[i]
res <- c(res,tmpR) # add whole 'mscmt' object
tmp1 <- list(w=arg[[i]]$w, # prepare estimation results
v=arg[[i]]$v[,ncol(arg[[i]]$v)],
loss.v=arg[[i]]$loss.v,
rmspe=arg[[i]]$rmspe,
loss.w=arg[[i]]$loss.w[[length(arg[[i]]$loss.w)]],
solution.type=arg[[i]]$solution.type,
treated.unit=arg[[i]]$treated.unit,
control.units=XwN(arg[[i]]$control.units),
dependent=XwN(arg[[i]]$dependent),
predictor=XwN(arg[[i]]$predictor),
dependent.start=arg[[i]]$times.dep[1,,drop=FALSE],
dependent.end=arg[[i]]$times.dep[2,,drop=FALSE],
predictor.start=arg[[i]]$times.pred[1,,drop=FALSE],
predictor.end=arg[[i]]$times.pred[2,,drop=FALSE],
agg.pred=XwN(arg[[i]]$agg.pred),
agg.fns=arg[[i]]$agg.fns,
std.v=arg[[i]]$std.v,
model.type=paste0(if(length(arg[[i]]$dependent)>1) "M","SCM",
if(arg[[i]]$dataprep.scaled$trafo.v$has.trafo) "T")
)
for (le in setdiff(names(tmp1),scalars)) {
tmp1[[le]] <- if (is.matrix(tmp1[[le]])) t(tmp1[[le]]) else
cbind(tmp1[[le]])
colnames(tmp1[[le]]) <- argnames[i]
}
for (le in scalars) names(tmp1[[le]]) <- argnames[i]
tmp2 <- vector("list",length(arg[[i]]$combined)) # prepare combined data
names(tmp2) <- names(arg[[i]]$combined)
for (j in seq_along(arg[[i]]$combined))
tmp2[[j]] <- list(data.treat=arg[[i]]$combined[[j]][,1],
data.synth=arg[[i]]$combined[[j]][,2],
gaps=arg[[i]]$combined[[j]][,3])
tmp3 <- argnames[i]
agg <- add(agg,list(names=tmp3,variables=tmp2,results=tmp1))
} else # no valid input
# if (!is.list(arg[[i]])&&(!inherits(arg[[i]],"mscmt")))
warning("skipping argument which is neither of class 'list' nor 'mscmt'")
}
final <- c(res,list(comparison=agg))
class(final) <- "mscmt"
if (!is.null(final$comparison)) final else NULL # do we have a valid result?
}
|
c1a24ab3df497ae2c09d715ab2373095e3e8bcd1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/randomizationInference/examples/zeroEffect.Rd.R | 741d0637f78534e2569e5758e5b255026401262a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 361 | r | zeroEffect.Rd.R | library(randomizationInference)
### Name: zeroEffect
### Title: Potential Outcomes With Zero Treatment Effects
### Aliases: zeroEffect
### Keywords: methods
### ** Examples
## Assignments
w = c(0,0,0,0,0,1,1,1,1,1)
## Modified Assignments
w_new = c(1,1,1,1,1,0,0,0,0,0)
## Outcomes
y = c(4,6,5,7,4,7,11,9,8,12)
zeroEffect(y, w, w_new) ## Returns y as is
|
703b545c7394f9d6b64cfa6b59519c724143d4c4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hyper.fit/examples/hyper.basic.Rd.R | 274d34b44167875a25dbcd86efff2412ae8a51f4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,983 | r | hyper.basic.Rd.R | library(hyper.fit)
### Name: hyper.basic
### Title: Functions to calculate various basic properties important for
### line fitting
### Aliases: hyper.basic makecovarray2d makecovarray3d makecovmat2d
### makecovmat3d makerotmat2d makerotmat3d rotdata2d rotdata3d rotcovmat
### ranrotcovmat2d ranrotcovmat3d makeranrotmat projX projcovmat
### projcovarray arrayvecmult
### Keywords: fit utility rotation array matrix covariance
### ** Examples
extheta=30 #Experiment with changing this
exdim='z' #Experiment with chaging to 'x' or 'y'
exvecx=1:10 #Experiment with changin this
exvecy=1:10 #Experiment with changin this
exvecz=1:10 #Experiment with changin this
print(cbind(exvecx, exvecy))
print(rotdata2d(exvecx, exvecy, extheta))
print(rotdata3d(exvecx, exvecy, exvecz, extheta, exdim))
print(makerotmat2d(extheta))
print(makerotmat3d(extheta, dim=exdim))
exsx=1 #Experiment with changing this
exsy=2 #Experiment with changing this
exsz=3 #Experiment with changing this
excorxy=0.8 #Experiment with changing this between -1 and 1
excorxz=-0.3 #Experiment with changing this between -1 and 1
excoryz=0.5 #Experiment with changing this between -1 and 1
print(makecovmat2d(exsx, exsy, excorxy))
print(makecovmat3d(exsx, exsy, exsz, excorxy, excorxz, excoryz))
print(makecovarray2d(exsx*1:4, exsy*1:4, excorxy))
print(makecovarray3d(exsx*1:4, exsy*1:4, exsz*1:4, excorxy, excorxz, excoryz))
excovmat2d=makecovmat2d(exsx, exsy, excorxy)
excovmat3d=makecovmat3d(exsx, exsy, exsz, excorxy, excorxz, excoryz)
excovarray2d=makecovarray2d(exsx*1:4, exsy*1:4, excorxy)
excovarray3d=makecovarray3d(exsx*1:4, exsy*1:4, exsz*1:4, excorxy, excorxz, excoryz)
print(rotcovmat(excovmat2d, extheta))
print(rotcovmat(excovmat3d, extheta, exdim))
print(ranrotcovmat2d(excovmat2d))
print(ranrotcovmat3d(excovmat3d))
exprojvec2d=c(1, 2)
exprojvec2d=exprojvec2d/sqrt(sum(exprojvec2d^2))
exprojvec3d=c(1, 2, 3)
exprojvec3d=exprojvec3d/sqrt(sum(exprojvec3d^2))
print(projX(cbind(exvecx, exvecy), exprojvec2d))
print(projX(cbind(exvecx, exvecy, exvecz), exprojvec3d))
print(projcovmat(excovmat2d, exprojvec2d))
print(projcovmat(excovmat3d, exprojvec3d))
print(projcovarray(excovarray2d, exprojvec2d))
print(projcovarray(excovarray3d, exprojvec3d))
#Notice that the first outputs of the 2d/3d projcovarray example correspond to the outputs
#of the 2d/3d projcovmat examples.
#First for comparison:
print(t(matrix(1:9,3) %*% 1:3))
print(t(matrix(10:18,3) %*% 1:3))
print(t(matrix(19:27,3) %*% 1:3))
#And now an array example of the above operations:
print(arrayvecmult(array(1:27,c(3,3,3)),1:3))
#And an example where all array dimensions are different:
print(matrix(1:6,2) %*% 1:3)
print(matrix(7:12,2) %*% 1:3)
print(matrix(13:18,2) %*% 1:3)
print(matrix(19:24,2) %*% 1:3)
print(arrayvecmult(array(1:24,c(2,3,4)),1:3))
#Note that the following is not allowed:
## Not run:
##D print(arrayvecmult(array(1:24,c(3,2,4)),1:3))
## End(Not run)
|
8f2fe258bae1cbf235cb20828ce60b6d1c746988 | d042d3a8cabc05aab3897ee90c35e27a25b43658 | /raw_data/R/Baron2016_m.R | d71930079cb5892e165cf3121695cc1ad6cb0cc1 | [] | no_license | rahulbhadani/Benchmarking_Clustering_Methods_scRNAseq | 5950d04ff6ed4f7f64fdff0eed42c30732d12b67 | 366e9fc5a394e8ed398eb24687d08fe5c73b3f30 | refs/heads/master | 2022-03-30T08:20:04.185289 | 2019-12-20T19:39:19 | 2019-12-20T19:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 763 | r | Baron2016_m.R |
# combine counts from two mouse
m1 <- read.csv("raw_data/temp/GSM2230761_mouse1_umifm_counts.csv", header = T)
m2 <- read.csv("raw_data/temp/GSM2230762_mouse2_umifm_counts.csv", header = T)
rownames(m1) <- m1[,1]
rownames(m2) <- m2[,1]
labels_m1 <- as.character(m1$assigned_cluster)
labels_m2 <- as.character(m2$assigned_cluster)
m1 <- m1[,4:ncol(m1)]
m2 <- m2[,4:ncol(m2)]
m1 <- t(m1)
m2 <- t(m2)
data <- cbind(m1, m2)
ann <- data.frame(mouse = c(rep(1, length(labels_m1)), rep(2, length(labels_m2))),
Group = c(labels_m1, labels_m2))
rownames(ann) <- colnames(data)
source("utils/create_sce.R")
sce <- create_sce(data=data,colData=ann )
save(sce, file="raw_data/Rdata/Baron2016_m.Rdata")
rm(list=ls())
system("rm raw_data/temp/*")
|
18e11e1e05568fb3af6e2cf5eb17297ec203e4d2 | 90163a1a67f0987ff2a402db491b2961981dbd36 | /R/utils.R | 92e111c88dec78d4df84314e672166c8bd229000 | [] | no_license | cran/rdaemon | 3fca237b1118fb7a5a061277305f7fd7764700f8 | 750767c3efeb571fbf7c691f2b183932ad29a738 | refs/heads/master | 2023-07-29T09:08:47.467818 | 2021-09-15T20:10:02 | 2021-09-15T20:10:02 | 406,030,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,475 | r | utils.R | readTxt <- function(file){
readChar(file, file.info(file)$size)
}
.difftime <- function(t1, t2){
difftime(t1, t2, units = "secs")
}
truncateTxt <- function(x, n){
for(i in seq_along(x))
if(nchar(x[i]) > n)
x[i] <- paste0(substring(x[i], 1, n), "...")
x
}
.warning <- function(prefix){
function(e){
flog.warn(paste0(prefix, ": %s"), conditionMessage(e))
tryInvokeRestart("muffleWarning")
}
}
.error <- function(prefix){
function(e){
flog.error(paste0(prefix, ": %s"), conditionMessage(e))
}
}
.suspendInterruptsIfRequired <- function(expr, interruptable){
if(interruptable){
expr
}else{
suspendInterrupts(expr)
}
}
handleExceptions <- function(expr, warningPrefix, errorPrefix){
tryCatch(
{
withCallingHandlers(
expr,
warning = .warning(warningPrefix),
error = function(e) {
flog.error(paste0(errorPrefix, ": %s"), conditionMessage(e))
stack <- sys.calls()
stack <- truncateTxt(sapply(stack, deparse), 50)
stack <- stack[!grepl("[t|T]ryCatch", stack)]
stack <- stack[!grepl("withCallingHandlers", stack, fixed = TRUE)]
stack <- stack[!grepl(".handleSimpleError", stack, fixed = TRUE)]
stack <- stack[!grepl("simpleError", stack, fixed = TRUE)]
stack <- stack[!grepl("handleExceptions", stack, fixed = TRUE)]
idx <- which(grepl("runTasks", stack, fixed = TRUE))
if(length(idx)){
stack <- stack[-seq_len(max(idx))]
}
flog.error("Calling stack: ")
for(i in stack){
flog.error(" %s", i)
}
}
)
},
error = function(e) NULL
)
}
isScalerChar <- function(x){
length(x) == 1 && is.character(x)
}
checkDaemonArgs <- function(daemonName = NULL, taskId = NULL){
if(!is.null(daemonName)){
stopifnot(isScalerChar(daemonName))
}
if(!is.null(taskId)){
stopifnot(isScalerChar(taskId))
}
}
getOS <- function(){
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
tolower(os)
}
|
4a0a180f68c4fe5018500979a5b2a544dda5d03d | 29585dff702209dd446c0ab52ceea046c58e384e | /ggvis/inst/doc/overview.R | be749c54806fe8329a19f071bcd36f587d54612e | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 767 | r | overview.R | ## ---- echo = FALSE, message = FALSE--------------------------------------
library(knitr)
library(ggvis)
opts_chunk$set(comment = "#>", error = FALSE, tidy = FALSE)
opts_chunk$set(fig.width = 2.5, fig.height = 1.5, dpi = 100)
## ---- echo = FALSE, fig.width = 4----------------------------------------
# Histogram
faithful %>% ggvis(~eruptions, fill := "#ffffdd", fill.hover := "#eebbbb") %>%
layer_histograms(width = 0.2) %>%
add_axis("x", title = "eruptions") %>%
add_axis("y", title = "count")
## ---- echo = FALSE, fig.width = 3, fig.height = 3------------------------
# Scatter plot with model fit line
mtcars %>%
ggvis(~wt, ~mpg) %>%
layer_points() %>%
layer_smooths(span = input_slider(0.2, 1, 0.75, step = 0.05,
label = "Smoothing span"))
|
4b6e53a29b525df1f0af0f25ef2c0f0a7e4d7224 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hdm/examples/Growth-Data.Rd.R | c6a86348e51a94ef3e4a8eeab53f36bd191fcccd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 193 | r | Growth-Data.Rd.R | library(hdm)
### Name: Growth Data
### Title: Growth data set
### Aliases: Example GDP Growth 'Growth Data' GrowthData
### Keywords: GDP Grwoth datasets
### ** Examples
data(GrwothData)
|
568c4818bff19b24ab6d932fc8c9b31158cd2bbc | 5cc637f6e45e2ccf5839ac00de4e973013cd46dd | /cachematrix.R | 9dd96b85b74ca9f5cef3452803183d9ab7564192 | [] | no_license | LaurensKil/ProgrammingAssignment2 | ea8c7cc238cd4aa1eabe3a473fd0a22510a145c9 | e1e0b0433339a318ab937ecf3af64300844625fc | refs/heads/master | 2021-01-19T09:21:28.600973 | 2017-02-15T21:36:50 | 2017-02-15T21:36:50 | 82,101,078 | 0 | 0 | null | 2017-02-15T20:01:48 | 2017-02-15T20:01:47 | null | UTF-8 | R | false | false | 1,041 | r | cachematrix.R | ## makeCacheMatrix and cacheSolve collaboratively cache a matrix,
## calculate its inverse and cache its inverse. Calculation
## of the inverse is only performed in case the inverse of
## the matrix has not been calculated previously; if the
## inverse has been cached previously, chached data are retrieved
## makeCacheMatrix generates a list of functions to cache a matrix
## (supplied as argument) and its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
get <- function() x
setinv <- function(inverse) i <<- inverse
getinv <- function() i
list(get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve checks whether a list generated by makeCacheMatrix
## has a cached calculated inverse. If so, the cached inverse is
## displayed, if not, the inverse is calculated
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinv(i)
i
}
|
e4a6949a9a34d026f4b17a32b37bfa4f51e1f40c | 2b37e34406d54afb3e714ba651358f4e9bb7430d | /man/pou_logl_fast.Rd | 1c1d2837d8479fcdbb52e7e1c220d85fd5e98d1f | [] | no_license | jpmeagher/sdsBAT | 68c3bacffb9fe5c680c7cd3de9acc49ed946c4f1 | 257a28dbc707155f35cd899799184ffff1948198 | refs/heads/master | 2021-01-20T03:14:53.593120 | 2017-05-15T20:47:55 | 2017-05-15T20:47:55 | 89,512,838 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,232 | rd | pou_logl_fast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likelihood_functions.R
\name{pou_logl_fast}
\alias{pou_logl_fast}
\title{Fast Phylogenetic Likelihood}
\source{
P. Z. Hadjipantelis, N. S. Jones, J. Moriarty, D. A. Springate, and
C. G. Knight, Function-valued traits in evolution, Journal of The Royal
Society Interface. 10(82), 20121032 (2013). \url{https://github.com/fpgpr}
}
\usage{
pou_logl_fast(ln_hyperparameters, phylogenetic_tree, observations)
}
\arguments{
\item{ln_hyperparameters}{Vector of the natural logarithm of hyperparameters
for an Ornstein-Uhlenbeck kernel in order \eqn{log(phylogenetic noise,
phylogenetic length-scale, non-phylogenetic noise)}}
\item{phylogenetic_tree}{Phylogenetic tree of relationships between
observations}
\item{observations}{Observations from the tips of the phylogenetic tree}
}
\value{
A positive value which is the negative log likelihood for the data.
}
\description{
Using the inline and RcppEigen packages, performs a fast calculation of the
Phylogenetic Ornstein-Uhlenbeck process negative log likelihood, given the
process hyperparameters, a phylogenetic tree, and some data. The C++ code was
taken from code accompanying Hadjipantelis et al.
}
|
18bc49c4ae18167f00366a990a66c1635145b7f3 | 7d54f701a66943f17cadba1ce52604abaa559ab0 | /util_graphics_and_plot.R | 96851811e433843eacfa82582b5f049a21848e0d | [] | no_license | knbknb/R_utils | d9b271446366ad75eb25fd37b87432f4aa05f838 | 6cfa819508861ab3bd5ff5b6b829a1b7a9dc52a2 | refs/heads/master | 2021-03-12T19:36:44.068628 | 2020-05-03T10:56:15 | 2020-05-03T10:56:15 | 8,218,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,818 | r | util_graphics_and_plot.R |
########################################
## Graphics output wrappers
## For easy one-liners, like:
## dopdf("tmp.pdf",width=5,height=5,cmd=plot(x,y))

## Render the expression `cmd` into a PDF file.  Extra arguments in ...
## go straight to pdf() (e.g. width, height); `cmd` arrives as an
## unevaluated promise and only runs here, while the device is open.
## If a global variable OPEN exists and is TRUE, the finished file is
## opened with the OS "open" command (macOS convention).
util$dopdf <- function(filename,..., cmd) {
  pdf(filename, ...)
  eval(cmd)      # forces the lazily-evaluated plotting expression
  dev.off()
  if (exists('OPEN') && OPEN)
    system(sprintf("open %s", filename))
}
attr(util$dopdf, "help") <- "Command: dopdf('tmp.pdf',width=5,height=5,cmd=plot(x,y))"
## PNG variant of dopdf().  Also zeroes the top/bottom/left margins and
## leaves a wide right margin via par(mar = c(0,0,0,10)) -- presumably
## to make room for a legend; confirm against callers before changing.
util$dopng <- function(filename,..., cmd) {
  png(filename, ...)
  par(mar = c(0,0,0,10))
  eval(cmd)
  dev.off()
  if ((exists('OPEN') && OPEN))
    system(sprintf("open %s", filename))
}
## Divert console output produced by `cmd` into `filename` -- like
## capture.output(), but following the open/OPEN conventions of the
## other do*() wrappers here: an explicit `open` argument takes
## priority; otherwise a global OPEN flag (if any) decides whether the
## resulting file is opened with the OS "open" command.
util$dosink <- function(filename,cmd, open=NULL) {
  sink(filename)
  ## Guarantee the diversion is removed even if cmd throws an error.
  on.exit(sink(NULL), add = TRUE)
  eval(cmd)
  ## BUG FIX: the condition previously read `if (?coalesce(open, ...))`.
  ## `?` is R's help operator, so the old code invoked the help system
  ## instead of computing a logical.  Use prio_check(), exactly as the
  ## sibling dosvg() below does for the same open-vs-OPEN decision.
  if (prio_check(open, exists('OPEN') && OPEN))
    system(sprintf("open %s", filename))
}
## SVG variant; needs the RSvgDevice package (loaded on demand).
## prio_check() lets an explicit `open` argument override the global
## OPEN flag when deciding whether to open the finished file.
util$dosvg <- function(filename, ..., cmd, open=NULL) {
  library("RSvgDevice")
  devSVG(filename, ...)
  eval(cmd)
  dev.off()
  if (prio_check(open, exists('OPEN') && OPEN))
    system(sprintf("open %s", filename))
}
########################################
## Base R Plotting routines

## Highlight the point(s) (x, y) on the current plot by drawing dashed
## guide lines from each point back to the x- and y-axes.  Vectorised
## over x and y via segments().
util$linelight <- function(x,y, lty='dashed', col='lightgray', ...) {
  # highlight a point with lines running to the axes.
  ## par('usr') is c(xmin, xmax, ymin, ymax) of the current plot region.
  left = par('usr')[1]
  bot = par('usr')[3]
  segments(left,y, x,y, lty=lty, col=col, ...)  # horizontal guide to the y-axis
  segments(x,bot, x,y, lty=lty, col=col, ...)   # vertical guide to the x-axis
}
attr(util$linelight, "help") <- "Highlight point(s) in a plot with dashed lines running to the x,y-axes."
## Draw a "Hinton plot": one square per cell of `mat`, with square AREA
## proportional to abs(value)/max_value and colour giving the sign
## (dark blue for positive, dark red otherwise).  Extra arguments in
## ... are passed to title().  Note: the `mid_value` argument is
## accepted but never used anywhere in the body.
util$hintonplot <- function(mat, max_value=max(abs(mat)), mid_value=0, ...) {
  # Plots a matrix/dataframe/table as colored, size-varying boxes
  # I dunno who started calling this a "Hinton plot", but anyways
  # Example:
  # hintonplot(matrix(rnorm(100),10))
  # Example, for counts:
  # table(cyl=mtcars$cyl, mpg=cut(mtcars$mpg,3))
  # mpg
  # cyl (10.4,18.2] (18.2,26.1] (26.1,33.9]
  # 4 0 6 5
  # 6 2 5 0
  # 8 12 2 0
  # hintonplot(table(cyl=mtcars$cyl, mpg=cut(mtcars$mpg,3)))
  plot.new()
  ## Window extends half a cell beyond the first/last row and column so
  ## the outermost squares are not clipped.
  plot.window(xlim=c(0.5,ncol(mat)+0.5), ylim=c(0.5,nrow(mat)+0.5))
  x_mid = 1:ncol(mat)   # NOTE(review): computed but unused below
  y_mid = 1:nrow(mat)   # NOTE(review): computed but unused below
  ## Area encodes magnitude, so the square side is sqrt(area).
  area = abs(mat) / max_value
  side = sqrt(area)
  for (x in 1:ncol(mat)) {
    for (y in nrow(mat):1) {
      # ym = (nrow(mat):1)[y]
      ym = y
      d = side[ym,x] / 2  # half-side: rect() takes opposite corners
      rect(x-d, y-d, x+d, y+d, col=if (mat[ym,x]>0) 'darkblue' else 'darkred')
    }
  }
  axis(1, 1:ncol(mat), labels=colnames(mat))
  # axis(2, nrow(mat):1, labels=row.names(mat))
  axis(2, 1:nrow(mat), labels=row.names(mat))
  ## Dimension names (if any) label the axes, handy for table() input.
  title(xlab=names(dimnames(mat))[2], ylab=names(dimnames(mat))[1], ...)
}
attr(util$hintonplot, "help") <- " Plots a matrix/dataframe/table as colored, size-varying boxes (simpler than a heatmap)"
ab812e3d340eae83e067073484bf529d1a19ccc4 | a76ac30fd89bf141abdf754c620cc071c6b7859c | /PedGFLMM/pkg/man/snpPos.Rd | 4a1edf18f5a4a15a5c58314b2f373776b50cd103 | [] | no_license | jasa-acs/Gene-Based-Association-Testing-of-Dichotomous-Traits-With-Generalized-Functional-Linear-Mixed-Mod... | cb85a96094a93599a0f4b455a741cf62b47c5dfa | 7e6272dbc921ecf84366221a825011850f72b7c9 | refs/heads/master | 2023-02-09T01:02:02.501904 | 2021-01-04T16:54:15 | 2021-01-04T16:54:15 | 325,889,005 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 688 | rd | snpPos.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exampleData.R
\docType{data}
\name{snpPos}
\alias{snpPos}
\title{snpPos}
\format{An object of class \code{data.frame} with 311 rows and 3 columns.}
\usage{
snpPos
}
\description{
Example marker position data frame, available via data(exampleData).
}
\details{
This data frame provides marker positions for each SNP. The first column, chr, contains
the chromosome number, the second column, snp, contains the SNP name, and the third
column, pos, contains the position of the SNP in base pairs.
}
\seealso{
\code{\link{Ped}}, \code{\link{geno}}, \code{\link{cov}}, \code{\link{exampleData}}
}
\keyword{datasets}
|
8bf7a72bffbf23a1becdadf9bdd326e81048dad0 | 97c2e7f4087eacc6f2356e0451413f7b71373ec5 | /R/get_janes_companies.R | 15db857ba2937b544c5de1a980304227b0477f64 | [] | no_license | cgpeltier/janes | 5c2bd46d7d3850fbe412bafae24213fc42d77b99 | 414218830dfb3d3646595983a83e7fb2d134ba65 | refs/heads/master | 2023-04-08T08:32:53.786585 | 2021-04-13T14:22:31 | 2021-04-13T14:22:31 | 282,264,512 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,860 | r | get_janes_companies.R | #' @title get_janes_companies
#' @description Pulls Janes company data
#'
#' @param country Country in which base is located
#' @param query Search term for companies (i.e. company name)
#'
#' @return Janes equipment data.
#' @importFrom httr GET
#' @importFrom httr content
#' @importFrom jsonlite fromJSON
#' @importFrom stringr str_replace_all
#' @importFrom magrittr "%>%"
#' @importFrom stringr str_remove
#' @importFrom purrr map
#' @importFrom jsonlite flatten
#' @importFrom dplyr bind_rows
#' @importFrom dplyr rename
#' @importFrom tibble tibble
#' @importFrom tidyr unnest_wider
#' @importFrom tidyr unnest_auto
#' @importFrom dplyr select
#' @importFrom dplyr rename_with
#' @importFrom janitor clean_names
#' @importFrom janitor remove_empty
#' @importFrom dplyr starts_with
#' @importFrom dplyr any_of
#' @importFrom tidyr unite
#' @importFrom dplyr mutate
#' @export
get_janes_companies <- function(country = NULL, query = NULL){
  ## Spaces in the free-text query must be URL-encoded for the API.
  page_range <- get_page_range(country = country, endpoint = "companies",
                               query = str_replace_all(query, " ", "%20"))
  ## Page through the companies endpoint and stack per-page results.
  companies <- map(page_range, ~ get_janes_info(x = .x, country = country,
                                                endpoint = "companies",
                                                query = str_replace_all(query, " ", "%20"))) %>%
    bind_rows()
  ## Fetch the full record behind each company's URL (one request each).
  companies_data <- map(companies$url, get_janes_data)
  ## Flatten the nested JSON: two generic unnests, then repeatedly
  ## unnest the "organisation" columns, and finally strip the long
  ## generated prefixes / numeric suffixes from column names.
  companies_data %>%
    tibble() %>%
    unnest_wider(".") %>%
    unnest_wider(".") %>%
    conditional_unnest_wider("organisation") %>%
    unnest_all("organisation") %>%
    unnest_all("organisation") %>%
    unnest_all("organisation") %>%
    unnest_all("organisation") %>%
    rename_with(~ str_remove(., "^[^_]+_[^_]+_")) %>%
    rename_with(~ str_remove(., "(?<=[a-z])_(?=\\d+)"))
}
|
22cb3a54fa84f19ff427d4c6d9d27be0d73a8df7 | ff9eb712be2af2fa24b28ecc75341b741d5e0b01 | /R/chenTTest.ci.R | a2bbfd24e767955e52553eb6f39aa8359adee336 | [] | no_license | alexkowa/EnvStats | 715c35c196832480ee304af1034ce286e40e46c2 | 166e5445d252aa77e50b2b0316f79dee6d070d14 | refs/heads/master | 2023-06-26T19:27:24.446592 | 2023-06-14T05:48:07 | 2023-06-14T05:48:07 | 140,378,542 | 21 | 6 | null | 2023-05-10T10:27:08 | 2018-07-10T04:49:22 | R | UTF-8 | R | false | false | 2,544 | r | chenTTest.ci.R | chenTTest.ci <-
function (muhat, sdhat, skewhat, n, alternative, conf.level,
          p.value.type = c("z", "t", "Avg. of z and t"), paired = FALSE)
{
  ## One-sided confidence interval for the mean (or the mean of paired
  ## differences) accompanying Chen's modified t-test for skewed data.
  ##
  ## muhat/sdhat/skewhat : sample mean, sd and skew;  n : sample size.
  ## alternative : "greater" -> lower bound (needs skewhat > 0);
  ##               "less"    -> upper bound (needs skewhat < 0).
  ## p.value.type : which of chenTTest.sub()'s p-values to invert.
  ## Returns an object of class "intervalEstimate".
  p.value.type <- match.arg(p.value.type)
  alpha <- 1 - conf.level
  ## The confidence limit is found by inverting the test: minimise the
  ## squared gap between the p-value at a candidate mean and alpha.
  fcn.to.min <- function(mu.weird, muhat.weird, sdhat.weird,
                         skewhat.weird, n.weird, alternative.weird, alpha.weird,
                         p.value.type.weird) {
    p.value <- chenTTest.sub(mu = mu.weird, muhat = muhat.weird,
                             sdhat = sdhat.weird, skewhat = skewhat.weird, n = n.weird,
                             alternative = alternative.weird)$p.value[p.value.type.weird]
    (alpha.weird - p.value)^2
  }
  string <- ifelse(paired, "The sample skew for the paired differences",
                   "The sample skew")
  switch(alternative, greater = {
    ## Lower confidence limit; the test is only valid for positive skew,
    ## otherwise warn and return NA for the limit.
    ci.type <- "Lower"
    ucl <- Inf
    if (skewhat <= 0) {
      warning(paste(string, "is less than or equal to 0.\n ",
                    "Chen's test is not appropriate for a\n ", "\"lower\" confidence interval.\n"))
      lcl <- NA
    } else {
      ## Search below muhat; step.max is scaled by the standard error.
      lcl <- nlminb(start = muhat, objective = fcn.to.min,
                    upper = muhat, muhat.weird = muhat, sdhat.weird = sdhat,
                    skewhat.weird = skewhat, n.weird = n, alternative.weird = alternative,
                    alpha.weird = alpha, p.value.type.weird = p.value.type,
                    control = list(step.max = sdhat/sqrt(n)))$par
    }
  }, less = {
    ## Upper confidence limit; mirror image, requires negative skew.
    ci.type <- "Upper"
    lcl <- -Inf
    if (skewhat >= 0) {
      warning(paste(string, "is greater than or equal to 0.\n ",
                    "Chen's test is not appropriate for an\n ", "\"upper\" confidence interval.\n"))
      ucl <- NA
    } else {
      ucl <- nlminb(start = muhat, objective = fcn.to.min,
                    lower = muhat, muhat.weird = muhat, sdhat.weird = sdhat,
                    skewhat.weird = skewhat, n.weird = n, alternative.weird = alternative,
                    alpha.weird = alpha, p.value.type.weird = p.value.type,
                    control = list(step.max = sdhat/sqrt(n)))$par
    }
  })
  ci.limits <- c(lcl, ucl)
  names(ci.limits) <- c("LCL", "UCL")
  ret.obj <- list(name = "Confidence", parameter = ifelse(paired,
                  "mean of differences", "mean"), limits = ci.limits, type = ci.type,
                  method = paste("Based on", p.value.type), conf.level = conf.level,
                  sample.size = n, dof = n - 1)
  oldClass(ret.obj) <- "intervalEstimate"
  ret.obj
}
|
6d411aa6c4068f71c5a45b437b283cf7e6fad7ee | 6e941e412e0533c34b07a3eca745c5733f86551e | /man/Zeta.Rd | afa31ddbbbc615a9da77f1e83496a9d60c9e9114 | [] | no_license | RobinHankin/gsl | cbda723112829f1e6c4c7e52255ab63ad0ac8f3c | d36fc01cc3a8fd568e75c8ffbae28556edb011a9 | refs/heads/master | 2023-05-22T13:29:59.810818 | 2023-02-22T21:02:40 | 2023-02-22T21:02:40 | 133,783,190 | 15 | 10 | null | 2023-01-18T19:03:13 | 2018-05-17T08:30:21 | R | UTF-8 | R | false | false | 1,186 | rd | Zeta.Rd | \name{Zeta}
\alias{Zeta}
\alias{zeta_int}
\alias{zeta}
\alias{zetam1_int}
\alias{zetam1}
\alias{hzeta}
\alias{eta_int}
\alias{eta}
\title{Zeta functions}
\description{
Zeta functions as per the Gnu Scientific Library 7.31 and AMS-55,
section 23.2. These functions are declared in header file
\code{gsl_sf_zeta.h}
}
\usage{
zeta_int(n, give=FALSE, strict=TRUE)
zeta(s, give=FALSE, strict=TRUE)
zetam1_int(n, give=FALSE, strict=TRUE)
zetam1(s, give=FALSE, strict=TRUE)
hzeta(s, q, give=FALSE, strict=TRUE)
eta_int(n, give=FALSE, strict=TRUE)
eta(s, give=FALSE, strict=TRUE)
}
\arguments{
\item{n}{input: integer values}
\item{s,q}{input: real values}
\item{give}{Boolean with \code{TRUE} meaning to return a list of three
items: the value, an estimate of the error, and a status number.}
\item{strict}{Boolean, with \code{TRUE} meaning to return \code{NaN}
if status is an error.}
}
\references{\url{https://www.gnu.org/software/gsl/}}
\author{Robin K. S. Hankin}
\examples{
n <- 1:10
cbind(n,zeta(n),eta(n)) #table 23.3, p 811
zeta_int(1:5)
zeta(c(pi,pi*2))
zetam1_int(1:5)
zetam1(c(pi,pi*2))
hzeta(1.1,1.2)
eta_int(1:5)
eta(c(pi,pi*2))
}
\keyword{array}
|
f4a926c8b2874e123ad3bacbf6a393c61e1b01e0 | effe14a2cd10c729731f08b501fdb9ff0b065791 | /cran/paws.internet.of.things/man/iot_register_thing.Rd | 72ccfc61e8fb776114672073435e2d2a4cf428d7 | [
"Apache-2.0"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 841 | rd | iot_register_thing.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_register_thing}
\alias{iot_register_thing}
\title{Provisions a thing}
\usage{
iot_register_thing(templateBody, parameters)
}
\arguments{
\item{templateBody}{[required] The provisioning template. See \href{https://docs.aws.amazon.com/iot/latest/developerguide/programmatic-provisioning.html}{Programmatic Provisioning}
for more information.}
\item{parameters}{The parameters for provisioning a thing. See \href{https://docs.aws.amazon.com/iot/latest/developerguide/programmatic-provisioning.html}{Programmatic Provisioning}
for more information.}
}
\description{
Provisions a thing.
}
\section{Request syntax}{
\preformatted{svc$register_thing(
templateBody = "string",
parameters = list(
"string"
)
)
}
}
\keyword{internal}
|
122a27263c3f482ce90bc6daf5b0946b75527d52 | c0905d539a101ab2d98d5fe5f43113d4e8f61968 | /code/1_niche_model_maxent.R | 5bccccfc070ec7bba380456c3a0ec92e6c551ceb | [] | no_license | LuisEnriqueAngelesGonzalez/OinsularisNiche | 475c9538e28c5a3fe2d6755d75b21ae075769ddc | 4ce5c8c26dde735a545c9a3104bf680cc3374692 | refs/heads/main | 2023-03-03T10:52:41.235706 | 2021-02-16T21:49:39 | 2021-02-16T21:49:39 | 336,632,390 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,823 | r | 1_niche_model_maxent.R | # To install kuenm following the next steps
# install.packages("devtools")
# library(devtools)
# devtools::install_github("marlonecobos/kuenm")
library(kuenm)
# -------------------------------------------------------------------------
# Model calibration -------------------------------------------------------
# -------------------------------------------------------------------------
# Input files
occ_joint <- "dataset/Oct_ins_joint.csv" # Joint data (all occurrences)
occ_tra <- "dataset/Oct_ins_train.csv" # Training data
M_var_dir <- "M_variables" # follow the structure file (environmental variables)
batch_cal <- "Candidate_models"  # batch file written by kuenm_cal
out_dir <- "Candidate_Models"    # folder that will hold the candidate models
reg_mult <- c(seq(0.5, 4, 0.5)) # Regularization multiplier values to try
f_clas <- c("l","lq","lqp", 'lqph') # Features (linear, quadratic, product, hinge)
maxent_path <- "maxent"          # folder containing maxent.jar
wait <- FALSE
run <- TRUE
# Write the batch file that fits one candidate model per combination of
# regularization multiplier and feature class.
kuenm_cal(occ.joint = occ_joint,
          occ.tra = occ_tra,
          M.var.dir = M_var_dir,
          batch = batch_cal,
          out.dir = out_dir,
          reg.mult = reg_mult,
          f.clas = f_clas,
          maxent.path = maxent_path,
          wait = wait, run = run)
# After running the code above, open the file Candidate_models.bat to
# generate all the models, which will be saved in the Candidate_Models
# folder.  For Mac users this manual step is unnecessary.
# If you find the error "unable to access jarfile" while running this
# script, replace maxent_path <- "maxent" with the complete path of your
# maxent.jar file, for example "/Users/yourname/Documents/maxent".
# -------------------------------------------------------------------------
# Model selection ---------------------------------------------------------
# -------------------------------------------------------------------------
occ_test <- "dataset/Oct_ins_test.csv"  # independent testing occurrences
out_eval <- "Calibration_results"       # output folder for evaluation results
threshold <- 5 # omission rate
rand_percent <- 50 # related to partial ROC
iterations <- 100 # number of iterations
kept <- FALSE # to keep (TRUE) or delete (FALSE) the candidate models
selection <- "OR_AICc" # Omission rate and Delta Akaike
paral_proc <- FALSE # make this true to perform pROC calculations in parallel,
# recommended only if a powerful computer is used (see function's help)
# Note: some of the arguments below (out_dir, occ_joint, occ_tra,
# batch_cal) were created in the calibration section above.
cal_eval <- kuenm_ceval(path = out_dir,
                        occ.joint = occ_joint,
                        occ.tra = occ_tra,
                        occ.test = occ_test,
                        batch = batch_cal,
                        out.eval = out_eval,
                        threshold = threshold,
                        rand.percent = rand_percent,
                        iterations = iterations,
                        ## FIX: previously hard-coded to FALSE, silently
                        ## ignoring the `kept` flag defined above.
                        kept = kept,
                        selection = selection,
                        parallel.proc = paral_proc)
### To replicate the reported results, select only the model with the
### lowest Delta Akaike (AICc) value.
# -------------------------------------------------------------------------
# Model Projections -------------------------------------------------------
# -------------------------------------------------------------------------
batch_fin <- "Final_models"
mod_dir <- "Final_Models"
rep_n <- 1 # change the number of runs if you wish
rep_type <- "Bootstrap"
jackknife <- TRUE # Jackknife analysis
out_format <- "logistic" # Type of output of the model
project <- TRUE # Project model to scenarios
G_var_dir <- "G_variables" # here are the RCP scenarios (.asc)
ext_type <- "ext" # We allow our model to extrapolate. See ext_type help
write_mess <- TRUE # Generate MESS maps (extrapolation risk)
write_clamp <- TRUE
wait1 <- FALSE
run1 <- TRUE
args <- NULL
# Fit the selected final model(s), run the jackknife analysis, and
# project them onto the scenarios stored in G_variables.
kuenm_mod(occ.joint = occ_joint,
          M.var.dir = M_var_dir,
          out.eval = out_eval,
          batch = batch_fin,
          rep.n = rep_n,
          rep.type = rep_type,
          jackknife = jackknife,
          out.dir = mod_dir,
          out.format = out_format,
          project = project,
          G.var.dir = G_var_dir,
          ext.type = ext_type,
          write.mess = write_mess,
          write.clamp = write_clamp,
          maxent.path = maxent_path,
          args = args,
          wait = wait1,
          run = run1)
|
becba09ccc69d43359f9c311c4b2b77b904614de | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i15-m7-u4-v0.pddl_planlen=5/dungeon_i15-m7-u4-v0.pddl_planlen=5.R | 67d8e09d65fe1346ba7953d69d043256592359df | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 86 | r | dungeon_i15-m7-u4-v0.pddl_planlen=5.R | 8cfd58824aa08733fe2f6f54949a4b91 dungeon_i15-m7-u4-v0.pddl_planlen=5.qdimacs 1763 6493 |
2af470f6aaacbd81d104b64b580b75047c1b3976 | c851eaaf39e24face76ffaabd1ba4af07d7fa5a9 | /inst/bugfixes.R | b27a1322b0b3a8d9f2295183028ad35e1adc8af2 | [
"CC-BY-4.0"
] | permissive | DocEd/d.inspectEHR | f497e41ec1901c2ddfa92719a498438048824a71 | d602bab0f3f9022f2e73df22b41373676b87f141 | refs/heads/master | 2022-12-04T15:12:20.242503 | 2020-08-21T13:54:53 | 2020-08-21T13:54:53 | 285,819,547 | 3 | 4 | CC-BY-4.0 | 2020-08-26T19:29:07 | 2020-08-07T12:06:08 | R | UTF-8 | R | false | false | 4,607 | r | bugfixes.R | library(d.inspectEHR)
library(tidyverse)
library(dbplyr)
library(patchwork)

## Put your normal connection details in below.
## (As written, DBI::dbConnect() is a placeholder: it needs a driver
## plus host/user/password arguments before this script will run.)
ctn <- DBI::dbConnect()
## And put your schema inside the quotes:
schema <- ""

## Row counts per measurement concept actually present in the database.
meas <- tbl(ctn, in_schema(schema, "measurement")) %>%
  group_by(measurement_concept_id) %>%
  tally() %>%
  collect() %>%
  mutate(across(everything(), as.integer))

## Unexported objects from d.inspectEHR used by this script.  dq_ref
## carries concept_id/long_name/target_column (see the select() below);
## is.integer64 is presumably a predicate for bit64 database columns --
## confirm against the package internals.
dq_ref <- d.inspectEHR:::dq_ref
dq_ans <- d.inspectEHR:::dq_ans
is.integer64 <- d.inspectEHR:::is.integer64

## Restrict the reference table to concepts observed in this dataset.
meas_dq <- dq_ref %>%
  filter(concept_id %in% meas$measurement_concept_id) %>%
  select(concept_id, long_name, target_column)

## Distinct unit and operator concept ids in the measurement table.
meas_units <- tbl(ctn, in_schema(schema, "measurement")) %>%
  distinct(.data$unit_concept_id) %>%
  collect() %>%
  pull() %>%
  as.integer()

meas_operator <- tbl(ctn, in_schema(schema, "measurement")) %>%
  distinct(.data$operator_concept_id) %>%
  collect() %>%
  pull() %>%
  as.integer()

## Combined concept-id -> concept-name lookup for units and operators.
meas_dict1 <- mini_dict(ctn, schema, meas_units)
meas_dict2 <- mini_dict(ctn, schema, meas_operator)
meas_dict <- bind_rows(meas_dict1, meas_dict2)
## For every measurement concept with a data-quality reference entry:
## pull its rows, check units / visit boundaries / duplication, and draw
## distribution plots.
for (i in seq_along(meas_dq$concept_id)) {
  current_concept <- meas_dq$concept_id[i]
  current_name <- meas_dq$long_name[meas_dq$concept_id == current_concept]
  target_col <- meas_dq$target_column[meas_dq$concept_id == current_concept]

  ## Abbreviate long concept names for the progress output.
  curr_title <- stringr::str_sub(current_name, 1, 30)
  if (nchar(curr_title) >= 30) {
    curr_title <- paste0(curr_title, "...")
  }
  print(paste0(current_concept, ": ", curr_title))

  ## All rows for this concept; bit64 ids -> integer, date columns -> Date.
  working <- tbl(ctn, in_schema(schema, "measurement")) %>%
    filter(measurement_concept_id %in% !! current_concept) %>%
    collect() %>%
    mutate(across(where(is.integer64), as.integer)) %>%
    mutate(across(c(contains("date"), -contains("datetime")), as.Date))

  ## Units recorded for this concept; flag whether a single unit is used.
  working_unit <- meas_dict %>%
    filter(concept_id %in% unique(working$unit_concept_id))
  single_units <- nrow(working_unit) == 1
  label_units <- working_unit[1, "concept_name", drop = TRUE]
  measure_n <- nrow(working)
  print(single_units)
  print(label_units)

  ## Attach visit boundaries so out-of-visit measurements can be counted.
  working <- left_join(
    working,
    tbl(ctn, in_schema(schema, "visit_occurrence")) %>%
      select(visit_occurrence_id, visit_start_datetime, visit_end_datetime) %>%
      collect(),
    by = "visit_occurrence_id")

  boundaries <- working %>%
    summarise(
      before = sum(measurement_datetime < visit_start_datetime, na.rm = TRUE),
      after = sum(measurement_datetime > visit_end_datetime, na.rm = TRUE)
    ) %>%
    tidyr::pivot_longer(everything(), names_to = "condition", values_to = "count")

  dup <- working %>%
    select(.data$person_id, .data$measurement_datetime, .data[[target_col]]) %>%
    janitor::get_dupes(everything()) %>%
    tally(name = "count") %>%
    tibble::add_column(condition = "duplications", .before = TRUE)

  miss <- tibble::tribble(
    ~condition, ~count,
    "no visit", sum(is.na(working$visit_occurrence_id))
  )

  ## FIX: this summary table was previously computed but discarded --
  ## inside a for loop R does not auto-print, so it never appeared.
  dq_summary <- bind_rows(boundaries, dup, miss) %>%
    mutate(
      total = measure_n,
      `p` = round((count/total)*100, 0),
      tolerance = c(1, 1, 1, 100)
    )
  print(dq_summary)

  ## Value distribution: density plot for numeric concepts, a flipped
  ## lollipop chart of category counts otherwise.
  if (target_col == "value_as_number") {
    val_dist <- working %>%
      select(value_as_number) %>%
      ggplot(aes(x = value_as_number)) +
      geom_density() +
      theme_classic() +
      labs(x = label_units)
  } else {
    opt <- dq_ans[dq_ans$concept_id == current_concept, c("option_concept_id", "option_name")]
    val_dist <- working %>%
      select(value_as_concept_id) %>%
      group_by(value_as_concept_id) %>%
      tally() %>%
      mutate(value_as_concept_id = factor(
        value_as_concept_id,
        levels = opt$option_concept_id,
        labels = opt$option_name
      )) %>%
      ggplot(aes(
        x = value_as_concept_id)) +
      geom_point(aes(y = n)) +
      geom_segment(aes(
        y = 0,
        yend = n,
        xend = as.factor(value_as_concept_id))) +
      theme_classic() +
      labs(y = "number of responses", x = "categories") +  # FIX: label typo "respones"
      theme(axis.title.y = element_blank()) +
      coord_flip()
  }

  # timing distribution (time of day)
  timing_dist <- working %>%
    select(measurement_datetime) %>%
    mutate(measurement_datetime = hms::as_hms(measurement_datetime)) %>%
    ggplot(aes(x = measurement_datetime)) +
    geom_density() +
    theme_classic() +
    labs(x = "time of sample")

  # samples over time (per calendar day)
  sample_timing <- working %>%
    select(measurement_date) %>%
    group_by(measurement_date) %>%
    tally() %>%
    ggplot(aes(x = measurement_date, y = n)) +
    geom_path() +
    theme_classic() +
    labs(x = "measurement date", y = "daily samples")

  ## FIX: ggplot/patchwork objects are only rendered when printed; inside
  ## a for loop the bare expression was silently dropped.
  print((val_dist | timing_dist) / sample_timing)
}
|
cb11c8a09992ea8618acf16a97b62d2e5a5b9e4e | 1158f854bd76f4b32302f96ae3f76964a5f607c2 | /R/errorReport.R | b3434cbf05131ea8e9540f0eb0ad351c54153068 | [] | no_license | cran/dismo | 17e0fe560f1a3ed6db6b7b3fa26b997b42fa1a8f | 3d6e871876080c822553a73d6afea608d183e044 | refs/heads/master | 2023-05-24T17:59:52.457998 | 2023-05-21T22:50:06 | 2023-05-21T22:50:06 | 17,695,530 | 10 | 15 | null | 2020-05-20T06:24:11 | 2014-03-13T04:26:40 | R | UTF-8 | R | false | false | 3,818 | r | errorReport.R | # Download geographic data and return as R object
# Author: Jacob van Etten
# License GPL3
# Version 0.1
# October 2008
## Classify the likely geo-referencing error of each record.  `xyxy` has
## four columns: a reported coordinate pair (cols 1-2) and a reference
## pair (cols 3-4), in the lon/lat order used by .errorDist().  For each
## row, .errorDist() scores 16 candidate error types; the one with the
## smallest distance wins.  Returns a two-column matrix: the winning
## error-category name and the corresponding distance.
.errorReport <- function(xyxy,threshold)
{
  ## One row per record, one column per candidate error type.
  distn <- matrix(ncol=16,nrow=length(xyxy[,1]))
  for(i in 1: length(xyxy[,1])) {
    distn[i,] <- .errorDist(xyxy[i,], threshold)
  }
  errorNames <- c("Imprecision","Lonlat swap","Sign latitude","Sign longitude","Sign latitude and longitude","Lonlat swap and sign latitude","Lonlat swap and sign longitude","Lonlat swap and both signs","Wrong longitude","Wrong latitude","Lonlat swap and wrong longitude","Lonlat swap and wrong latitude","Sign latitude and wrong longitude","Sign longitude and wrong latitude","Sign latitude, lonlat swap and wrong longitude","Sign longitude, lonlat swap and wrong latitude")
  ## Name of the smallest-distance error type (first one wins on ties).
  classify <- function(x) {
    if (any(is.na(x))){out <- NA}
    else {out <- errorNames[min(which(x==min(x)))]}
    return(out)
  }
  errorCategory <- apply(distn,1,classify)
  ## Same as classify() but returns the column index instead of the name.
  classify2 <- function(x) {
    if (any(is.na(x))){out <- NA}
    else {out <- min(which(x==min(x)))}
    return(out)
  }
  index <- apply(distn,1,classify2)
  ## (row, column) matrix index used to pick each record's distance.
  index <- cbind(c(1:length(xyxy[,1])),index)
  ## Recompute with threshold 0 so the reported distance is the raw
  ## discrepancy for the winning error type.
  dist2 <- matrix(ncol=16,nrow=length(xyxy[,1]))
  for(i in 1: length(xyxy[,1])) {
    dist2[i,] <- .errorDist(xyxy[i,],0)
  }
  errorDistance <- dist2[index]/1000  # /1000: presumably metres -> km; confirm with pointDistance()
  result <- cbind(errorCategory,errorDistance)
  return(result)
}
## Great-circle distance between the reported point pp[1:2] and the
## reference point pp[3:4] under 16 hypothesised geo-referencing errors
## (coordinate swap, sign changes, a coordinate zeroed/replaced by `a`,
## and combinations).  Returns a length-16 numeric vector in the order
## expected by .errorReport(), or NA if any input is missing.
.errorDist <- function(pp,a){
  x1 <- pp[1]
  y1 <- pp[2]
  x2 <- pp[3]
  y2 <- pp[4]
  if(any(is.na(c(x1, y1, x2, y2)))){
    out <- NA
  } else {
    difference <- pointDistance(c(x1, y1), c(x2, y2), type='GreatCircle')
    xy.exchange <- pointDistance(c(y1, x1), c(x2, y2), type='GreatCircle')
    signch.lat <- pointDistance(c(x1, -y1), c(x2, y2), type='GreatCircle')
    signch.lon <- pointDistance(c(-x1, y1), c(x2, y2), type='GreatCircle')
    ## BUG FIX: this previously tested c(-y1, -x1), i.e. a swap PLUS both
    ## sign changes, duplicating xy.signch.latlon below.  A plain
    ## double-sign-change is c(-x1, -y1) (cf. .errorGenerate()).
    signch.latlon <- pointDistance(c(-x1, -y1), c(x2, y2), type='GreatCircle')
    xy.signch.lat <- pointDistance(c(y1, -x1), c(x2, y2), type='GreatCircle')
    xy.signch.lon <- pointDistance(c(-y1, x1), c(x2, y2), type='GreatCircle')
    xy.signch.latlon <- pointDistance(c(-y1, -x1), c(x2, y2), type='GreatCircle')
    wrong.lon <- pointDistance(c(0, y1), c(a, y2), type='GreatCircle')
    wrong.lat <- pointDistance(c(x1, 0), c(x2, a), type='GreatCircle')
    xy.wrong.lon <- pointDistance(c(y1, 0), c(x2, a), type='GreatCircle')
    xy.wrong.lat <- pointDistance(c(0, x1), c(a, y2), type='GreatCircle')
    signch.lat.wrong.lon <- pointDistance(c(0, -y1), c(a, y2), type='GreatCircle')
    signch.lon.wrong.lat <- pointDistance(c(-x1, 0), c(x2, a), type='GreatCircle')
    signch.lat.xy.wrong.lon <- pointDistance(c(-y1, 0), c(x2, a), type='GreatCircle')
    signch.lon.xy.wrong.lat <- pointDistance(c(0, -x1), c(a, y2), type='GreatCircle')
    out <- c(difference, xy.exchange, signch.lat, signch.lon, signch.latlon, xy.signch.lat, xy.signch.lon, xy.signch.latlon, wrong.lon, wrong.lat, xy.wrong.lon, xy.wrong.lat, signch.lat.wrong.lon, signch.lon.wrong.lat, signch.lat.xy.wrong.lon, signch.lon.xy.wrong.lat)
    return(out)
  }
}
## Build the eight coordinate variants obtainable from a (lon, lat)
## pair by swapping the coordinates and/or flipping their signs.
## Returns an 8 x 2 matrix with descriptive rownames, or NA when either
## input coordinate is missing.
.errorGenerate <- function(pp){
  lon <- pp[1]
  lat <- pp[2]
  if(any(is.na(c(lon, lat)))){
    out <- NA
  } else {
    ## Named list -> rbind sets the rownames from the element names.
    variants <- list(
      no.change        = c(lon, lat),
      xy.exchange      = c(lat, lon),
      signch.lat       = c(lon, -lat),
      signch.lon       = c(-lon, lat),
      signch.latlon    = c(-lon, -lat),
      xy.signch.lat    = c(lat, -lon),
      xy.signch.lon    = c(-lat, lon),
      xy.signch.latlon = c(-lat, -lon)
    )
    out <- do.call(rbind, variants)
    return(out)
  }
}
|
45f9f2290dc726ee97497d9f75cfdd79d9f498ad | 01c9a7cc57285dcdfcaa6b60cbae846cd4b58dbf | /Rscript/FORA2mat_NLDAS.R | af0f6d9d8f5bf36c48a444da9baa890b132091e3 | [] | no_license | fkwai/MatlabScripts | 0566acdf4be5bc1ab1fee9023b92f8c471c02308 | 32804563f1064754ae7910227ee4455331eaf17c | refs/heads/master | 2021-02-04T16:14:40.116493 | 2019-05-16T02:20:11 | 2019-05-16T02:20:11 | 48,926,652 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 681 | r | FORA2mat_NLDAS.R |
## Convert monthly NLDAS FORA forcing data to daily .mat files.  For
## each requested year/month, builds the month tag ("YYYYMM"), the
## file-list path and the output folder, then delegates the actual
## conversion to readNLDAS_daily().
## NOTE(review): readNLDAS_daily() appears to come from
## "LDAS2mat_func.R", but the source() call below is commented out --
## the function must already be in the workspace for this to run.
FORA2mat_NLDAS<-function(year,month){
  library("rgdal")
  library("sp")
  library("R.matlab")
  # source("LDAS2mat_func.R")
  AttrFile=paste("F:/wrgroup/NLDAS_Forcing/FORA/2014/attr.txt",sep="")
  GribtabFile=paste("F:/wrgroup/NLDAS_Forcing/gribtab_NLDAS_FORA_hourly.002.txt",
                    sep="")
  MatfileDir="Y:/NLDAS/3H/FORA_daily_mat"
  for (y in year){
    for(m in month){
      ## e.g. year 2014, month 3 -> "201403" (zero-padded month)
      ym=paste(as.character(y),formatC(m,width=2,flag="0"),sep="")
      FLtxt=paste("F:/wrgroup/NLDAS_Forcing/FORA/FileList/",ym,"_filelist.txt",sep="")
      MatSaveFolder=paste(MatfileDir,"/",ym,sep="")
      readNLDAS_daily(FLtxt,AttrFile,GribtabFile,MatSaveFolder)
    }
  }
}
|
7c46ce2d2f0e96db86dd3c024a67d7f35f1a2cc3 | cda4f6558b33940bd08f82229e5f81981772d2e2 | /inst/doc/Basic-Stats-Concepts.R | 31929cccb045dbf1ad8444f05af18f930acdb1be | [] | no_license | Hamrita/StatMath_R | 1de443c746e701545732c932ffa1317a7e4c383d | db276d39cb58c559bd45a5d395d22829e1b0dbc1 | refs/heads/master | 2023-08-04T05:52:21.969547 | 2021-09-23T23:11:37 | 2021-09-23T23:11:37 | 409,765,862 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,025 | r | Basic-Stats-Concepts.R | ## ---- include = FALSE----------------------------
## Purled from the Basic-Stats-Concepts vignette: knitr chunk options.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ---- echo = FALSE, out.width="25%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/ndis.jpg")
## ----echo=TRUE, message=FALSE, warning=FALSE-----
## Example data: the Fiji earthquakes data set shipped with R.
library(datasets)
data("quakes")
str(quakes)
## ----echo=TRUE, message=FALSE, warning=FALSE-----
## Frequency table of earthquake magnitudes.
table(quakes$mag)
## ----echo=TRUE, fig.height=3, fig.width=6, message=FALSE, warning=FALSE----
hist(quakes$mag, breaks = 24)
## ---- echo = FALSE, out.width="60%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/Skew_PosvsNeg.png")
## ---- echo = FALSE, out.width="60%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/Kurtosis.png")
## ----echo=TRUE, fig.height=4, fig.width=8, message=FALSE, warning=FALSE----
hist(quakes$mag)
## ----message=FALSE-------------------------------
## Skewness and kurtosis, shown with two packages (moments and psych).
library(moments)
skewness(quakes$mag)
library(psych)
describe(quakes$mag)
## ------------------------------------------------
#moments
kurtosis(quakes$mag)
#psych
describe(quakes$mag)
## ------------------------------------------------
summary(quakes$mag)
## ----message = FALSE-----------------------------
## Alternative whole-data-frame summaries: pastecs, Hmisc and psych.
library(pastecs)
stat.desc(quakes)
## ----message = FALSE-----------------------------
library(Hmisc)
Hmisc::describe(quakes)
## ----message = FALSE-----------------------------
library(psych)
psych::describe(quakes)
## ------------------------------------------------
## Measures of central tendency.
mean(quakes$mag)
## ------------------------------------------------
median(quakes$mag)
## ----message=FALSE-------------------------------
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
getmode(quakes$mag)
## ---- echo = FALSE, out.width="40%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/bimodal_distribution.png")
## ---- echo = FALSE, out.width="75%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/Capture_01.png")
## ------------------------------------------------
range(quakes$mag)
psych::describe(quakes$mag)
## ---- echo = FALSE, out.width="50%", fig.align='center'----
knitr::include_graphics("pictures/introDA2/interquartile_range.png")
## ------------------------------------------------
quantile(quakes$mag)
summary(quakes$mag)
## ------------------------------------------------
quantile(quakes$mag, c(0.05,0.50,0.75,0.95))
## ----message=FALSE-------------------------------
var(quakes$mag)
## ------------------------------------------------
sd(quakes$mag)
## ----message = FALSE-----------------------------
quakes$zscore <- scale(quakes$mag)
head(quakes$zscore)
str(quakes$zscore)
mean(quakes$mag)
sd(quakes$mag)
## ------------------------------------------------
cov(quakes$mag, quakes$depth)
## ----message=FALSE, warning=FALSE----------------
cor(quakes$mag, quakes$depth)
## ----echo=TRUE, fig.height=4, fig.width=8, message=FALSE, warning=FALSE----
library(corrplot)
corrplot(cor(quakes), order = "hclust")
|
280502a3c94de5b1eccccee4e432244d19863cea | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/airGR/vignettes/V01_get_started.R | 9a32c102bbac5bac61538fc9b1c0c450c3a2876c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,561 | r | V01_get_started.R | ## ------------------------------------------------------------------------
library(airGR)
## ------------------------------------------------------------------------
data(L0123001)
summary(BasinObs)
## ------------------------------------------------------------------------
InputsModel <- CreateInputsModel(FUN_MOD = RunModel_GR4J, DatesR = BasinObs$DatesR,
Precip = BasinObs$P, PotEvap = BasinObs$E)
str(InputsModel)
## ------------------------------------------------------------------------
Ind_Run <- seq(which(format(BasinObs$DatesR, format = "%d/%m/%Y")=="01/01/1990"),
which(format(BasinObs$DatesR, format = "%d/%m/%Y")=="31/12/1999"))
str(Ind_Run)
## ------------------------------------------------------------------------
RunOptions <- CreateRunOptions(FUN_MOD = RunModel_GR4J,
InputsModel = InputsModel, IndPeriod_Run = Ind_Run,
IniStates = NULL, IniResLevels = NULL, IndPeriod_WarmUp = NULL)
str(RunOptions)
## ------------------------------------------------------------------------
InputsCrit <- CreateInputsCrit(FUN_CRIT = ErrorCrit_NSE, InputsModel = InputsModel,
RunOptions = RunOptions, Qobs = BasinObs$Qmm[Ind_Run])
str(InputsCrit)
## ------------------------------------------------------------------------
CalibOptions <- CreateCalibOptions(FUN_MOD = RunModel_GR4J, FUN_CALIB = Calibration_Michel)
str(CalibOptions)
## ------------------------------------------------------------------------
OutputsCalib <- Calibration_Michel(InputsModel = InputsModel, RunOptions = RunOptions,
InputsCrit = InputsCrit, CalibOptions = CalibOptions,
FUN_MOD = RunModel_GR4J, FUN_CRIT = ErrorCrit_NSE)
Param <- OutputsCalib$ParamFinalR
Param
## ------------------------------------------------------------------------
OutputsModel <- RunModel_GR4J(InputsModel = InputsModel, RunOptions = RunOptions, Param = Param)
str(OutputsModel)
## ----eval=F--------------------------------------------------------------
# plot(OutputsModel, Qobs = BasinObs$Qmm[Ind_Run])
## ------------------------------------------------------------------------
OutputsCrit <- ErrorCrit_NSE(InputsCrit = InputsCrit, OutputsModel = OutputsModel)
## ------------------------------------------------------------------------
OutputsCrit <- ErrorCrit_KGE(InputsCrit = InputsCrit, OutputsModel = OutputsModel)
|
226f14a36c83c71ee3265d76c1c15c69cc137274 | 4c658f7a82f3f29f762c86b956a8ef138fef986c | /Machine_Learning/Code/2_Data_Exploration/Data_Exploration.R | 516b6e0c502ac75e93edacde97b42255ef278272 | [] | no_license | MirkoRima/Predict_Flight_Delays | 8a16488a4cb57f0baa37f57159ffee78a8c9504c | 1f1624bc22b45311cd87d368e65df596023bf9ee | refs/heads/master | 2020-05-23T15:34:00.713883 | 2019-05-15T13:18:38 | 2019-05-15T13:18:38 | 186,828,427 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,710 | r | Data_Exploration.R |
#############################
# EXPLORE DATA TRAINING SET #
#############################
library("plyr")
library("rstudioapi")
# the following line is for getting the path of your current open file
current_path <- getActiveDocumentContext()$path
# The next line set the working directory to the relevant one:
setwd(dirname(current_path ))
setwd("../../Datasets/")
# Import the final dataset about the ATL-TPA Airports
TPA_Trainingset <- read.csv("TPA_Trainingset.csv", stringsAsFactors=FALSE)
# create a support dataset
TPA_Dataset_v1 = TPA_Trainingset
################
# Explore data #
################
###########################################################################################
# Precipitation analysis
# I replace the values in coloumns "Precipitation" with some predetermined value to evaluate the intensity of precipitation
TPA_Dataset_v1$PRECIPITATION <-ifelse(TPA_Dataset_v1$PRECIPITATION <= 0.001 | TPA_Dataset_v1$PRECIPITATION == "T" , "No precipitation",
ifelse(TPA_Dataset_v1$PRECIPITATION <= 0.1 , "Light rain",
ifelse(TPA_Dataset_v1$PRECIPITATION <= 0.79 , "Rain",
ifelse(TPA_Dataset_v1$PRECIPITATION <= 1.5 , "Heavy rain",
ifelse(TPA_Dataset_v1$PRECIPITATION > 1.5, "Storm",0)))))
# Create precipitation table
Precipitation_per_numflights = data.frame(table(TPA_Dataset_v1$PRECIPITATION ))
# Rename the titles of the created table
Precipitation_per_numflights = rename(Precipitation_per_numflights, c("Var1"= "Precipitation type", "Freq" = "Numbers of flights"))
# Order the results in the precipitation table
Precipitation_per_numflights = Precipitation_per_numflights[c(3,2,4,1,5),]
# Count delayed flights per precipitation type
ritP1 = count(TPA_Dataset_v1$PRECIPITATION == "No precipitation" & TPA_Dataset_v1$ARRIVAL_DELAY >0)
ritP1 = ritP1$freq[2]
ritP2 = count(TPA_Dataset_v1$PRECIPITATION == "Light rain" & TPA_Dataset_v1$ARRIVAL_DELAY >0)
ritP2 = ritP2$freq[2]
ritP3 = count(TPA_Dataset_v1$PRECIPITATION == "Rain" & TPA_Dataset_v1$ARRIVAL_DELAY >0)
ritP3 = ritP3$freq[2]
ritP4 = count(TPA_Dataset_v1$PRECIPITATION == "Heavy rain" & TPA_Dataset_v1$ARRIVAL_DELAY >0)
ritP4 = ritP4$freq[2]
ritP5 = count(TPA_Dataset_v1$PRECIPITATION == "Storm" & TPA_Dataset_v1$ARRIVAL_DELAY >0)
ritP5 = ritP5$freq[2]
# I add coloumns "Number of delayed flights"
Precipitation_per_numflights$"Number of delayed flights" <-ifelse(Precipitation_per_numflights$`Precipitation type` == "No precipitation", ritP1,
ifelse(Precipitation_per_numflights$`Precipitation type` == "Light rain", ritP2,
ifelse(Precipitation_per_numflights$`Precipitation type` == "Rain", ritP3,
ifelse(Precipitation_per_numflights$`Precipitation type` == "Heavy rain", ritP4,
ifelse(Precipitation_per_numflights$`Precipitation type` == "Storm",ritP5, 0)))))
###########################################################################################
# Find total flights/delayed flights
All_flights_TPA = nrow(TPA_Dataset_v1)
All_flights_delayed_TPA = data.frame(table(which(TPA_Dataset_v1$ARRIVAL_DELAY>0)))
All_flights_delayed_TPA = sum(All_flights_delayed_TPA$Freq)
All_flights_delayed_TPA_Table = data.frame(table(All_flights_TPA,All_flights_delayed_TPA))
All_flights_delayed_TPA_Table$Freq = NULL
# Rename the titles of the created table
All_flights_delayed_TPA_Table = rename(All_flights_delayed_TPA_Table, c("All_flights_TPA"= "All flights", "All_flights_delayed_TPA" = "All flights delayed"))
# Find flights per days of week
Flights_per_day_TPA = data.frame(table(TPA_Dataset_v1$DAY_OF_WEEK))
# Rename the titles of the created table
Flights_per_day_TPA = rename(Flights_per_day_TPA, c("Var1"= "Days of week", "Freq" = "Number of flights"))
# Find delayed flights per days of week
rit1 = count(TPA_Dataset_v1$DAY_OF_WEEK == 1 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit1 = rit1$freq[2]
rit2 = count(TPA_Dataset_v1$DAY_OF_WEEK == 2 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit2 = rit2$freq[2]
rit3 = count(TPA_Dataset_v1$DAY_OF_WEEK == 3 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit3 = rit3$freq[2]
rit4 = count(TPA_Dataset_v1$DAY_OF_WEEK == 4 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit4 = rit4$freq[2]
rit5 = count(TPA_Dataset_v1$DAY_OF_WEEK == 5 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit5 = rit5$freq[2]
rit6 = count(TPA_Dataset_v1$DAY_OF_WEEK == 6 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit6 = rit6$freq[2]
rit7 = count(TPA_Dataset_v1$DAY_OF_WEEK == 7 & TPA_Dataset_v1$ARRIVAL_DELAY >0)
rit7 = rit7$freq[2]
# insert the flight delay in the table
Flights_per_day_TPA$`Number of delayed flights` <- ifelse(Flights_per_day_TPA$`Days of week`== 1, rit1,
ifelse(Flights_per_day_TPA$`Days of week`== 2, rit2,
ifelse(Flights_per_day_TPA$`Days of week`== 3, rit3,
ifelse(Flights_per_day_TPA$`Days of week`== 4, rit4,
ifelse(Flights_per_day_TPA$`Days of week`== 5, rit5,
ifelse(Flights_per_day_TPA$`Days of week`== 6, rit6,
ifelse(Flights_per_day_TPA$`Days of week`== 7, rit7,0)))))))
###########################################################################################
TPA_Dataset_v2 = TPA_Trainingset
##########################
# factor support dataset #
##########################
TPA_Dataset_v2$AIRLINE = factor(TPA_Dataset_v2$AIRLINE)
TPA_Dataset_v2$ARRIVAL_DELAY = factor(TPA_Dataset_v2$ARRIVAL_DELAY)
TPA_Dataset_v2$DAY = factor(TPA_Dataset_v2$DAY)
TPA_Dataset_v2$DAY_OF_WEEK = factor(TPA_Dataset_v2$DAY_OF_WEEK)
TPA_Dataset_v2$DESTINATION_AIRPORT = factor(TPA_Dataset_v2$DESTINATION_AIRPORT)
TPA_Dataset_v2$ORIGIN_AIRPORT = factor(TPA_Dataset_v2$ORIGIN_AIRPORT)
TPA_Dataset_v2$MONTH = factor(TPA_Dataset_v2$MONTH)
###################
# create graphics #
###################
#AIRLINE - ARRIVAL_DELAY
plot(TPA_Dataset_v2$AIRLINE, TPA_Dataset_v2$ARRIVAL_DELAY, col = TPA_Dataset_v2$ARRIVAL_DELAY)
#legend('topleft', legend = levels(TPA_Dataset_v2$ARRIVAL_DELAY), col = c(1,2), pch = 16)
#DAY - ARRIVAL_DELAY
plot(TPA_Dataset_v2$DAY, TPA_Dataset_v2$ARRIVAL_DELAY, col = TPA_Dataset_v2$ARRIVAL_DELAY)
#legend('topleft', legend = levels(TPA_Dataset_v2$ARRIVAL_DELAY), col = c(1,2), pch = 16)
#DAY_OF_WEEK - ARRIVAL_DELAY
plot(TPA_Dataset_v2$DAY_OF_WEEK, TPA_Dataset_v2$ARRIVAL_DELAY, col = TPA_Dataset_v2$ARRIVAL_DELAY)
#legend('topleft', legend = levels(TPA_Dataset_v2$ARRIVAL_DELAY), col = c(1,2), pch = 16)
#MONTH - ARRIVAL_DELAY
plot(TPA_Dataset_v2$MONTH, TPA_Dataset_v2$ARRIVAL_DELAY, col = TPA_Dataset_v2$ARRIVAL_DELAY)
#legend('topleft', legend = levels(TPA_Dataset_v2$ARRIVAL_DELAY), col = c(1,2), pch = 16)
#DESTINATION_AIRPORT - ARRIVAL_DELAY
plot(TPA_Dataset_v2$DESTINATION_AIRPORT, TPA_Dataset_v2$ARRIVAL_DELAY, col = TPA_Dataset_v2$ARRIVAL_DELAY)
#legend('topleft', legend = levels(TPA_Dataset_v2$ARRIVAL_DELAY), col = c(1,2), pch = 16)
|
0d88f2659d2d2fc310abf23bfdcdba5b1c4fc9ee | f9ffe33a3a20c335b44d59db7e7a70f66460944b | /man/bm_xgb.Rd | b9d620fec4c13681e4e2a166aaac9f840978b55f | [] | no_license | Allisterh/tsensembler | 84ea7e4ffe683d5c49ee87a1583c79a963215d56 | 56b55fae56ce7aaedd71cdca1bd0196928fb9256 | refs/heads/master | 2023-01-01T18:38:12.784510 | 2020-10-27T13:00:02 | 2020-10-27T13:00:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 358 | rd | bm_xgb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base-model-xgb.r
\name{bm_xgb}
\alias{bm_xgb}
\title{Base model for XGBoost}
\usage{
bm_xgb(form, data, lpars)
}
\arguments{
\item{form}{formula}
\item{data}{Training data}
\item{lpars}{list of parameters--deprecated}
}
\description{
Base model for XGBoost
}
\keyword{internal}
|
d94d5e0b922dea03ff944a024e35486536c6ee55 | b1be35dfe938b191cbad324399e14cdb707e7aa4 | /R/function.R | b16bc1bb9a89b6b27b159cfe682dcff9e3576e40 | [] | no_license | bthe/gadgetsim | defff2bdfa754e424d5b963510e9a7518063be48 | bc74d1a29475d59258a4d471c040c0f8d89cf711 | refs/heads/master | 2020-12-27T09:07:25.568262 | 2020-02-03T13:25:42 | 2020-02-03T13:25:42 | 237,846,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,104 | r | function.R |
##' For each prey an upper limit needs to be set on the total amount
##' consumed by all predators so as not to obtain more consumption than
##' available biomass. Consumption is limited to 95\% (\eqn{R_M}) of the
##' available biomass. This is implemented by scaling target consumption by all
##' predators. Let \eqn{R_{prey}(l)}{R_prey(l)} be the Ratio consumed and
##' \eqn{R_M} be the maximum ratio consumed then
##' \deqn{R_{prey}(l)=\frac{\sum_{pred}\sum_{L}C_{pred,prey}(L,l)}{N_{prey}(l)W_{prey}(l)}}
##' If \eqn{R_{prey}(l)>R_M}{R_prey(l)>R_M} consumption is adjusted as follows
##' \deqn{C_{pred,prey}(L,l)=R_MN_{prey}(l)W_{prey}(l)\frac{C_{pred,prey}(L,l)}{\sum_{pred}C_{pred,prey}(L,l)}}
##' @title Adjust for overconsumption
##' @param catches named list (one element per stock) of 5-d arrays holding
##'   the commercial catch of prey; the 5th dimension is indexed by timestep
##' @param predation named list of 5-d arrays with the consumption of prey
##'   by predators, or \code{NULL} when there is no predation
##' @param stocks named list of 4-d abundance arrays for each prey stock
##' @param i the timestep index into the last dimension of the arrays
##' @param maxratioconsumed maximum allowed ratio of consumed to available
##'   biomass (\eqn{R_M}), typically 0.95
##' @return a named list, one element per stock, of total consumption
##'   (fleet catches plus predation, aggregated over fleets/predators),
##'   scaled down wherever it would exceed \code{maxratioconsumed} of the
##'   available biomass.  NOTE(review): the per-fleet/per-predator arrays
##'   themselves are no longer adjusted (that code is commented out below);
##'   only the aggregated totals are returned.
adjustconsumption <- function(catches,
                              predation,
                              stocks,
                              i,
                              maxratioconsumed){
  totalCon <- list()
  for(stock in names(stocks)){
    ## single-area stocks drop the area dimension, so aggregation is over
    ## one fewer margin
    if(dim(stocks[[stock]])[1] == 1){
      agg.ind <- 2:3
    } else {
      agg.ind <- 2:4
    }
    ## total commercial catch of this stock, summed over fleets
    fleetCon <- plyr::aaply(catches[[stock]][,,,,i],agg.ind,sum)
    if(!is.null(predation)){
      ## add consumption by predators, summed over predators
      totalCon[[stock]] <- fleetCon +
        plyr::aaply(predation[[stock]][,,,,i],agg.ind,sum)
    } else {
      totalCon[[stock]] <- fleetCon
    }
    ## ratio of consumption to available abundance; 0/0 and x/0 are
    ## treated as "nothing available, nothing consumed"
    ratio <- totalCon[[stock]]/plyr::aaply(stocks[[stock]][,,,i],agg.ind-1,sum)
    ratio <- ifelse(is.infinite(ratio)|is.nan(ratio),0,ratio)
    index <- ratio > maxratioconsumed
    if(sum(index)>0){
      ## NOTE(review): consider warning()/message() instead of print()
      print(sprintf("Warning - understocking has occured in stock %s at step %s",stock,i))
      ## scale the offending cells so consumption equals exactly
      ## maxratioconsumed times the available biomass
      totalCon[[stock]][index] <- (maxratioconsumed/ratio[index])*
        totalCon[[stock]][index]
      # if(is.null(dim(index))){
      #   ind.dim <- c(1,length(index))
      # } else {
      #   ind.dim <- dim(index)
      # }
      # index2 <- array(rep(index,each=dim(catches[[stock]])[1]),
      #                 c(dim(catches[[stock]])[1],ind.dim,
      #                   dim(catches[[stock]])[4]))
      # catches[[stock]][,,,,i][index2] <- (maxratioconsumed/ratio[index])*
      #   catches[[stock]][,,,,i][index2]
      # predation[[stock]][,,,,i][index2] <- (maxratioconsumed/ratio[index])*
      #   predation[[stock]][,,,,i][index2]
    }
  }
  return(totalCon)
}
## Remove a quota (in numbers) of whales from the population, split over
## substocks, genders and ages.
##
## Age-based selection is logistic (via suitability()) with gender-specific
## parameters from salpha/sbeta.  A single harvest rate F is chosen so that
## the selected take from the combined (tagged + untagged) population equals
## the quota; F is capped at 1 so no more than the whole selected population
## can be removed.
##
## Args:
##   N       array of untagged abundance, dimensions stock x gender x age
##           (dimnames 'stock', 'gender', 'age' are required)
##   NTagged array of tagged abundance with the same layout as N
##   quota   total number of whales to catch in this timestep
##   salpha  vector with named elements 'Male' and 'Female': logistic
##           selection parameters (slope-type) by gender
##   sbeta   vector with named elements 'Male' and 'Female': logistic
##           selection parameters by gender
##
## Returns:
##   list(C = catch of untagged whales, CT = catch of tagged whales),
##   both arrays with the same stock x gender x age layout as N.
whaleCatch <- function(N,NTagged,quota,salpha,sbeta){
  ## selection at age per gender; suitability() returns a 1 x n_ages
  ## matrix, hence the [1,] to extract a plain vector
  MaleS <- suitability(c(salpha['Male'],sbeta['Male'],
                         0,1),as.numeric(dimnames(N)$age))[1,]
  FemaleS <- suitability(c(salpha['Female'],sbeta['Female'],
                           0,1),as.numeric(dimnames(N)$age))[1,]
  ## age x gender selection matrix (columns: Male, Female)
  sl <- cbind(MaleS,FemaleS)
  ## harvest rate: quota over selected abundance of tagged + untagged
  ## animals (summed over stocks), capped at 1.  NB: 'F' shadows the
  ## FALSE shorthand -- intentional here but easy to misread.
  F <- min(1,quota/sum(sl*t(apply((N+NTagged),2:3,sum))))
  #Proportion caught each year
  Fly<-F*sl
  # if(length(dimnames(N)$stock)>0)
  #   Fly <- rep(Fly,each=dim(N)[1])
  # else
  #   Fly <- rep(Fly,each=2)
  ## apply the gender x age exploitation pattern to every stock; the
  ## trailing subscript restores the original dimension order/names
  C <- plyr::aaply(N,1,function(x) x*t(Fly))[dimnames(N)$stock,
                                             dimnames(N)$gender,
                                             dimnames(N)$age]
  CT <- plyr::aaply(NTagged,1,function(x) x*t(Fly))[dimnames(N)$stock,
                                                    dimnames(N)$gender,
                                                    dimnames(N)$age]
  return(list(C=C,CT=CT))
}
##' This is a helper function for the firststep function. It spreads each
##' age group over the length groups according to a Normal distribution,
##' with the first and last length groups absorbing the lower and upper
##' tails respectively (so each age column sums to 1).
##' @title Length distribution
##' @param mu mean length at age (gadget formula, evaluated with \code{par})
##' @param sigma standard deviation of length at age (gadget formula)
##' @param l vector of length-group endpoints
##' @param par gadget parameters object used when evaluating the formulae
##' @return a matrix with \code{length(l)-1} rows (length groups) and one
##'   column per age group
distr <- function(mu, sigma, l, par = data.frame()) {
  ## evaluate the (possibly symbolic) gadget formulae to numeric vectors
  mu <- eval.gadget.formula(mu, par)$V1
  sigma <- eval.gadget.formula(sigma, par)$V1
  num.ages <- length(sigma)
  num.bins <- length(l) - 1
  ## P(lower < X <= upper) for every (age, bin) pair; the age index varies
  ## fastest, so after setting dim the rows are ages and columns are bins
  upper <- stats::pnorm(rep(l[-1], each = num.ages), mu, sigma)
  lower <- stats::pnorm(rep(l[-length(l)], each = num.ages), mu, sigma)
  p <- upper - lower
  dim(p) <- c(num.ages, num.bins)
  ## the outermost groups are minus/plus groups absorbing the tails
  p[, 1] <- stats::pnorm(rep(l[2], num.ages), mu, sigma)
  p[, num.bins] <- 1 - stats::pnorm(rep(l[num.bins], num.ages), mu, sigma)
  t(p)
}
##' The following variables are used in the consumption calculations:
##' l is Lengthgroup in prey
##' L is Lengthgroup in predator
##' A is Areasize
##' H The density (biomass per area unit) of available food at
##' which the predator can consume half maximum consumption
##' \eqn{\Delta t} as Length of timestep
##' \eqn{M_{pred}(L)} as Maximum consumption
##' \eqn{\psi_{pred}(L)} as Fraction of \eqn{M_{pred}} consumed
##' N_{pred}(L) as Number of predator \eqn{pred} in lengthgroup $L
##' N_{prey}(l) as Number of prey \eqn{prey} in lengthgroup l
##' W_{prey}(l) as The mean weight of prey of length l
##' S_{pred,prey}(L,l) as Suitability of prey at length l for pred at length L
##' C_{prey,pred}(L,L) as Total weight predator of length L consumes of prey of length $l$
##' The formula for the consumption is as follows:
##' \deqn{C_{pred,prey}(L,l)=N_{pred}(L)M_{pred}(L)\Psi_{pred}(L)\frac{F_{pred,prey}(L,l)}{\sum_{l,prey}F_{pred,prey}(L,l)}}
##' \deqn{=N_{pred}(L)M_{pred}(L)\frac{\sum_{l,prey}F_{pred,prey}(L,l)}{\sum_{l,prey}F_{pred,prey}(L,l)+HA}\frac{F_{pred,prey}(L,l)}{\sum_{l,prey}F_{pred,prey}(L,l)}}
##' \deqn{=N_{pred}(L)M_{pred}(L)\frac{F_{pred,prey}(L,l)}{\sum_{l,prey}F_{pred,prey}(L,l)+HA}}
##' where
##' \deqn{F_{pred,prey}(L,l) =S_{pred,prey}(L,l)N_{prey}(l)W_{prey}(l)}
##' \deqn{ M_{pred}(L) =m_0e^{(m_1T-m_2T^3)}L_{pred}^{m_4}\Delta t}
##' The suitability function for predation used in the \R model is:
##' \deqn{S_{pred,prey}(L,l) = \frac{\delta}{1+e^{-\alpha-\beta l-\gamma L}}}
##' With one predator, one prey and otherfood the equation becomes:
##' \deqn{C_{L,l}=N_{L}M_{L}\Psi_{L}\frac{F_{L,l}}{\sum_lF_{L,l}+OA}}
##' \deqn{=N_{L}M_{L}\frac{F_{L,l}}{\sum_lF_{L,l}+OA+HA}}
##' where O is the density of otherfood.
##' @title Eat
##' @param PreyInArea Number of prey items by area x length x age x timestep
##' @param PredInArea Number of predator items by area x length x age x timestep
##' @param step the timestep, that is the time of the year
##' @param opt gadget options list (uses spalpha, spbeta, spgamma, spdelta,
##'   lt, w, maxConsumption, numoflgroups, numofareas, H,
##'   lengthoftimesteps, otherfood, otherfrac and areasize)
##' @return The total unadjusted consumption of the predator on the prey,
##'   in numbers, by area x prey length x age
eat <- function(PreyInArea,PredInArea,step,opt){
  preydim <- dim(PreyInArea)
  preddim <- dim(PredInArea)
  ## Predator and prey must share the same lengthgroups.  The previous
  ## version only print()-ed a message here and carried on, which later
  ## failed with an unrelated "object 'numoflgroups' not found" error;
  ## fail fast with a clear message instead.
  if(preydim[2]!=preddim[2]){
    stop("Error - the number of lengthgroups is not the same for predation")
  }
  numoflgroups <- preydim[2]
  ## The amount eaten (area x prey length x age)
  Eat<-array(0,preydim[1:3])
  ## The suitability for the predation: S[L, l]
  Spred<-suitability(params=c(opt$spalpha,opt$spbeta,opt$spgamma,opt$spdelta),opt$lt,opt$lt)
  ## Food[l, L, age] = S(L,l)*N_l*W_l, the available prey biomass
  Food<-array(0,c(numoflgroups,opt$numoflgroups,preydim[3]))
  for(area in 1:opt$numofareas){
    Prey <- PreyInArea[area,,,step]
    Pred <- PredInArea[area,,,step]
    # F_{Llat}
    for(i in 1:preydim[3])
      Food[,,i]<-t(Spred)*Prey[,i]*opt$w
    ## maximum consumption per predator lengthgroup:
    ## N_pred(L) * M_pred(L)
    predsum <- apply(Pred,1,sum)*opt$maxConsumption
    ## total available food per predator lengthgroup
    foodsum <- apply(Food,2,sum)
    ## half-saturation density plus otherfood (per unit area)
    other <- opt$H*opt$lengthoftimesteps+opt$otherfood[step]*opt$otherfrac
    ## C(L,l) = N(L) M(L) F(L,l) / (sum_l F(L,l) + (H + O) A),
    ## accumulated over predator lengths j
    for(j in 1:numoflgroups){
      Eat[area,,] <- Eat[area,,] +
        predsum[j]*Food[,j,]/(foodsum[j] + other*opt$areasize)
    }
    ## convert the consumed biomass back to numbers of prey
    Eat[area,,] <- Eat[area,,]/opt$w
  }
  return(Eat)
}
##' The simulation is started with an initial population, the age of which
##' ranges from the minimum age of the immature stock to the maximum age
##' of the mature stock. This population is calculated as follows:
##' Let \eqn{n_{a}} be the number of age \eqn{a} individuals in the first
##' timestep, \eqn{\mu_{a}} the mean length at age \eqn{a} and \eqn{\sigma_{a}}
##' the standard deviation of length at age \eqn{a}. For the minimum age
##' (representing recruitment) see the recruitment function. For a given
##' constant mortality \eqn{Z_{0}} we get
##' \deqn{n_{a} = n_{a-1}e^{-Z_{0}}}
##' to calculate the number at age for all \eqn{a}.
##' The number in lengthgroup $i$ at age $a$ in timestep 1 can then be
##' calculated from:
##' \deqn{N_{i,a,1} = n_a\left(\Phi\left(\frac{l_{i+1}-\mu_a}{\sigma_a}\right)-
##' \Phi\left(\frac{l_{i}-\mu_a}{\sigma_a}\right)\right)}{N_{i,a,1} = n_a (Phi(l_{i+1}-mu_a)/sigma_a)-Phi(l_{i}-\mu_a)/sigma_a))}
##' where \eqn{l_{i}} and \eqn{l_{i+1}} are the endpoints of lengthgroup \eqn{i},
##' \eqn{N_{l,a,t}} is the number at age \eqn{a} in lengthgroup \eqn{l} at
##' timestep \eqn{t} and \eqn{\Phi}{Phi} is the probability function for Normal
##' distribution.
##' NB: in Gadget (which is programmed in C++) the value of \eqn{\Phi}{Phi} is
##' approximated whereas R uses integration. To ensure
##' compatibility between the models, the initial population for Gadget is
##' entered directly from the initial population file rather than
##' calculated from a Normal distribution. While this is an option
##' within Gadget, it is not the standard method.
##' @title The first timestep
##' @param n Number of age 1 individuals
##' @param mu Vector of mean length per age.
##' @param sigma Vector of standard deviation of length per age.
##' @param l Vector of length groups
##' @param z Natural mortality per age
##' @param numofareas Number of areas
##' @param probarea Vector of proportions living in an area.
##' @param minage Minimum age of the species.
##' @param maxage Maximum age of the species.
##' @return array of the initial abundance, area x length x age
firststep <- function(n,
                      mu,
                      sigma,
                      l,
                      z,
                      numofareas,
                      probarea,
                      minage,
                      maxage
                      ) {
  ages <- minage:maxage
  num.bins <- length(l) - 1
  ## numbers at age from exponential decay with constant mortality z
  numbers.at.age <- n[1]*exp(-(ages - 1)*z)
  if(length(ages) != length(sigma)) {
    stop("Error - the number of age groups for sigma doesnt match the maximum age")
  }
  ## spread each age group over the lengthgroups (columns are ages)
  len.by.age <- distr(mu, sigma, l)*rep(numbers.at.age, each = num.bins)
  ## split the population between areas according to probarea
  if(length(probarea) != numofareas){
    stop("Error - the area probabilities do not match the number of areas")
  }
  initial.distribution <- array(rep(len.by.age, each = numofareas),
                                c(numofareas, dim(len.by.age)))*probarea
  dimnames(initial.distribution) <- list(area = 1:numofareas,
                                         length = min(l):(max(l) - 1),
                                         age = ages)
  initial.distribution
}
##' Growth is according to a von Bertalanffy equation
##' \deqn{\mu_{a} = L_{\infty}(1-e^{-\kappa a})}
##' with the lengthvbsimple growth function from Gadget implemented.
##' For a fish of age a and length l, mean length growth \eqn{\Delta L} is
##' then calculated as:
##' \deqn{\Delta L =L_{\infty}(1 - \frac{l}{L_{\infty}})(1 - e^{-\kappa \Delta t})}
##' \deqn{Delta L =L_{infty}(1 - l/L_{infty})(1 - e^{-kappa Delta t})}
##' The length distribution is updated using the beta-binomial
##' distribution, ie the probability of growing x lengthgroups, given
##' maximum lengthgroupgrowth n, is
##' \deqn{P[X = x] = \frac{\Gamma(n+1)}{\Gamma(n-x+1)\Gamma(x+1)} \frac{\Gamma(\alpha + \beta)}
##' {\Gamma(n+\alpha+\beta)} \frac{\Gamma(n-x+\beta)}{\Gamma(\beta)} \frac{\Gamma(x+a)}{\Gamma(\alpha)}}
##' with \eqn{\alpha = \frac{\beta\Delta L}{n-\Delta L}} to preserve the mean
##' lengthgrowth according to the equation equation above. NB: the
##' expected value of \eqn{\Delta L} should be taken into consideration when
##' fixing n.
##' Let \eqn{G = [g_{ij}]} be the length updating matrix where \eqn{g_{ij}} is the
##' probability of growing from lengthgroup i to lengthgroup j
##' obtained from the equation above.
##' \deqn{N_{l,a+1,t+\Delta t} = \sum_{l'\leq l}g_{l'l}N_{l,a,t}}
##' with \eqn{N_{l,a,t}} as described for the initial population for a >
##' min a .
##' @title Update length
##' @param lt Vector of midpoints for the length groups.
##' @param beta Beta for the Von Bertanlanffy curve
##' @param lsup \eqn{l_\infty}{l_infty} for the Von Bertanlanffy.
##' @param k \eqn{\kappa}{kappa} for the Von Bertanlanffy.
##' @param dt Length of the time interval.
##' @param lengthgrouplen length of the lengthgroups.
##' @param binn is the maximum updating length.
##' @return a matrix where the index (i,j) represents the probability of
##'   moving from lengthgroup i to lengthgroup j; each row sums to 1
growthprob <- function(lt,
                       beta,
                       lsup,
                       k,
                       dt,
                       lengthgrouplen,
                       binn) {
  ## Build the lengthgroup transition matrix from beta-binomial jump
  ## probabilities.  alpha varies by (current) lengthgroup, x are the
  ## possible jumps 0..n where n = length(x) - 1 = binn.
  beta.binom.matrix <- function(alpha, beta, x) {
    num.lg <- length(alpha)
    n <- length(x) - 1
    alpha.rep <- rep(alpha, n + 1)
    x.rep <- rep(x, each = num.lg)
    ## beta-binomial pmf on the log scale for numerical stability
    log.p <- lgamma(n + 1) +
      lgamma(alpha.rep + beta) +
      lgamma(n - x.rep + beta) +
      lgamma(x.rep + alpha.rep) -
      lgamma(n - x.rep + 1) -
      lgamma(x.rep + 1) -
      lgamma(n + alpha.rep + beta) -
      lgamma(beta) -
      lgamma(alpha.rep)
    p <- exp(log.p)
    dim(p) <- c(num.lg, n + 1)        # rows: lengthgroups, cols: jump size
    gm <- matrix(0, num.lg, num.lg)
    for (lg in seq_len(num.lg)) {
      if (lg == num.lg) {
        ## already in the plus group: stay put
        gm[num.lg, num.lg] <- 1
      } else if (lg + n > num.lg) {
        ## jumps past the last group are lumped into the plus group
        gm[lg, lg:(num.lg - 1)] <- p[lg, 1:(num.lg - lg)]
        gm[lg, num.lg] <- sum(p[lg, (num.lg - lg + 1):(n + 1)])
      } else {
        gm[lg, lg:(n + lg)] <- p[lg, ]
      }
    }
    gm
  }
  ## mean von Bertalanffy growth in each lengthgroup
  dmu <- lsup*(1 - lt/lsup)*(1 - exp(-k*dt))
  ## growth beyond the maximum jump is clamped just below it
  dmu[dmu/lengthgrouplen >= binn] <- binn - 0.1
  ## negative growth is treated as no growth
  dmu[dmu < 0] <- 0
  ## alpha chosen so the beta-binomial mean equals dmu (in lengthgroups)
  growth.ratio <- dmu/lengthgrouplen
  alpha <- (beta*growth.ratio)/(binn - growth.ratio)
  beta.binom.matrix(alpha, beta, 0:binn)
}
##' If more than one area is defined the user can define migration
##' between the areas. The migration procedure is the same as in MULTSPEC.
##' The migration is defined and takes place in every timestep and it is
##' assumed to be constant for all years. This means that the proportion that
##' migrates from area i to j can be different between different timesteps
##' (seasons) but they are fixed between observations (years). Migration at
##' timestep t is defined by the user as an \eqn{n\times n}{nxn} transition
##' matrix P_t := [p_{ij}] where p_{ij} is the proportion moving from area j
##' to area i, ignoring other processes. For P to be a transition matrix
##' \eqn{\sum_ip_{ij} = 1}{sum_i p_ij = 1}, for each j. The vector of abundance
##' for all areas at time t is therefore:
##' \deqn{\mathbf{N_t} = P_t \mathbf{N_{t-1}}}{N_t = P_t N_{t-1}}
##' In a two area example this would mean that if N_{1,t-1} is a matrix
##' containing the abundance in numbers in area 1 at timestep t before
##' migration and N_{2,t-1} is the same number for area 2, the numbers after
##' migration will be
##' \deqn{N_{1,t} = p_{11}\cdot N_{1,t-1} + p_{12}\cdot N_{2,t-1} }
##' \deqn{N_{2,t} = p_{21}\cdot N_{2,t-1}+ p_{22}\cdot N_{2,t-1} }
##' @title Migrate
##' @name migrate
##' @param N An array containing substock abundance by area (first dimension) and other variables
##' @param M an array with migration matricies, \code{M[i,j]} being the
##'   proportion moving from area j to area i
##' @return the migrated substock abundance array
##' @author Bjarki Þór Elvarsson
migrate <- function(N,M){
  numofareas <- dim(N)[1]
  ## Snapshot of the pre-migration abundances: every destination area must
  ## be computed from N_{t-1}.  The previous version updated N in place
  ## inside the loop, so all areas after the first were computed from
  ## already-migrated numbers, contradicting N_t = P_t N_{t-1}.
  N.pre <- N
  for(area in 1:numofareas){
    ## N_t[area] = sum_j p_{area,j} * N_{t-1}[j]
    N[area,,] <- apply(M[area,]*N.pre,c(2,3),sum)
  }
  return(N)
}
##' The timestep (or timesteps) on which recruitment takes place is
##' defined by the user.
##' Given \eqn{n_{t}} recruits, at timestep t, with mean length
##' \eqn{\mu}{mu} and
##' standard deviation of length \eqn{\sigma}{sigma}, the number of recruits in
##' lengthgroup i is calculated by:
##' \deqn{N_{i,1,t} = n_t(\Phi(\frac{l_{i+1}-\mu}{\sigma})-\Phi(\frac{l_{i}-\mu}{\sigma}))}
##' As for the initial population, the number of recruits in each length
##' groups is given in the recruit input file.
##' @title Recruitment
##' @name recruits
##' @param n Vector of the number of recruited individuals per timestep
##' @param mu Mean length of the recruits.
##' @param sigma Standard deviation of length of the recruits.
##' @param l Vector of length groups
##' @param numofareas Number of areas
##' @param probarea Vector of proportions living in an area.
##' @param numobs Number of years.
##' @param numoftimesteps Number of observations per year.
##' @return array of recruits by area x length x timestep
recruits <- function(n,mu,sigma,
                     l,numofareas,probarea,
                     numobs,numoftimesteps)
{
  ## length distribution of the single recruitment age times the number of
  ## recruits in each timestep: lengthgroups x timesteps
  Temp <- distr(mu,sigma,l)%*%t(n)
  ## Replicate across areas and weight by the probability of being in each
  ## area.  Bug fix: this previously used rep(Temp, each = 2), silently
  ## mis-assigning recruits whenever numofareas != 2 (cf. firststep, which
  ## correctly uses 'each = numofareas').
  rec <- array(rep(Temp,each=numofareas),c(numofareas,dim(Temp)))*probarea
  dimnames(rec) <- list(area = 1:numofareas,
                        length = min(l):(max(l)-1),
                        time=paste(sprintf('Year_%s',rep(1:numobs,
                                             each=numoftimesteps)
                                           ),
                                   sprintf('Step_%s',rep(1:numoftimesteps,
                                             numobs)),
                                   sep='_'))
  return(rec)
}
##' The suitability function for predation used in the R model is:
##' \deqn{S_{pred,prey}(L,l) = \frac{\delta}{1+e^{-\alpha-\beta l-\gamma L}}}{S_{pred,prey}(L,l) = \frac{delta}{1+e^{-alpha-beta l-gamma L}}}
##' With one predator, one prey and otherfood the equation becomes:
##' \deqn{C_{L,l}=N_{L}M_{L}\Psi_{L}\frac{F_{L,l}}{\sum_lF_{L,l}+OA}}
##' \deqn{=N_{L}M_{L}\frac{F_{L,l}}{\sum_lF_{L,l}+OA+HA}}
##' where $O$ is the density of otherfood.
##' @title Prey suitability
##' @name suitability
##' @param params numeric vector of suitability parameters; its
##'   interpretation depends on \code{type}
##' @param l prey length group(s)
##' @param L predator length group(s); the default \code{c(0)} is the
##'   convention for "no predator length structure" (e.g. a fleet)
##' @param type suitability function: one of 'constant', 'straightline',
##'   'exponential', 'exponentiall50' (aliases 'expsuitfuncl50',
##'   'newexponentiall50'), 'richards', 'andersen', 'andersenfleet'
##'   or 'gamma'
##' @param normalize Logical, should the output be normalized (only
##'   applied when \code{to.data.frame} is \code{TRUE})
##' @param to.data.frame Logical, return a data.frame instead of an array
##' @return matrix of suitabilities, rows predator length and columns prey
##'   length (or a long-format data.frame with columns L, l and suit when
##'   \code{to.data.frame} is \code{TRUE})
##' @export
suitability <- function(params,
                        l,
                        L=c(0),
                        type = 'exponential',
                        to.data.frame = FALSE,
                        normalize = FALSE)
{
  ## the fleet variant of andersen fixes the "predator length" to params[6]
  if(tolower(type) == 'andersenfleet'){
    type <- 'andersen'
    L <- params[6]
  }
  if(tolower(type) == 'constant'){
    S <- array(params[1],c(length(L),length(l)))
  } else if(tolower(type) == 'straightline') {
    S <- array(rep(l*params[1] + params[2],each = length(L)),
               c(length(L),length(l)))
  } else if(tolower(type) == 'exponential'){
    S <- array(params[4]/(1+exp(-(params[1]+params[2]*rep(l,each=length(L))+
                                    params[3]*rep(L,length(l))))),
               c(length(L),length(l)))
  } else if(tolower(type) == 'exponentiall50' |
            tolower(type) == 'expsuitfuncl50' |
            tolower(type) == 'newexponentiall50'){
    S <- array(rep(1/(1+exp(-params[1]*(l - params[2]))),each = length(L)),
               c(length(L),length(l)))
  } else if(tolower(type) == 'richards') {
    S <- array(params[4]/(1+exp(-(params[1]+params[2]*rep(l,each=length(L))+
                                    params[3]*rep(L,length(l))))),
               c(length(L),length(l)))^(1/params[5])
  } else if(tolower(type) == 'andersen') {
    l.tmp <- rep(l,each=length(L))
    L.tmp <- rep(L,length(l))
    ## L = c(0) means no predator length structure; fall back on the
    ## median prey length.  Bug fix: the previous scalar test 'if(L==0)'
    ## is an error for vector-valued L in R >= 4.2.
    if(length(L) == 1 && L[1] == 0)
      L.tmp <- stats::median(l.tmp)
    S <- array(params[1] + params[3]*
                 ifelse(log(L.tmp/l.tmp) < params[2],
                        exp(-(log(L.tmp/l.tmp)-params[2])^2/params[5]),
                        exp(-(log(L.tmp/l.tmp)-params[2])^2/params[4])),
               c(length(L),length(l)))
  } else if(tolower(type) == 'gamma'){
    S <- array(rep((l/((params[1] - 1)*params[2]*params[3]))^(params[1] -1) *
                     exp(params[1] - 1 - l/(params[2]*params[3])),
                   each = length(L)),
               c(length(L),length(l)))
  } else {
    stop(sprintf('Error in suitability -- %s not defined',type))
  }
  if(to.data.frame){
    dimnames(S) <- list(L=L,l=l)
    S <- as.data.frame.table(S,responseName = 'suit')
    if(normalize)
      S$suit <- S$suit/max(S$suit)
    S$L <- as.numeric(as.character(S$L))
    S$l <- as.numeric(as.character(S$l))
  } else {
    dimnames(S) <- list(sprintf('Pred_length_%s',L),
                        sprintf('Prey_length_%s',l))
  }
  return(S)
}
#' Write suitability line for a predator
#' @name pred_suit
#' @param pred Character. The name of the predator
#' @param prey Character. The preyname of the stock to be eaten
#' @param fun Character. The selection function for the predator
#' @param params A list of named parameters that are needed by \code{fun}.
#' Names will be used to output the switches to be used by gadget.
#' Alternatively, a vector of numbers can be used.
#' @return A character vector of the form
#' <preyname> function <functionname> <vector of parameters>
#' @examples
#' library(magrittr) # import %>% function
#' path <- tempfile()
#' gadgetstock('garfish', path, missingOkay=TRUE) %>%
#'   gadget_update('doeseat',
#'                 name = 'comm',
#'                 suitability = pred_suit(pred='garfish',
#'                                         prey='zebrafish',
#'                                         fun='newexponentiall50',
#'                                         params=list('alpha', 'l50')))
#' @author Paul Frater
#' @export
pred_suit <- function(pred=NA,
                      prey=NA,
                      fun='newexponentiall50',
                      params=NULL) {
  ## Numeric parameters are written verbatim; anything else becomes a
  ## gadget switch of the form #<pred>.<prey>.<param>.
  ## BUGFIX: the original used ifelse(is.numeric(params), params, ...);
  ## ifelse() shapes its result after the scalar test, so a numeric
  ## vector of length > 1 was silently truncated to its first element.
  param_string <- if (is.numeric(params)) {
    paste(params, collapse = " ")
  } else {
    do.call(paste, lapply(params, function(x) {
      if (is.numeric(x)) {
        x
      } else {
        sprintf('#%1$s.%2$s.%3$s',
                pred, prey, x)
      }
    }))
  }
  paste0('\n',
         paste(prey, 'function', fun, param_string,
               sep='\t'))
}
#' Write suitability line for the surveydistribution likelihood component
#' @name surveydist_suit
#' @param survey.name Character. The name of the survey
#' @param stock Character. The name of the stock surveyed
#' @param fun Character. The selection function for the survey
#' @param params A list of named parameters that are needed by \code{fun}.
#' Names will be used to output the switches to be used by gadget.
#' Alternatively, a vector of numbers can be used.
#' @return A character vector of the form
#' function <functionname> <vector of parameters>
#' @examples
#' library(magrittr) # import %>% function
#' path <- tempfile()
#' gadgetlikelihood('likelihood', path, missingOkay=TRUE) %>%
#'   gadget_update('surveydistribution',
#'                 name = 'ldist.spr',
#'                 weight = 1,
#'                 data = data.frame(year = 1, step = 1, area = 1, age = 1, length = 1, number = 1),
#'                 parameters = quote(exp(spr.si.beta)) %>%
#'                   to.gadget.formulae(),
#'                 suitability = surveydist_suit(stock = 'zebrafish',
#'                                               fun = 'constant',
#'                                               params = 1),
#'                 stocknames = 'zebrafish')
#' @author Paul Frater
#' @export
surveydist_suit <- function(survey.name='survey',
                            stock=NULL,
                            fun='newexponentiall50',
                            params=NULL) {
  ## Numeric parameters are written verbatim; anything else becomes a
  ## gadget switch of the form #<stock>.<survey.name>.<param>.
  ## BUGFIX: ifelse(is.numeric(params), params, ...) truncated numeric
  ## vectors of length > 1 to their first element (ifelse shapes its
  ## result after the scalar test); use if/else instead.
  param_string <- if (is.numeric(params)) {
    paste(params, collapse = " ")
  } else {
    do.call(paste, lapply(params, function(x) {
      if (is.numeric(x)) {
        x
      } else {
        sprintf('#%1$s.%2$s.%3$s',
                stock, survey.name, x)
      }
    }))
  }
  paste0(paste('function', fun, param_string,
               sep='\t'))
}
## Redistribute stock totals over the second dimension of 'Abundance'
## according to the per-stock mixing proportions.
overlap <- function(Abundance,mixing){
  ## Sum out the middle dimension: totals per stock and age.
  stock.num <- plyr::aaply(Abundance,c(1,3),
                    function(x) sum(x))[dimnames(Abundance)$stock,
                                        dimnames(Abundance)$age]
  ## NOTE(review): assumes 'mixing' is indexed as mixing[area, stock]
  ## and that Abundance[stock, , ] is (area x age) after the transpose
  ## -- confirm against the caller, this is not visible from here.
  for(stock in dimnames(Abundance)$stock)
    Abundance[stock,,] <- t(stock.num[stock,]%o%mixing[,stock])
  return(Abundance)
}
## Move abundance between stocks: the updated numbers of stock 'to' are
## the dispersion-weighted sum of every stock's original numbers.
## 'dispersion' is indexed as dispersion[from, to].
dispersion <- function(Abundance,dispersion){
  original <- Abundance
  redistributed <- original * 0
  stocks <- dimnames(redistributed)$stock
  for (destination in stocks) {
    for (origin in stocks) {
      redistributed[destination, , ] <- redistributed[destination, , ] +
        original[origin, , ] * dispersion[origin, destination]
    }
  }
  redistributed
}
## Initial population by area and age: numbers at age fall geometrically
## with constant mortality M, scaled so that the total over all ages
## equals init.abund, then split across areas by probarea.
init.pop <- function(init.abund,M,maxage,probarea){
  ages <- 0:maxage
  ## Numbers in the first age group (geometric-series normalisation).
  cohort0 <- init.abund * (1 - exp(-M)) / (1 - exp(-M * (maxage + 1)))
  at.age <- cohort0 * exp(-M * ages)
  ## outer() gives an (area x age) matrix: probarea[a] * at.age[age].
  array(outer(probarea, at.age), c(length(probarea), maxage + 1))
}
#tag.experiment <- function(Abundance,tag.number){
# tag.number*Abundance/sum(Abundance)
#}
## Density-dependent births per stock, gender and area.  Both genders
## receive the same number: B * N scaled by 1 + A * (1 - (total/K)^z),
## where 'total' is the stock's summed abundance and K its capacity.
Births <- function(B,N,A,z,K){
  stocks <- dimnames(N)$stock
  births <- array(0, c(dim(N)[1], 2, dim(N)[2]),
                  dimnames = list(stock = stocks,
                                  gender = c('Male','Female'),
                                  area = dimnames(N)$area))
  for (stk in stocks) {
    density.factor <- 1 + A * (1 - (sum(N[stk, ]) / K[stk])^z)
    per.area <- B * N[stk, ] * density.factor
    births[stk, 'Male', ] <- per.area
    births[stk, 'Female', ] <- per.area
  }
  births
}
## Stock-recruitment functions.  'type' selects among a simple
## spawning-stock-biomass rule, a fecundity formula (extra arguments a
## and l via ...), Ricker, and Beverton-Holt.  Unknown types return
## NULL, as before.
spawnfunc <- function(type='simplessb',N,W,p,...){
  fecundity <- function(N,W,p,a,l){
    p[1]*sum(outer(l^p[2],a^p[3])*N^p[4]*W^p[5])
  }
  simplessb <- function(N,W,p){
    p[1]*sum(N*W)
  }
  ricker <- function(N,W,p){
    S <- sum(N*W)
    p[1]*S*exp(-p[2]*S)
  }
  bevertonholt <- function(N,W,p){
    S <- sum(N*W)
    p[1]*S/(p[2] + S)
  }
  ## switch() matches the type name exactly, mirroring the original
  ## chain of if(type == ...) blocks.
  switch(type,
         simplessb    = simplessb(N,W,p),
         fecundity    = fecundity(N,W,p,...),
         ricker       = ricker(N,W,p),
         bevertonholt = bevertonholt(N,W,p))
}
|
e31f34552ed001d611d7131f73076f32c348836d | 2ba22f489011cfb61d6727ab522bf3904f78eefc | /R/summary.catatis.R | b1538ea18f967252a944e277df25a42ad43a6840 | [] | no_license | cran/ClustBlock | 847297472d9cc6f05bad33b23fd78a48938dead7 | eed656e469929805b6c72f465912c965fe9f580f | refs/heads/master | 2023-07-07T05:26:15.786420 | 2023-06-29T17:00:02 | 2023-06-29T17:00:02 | 174,553,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,397 | r | summary.catatis.R | ##=============================================================================
##' @title Show the CATATIS results
##'
##' @usage
##' \method{summary}{catatis}(object, ...)
##'
##' @description
##' This function shows the CATATIS results
##'
##'
##' @param object object of class 'catatis'.
##'
##' @param ... further arguments passed to or from other methods
##'
##'
##' @return a list with:
##' \itemize{
##' \item homogeneity: homogeneity of the subjects (in percentage)
##' \item weights: the weights associated with the subjects to build the compromise
##' \item eigenvalues: the eigenvalues associated to the correspondance analysis
##' \item inertia: the percentage of total variance explained by each axis of the CA
##' }
##'
##'
##' @keywords CATA RATA
##'
##'
##' @seealso \code{\link{catatis}}
##'
##' @export
##'=============================================================================
summary.catatis=function(object, ...)
{
  ## Guard: this S3 method is only meaningful for catatis fits.
  ## (inherits() replaces the previous 'inherits(...) == FALSE' test.)
  if(!inherits(object, "catatis"))
  {
    stop("The class of the object must be 'catatis'")
  }
  ## Collect the relevant slots under user-facing names.
  ## (Dropped the unused local 'NameBlocks' from the original.)
  res=list(homogeneity=object$homog, weights=object$weights,
           eigenvalues=object$eigenvalues, inertia=object$inertia)
  return(res)
}
|
1dcdcf36c708b396f88be86e0e97aef28caf8ee0 | 829e0f6b0f9bed7fa23288177ae5f04ffcd39f96 | /R/whiskerplot.R | f9068b83a499784d36f1ce512331d1050c2f328e | [] | no_license | awong234/jagsUI | 325df96e2af1f7d4c46de2f204beada5ef786b5b | 26589367d0ba622e217bbac3bd68b795f391720a | refs/heads/master | 2021-07-12T20:27:18.160216 | 2019-01-02T19:37:28 | 2019-01-02T19:37:28 | 145,328,837 | 0 | 0 | null | 2018-08-19T18:43:29 | 2018-08-19T18:43:29 | null | UTF-8 | R | false | false | 1,278 | r | whiskerplot.R |
## Whisker plot of posterior summaries from a jagsUI fit: one point per
## parameter at its posterior mean, with a vertical whisker spanning the
## requested posterior quantiles.
## x          - a jagsUI object (posterior samples in x$samples)
## parameters - parameter names (shorthand expanded by translate.params)
## quantiles  - lower/upper posterior quantiles for the whiskers
## zeroline   - draw a horizontal reference line at zero?
whiskerplot <- function(x,parameters,quantiles=c(0.025,0.975),zeroline=TRUE){
  ## NOTE(review): class(x)!="jagsUI" is fragile for subclasses;
  ## inherits(x, "jagsUI") would be the idiomatic check.
  if(class(x)!="jagsUI"){stop('Requires jagsUI object as input')}
  devAskNewPage(ask=FALSE)
  #Generate a list of all specified output parameters
  #Expand from shorthand if necessary
  parameters <- translate.params(x,parameters)
  n <- length(parameters)
  xstructure <- c(1:n)
  ## Small helper: numeric quantile of a sample vector.
  qs <- function(x,y){as.numeric(quantile(x,y))}
  ## NOTE(review): ymin/ymax are initialised here but fully overwritten
  ## by the scalars below; only means/tops/bottoms need length n.
  means <- tops <- bottoms <-ymin <- ymax <- vector(length=n)
  ## Posterior mean and whisker endpoints for each parameter.
  for (i in 1:n){
    hold <- unlist(x$samples[,parameters[i]])
    means[i] <- mean(hold)
    tops[i] <- qs(hold,quantiles[2])
    bottoms[i] <- qs(hold,quantiles[1])
  }
  ymin <- min(bottoms)
  ymax <- max(tops)
  plot(xstructure,means,xaxt="n",ylim=c(ymin,ymax),xlim=c(0,n+1),xlab="Parameters",ylab="Parameter Values",pch=19,cex=1.5,
       main=paste('Whisker plot, quantiles (',quantiles[1],' - ',quantiles[2],')',sep=""))
  axis(side=1, at=c(1:n), labels=parameters)
  box()
  if(zeroline){abline(h=0)}
  ## Draw the whisker and its end caps for each parameter.
  for (i in 1:n){
    segments(x0=xstructure[i],y0=bottoms[i],x1=xstructure[i],y1=tops[i], lwd=2)
    segments(x0=xstructure[i]-0.2,y0=bottoms[i],x1=xstructure[i]+0.2,y1=bottoms[i])
    segments(x0=xstructure[i]-0.2,y0=tops[i],x1=xstructure[i]+0.2,y1=tops[i])
  }
}
23fb58ef7ec2aaf2238667d2d628d386ecb4c703 | f44f7fff3d88edd3549b4531b289d2e12ae6622b | /Code/04_sample_for_irr.R | 1300f0a95032742a0414984618600e1c29357b0a | [] | no_license | lizmckenna/ps239t-final-project | a846ed615069719575778972332289e9ac996f38 | bbbdb98c8dbce0a4d60dd9f2e3248e81b403f652 | refs/heads/master | 2016-08-09T22:59:30.330420 | 2015-12-11T22:17:32 | 2015-12-11T22:17:32 | 47,226,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,189 | r | 04_sample_for_irr.R | ---
title: "Final project: 04_sample_for_irr"
author: "Liz McKenna"
date: "Fall 2015"
output: "upload R script to github repo"
---
### setup environment
# NOTE(review): setwd() to an absolute personal path and rm(list=ls())
# make this script machine-specific and destructive to the caller's
# workspace; consider relative paths and removing the rm() call.
setwd("/Users/lizmckenna/Desktop/ps239t-final-project")
getwd()
rm(list=ls())
#read in analysis dataset; complete list of protest events
irr_full <- read.csv("Data/analysis_dataset.csv", header=TRUE, encoding = "UTF-8")
#subset to a stratified sample of articles (weighted by city observations)
#here was my attempt to write a function to do this but alas, it didn't work
# Draw y rows at random (without replacement, matching the per-city
# draws below) from the rows of irr_full belonging to city x.
# Fixes vs. the first draft: uses irr_full (not the undefined 'mydata'),
# compares city against the argument rather than the literal string 'x',
# and indexes data-frame ROWS with a trailing comma.
irr.sample <- function(x,y)
{
  city.subset <- subset(irr_full, city == x)
  city.subset[sample(seq_len(nrow(city.subset)), y, replace = FALSE), ]
}
# The second draft is kept only as a comment: defining a function named
# 'subset' masks base::subset (which would break every subset() call
# below), and 'return x.subset' is a parse error that prevented this
# whole script from sourcing.
# subset <- function(x){
#   x.subset <- subset(irr_full, city =="x")
#   return(x.subset)
# }
#subset by city
bh = subset(irr_full, city == 'belo_horizonte')
brasilia = subset(irr_full, city == 'brasilia')
sao_paulo = subset(irr_full, city == 'sao_paulo')
rio = subset(irr_full, city == 'rio')
campinas = subset(irr_full, city == 'campinas')
curitiba = subset(irr_full, city == 'curitiba')
fortaleza = subset(irr_full, city == 'fortaleza')
recife = subset(irr_full, city == 'recife')
porto_alegre = subset(irr_full, city == 'porto_alegre')
salvador = subset(irr_full, city == 'salvador')
# Share (in percent) of the full dataset contributed by one city; the
# hard-coded sample sizes below follow these proportions.
dim(salvador)[1]/dim(irr_full)[1]*100
# Stratified sample: draw a fixed number of rows per city, without
# replacement.
# NOTE(review): no set.seed() call, so the sample differs between runs;
# the repeated pattern could be collapsed into a loop or lapply over a
# named vector of sizes.
bh_irr <- bh[sample(1:nrow(bh), 3,replace=FALSE),]
bras_irr <- brasilia[sample(1:nrow(brasilia), 6,replace=FALSE),]
cur_irr <- curitiba[sample(1:nrow(curitiba), 2,replace=FALSE),]
camp_irr <- campinas[sample(1:nrow(campinas), 1,replace=FALSE),]
fort_irr <- fortaleza[sample(1:nrow(fortaleza), 2,replace=FALSE),]
pa_irr <- porto_alegre[sample(1:nrow(porto_alegre), 2,replace=FALSE),]
rec_irr <- recife[sample(1:nrow(recife), 2,replace=FALSE),]
rio_irr <- rio[sample(1:nrow(rio), 12,replace=FALSE),]
sal_irr <- salvador[sample(1:nrow(salvador), 2,replace=FALSE),]
sp_irr <- sao_paulo[sample(1:nrow(sao_paulo), 13,replace=FALSE),]
# Stack the per-city samples back into one data frame.
irr <- rbind(bh_irr, bras_irr, cur_irr, camp_irr, fort_irr, pa_irr, rec_irr, rio_irr, sal_irr, sp_irr)
#### write file to import to qualtrics for raters to process #####
write.csv(irr,"Data/irr_sample.csv",row.names = F) # write all
|
32e9cb39167977287cac5f1c9503ab5342607cef | 0ea96e7b4d16c6595415d657103f7a2391421706 | /man/Hicexp-class.Rd | f0fdcac829ad0e7b377ef8cb7bc90cedd030213a | [
"MIT"
] | permissive | dozmorovlab/multiHiCcompare | c913334612e9b6fc8807341d370f11715ed51da0 | dcfe4aaa8eaef45e203f3d7f806232bb613d2c9b | refs/heads/master | 2022-04-29T02:56:04.486050 | 2022-04-22T01:43:22 | 2022-04-22T01:43:22 | 144,622,761 | 4 | 7 | NOASSERTION | 2020-03-30T13:53:16 | 2018-08-13T19:10:22 | R | UTF-8 | R | false | true | 739 | rd | Hicexp-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{Hicexp-class}
\alias{Hicexp-class}
\title{An S4 class for working with Hi-C data}
\value{
Hicexp class
}
\description{
An S4 class for working with Hi-C data
}
\section{Slots}{
\describe{
\item{\code{hic_table}}{A data.table containing the sparse upper triangular matrix
for your Hi-C data.}
\item{\code{comparison}}{The results of a multiHiCcompare comparison.}
\item{\code{metadata}}{Data.frame for covariate information.}
\item{\code{resolution}}{The resolution of the dataset.}
\item{\code{normalized}}{Indicator for if data has been normalized.}
}}
\examples{
data('hicexp2')
hicexp2
}
|
902dec7b2fdf7f349664f78c8577d3a96a8343c3 | f25bb59059686ff0fee72819b310aea531e7786b | /R/RcppExports.R | e16bf9516cb4e0fe2d71b65f01caf8fd7513e9a2 | [] | no_license | cran/mmpca | 6b01c198de542d24c4ca2b6e819593d3ee463cb1 | 2da77dd0b96a5b5ad99191948fb7a2df8c61dfcf | refs/heads/master | 2022-11-21T02:53:42.491583 | 2022-11-15T07:50:02 | 2022-11-15T07:50:02 | 236,627,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 870 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is generated by Rcpp::compileAttributes();
# comments added here are lost on regeneration.  Each function below is
# a thin wrapper that forwards its arguments unchanged to the registered
# compiled routine of the same name via .Call(); the actual computation
# lives in the package's C++ sources.
c_objective <- function(theta, x_list, masks_list, inds, k, p, lambda) {
    .Call(`_mmpca_c_objective`, theta, x_list, masks_list, inds, k, p, lambda)
}
c_grad <- function(theta, x_list, masks_list, inds, k, p, lambda, n_threads) {
    .Call(`_mmpca_c_grad`, theta, x_list, masks_list, inds, k, p, lambda, n_threads)
}
c_Vxi <- function(xi) {
    .Call(`_mmpca_c_Vxi`, xi)
}
c_optim_mmpca <- function(start, x_list, masks_list, inds, k, p, lambda, max_iter, trace, n_threads) {
    .Call(`_mmpca_c_optim_mmpca`, start, x_list, masks_list, inds, k, p, lambda, max_iter, trace, n_threads)
}
c_invVinner <- function(t) {
    .Call(`_mmpca_c_invVinner`, t)
}
# Returns its .Call result invisibly: called for its side effect
# (presumably thread-pool initialisation -- see the C++ source).
c_init_parallel <- function() {
    invisible(.Call(`_mmpca_c_init_parallel`))
}
|
c1a1d91eeff43237ea56e1ce1e11070decc8dd8c | b3e8635e9ed9f42d87c193aa61ce902b4742d1a2 | /man/lpdaac_getmod_dates.Rd | 84a18f449a147b5399a64837e3a712fdf389cc92 | [] | no_license | hxfan1227/MODIStsp | 8398730c3b0253efdc2df1a1649ebaa54bc44fc5 | 70a10eb61f26f48a89a82737ffb33dbf9278b8ad | refs/heads/devel | 2021-01-12T01:04:46.579685 | 2017-01-05T11:49:36 | 2017-01-05T11:49:36 | 78,343,496 | 0 | 1 | null | 2017-01-08T13:11:39 | 2017-01-08T13:11:38 | null | UTF-8 | R | false | true | 1,071 | rd | lpdaac_getmod_dates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MODIStsp_lpdaac_accessoires.R
\name{lpdaac_getmod_dates}
\alias{lpdaac_getmod_dates}
\title{lpdaac_getmod_dates}
\usage{
lpdaac_getmod_dates(dates, date_dirs)
}
\arguments{
\item{dates}{2-element string array specifying start/end dates (yyyy.mm.dd) for which the http addresses of folders in LP DAAC should be retrieved
(e.g., c("2015.1.1", "2015.12.31"))}
\item{date_dirs}{data frame with the full list of folders in the LP DAAC archive for the product of interest}
}
\value{
array of folder names containing data for the modis product acquired in the period specified by "dates"
}
\description{
Accessory function to find the folders corresponding to the requested dates period within the full list retrieved by lpdaac_getmod_dirs
}
\author{
Original code by Babak Naimi (.getModisList, in ModisDownload.R - http://r-gis.net/?q=ModisDownload )
Modified to adapt it to the MODIStsp scheme and to the http archive (instead of the old FTP) by Lorenzo Busetto, phD (2014-2015)
email: busetto.l@irea.cnr.it
license GPL 3.0
}
|
6070c850343bf92996ce410f09cae3894d68a5d2 | 70ae6baa8ba5ec4af1cb683e81a7f7dfb27ac77b | /man/layer_line.Rd | a9892aec86a559f592d0ca7b625c6c74b3abb08f | [] | no_license | nteetor/chartisan | e89e86ba683e07748aa8f00c1c8081e2909dfa09 | d2280928a4dd6a2337a76cfad2c638238eb38e17 | refs/heads/master | 2021-09-04T06:53:09.061171 | 2018-01-16T22:07:16 | 2018-01-16T22:07:16 | 108,591,025 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 413 | rd | layer_line.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer-line.R
\name{layer_line}
\alias{layer_line}
\title{A line chart}
\usage{
layer_line(chart, smooth = FALSE)
}
\arguments{
\item{chart}{A \code{chartist} object.}
\item{smooth}{One of \code{TRUE} or \code{FALSE}, specifying if the line should be
smoothed, defaults to \code{FALSE}.}
}
\description{
Add a line layer to a chart.
}
|
86fa3ead63cee1e46ce2e224f6833e08608d4099 | 23da05ef70a4cffe9c2ee2a58982378dd575327c | /URA_Ch12.R | 9e9eb1e17a07305c6f4df31c4d78f57576d555f8 | [] | no_license | rmuhumuza/URA-Rcode | 3fd74ee000363743ba8a67dbf886c1f085757609 | 98bf00df9877cee416c28af33b1322becf0d79e5 | refs/heads/master | 2022-04-14T00:59:34.256271 | 2020-04-09T03:11:22 | 2020-04-09T03:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,801 | r | URA_Ch12.R |
## Figure 12.1 (need to run add.loess function first)
Worth = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/Pass.txt")
attach(Worth)
fit = lm(P.assets ~ Age)
abs.resid = abs(fit$residuals)
plot(Age, abs.resid); add.loess(Age, abs.resid)
add.loess(Age, abs.resid, span=2, lty=2)
## Section 12.1
fit.0 = lm(P.assets ~ Age, weights=1/Age^0)
summary(fit.0); logLik(fit.0)
fit.1 = lm(P.assets ~ Age, weights=1/Age^1)
summary(fit.1); logLik(fit.1)
fit.2 = lm(P.assets ~ Age, weights=1/Age^2)
summary(fit.2); logLik(fit.2)
## Figure 12.2
par(mfrow=c(2,2))
par(mar=c(4, 4, 1, 4))
plot(Age, P.assets, xlab=""); abline(-6.25592, 0.27928, lty=1)
abline(-5.87593, 0.26825, lty=2); abline(-5.65151, 0.26137, lty=3)
xlist = seq(20, 70, .1)
Low0 = -6.2559+0.27928*xlist - 2*1.651
Upp0 = -6.2559+0.27928*xlist + 2*1.651
plot(Age, P.assets, ylab="", xlab="", ylim = c(-5, 20))
points(xlist, Low0, type="l", lty=1); points(xlist, Upp0, type="l",
lty=1)
Low1 =-5.87593+0.268255*xlist - 2*.2691*xlist^(1/2)
Upp1 =-5.87593+0.268255*xlist + 2*.2691*xlist^(1/2)
plot(Age, P.assets,ylim = c(-5, 20))
points(xlist, Low1, type="l"); points(xlist, Upp1, type="l")
Low2 = -5.65151+0.26137*xlist - 2*.0448*xlist
Upp2 = -5.65151+0.26137*xlist + 2*.0448*xlist
plot(Age, P.assets,ylab="",ylim = c(-5, 20))
points(xlist, Low2, type="l"); points(xlist, Upp2, type="l")
## Figure 12.3
par(mfrow=c(1,2))
n=500
set.seed(123)
x = 5*rexp(n)
y = 2*x + x*rnorm(n)
## The weight is inversely proportional to the conditional variance
w = 1/x^2
fit.ols = lm(y ~ x)
ols.pred = fit.ols$fitted.values
fit.wls = lm(y ~ x, weights = w)
wls.pred = fit.wls$fitted.values
## Scatterplot and fitted functions
plot(x, y, pch=".", cex=1.5, ylim=c(-5,120))
points(sort(x), sort(ols.pred), type="l", lty=2)
points(sort(x), sort(wls.pred), type="l", lty=1)
legend("topleft", c("WLS", "OLS"), lty = c(1,2))
## Fitted OLS and WLS lines and data where variance is small
plot(x[x<1], y[x<1], ylim = c(-1,4))
points(sort(x[x<1]), sort(ols.pred[x<1]), type="l", lty=2)
points(sort(x[x<1]), sort(wls.pred[x<1]), type="l", lty=1)
legend("topleft", c("WLS", "OLS"), lty = c(1,2))
## Section 12.2.1 (Figure 12.4)
Worth = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/Pass.txt")
attach(Worth); n = nrow(Worth); X = Age
## Conditional-x heteroscedastic simulation model parameters
gamma = 0.05; beta0 = -6; beta1 = 0.25
## Simulation study to understand efficiency of WLS versus OLS
NSIM = 20000
b1.ols = numeric(NSIM); b1.wls = numeric(NSIM)
for (i in 1:NSIM) {
Y = beta0 + beta1*X + gamma*X*rnorm(n)
b1.ols[i] = lm(Y ~ X)$coefficients[2]
b1.wls[i] = lm(Y ~ X, weights=1/X^2)$coefficients[2]
}
par(mfrow=c(2,1))
hist(b1.ols, xlim = c(.10,.40), main = "Distribution of OLS Estimates")
hist(b1.wls, xlim = c(.10,.40), main = "Distribution of WLS Estimates")
sd(b1.ols); sd(b1.wls)
## Section 12.3
ba = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/gpa_gmat.txt")
attach(ba)
y = gpa; x1 = gmat
x2 = ifelse(degree=="P", 1,0)
library(maxLik)
loglik <- function(param) {
b0 = param[1]
b1 = param[2]
b2 = param[3]
g0 = param[4]
g1 = param[5]
g2 = param[6]
mean = b0 + b1*x1 + b2*x2
ln.sd = g0 + g1*x1 + g2*x2 ; sd = exp(ln.sd)
z = (y - mean)/sd
ll = sum(dnorm(z,log=T) - ln.sd)
ll
}
fit = maxLik(loglik, start=c(lm(y~x1+x2)$coefficients,0,0,0))
summary(fit)
## assuming constant variance
library(maxLik)
loglik0 <- function(param) {
b0 = param[1]
b1 = param[2]
b2 = param[3]
g0 = param[4]
mean = b0 + b1*x1 + b2*x2
ln.sd = g0; sd = exp(ln.sd)
z = (y - mean)/sd
ll = sum(dnorm(z,log=T) - ln.sd)
ll
}
fit0 = maxLik(loglik0, start=c(lm(y~x1+x2)$coefficients,0))
summary(fit0)
## Figure 12.5
g = fit$estimate; g0 = g[4]; g1 = g[5]; g2 = g[6]
gmat.g = seq(400,800,10)
s.phd = exp(g0 + g1*gmat.g + g2)
s.masters = exp(g0 + g1*gmat.g)
plot(gmat.g,s.phd, type="l", ylim = c(0,.4),
ylab="Estimated Standard Dev. of GPA", xlab="GMAT Score")
points(gmat.g,s.masters, type="l", lty=2)
abline(h = exp(fit0$estimate[4]), lwd=2)
legend("bottomright", c("Ph.D.", "Masters"), lty = c(1,2))
lm(y ~ x1 + x2)$ coefficients
summary(lm(y ~ x1 + x2))$sigma
AIC(fit0)
AIC(fit)
## Likelihood ratio test
LL = fit$maximum
LL0 = fit0$maximum
Chi.sq = 2*(LL - LL0)
Chi.sq
pval = 1- pchisq(Chi.sq, 6-4)
pval
## using gamlss library
library(gamlss)
mod=gamlss(y~x1+x2,sigma.fo=~x1+x2, data=ba)
summary(mod)
## Section 12.5.1 (Figure 12.6)
Worth = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/Pass.txt")
attach(Worth); n = nrow(Worth); X = Age;
## Conditional-x heteroscedastic simulation model parameters:
gamma = 0.05; beta0 = -6; beta1 = 0.25
## Simulation study to understand how well e2^2 estimates sigma2^2
NSIM = 20000
e2 = numeric(NSIM)
for (i in 1:NSIM) {
eps = gamma*X*rnorm(n)
Y = beta0 + beta1*X + eps
e2[i] = lm(Y ~ X)$residuals[2]
}
hist(e2^2, main="", breaks=50, xlab="Squared Second Residual")
abline(v=(0.05*43)^2, lty = 1, lwd=2)
abline(v=mean(e2^2), col="gray", lty=2, lwd=2)
## HC standard errors
ba = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/gpa_gmat.txt")
attach(ba)
y = gpa
x1 = gmat
x2 = ifelse(degree=="P", 1,0)
library(car)
fit = lm(y ~ x1 + x2)
## Homoscedastic analysis
summary(fit)$coefficients[,1:3]
## Heteroscedasticity-consistent analysis using method HC3
se.hc3 = sqrt(diag(hccm(fit, type = "hc3")))
b = fit$coefficients
t = b/se.hc3
cbind(b, se.hc3, t)
## Investigate the performance of HC
Worth = read.table("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/Pass.txt")
attach(Worth); n = nrow(Worth); X = Age;
## Conditional-x heteroscedastic simulation model parameters:
gamma = 0.05; beta0 = -6; beta1 = 0.25
## Conditional-x heteroscedastic simulation model
## Simulation study to estimate the true confidence level of ordinary
## and HC intervals
library(car)
NSIM = 10000
b1 = numeric(NSIM)
se.b1 = numeric(NSIM)
se.b1.robust = numeric(NSIM)
for (i in 1:NSIM) {
eps = gamma*X*rnorm(n)
Y = beta0 + beta1*X + eps
fit = lm(Y~X)
fit1 = summary(lm(Y~X))
b1[i] = fit1$coefficients[2,1]
se.b1[i] = fit1$coefficients[2,2]
se.b1.robust[i] = sqrt(diag(hccm(fit, type = "hc3")))[2]
}
chk1 = (b1 - 2*se.b1 < beta1) *(b1 + 2*se.b1 > beta1)
mean(chk1)
chk2 = (b1 - 2*se.b1.robust < beta1) *(b1 + 2*se.b1.robust > beta1)
mean(chk2)
## Section 12.6
X = c(3.1, 4.3, 0.7); Y = c(1.2, 2.0, 1.3)
# Analysis based on n=3
XY.1 = data.frame(X,Y); summary(lm(Y~X, data=XY.1))
# Analysis based on n=6
XY.2 = rbind(XY.1,XY.1); summary(lm(Y~X, data=XY.2))
# Analysis based on n=12
XY.3 = rbind(XY.2,XY.2); summary(lm(Y~X, data=XY.3))
# Analysis based on n=24
XY.4 = rbind(XY.3,XY.3); summary(lm(Y~X, data=XY.4))
# Analysis based on n=48
XY.5 = rbind(XY.4,XY.4); summary(lm(Y~X, data=XY.5))
## Section 12.6.1
charity = read.csv("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/charitytax.csv")
attach(charity)
sub.f = as.factor(SUBJECT)
library(nlme)
# ML/GLS fit assuming classical sig^2*I covariance structure
fit.ols = gls(CHARITY ~ INCOME + DEPS + sub.f, method="ML")
# ML/GLS fit assuming block-diagonal AR1 covariances
fit.gls = gls(CHARITY ~ INCOME + DEPS + sub.f,
corr=corAR1(form = ~1 | SUBJECT), method="ML")
summary(fit.ols)$tTable
summary(fit.ols)$sigma
summary(fit.gls)$tTable
summary(fit.gls)$sigma
fit.gls$modelStruct$corStruct
AIC(fit.ols)
AIC(fit.gls)
## Exercise 4
ge = read.csv("https://raw.githubusercontent.com/andrea2719/
URA-DataSets/master/ge_ret_vol.csv")
|
180542cebc8da58e61b839c2a3597328b464dd40 | 43b17584478c0360d0fdced151db43c35728837a | /R/ci.R | 842338d7f6f62d66e860f7f16c4b6090cb882979 | [] | no_license | cran/gitlabr | 51357cc4c136b4d5125a1d39aec63ea62ef509d1 | b8d64273024933804044ca8eeab18930a4611c55 | refs/heads/master | 2022-10-03T07:18:40.338952 | 2022-09-13T10:00:02 | 2022-09-13T10:00:02 | 48,080,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,295 | r | ci.R | #' Define GitLab CI jobs content
#'
#' Exploration of job content is deprecated as of 'gitlabr' 1.1.7.
#' Content of .gitlab-ci.yml file is now created using templates with
#' use_gitlab_ci(type = "check-coverage-pkgdown"). See [use_gitlab_ci()].
#'
#' @export
#' @rdname gitlabci
#' @seealso [use_gitlab_ci()]
#' @return Called for its deprecation warning only; returns the
#'   (invisible) result of [.Deprecated()], not any file content.
#'
#' @examples
#' \dontrun{
#' # Deprecated
#' gl_ci_job()
#' }
gl_ci_job <- function() {
  ## Defunct stub: signals a deprecation warning pointing callers at
  ## use_gitlab_ci().
  .Deprecated('use_gitlab_ci', package = 'gitlabr', old = 'gl_ci_job')
}
#' Add .gitlab-ci.yml file in your current project from template
#'
#' @param image Docker image to use in GitLab ci. If NULL, not specified!
#' @param path destination path for writing GitLab CI yml file
#' @param overwrite whether to overwrite existing GitLab CI yml file
#' @param repo_name REPO_NAME environment variable for R CRAN mirror used
#' @param type type of the CI template to use
#' @param add_to_Rbuildignore add CI yml file and cache path used inside the
#' CI workflow to .Rbuildignore?
#'
#' @details
#' Types available are:
#'
#' - "check-coverage-pkgdown": Check package along with Code coverage with {covr}
#' and {pkgdown} site on GitLab Pages
#' - "check-coverage-pkgdown-renv": Check package built in a fixed {renv} state
#' along with Code coverage with {covr} and {pkgdown} site on GitLab Pages.
#' - "bookdown": Build {bookdown} HTML and PDF site on GitLab Pages
#' - "bookdown-production": Build {bookdown} HTML and PDF site on GitLab Pages.
#' Where default page is for branch named 'production' and "dev/" sub-folder is for
#' 'main' (or 'master') branch.
#'
#' @export
#'
#' @return Used for side effects. Creates a .gitlab-ci.yml file in your directory.
#'
#' @examples
#' # Create in another directory
#' use_gitlab_ci(image = "rocker/verse:latest", path = tempfile(fileext = ".yml"))
#' \dontrun{
#' # Create in your current project with template for packages checking
#' use_gitlab_ci(image = "rocker/verse:latest", type = "check-coverage-pkgdown")
#' }
use_gitlab_ci <- function(image = "rocker/verse:latest",
                          repo_name = "https://packagemanager.rstudio.com/all/__linux__/focal/latest",
                          path = ".gitlab-ci.yml",
                          overwrite = TRUE,
                          add_to_Rbuildignore = TRUE,
                          type = "check-coverage-pkgdown") {
  ## NOTE(review): 'overwrite' is accepted for backward compatibility but
  ## is currently not consulted; writeLines() below always overwrites.
  ## Available types are the yml templates shipped with the installed
  ## package (inst/gitlab-ci).
  choices <- gsub(".yml", "", list.files(system.file("gitlab-ci", package = "gitlabr")))
  type <- match.arg(type, choices = choices, several.ok = FALSE)
  file <- system.file("gitlab-ci", paste0(type, ".yml"), package = "gitlabr")
  ## Fill in the {image} and {repo_name} placeholders of the template.
  lines <- readLines(file)
  lines <- gsub(pattern = "\\{image\\}", replacement = image, x = lines)
  lines <- gsub(pattern = "\\{repo_name\\}", replacement = repo_name, x = lines)
  # {url} placeholder intentionally left for the $CI_API_V4_URL runner variable
  # lines <- gsub(pattern = "\\{url\\}", replacement = url, x = lines)
  writeLines(enc2utf8(lines), path)
  if (isTRUE(add_to_Rbuildignore)) {
    path_build_ignore <- file.path(dirname(path), ".Rbuildignore")
    if (!file.exists(path_build_ignore)) {writeLines("", path_build_ignore)}
    r_build_ignore <- readLines(path_build_ignore)
    ## Escape dots so R CMD build matches the file name literally.
    path_rbuild <- paste0("^", gsub("[.]", "\\\\.", basename(path)), "$")
    if (!path_rbuild %in% r_build_ignore) {
      writeLines(enc2utf8(c(r_build_ignore, path_rbuild)), path_build_ignore)
    }
    r_build_ignore <- readLines(path_build_ignore)
    if (!"^ci/lib$" %in% r_build_ignore) {
      writeLines(enc2utf8(c(r_build_ignore, "^ci/lib$")), path_build_ignore)
    }
    ## BUGFIX(idiom): scalar '&&' (short-circuit) instead of the
    ## element-wise '&' in this scalar if() condition.
    if (grepl("renv", type) && !"^cache$" %in% r_build_ignore) {
      writeLines(enc2utf8(c(r_build_ignore, "^cache$")), path_build_ignore)
    }
  }
}
#' Access the GitLab CI builds
#'
#' List the jobs with `gl_jobs`, the pipelines with `gl_pipelines` or
#' download the most recent artifacts
#' archive with `gl_latest_build_artifact`. For every branch and job combination
#' only the most recent artifacts archive is available.
#' `gl_builds` is the equivalent for GitLab API v3.
#'
#' @param project project name or id, required
#' @param ... passed on to [gitlab()] API call
#' @export
#' @rdname gl_builds
#'
#' @examples \dontrun{
#' # connect as a fixed user to a GitLab instance
#' set_gitlab_connection(
#'   gitlab_url = "https://gitlab.com",
#'   private_token = Sys.getenv("GITLAB_COM_TOKEN"))
#'
#' # Get pipelines and jobs information
#' gl_pipelines(project = "<<your-project-id>>")
#' gl_jobs(project = "<<your-project-id>>")
#' gl_latest_build_artifact(project = "<<your-project-id>>", job = "build")
#' }
gl_pipelines <- function(project, ...) {
  ## GET /projects/:id/pipelines via the generic gitlab() request helper.
  gitlab(gl_proj_req(project = project, "pipelines", ...), ...)
}
#' @export
#' @rdname gl_builds
gl_jobs <- function(project, ...) {
  ## BUGFIX: this wrapper queried the "pipelines" endpoint (copy-paste
  ## from gl_pipelines); the job list lives under /projects/:id/jobs.
  gitlab(gl_proj_req(project = project, "jobs", ...), ...)
}
#' @export
#' @param api_version Since `gl_builds` is no longer working for GitLab API v4,
#'   this must be set to "3" in order to avoid the deprecation warning and the
#'   HTTP error. It currently defaults to "4" with a deprecation message.
#' @rdname gl_builds
gl_builds <- function(project, api_version = 4, ...) {
  ## The "builds" endpoint only exists in API v3; warn unless the caller
  ## explicitly opted into v3.
  if (api_version != 3) {
    .Deprecated("gl_pipelines", package = "gitlabr", old = "gl_builds")
  }
  gitlab(gl_proj_req(project = project, "builds", ...), ...)
}
#' @export
#' @rdname gl_builds
#' @param job Name of the job to get build artifacts from
#' @param ref_name name of ref (i.e. branch, commit, tag)
#' @param save_to_file either a path where to store .zip file or NULL if raw should be returned
#' @return returns the file path if `save_to_file` is TRUE, or the archive as raw otherwise.
gl_latest_build_artifact <- function(project, job, ref_name = get_main(), save_to_file = tempfile(fileext = ".zip"), ...) {
  ## Fetch the newest artifacts archive for the given job/ref as raw bytes.
  archive_req <- gl_proj_req(project = project,
                             c("jobs", "artifacts", ref_name, "download"),
                             ...)
  archive_raw <- gitlab(archive_req, job = job, auto_format = FALSE, ...)
  ## Hand the raw vector back untouched, or persist it and return the path.
  if (is.null(save_to_file)) {
    return(archive_raw)
  }
  writeBin(archive_raw, save_to_file)
  save_to_file
}
|
a2ecb64e7b5c4c07e60058fed6ee0986e3a18e43 | 673460a9d687dd00d4a61038d57583b1baeff2f7 | /man/detectDiffModSamples.Rd | 81153710765d6731a356b6a45d538b22cac36555 | [
"MIT"
] | permissive | satopan/beem-static | 4d5d6a9993afdfb3ad8e72ef49bdf1a690980663 | 1fdc8e05f87ec3162e119c0f53a503edb8785958 | refs/heads/master | 2023-07-20T08:12:05.161115 | 2021-08-24T22:48:20 | 2021-08-24T22:48:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 429 | rd | detectDiffModSamples.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func.EM.r
\name{detectDiffModSamples}
\alias{detectDiffModSamples}
\title{detectDiffModSamples}
\usage{
detectDiffModSamples(err, threshold, filter)
}
\arguments{
\item{err}{the errors defined in `func.EM`}
\item{threshold}{threshold to filter out samples}
}
\description{
detectDiffModSamples
}
\author{
Chenhao Li, Gerald Tan, Niranjan Nagarajan
}
|
50be4f6657573691b12a0f12491aa91b7f8eaa45 | 9dab20a9cbd17aaf84f8c5dc99c08c90259aa5a0 | /MichalWiewiorko1.R | 559cd696cc1df3c580328cbde00ec8d64fd532eb | [] | no_license | Horytnik/R_Training | 460cc9929d3de9d2c308dc9e30d77c1510523d8e | bfb25357316b8c644d23ec0780509cfb5da083df | refs/heads/main | 2023-01-20T23:11:51.052207 | 2020-12-07T15:45:16 | 2020-12-07T15:45:16 | 311,009,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,385 | r | MichalWiewiorko1.R |
#Zadanie 1
# Exercise 1: is `a` evenly divisible by `b`?
#
# a: dividend (numeric).
# b: divisor (numeric).
# Returns TRUE/FALSE. The comparison is already logical, so the original
# `if (...) return(TRUE) else return(FALSE)` was redundant.
# NOTE(review): b == 0 yields NA (a %% 0 is NaN) -- guard at the call site
# if that case can occur.
podzielnoscLiczb <- function(a, b)
{
  a %% b == 0
}
print(podzielnoscLiczb(4,2))
print(podzielnoscLiczb(5,2))
#Zadanie 2
# NOTE(review): if the two legs cover equal distances at 120 and 90, the
# average speed for the whole route is the harmonic mean
# 2/(1/120 + 1/90) ~= 102.86, not the arithmetic mean 105 printed below --
# TODO confirm which answer the exercise expects before changing the code.
print("Średnia predkosc z calej trasy: ")
print((120 + 90)/2)
# Zadanie 3
x<- c(1,2,1,3,1)
y<- c(1,1,1,1,2)
# Exercise 3: Pearson correlation of two equal-length vectors.
#
# x, y: numeric vectors of the same length.
# Returns the Pearson correlation coefficient (complete observations only),
# or an explanatory string when the lengths differ.
# Fix: corrected the typo "Lenght" in the mismatch message; guard clause
# replaces the nested if/else.
pearsonCalc <- function(x, y) {
  if (length(x) != length(y)) {
    return("Length of x is not equal to y")
  }
  cor(x, y, method = "pearson", use = "complete.obs")
}
pearsonCalc(x,y)
# Apply pearsonCalc() to the weight/height columns of the local data file.
daneTab <- read.table("dane.csv", header = TRUE, sep = ";")
pearsonCalc(daneTab$waga, daneTab$wzrost)
# NOTE(review): install.packages() inside a script re-installs the package
# on every run; install once interactively and keep only library() here.
install.packages("ggpubr")
library("ggpubr")
# Scatter plot with regression line and the Pearson coefficient annotated.
ggscatter(daneTab, x = "waga", y = "wzrost",
          add = "reg.line", conf.int = TRUE,
          cor.coef = TRUE, cor.method = "pearson",
          xlab = "Waga [kg]", ylab = "Wzrost[cm]")
# A correlation of 0.979 means that weight and height are strongly related:
# as weight increases, height increases as well (and vice versa).
# Zadanie 4
# Exercise 4: interactively build a data frame. Prompts for comma-separated
# column names, then for each column a comma-separated vector of values.
#
# ile: number of rows the data frame should have.
# Returns the populated data frame. Note that readline() returns text, so
# all values arrive as character strings.
stworzDataFrame <- function(ile=1){
  columns <- c(strsplit(readline("Podaj kolumny po przecinku "), ",")[[1]])
  # Start from an all-zero frame with the requested dimensions.
  df <- data.frame(matrix(0, ncol = length(columns), nrow = ile))
  colnames(df) <- columns
  for(item in names(df)){
    # message() prints the prompt (to stderr); readline() then reads input.
    df[item] <- c(strsplit(readline(message("Podaj wektor danych kolumny ", item )), ",")[[1]])
  }
  return(df)
}
# Demo: build a 3-row data frame interactively.
stworzDataFrame(3)
# Zadanie 5
sciezka <- "D:/Studia_PJATK/II_Semestr/ADNR/ProjrktyR/R_Training/smogKrakow/"
# Exercise 5: summarise one column over the first `DlaIluPlikow` CSV files
# found in directory `sciezka`.
#
# sciezka:      directory path (with trailing slash, as the callers pass it).
# nazwaKolumny: name of the column to summarise.
# jakaFunkcja:  one of "mean", "median", "max", "min".
# DlaIluPlikow: number of files (in list.files() order) to include.
# Returns the requested statistic over the pooled, NA-free column values.
#
# Fixes: the original `for (idx in DlaIluPlikow)` looped over the single
# value DlaIluPlikow -- reading only that one file -- and each iteration
# overwrote the previous table anyway; the switch() also had a trailing
# comma (a dangling empty alternative).
liczZplikow <- function(sciezka, nazwaKolumny, jakaFunkcja = "mean", DlaIluPlikow = 1) {
  fileList <- list.files(sciezka)
  # Pool the requested column across all selected files.
  wartosci <- unlist(lapply(seq_len(DlaIluPlikow), function(idx) {
    daneTab <- read.table(paste(sciezka, fileList[idx], sep = ""),
                          header = TRUE, sep = ",")
    daneTab[[nazwaKolumny]]
  }))
  wartosci <- na.omit(wartosci)
  switch(jakaFunkcja,
         "mean"   = mean(wartosci),
         "median" = median(wartosci),
         "max"    = max(wartosci),
         "min"    = min(wartosci))
}
liczZplikow(sciezka=sciezka,nazwaKolumny='X142_humidity',jakaFunkcja='mean',DlaIluPlikow = 2)
liczZplikow(sciezka=sciezka,nazwaKolumny='X142_pm10',jakaFunkcja='max',DlaIluPlikow = 3)
liczZplikow(sciezka=sciezka,nazwaKolumny='X142_pm10',jakaFunkcja='min',DlaIluPlikow = 3)
liczZplikow(sciezka=sciezka,nazwaKolumny='X142_pm10',jakaFunkcja='median',DlaIluPlikow = 1)
|
0caa68ba4eb7ee19b153a98278a6da6d56e5dbb7 | 49a32d071f2b5eaba99c13029c00dc34554f03f2 | /Functions.R | 8472dd0b5876d1ce82c6a693d01ec641506cb4cd | [] | no_license | AlexArgus/RI | 151e664919aece13b74c67f4ea33684c47feab7d | daaf557705095036888208408a940a10c03d8d87 | refs/heads/master | 2020-12-02T09:54:50.681716 | 2017-07-09T03:12:16 | 2017-07-09T03:12:16 | 96,658,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,426 | r | Functions.R |
options(digits.secs=5)
Sys.setlocale("LC_ALL", 'English_United States.1252')
# source("https://raw.githubusercontent.com/ggrothendieck/gsubfn/master/R/list.R")
####### much useful function #########
# Sentinel object of class "result": `list[a, b] <- value` dispatches to the
# replacement method below, giving a multiple-assignment ("destructuring")
# syntax in gsubfn style. NOTE(review): this masks base::list in the
# workspace -- confirm that is acceptable here.
list <- structure(NA, class = "result")
# Replacement method implementing `list[a, b, ...] <- value`: each
# non-missing symbol between the brackets receives the corresponding
# element of `value`, assigned in the caller's frame.
"[<-.result" <- function(x,...,value) {
  args <- as.list(match.call())
  # Drop the function name, `x`, and the trailing `value`; what remains are
  # the target symbols supplied between the brackets.
  args <- args[-c(1:2,length(args))]
  length(value) <- length(args)
  for (i in seq(along = args)) {
    a <- args[[i]]
    # Skipped positions (e.g. `list[, b]`) are left untouched.
    if (!missing(a)) eval.parent(substitute(a <- v,list(a = a,v = value[[i]])))
  }
  x
}
source(paste0(WorkingDirectory,'/','Scripts/HFT/multiplot.R'))
list[q, p, m, n] <- c(3, 9, 12, 26)
list[k_60, k_20, k_6] <- c(113, 37, 13)
#################
coef <- function(l){
  # EMA smoothing coefficients for a period of length `l`:
  # c1 weights the new observation, c2 the previous EMA value.
  denom <- l + 1
  list(c1 = 2 / denom, c2 = (l - 1) / denom)
}
list[c1n, c2n] <- coef(n)
list[c1m, c2m] <- coef(m)
list[c1p, c2p] <- coef(p)
list[c1q, c2q] <- coef(q)
rm(coef)
################# Function for creating Special time points #########################
CreateTime <- function(Time){
  # Build a POSIXct timestamp for the clock string `Time` ("HH:MM:OS",
  # fractional seconds allowed) on the global trading date `dateOfTrade`,
  # interpreted in the global `TimeZone`.
  stamp <- paste(dateOfTrade, Time)
  parsed <- strptime(stamp, format = '%Y-%m-%d %H:%M:%OS', tz = TimeZone)
  as.POSIXct(parsed)
}
################# Constants and Initial Values ######################################
MarketOpenRegularSession <- "10:00:00"
MarketCloseRegularSession <- "23:50:00"
GlobalSummator = 0
GlobalSummatorLong = 0
GlobalSummatorShort = 0
GlobalCommisionSummator = 0
Difference = 0
GraphOfGlobalSummator <- c()
GraphOfGlobalSummatorLong <- c()
GraphOfGlobalSummatorShort <- c()
GraphOfNumberOfDeals <- c()
GraphOfGlobalCommisionSummator <- c()
Deals <- c()
DealsLong <- c()
DealsShort <- c()
MaxDeals <- c()
MinDeals <- c()
BeginTimeOfdealsLong <- c()
BeginTimeOfdealsShort <- c()
TimeOfdeals <- c()
TimeOfdealsLong <- c()
TimeOfdealsShort <- c()
Date <- c()
# ActivRegularSession <- c()
# A_RegularSession <- c()
MaxDifference <- c()
TimeZone <- 'Europe/Moscow'
Sys.setenv(TZ = TimeZone)
format <- '%F %H:%M:%OS'
format2 <- '%y%m%d%H:%M:%OS'
FuturesCode <- 'RI'
########################################################
# MACD-style indicator series for the price xts `H`:
#   EMAFast / EMASlow -- fast (m) and slow (n) exponential moving averages,
#   M -- q-smoothed EMA difference, S -- p-smoothed "signal" line,
#   B -- q-smoothed difference M - S (histogram analogue).
# Depends on globals: Price, Startpoint, Endpoint and the coefficient pairs
# c1m/c2m, c1n/c2n, c1p/c2p, c1q/c2q computed at the top of this file.
# Returns list(M_series, B_series) as xts objects on the window's index.
CreateMSB <- function(H){
  # Warm-start both EMAs at the last Price observed before Startpoint.
  P <- as.numeric( last( Price[ paste0('/', Startpoint) ] ) )
  # Frequency label parsed from the caller's variable name (text after the
  # first underscore); only used by the commented-out progress logging.
  freq <- strsplit(deparse(substitute(H)), '_')[[1]][2]
  EMASlow <- P #scalar
  EMAFast <- P #scalar
  M <- 0 #scalar
  #MLag1 <- 0 #scalar
  S <- 0 #scalar
  B <- 0 #scalar
  M_massive <- c()
  B_massive <- c()
  count <- index( H[paste0(Startpoint,'/', Endpoint)] )
  l=0
  for ( x in as.character(count)) {
    l = l + 1
    # Last observed value of H up to time x.
    P <- as.numeric( last( H[ paste0('/', x) ] ) )
    EMAFast <- c1m*P + c2m*EMAFast
    EMASlow <- c1n*P + c2n*EMASlow
    M <- c1q*(EMAFast - EMASlow) + c2q*M
    S <- c1p*(EMAFast - EMASlow) + c2p*S
    B <- c1q*(M - S) + c2q*B
    # NOTE(review): append() regrows the vectors each step; preallocating
    # numeric(length(count)) would avoid the quadratic copying.
    M_massive <- append(M_massive, M)
    B_massive <- append(B_massive, B)
    #MLag1 <- M #scalar
    # if(l%%100 == 0){cat(as.character(x), '\n')
    #   cat(paste0('M', freq,' '), round(M,digits = 3),
    #       paste0('S', freq,' '), round(S,digits = 3),
    #       paste0('B', freq,' '), round(B,digits = 3),'\n' )}
  }
  M_massive <- as.xts(M_massive, order.by = count)
  B_massive <- as.xts(B_massive, order.by = count)
  return( list(M_massive, B_massive) )
}
################# Horse work Functions #######################################
# Find an executable price on one side of the book, relative to the global
# loop time `x`:
#   FUN = 'min' -> buy side, RealData should be the bid stream zB;
#   FUN = 'max' -> sell side, RealData should be the ask stream zS.
#   tbs_1 -- the first `Shift` seconds, used to read the current level;
#   tbs_5 -- the following 5*Delay seconds, used to check that actual deals
#            (rows with a non-empty DEAL_ID) traded through that level with
#            larger volume.
# Returns the level when the trade-through condition holds, otherwise NA.
# NOTE(review): relies on the globals x, Shift and Delay set by the caller.
RealPriceTrading <- function(String, FUN, RealData) {
  Min_or_Max <- .Primitive(FUN)
  ### We take Shift sec period to check bid/ask level ###
  tbs_1 <- paste0(as.character(x), '/', as.character( x + Shift ) )
  ### After that we check 5*Delay sec duration period was or not Deals by Price one tick lower/higher ###
  tbs_5 <- paste0(as.character(x + Shift), '/', as.character(x + Shift + Delay * 5 ) )
  ### First of all we get the current level of bid or ask ###
  massive_1 <- RealData[tbs_1]$PRICE
  if (length(massive_1) > 0) {
    PriceWithDelay_1 <- Min_or_Max(massive_1)
  } else {
    PriceWithDelay_1 <- NA
  }
  ### Next we check were or not deal with level lower/upper than levels bid/ask ( see above) ###
  massive_5 <- RealData[RealData$DEAL_ID != ""][tbs_5]$PRICE
  if (length(massive_5) > 0) {
    PriceWithDelay_5 <- Min_or_Max(massive_5)
  } else {
    PriceWithDelay_5 <- NA;
  }
  cat('**************************************************************************', '\n')
  cat('Range of PriceWithDelay_1 is ')
  cat(range(massive_1), '\n')
  cat('Range of PriceWithDelay_5 is ')
  cat(range(massive_5), '\n')
  cat('**************************************************************************', '\n')
  cat( paste0(String, Delay * 1,': '), PriceWithDelay_1, '\n')
  cat( paste0(String, Delay * 5,': '), PriceWithDelay_5, '\n')
  if ( !is.na(PriceWithDelay_1) ) {
    PriceWithDelay <- get0('PriceWithDelay_1')
    # Volume resting at the chosen level vs. traded volume through it.
    Vol_1 <- RealData[RealData$PRICE == PriceWithDelay][tbs_1]$VOLUME
    Vol_1 <- last(Vol_1)[[1]]
    Vol_5 <- RealData[(RealData$PRICE <= PriceWithDelay ) & RealData$DEAL_ID != "" ][tbs_5]$VOLUME
    Vol_5 <- sum( Vol_5 )
    cat('VolumeWithDelay_1 ', Vol_1, '\n')
    cat('VolumeWithDelay_5 ', Vol_5, '\n')
  } else {
    # No quote in the Shift window: fall back to the last tick at or before x.
    ZeroPoint <- RealData[ last( which( index( RealData ) <= x ) ) ]
    PriceWithDelay <- ZeroPoint$PRICE[[1]]
    cat('We get the Price from previous tick...', PriceWithDelay, '\n')
    Vol_1 <- ZeroPoint$VOLUME[[1]]
    Vol_5 <- RealData[(RealData$PRICE <= PriceWithDelay) & RealData$DEAL_ID != ""][tbs_5]$VOLUME
    if (length(Vol_5) > 0) {Vol_5 <- sum(Vol_5)} else {Vol_5 <- 0}
    cat('We get the Volume from previous tick...', 'Vol_1 ', Vol_1,' Vol_5 ', Vol_5, '\n')
  }
  ###
  # Trade-through condition: deals at/through the level with more volume
  # than rests there. NOTE(review): scalar condition built with elementwise
  # `&`; `&&` would be the scalar idiom.
  if ( !is.na(PriceWithDelay_1) & !is.na(PriceWithDelay_5) ) {
    if ( FUN == 'min') { condition <- (PriceWithDelay_5 <= PriceWithDelay - 0 ) & ( Vol_1 < Vol_5 )
    #MaxDifference <- append(MaxDifference, as.numeric(PriceWithDelay) - as.numeric(PriceWithDelay_5) )
    cat('Max Difference is ', PriceWithDelay - PriceWithDelay_5, '\n')
    } else{ condition <- (PriceWithDelay_5 >= PriceWithDelay + 0 ) & ( Vol_1 < Vol_5 )
    #MaxDifference <- append(MaxDifference, as.numeric(PriceWithDelay_5) - as.numeric(PriceWithDelay) )
    cat('Max Difference is ', PriceWithDelay_5 - PriceWithDelay, '\n')
    }
  } else { condition <- FALSE }
  ###
  if ( condition ) { return( PriceWithDelay ) # - FUNFUN(-0, 0))
  } else {
    PriceWithDelay <- NA;
    cat('There is no Deal. Price go out ((( ...', '\n')
  }
  ##############################
  return(PriceWithDelay)
}
#################################################################################################################
ClosePriceTrading <- function(RealData) {
  # Fallback close price: the most recent PRICE in `RealData` up to the
  # cutoff time (global loop time `x` plus Shift + 5 * Delay seconds).
  cutoff <- x + Shift + Delay*5
  px <- last(RealData[ paste0('/', cutoff) ]$PRICE)[[1]]
  cat('We get the Price from previous tick...', px, '\n')
  px
}
# Try to open a long position of `c` lots at an executable bid-side price
# (RealPriceTrading with FUN = 'min' on the bid stream zB). On success the
# global trading state is mutated via %+=%/%-=%/<<-: Position,
# CommisionSummator, NumberOfDeals, AccumulatorLong, BeginTimeOfdealsLong
# and TechAcL (negative entry price, settled later in CloseLong()).
# NOTE(review): `c` is used as a global lot-size variable here, masking
# base::c -- confirm and consider renaming.
OpenLong <- function() {
  RP <- RealPriceTrading('OpenLongPriceWithDelay_', 'min', zB)
  cat('New Deal ##################################################', '\n')
  if(!is.na(RP)){
    cat('Price: ', P, ' OpenLongPriceWithDelay: ', RP, 'Difference ', (P - RP), '\n')
    Difference %+=% (P - RP); cat('TotalDifference', Difference, '\n')
    Position %+=% c
    CommisionSummator %+=% c*comissions
    NumberOfDeals %+=% 1
    AccumulatorLong %-=% RP
    BeginTimeOfdealsLong <<- append(BeginTimeOfdealsLong, x)
    TechAcL <<- - RP
    cat('LongTime: ', paste0(x,' '))
    cat('LongPrice: ', paste0(round(RP, digits = 0)),'\n')
    # plotPrice <<- plotPrice + geom_point( aes(x=x, y=P), pch=2, color='blue')
    #
    # plotPrice
  }else{cat('Time is', as.character(x), '\n')}
}
# Try to open a short position of `c` lots at an executable ask-side price
# (RealPriceTrading with FUN = 'max' on the ask stream zS). Mirror image of
# OpenLong(): on success Position decreases, AccumulatorShort/TechAcS record
# the entry price, and the entry time is appended to BeginTimeOfdealsShort.
OpenShort <- function() {
  RP <- RealPriceTrading('OpenShortPriceWithDelay_', 'max', zS)
  cat('Next Deal ##################################################', '\n')
  if(!is.na(RP)){
    cat('Price: ', P, ' OpenShortPriceWithDelay: ', RP,
        'Difference', (RP - P), '\n')
    Difference %+=% (RP - P)
    cat('TotalDifference', Difference, '\n')
    Position %-=% c
    CommisionSummator %+=% c*comissions
    NumberOfDeals %+=% 1
    AccumulatorShort %+=% RP
    BeginTimeOfdealsShort <<- append(BeginTimeOfdealsShort, x)
    TechAcS <<- RP
    cat('TechShort:', (TechAcS - RP) ,' ')
    cat('Short: ', paste0(x,' '))
    cat('Price of Deal: ', paste0(round(RP, digits = 0)),'\n')
    # plotPrice <<- plotPrice + geom_point( aes(x=x, y=P), pch=6, color='magenta' )
    #
    # plotPrice
  }else{cat('Time is', as.character(x), '\n')}
}
# Close the open long position: sell at an executable ask-side price, or
# fall back to the last tick via ClosePriceTrading() when none is found.
# Records the realised PnL (TechAcL + RP) in Deals/DealsLong, logs the deal
# time, resets the per-position max/min profit trackers, and updates the
# global accumulators/commission counters.
CloseLong <- function() {
  RP <- as.numeric(RealPriceTrading('CloseLongPriceWithDelay_', 'max', zS))
  cat('Close Deal ##################################################', '\n')
  if(is.na(RP)){ RP <- ClosePriceTrading(zB) }
  cat('Price: ', P, ' CloseLongPriceWithDelay: ', RP,
      'Difference', (RP - P), '\n')
  Difference %+=% (RP - P)
  cat('TotalDifference', Difference, '\n')
  Position %-=% 1
  CommisionSummator %+=% c*comissions
  NumberOfDeals %+=% 1
  AccumulatorLong %+=% RP
  # Realised PnL of this round trip: -entry price + exit price.
  Deals <<- append(Deals, (TechAcL + RP))
  DealsLong <<- append(DealsLong, (TechAcL + RP))
  TimeOfdeals <<- append(TimeOfdeals, x)
  TimeOfdealsLong <<- append(TimeOfdealsLong, x)
  cat('CloseLongTime: ', paste0(x,' '))
  cat('CloseLongPrice: ', paste0(round(RP, digits = 0)),'\n')
  cat('TechLong:', round((TechAcL + RP), digits = 2) ,' ')
  MaxDeals <<- append(MaxDeals, MaxProfitOfPositionLong)
  MinDeals <<- append(MinDeals, MinProfitOfPositionLong)
  cat('Max', round(MaxProfitOfPositionLong, digits = 2), ' ')
  cat('Min', round(MinProfitOfPositionLong, digits = 2), '\n')
  MaxProfitOfPositionLong <<- 0
  MinProfitOfPositionLong <<- 0
  TechAcL <<- 0;
  cat('AccumulatorLong: ', round(AccumulatorLong, digits = 1), '\n')
  # plotPrice <<- plotPrice + geom_point(aes(x=x, y=P), pch=6, color='red')
  #
  # plotPrice
}
# Close the open short position: buy back at an executable bid-side price,
# or fall back to the last tick via ClosePriceTrading() when none is found.
# Records the realised PnL (TechAcS - RP) in Deals/DealsShort, logs the deal
# time, resets the per-position max/min profit trackers, and updates the
# global accumulators/commission counters.
CloseShort <- function() {
  RP <- as.numeric(RealPriceTrading('CloseShortPriceWithDelay_', 'min', zB))
  cat('Close Deal ##################################################', '\n')
  if(is.na(RP)){ RP <- ClosePriceTrading(zS) }
  cat('Price: ', P, ' CloseShortPriceWithDelay: ', RP,
      'Difference', (P - RP), '\n')
  Difference %+=% (P - RP)
  cat('TotalDifference', Difference, '\n')
  #cat( 'Position before deal: ', paste0(Position,'\n') )
  Position %+=% 1
  CommisionSummator %+=% c*comissions
  NumberOfDeals %+=% 1
  AccumulatorShort %-=% RP ; cat('AccumulatorShort ', AccumulatorShort, '\n','\n')
  # Realised PnL of this round trip: entry price - buy-back price.
  Deals <<- append(Deals, (TechAcS - RP))
  DealsShort <<- append(DealsShort, (TechAcS - RP))
  TimeOfdeals <<- append(TimeOfdeals,x)
  TimeOfdealsShort <<- append(TimeOfdealsShort, x)
  cat('TechShort:', round((TechAcS - RP), digits = 2) ,' ')
  TechAcS <<- 0
  ##
  MaxDeals <<- append(MaxDeals, MaxProfitOfPositionShort)
  MinDeals <<- append(MinDeals, MinProfitOfPositionShort)
  cat('Max', round(MaxProfitOfPositionShort, digits = 2), ' ')
  cat('Min', round(MinProfitOfPositionShort, digits = 2), '\n')
  MaxProfitOfPositionShort <<- 0
  MinProfitOfPositionShort <<- 0
  cat('Close: ', paste0(x,' '))
  cat('Price of Deal: ', paste0(round(RP, digits = 0),' ') )
  cat('AccumulatorShort: ', paste0(round(AccumulatorShort, digits = 1),'\n'))
  # plotPrice <<- plotPrice + geom_point(aes(x=x, y=P), pch=2, color='green')
  #
  # plotPrice
}
########################################################################
# In-place increment/decrement operators: `a %+=% b` expands to
# `a <<- (a + b)` evaluated in the caller's frame, so the target is resolved
# with global-assignment (`<<-`) scoping. NOTE(review): combined with
# eval.parent() this effectively updates the global trading-state variables;
# confirm that is intended everywhere these operators are applied.
`%+=%` = function(e1,e2) eval.parent(substitute(e1 <<- (e1 + e2) ) )
`%-=%` = function(e1,e2) eval.parent(substitute(e1 <<- (e1 - e2) ) )
#######################################################################
# Print summary statistics for the xts series `DD` (per-deal PnL or similar):
# first mean / count / sum for every time window in the global vector `mt`,
# then the total per weekday (the code maps .indexwday() values 1..5 to
# Monday..Friday).
GetDDStat <- function(DD){
  for (i in mt){ cat(as.character(i),
                     round( mean( DD[i] ), digits = 2),
                     round(length( DD[i] ), digits = 2),
                     round( sum( DD[i] ), digits = 2), '\n')
  }
  cat('Monday'    , sum(DD[.indexwday(DD)==1]), '\n')
  cat('Tuesday'   , sum(DD[.indexwday(DD)==2]), '\n')
  cat('Wednesday' , sum(DD[.indexwday(DD)==3]), '\n')
  cat('Thursday'  , sum(DD[.indexwday(DD)==4]), '\n')
  cat('Friday'    , sum(DD[.indexwday(DD)==5]), '\n')
}
# Compile (via Rcpp::cppFunction) EMACpp(ys, alpha): an exponential moving
# average with a per-step smoothing parameter. For each i the update uses
# mu = exp(-alpha[i-1]) and vu = (1 - mu)/alpha[i-1] to blend the previous
# output with ys[i-1] and ys[i]; out[0] starts at ys[0].
# NOTE(review): alpha[i-1] == 0 would divide by zero in vu -- confirm the
# callers never pass zero smoothing values.
cppFunction('NumericVector EMACpp(NumericVector ys, NumericVector alpha) {
            int n = ys.size();
            NumericVector out(n);
            out[0] = ys[0];
            for(int i = 1; i < n; ++i) {
            double al = alpha[i - 1];
            double mu = exp(-al);
            double vu = (1 - mu)/al;
            out[i] = mu*out[i-1] + (vu - mu)*ys[i-1] + (1 - vu)*ys[i];
            }
            return out;
            }')
|
7c61b527a4df2ce5522219b9ca23da38cf3fc91e | ffb29f3c0423ab991e8615f7ebe5a0b143d14ec8 | /cachematrix.R | 9e7a2e17b9cbfde786e80ce81eb7c963d39fc452 | [] | no_license | burkley/ProgrammingAssignment2 | c3737d9ef32f7c5554998d765e01ee6c8d46acec | 11ecd3942cb70e16823a6adf84e5766d60ae937c | refs/heads/master | 2021-01-21T23:58:56.630540 | 2015-05-24T14:41:38 | 2015-05-24T14:41:38 | 36,099,925 | 0 | 0 | null | 2015-05-22T23:44:13 | 2015-05-22T23:44:12 | null | UTF-8 | R | false | false | 3,663 | r | cachematrix.R | ## File cachematrix.R defines 2 functions:
## 1) makeCacheMatrix
## 2) cacheSolve
##
## The functions are intended to work together to cache the inverse of a matrix
## (supplied by the user) in the global environment. The purpose of caching is
## to speed up access to the inverse when the inverse is requested multiple
## times (such as in a for loop).
##
## Usage:
## test1_special <- makeCacheMatrix(test1)
## test1_inverse <- cacheSolve(test1_special)
##
## where test1 is an ordinary matrix supplied by the user.
##
## The function makeCacheMatrix creates a special "matrix", which is really a
## list containing functions to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
##
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of accessor functions:
  ##   set(y)          -- replace the matrix (invalidates the cache)
  ##   get()           -- return the matrix
  ##   setinverse(inv) -- store the inverse in the cache
  ##   getinverse()    -- return the cached inverse, or NULL if not set
  ## Both the matrix and the cached inverse live in this function's
  ## enclosing environment, shared by all four closures via `<<-`.
  cached_inverse <- NULL
  set <- function(y) {
    ## Replacing the matrix makes any previously cached inverse stale.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inv) cached_inverse <<- inv
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The function cacheSolve calculates the inverse of the special "matrix"
## created by the function "makeCacheMatrix". However, it first checks to see
## if the inverse has already been calculated. If so, it gets the inverse from
## the cache and skips the computation. Otherwise, it calculates the inverse of
## the matrix and sets the value of the inverse in the cache via the setinverse
## function (the setinverse function of the special "matrix").
##
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` (as built by
  ## makeCacheMatrix), computing it at most once: a cached inverse is
  ## reused (with a message), otherwise solve() is called and the result
  ## stored via x$setinverse() before being returned.
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the underlying matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
e7ffdaac426ba66a6519aea4c305d43643311923 | 370e90a19bdd575a0306cb3ab7b11b0445c919e4 | /scripts/climb/mlesky_d1_B.1.1.7_control.R | 12c13d1fa26f5eb99adbbcc7941a7b30dbefbf63 | [] | no_license | Geidelberg/617.2 | be180da4f3247f033babf0f11d988e458cfeea3c | 31eb5dfa870ef0b28f6211e2a573eb89c853a77c | refs/heads/main | 2023-05-14T02:01:09.857899 | 2021-06-06T12:02:48 | 2021-06-06T12:02:48 | 374,338,755 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,652 | r | mlesky_d1_B.1.1.7_control.R |
library( ape )
library( lubridate )
library( treedater )
library(alakazam)
require(stringi)
require( mlesky )
# Fit mlesky Ne(t) skygrid curves for replicate sets of dated trees and
# interpolate every trajectory onto the common time axis `taxis`.
#
# tds_list: list of dated-tree sets (anything that is not already a list is
#           treated as a path and passed to readRDS()).
# ofn:      output file prefix -- "_mlesky.rds" is appended before saving,
#           so callers should pass a bare prefix. NOTE(review): the call at
#           the bottom of this script passes a name already ending in
#           "_mlesky.rds", which doubles the suffix -- confirm intended.
# taxis:    decimal-date grid onto which each Ne trajectory is interpolated.
# Returns the column-bound matrix of Ne trajectories (one column per fit),
# which is also saved to disk together with `taxis`.
run_mlesky <- function(tds_list, ofn, taxis) {
  if(!inherits(tds_list, what = c("list")))
    tds_list = readRDS(tds_list)
  res_mlesky_list = lapply(tds_list, function(tds) {
    # Grid resolution: two points per week of the sampling-time span.
    weeks = round(as.numeric((date_decimal(max(tds[[1]]$sts))-date_decimal(min(tds[[1]]$sts)))/7))
    res <- weeks * 2
    class( tds ) <- 'multiPhylo'
    # Keep only the text before the first '|' of each tip label.
    tds = lapply( tds ,
                  function(x) {x$tip.label = unlist(lapply(strsplit(x$tip.label, '[|]'), function(y) paste0(y[1])))
                  return(x)}
    )
    # Estimate Ne back over the full sampling span.
    NeStartTimeBeforePresent = max( tds[[1]]$sts ) - min( tds[[1]]$sts )
    print(paste0("NeStartTimeBeforePresent = ", NeStartTimeBeforePresent))
    sgs = parallel::mclapply( tds, function(td) {
      mlskygrid(td, tau = NULL, tau_lower=.001, tau_upper = 10 , sampleTimes = td$sts , res = res, ncpu = 3, NeStartTimeBeforePresent = NeStartTimeBeforePresent)
    }, mc.cores = 10 )
    # Interpolate each fit's Ne(t) onto the shared axis (NA outside range).
    out = lapply(sgs, function(sg) {
      with( sg, approx( time, ne, rule = 1, xout = taxis )$y )
    })
    out
  })
  # I am collapsing the results from all alignments and all dated trees together as one.
  res_mlesky <-
    do.call( cbind, lapply(res_mlesky_list, function(x) do.call( cbind, x ) ))
  saveRDS( list( time = taxis, ne = res_mlesky ) , file=paste0(ofn, "_mlesky", '.rds' ))
  res_mlesky
}
# metadata
civetfn = list.files( '/cephfs/covid/bham/results/msa/20210604/alignments/' , patt = 'cog_[0-9\\-]+_metadata.csv', full.names=TRUE) #'../phylolatest/civet/cog_global_2020-12-01_metadata.csv'
civmd = read.csv( civetfn , stringsAs=FALSE , header=TRUE )
civmd$central_sample_id=civmd$sequence_name
civmd$sample_date <- as.Date( civmd$sample_date )
civmd$sample_time <- decimal_date( civmd$sample_date )
mltr_fn = 'B.1.1.7'
mltr_list = list.files( '/cephfs/covid/bham/climb-covid19-geidelbergl/617.2/f0-trees' , patt = mltr_fn, full.names=TRUE)
mltr = lapply(mltr_list, read.tree)
sts <- lapply(mltr, function(x) {
civmd$sample_time[ match( x$tip.label , civmd$central_sample_id ) ]
})
taxis = decimal_date( seq( as.Date(date_decimal(min(unlist(sts)))) , as.Date(date_decimal(max(unlist(sts)))), by = 1) )
res_mlesky = run_mlesky(tds_list = "/cephfs/covid/bham/climb-covid19-geidelbergl/617.2/Sample_England_controlB.1.1.7_n_tree_dating_5_dated_trees.rds",
ofn = "/cephfs/covid/bham/climb-covid19-geidelbergl/617.2/Sample_England_controlB.1.1.7_n_tree_dating_5_dated_trees_mlesky.rds",
taxis = taxis)
|
0440a6fd14487125d6d06b6715da49b7b80ffb0f | 3fc2fe83b8cad2e42bf0c7d5df7c9f4244d3f285 | /R/sourceFormat.R | cdb9143154a3c77ad1ffe46a2c1ce54da51c6a4b | [] | no_license | SciViews/svIDE | 1935ec2d97e207bee37e252a580545b8fc23c93f | aaa84e45838fefc61020a951afa000e109b0fc0e | refs/heads/master | 2020-03-15T06:31:27.514200 | 2018-06-29T06:36:09 | 2018-06-29T06:36:09 | 132,009,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,755 | r | sourceFormat.R | Source <- function (...) {
.Deprecated("sourceFormat")
sourceFormat(...)
}
sourceFormat <- function (file, out.form = getOption("R.output.format"), local = FALSE,
echo = FALSE, print.eval = TRUE, verbose = getOption("verbose"),
prompt.echo = getOption("prompt"), max.deparse.length = 150,
chdir = FALSE, prompt = FALSE)
{
## This is a reworked version of .Rsource from RpadUtils (Tom Short)
## but this version uses source() itself
if (is.null(out.form)) out.form <- "text"
## capture.all() is inspired from capture.output(), but it captures
## both the output and the message streams and it evaluates in .GlobalEnv
capture.all <- function (...) {
args <- substitute(list(...))[-1]
file <- textConnection("rval", "w", local = TRUE)
sink(file, type = "output")
sink(file, type = "message")
on.exit({
sink(type = "output")
sink(type = "message")
close(file)
})
for (i in seq(length = length(args))) {
expr <- args[[i]]
if (mode(expr) == "expression")
tmp <- lapply(expr, withVisible) #tmp <- lapply(expr, evalVis)
else if (mode(expr) == "call")
tmp <- list(withVisible(expr)) #tmp <- list(evalVis(expr))
else if (mode(expr) == "name")
tmp <- list(withVisible(expr)) #tmp <- list(evalVis(expr))
else stop("bad argument")
for (item in tmp) {
if (item$visible)
print(item$value)
}
}
sink(type = "output")
sink(type = "message")
cat("====\n")
print(file)
cat("====\n")
return(file)
}
## We capture output from source() with default args slightly modified
### TODO: get rid of source() and use something like:
## (try(parse(textConnection("ls()")), silent = TRUE))
## with detection of incomplete lines and other error messages!
res <- capture.all(source(file = file, local = FALSE, echo = echo,
print.eval = print.eval, verbose = verbose, prompt.echo = prompt.echo,
max.deparse.length = max.deparse.length, chdir = chdir))
if (inherits(res, "list"))
res <- paste(res, collapse = "\n")
if (!out.form %in% c("none", "html"))
res <- paste(paste(res, collapse="\n"), "\n", sep = "")
## Note for out.form == "html", we want to use something like:
##require(R2HTML) || stop("Package 'R2HTML' is required!")
##res <- HTML(res, file = "")
## But since we do not want a dependency to R2HTML here,
## we should better put this in the SciViews-R manual
if (prompt)
res <- paste(res, options()$prompt, sep = "")
### TODO: possibly use a continue prompt!
invisible(res)
}
|
6b5f555f4729d3efc7706275a1ab5d45dd995634 | 3f5d792ba28aabb0d02991d1d6871a563d10c080 | /man/calcfunpca.Rd | 11689712d4c830ecd1cf7e175c265ecd87984c02 | [] | no_license | ikwak2/funqtl | e3845eaef06c1b57a197216dba08883e0383101b | 350cf93dd70f508c5af4cee7ef1bd887eea64f19 | refs/heads/master | 2022-05-03T13:34:08.966441 | 2022-04-18T02:32:07 | 2022-04-18T02:32:07 | 9,354,923 | 5 | 1 | null | 2020-05-29T04:19:00 | 2013-04-10T20:09:37 | R | UTF-8 | R | false | true | 1,253 | rd | calcfunpca.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcfunpca.R
\name{calcfunpca}
\alias{calcfunpca}
\title{Do dimensional reduction using functional pca.}
\usage{
calcfunpca(cross, pheno.cols, n.max = 4, criteria = 0.9, nbasis, nn = 0)
}
\arguments{
\item{cross}{An object of class \code{"cross"}. See the \code{\link[qtl]{read.cross}} for details.}
\item{pheno.cols}{Columns in the phenotype matrix to be used as the
phenotype.}
\item{n.max}{The number of maximum reduced dimension.}
\item{criteria}{how much of variance explained.}
\item{nbasis}{The number of basis to use.}
\item{nn}{The number of exact reduced dimension}
}
\value{
Returns a list with two components: \code{Y}, a matrix whose columns contain the principal component scores, and \code{eigf}, an eigenfunction object produced by the functional PCA from the \pkg{fda} package.
}
\description{
Do dimensional reduction using functional pca.
}
\examples{
data(exd)
exd <- calc.genoprob(exd, step=2)
cvout <- cvfold(exd, basisset = 4:7, fold = 10)
cvout # basis number 5 have the smallest sse. So we take nbasis = 5.
Y <- calcfunpca(exd, criteria=0.9, nbasis = 5)$Y
out1 <- scanoneM(exd, Y, method = "hk")
}
\seealso{
\code{\link{scanoneM}}
}
\author{
Il-Youp Kwak, <email: ikwak2@stat.wisc.edu>
}
\keyword{utilities}
|
70785dabc033a795b708625efc6be86661a350f2 | f36b2ad1dc17ec05278f13c7fa72a1fd8343ee19 | /R/chk-missing.R | 192944f0b629d54a78e7bac9ac1c975eaab8e50a | [
"MIT"
] | permissive | poissonconsulting/chk | 45f5d81df8a967aad6e148f0bff9a9f5b89a51ac | c2545f04b23e918444d4758e4362d20dfaa8350b | refs/heads/main | 2023-06-14T19:32:17.452025 | 2023-05-27T23:53:25 | 2023-05-27T23:53:25 | 199,894,184 | 43 | 3 | NOASSERTION | 2023-01-05T18:50:23 | 2019-07-31T16:42:59 | R | UTF-8 | R | false | false | 819 | r | chk-missing.R | #' Check Missing Argument
#'
#' @description
#' Checks argument missing using
#'
#' `missing(x)`
#'
#' @details
#' Currently only checks if value is available
#' (as opposed to whether it was specified).
#'
#' @inheritParams params
#' @inherit params return
#'
#' @family chk_misc
#'
#' @examples
#' # chk_missing
#' fun <- function(x) {
#' chk_missing(x)
#' }
#' fun()
#' try(fun(1))
#' @export
chk_missing <- function(x, x_name = NULL) {
  # Succeeds (invisibly returning NULL) when the argument `x` was not
  # supplied by the caller; otherwise signals an informative chk error.
  if (!vld_missing(x)) {
    if (is.null(x_name)) {
      x_name <- deparse_backtick_chk(substitute(x))
    }
    abort_chk(x_name, " must be missing")
  }
  invisible(NULL)
}
#' @describeIn chk_missing Validate Missing Argument
#'
#' @examples
#' # vld_missing
#' fun <- function(x) {
#' vld_missing(x)
#' }
#' fun()
#' fun(1)
#' @export
vld_missing <- function(x) {
  # TRUE when the argument was not supplied by the caller (see ?missing).
  missing(x)
}
|
db867138546addfa3750916e87cf582254ceafaa | 1aa92f850ce632811aaa74d769527a8037d8c484 | /R/predict.R | ef8ef7983a24d2e6df27fecded517f9a44c62ed3 | [] | no_license | cran/mvord | 253c6e7deaf07bf5ac111571b6db307219f1597c | 6699126154748d7510647afc7bda27066aad3549 | refs/heads/master | 2021-06-02T15:11:40.519370 | 2021-03-17T12:20:12 | 2021-03-17T12:20:12 | 102,715,261 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 23,518 | r | predict.R | #' @title Marginal Predictions for Multivariate Ordinal Regression Models.
#'
#' @description
#' Obtains marginal predictions/fitted measures for objects of class \code{'mvord'}.
#' @param object an object of class \code{'mvord'}.
#' @param type types \code{"prob"}, \code{"class"}, \code{"linpred"}, \code{"pred"}, \code{"cum.prob"} are available.
#' @param newdata (optional) data frame of new covariates and new responses.
#' The names of the variables should correspond to the names of the
#' variables used to fit the model. By default the data on which the model
#' was estimated is considered.
#' @param subjectID (optional) vector specifying for which subjectIDs the predictions\cr or fitted values should be computed.
#' @param newoffset (optional) list of length equal to the number of outcomes, each element containing a vector of offsets to be considered.
#' @param ... further arguments passed to or from other methods.
#' @details The following types can be chosen in \code{marginal_predict}:
#' \tabular{ll}{
#' \code{type} \tab description\cr
#' \code{"prob"} \tab (default) fitted marginal probabilities for the observed response categories.\cr
#' \code{"class"} \tab fitted marginal classes of the observed responses.\cr
#' \code{"linpred"} \tab linear predictor \cr
#' \code{"cum.prob"} \tab fitted marginal cumulative probabilities for the observed response categories.\cr
#' \code{"all.prob"} \tab fitted marginal probabilities for all ordered classes of each response.
#' }
#'
##' The current implementation supports only in-sample predictions.
##' The row names of the output correspond to the subjectIDs.
#' @seealso \code{\link{predict.mvord}}, \code{\link{joint_probabilities}}
#' @export
marginal_predict <- function(object, newdata = NULL, type = "prob", subjectID = NULL, newoffset = NULL, ...) {
  ## Marginal (response-by-response) predictions for a fitted 'mvord' model.
  ## With `newdata` the design matrices, responses, offsets and error
  ## structure are rebuilt; otherwise the quantities stored at fit time are
  ## reused. `type` selects which fitted quantity is returned.
  supported <- c("prob", "linpred", "class", "cum.prob", "all.prob")
  if (!(type %in% supported)) {
    stop("Invalid type chosen. Only types 'prob','linpred', 'class', 'cum.prob' and 'all.prob' are available.")
  }
  if (is.null(newdata)) {
    x <- object$rho$x
    y <- object$rho$y
    offset <- object$rho$offset
  } else {
    tmp <- prepare_newdata(object, as.data.frame(newdata), newoffset)
    x <- tmp$x
    y <- tmp$y
    object$error.struct <- tmp$error.struct
    offset <- tmp$offset
  }
  ## Restrict to the requested subjects (all rows by default).
  if (is.null(subjectID)) {
    ind <- seq_len(NROW(y))
  } else {
    if (!all(subjectID %in% rownames(y))) stop("Not all subjectIDs in data!")
    ind <- match(subjectID, rownames(y))
  }
  ## Marginal standard deviations from the error-structure covariances.
  sigma <- error_structure(object, type = "sigmas")
  stddevs <- sqrt(t(sapply(sigma, diag)))
  ##############################################################
  switch(type,
         prob     = marg_pred_prob_fun(object, y, x, offset, stddevs, ind),
         linpred  = marg_pred_linpred_fun(object, y, x, offset, stddevs, ind),
         all.prob = marg_pred_allprob_fun(object, y, x, offset, stddevs, ind),
         cum.prob = marg_pred_cumprob_fun(object, y, x, offset, stddevs, ind),
         class    = marg_pred_class_fun(object, y, x, offset, stddevs, ind))
}
#' @title Predict method for Multivariate Ordinal Regression Models.
#' @description Obtains predicted or fitted values for objects of class \code{'mvord'}.
#' @param object an object of class \code{'mvord'}.
#' @param type types \code{"class"}, \code{"prob"} and \code{"cum.prob"} are available.
#' @param newdata (optional) data frame of new covariates and new responses.
#' @param subjectID (optional) vector specifying for which subjectIDs the predictions\cr or fitted values should be computed.
#' @param newoffset (optional) list of length equal to the number of outcomes, each element containing a vector of offsets to be considered.
#' @param ... further arguments passed to or from other methods.
#' @details
#' \tabular{ll}{
#' \code{type} \tab description\cr
#' \code{"class"} \tab combination of response categories with the highest probability.\cr
#' \code{"prob"} \tab (default) fitted joint probability for the observed response categories\cr
#' \tab or the categories provided in the response column(s) in \code{newdata}.\cr
#' \tab If response column(s) in
#' \code{newdata} contain only NAs, this will return a vector of ones. \cr
#' \code{"cum.prob"} \tab fitted joint cumulative probability for the observed response\cr
#' \tab categories or the categories provided in the response column(s) in \code{newdata}.\cr
#' \tab If response column(s) in \code{newdata} contain only NAs, this will return a vector of ones.
#' }
# #' The current implementation supports only in-sample predictions.
#' The (row) names of the output correspond to the subjectIDs.
#' @seealso \code{\link{marginal_predict}}, \code{\link{joint_probabilities}}
#' @method predict mvord
#' @export
predict.mvord <- function(object, newdata = NULL, type = "prob", subjectID = NULL, newoffset = NULL, ...) {
  ## Joint (multivariate) predictions for a fitted 'mvord' model. Requires a
  ## multivariate link distribution function; otherwise only marginal
  ## predictions are possible.
  if (is.null(object$rho$link$F_multi)) {
    stop("Multivariate probabilities cannot be computed! Try marginal_predict()!")
  }
  if (!(type %in% c("prob", "class", "cum.prob"))) {
    stop("Invalid type chosen. Only types 'prob', 'class' and 'cum.prob' are available.")
  }
  if (is.null(newdata)) {
    x <- object$rho$x
    y <- object$rho$y
    offset <- object$rho$offset
  } else {
    tmp <- prepare_newdata(object, as.data.frame(newdata), newoffset)
    x <- tmp$x
    y <- tmp$y
    object$error.struct <- tmp$error.struct
    offset <- tmp$offset
  }
  ## Restrict to the requested subjects (all rows by default).
  if (is.null(subjectID)) {
    ind <- seq_len(nrow(y))
  } else {
    if (!all(subjectID %in% rownames(y))) stop("Not all subjectIDs in data!")
    ind <- match(subjectID, rownames(y))
  }
  ## Correlation/covariance matrices of the error structure and the implied
  ## marginal standard deviations.
  sigma <- error_structure(object, type = "sigmas")
  stddevs <- sqrt(t(sapply(sigma, diag)))
  ##############################################################
  ## "class" yields a matrix (hence the row subset); the probability types
  ## yield vectors subset by position.
  switch(type,
         prob     = pred_prob_fun(object, y, x, offset, stddevs, sigma)[ind],
         cum.prob = pred_cumprob_fun(object, y, x, offset, stddevs, sigma)[ind],
         class    = pred_class_fun(object, y, x, offset, stddevs, sigma)[ind, ])
}
#' @title Extracts fitted Probabilities for Multivariate Ordinal Regression Models.
#'
#' @description
#' Extracts fitted probabilities for given response categories from a fitted model of class \code{'mvord'}.
#' @param object an object of class \code{'mvord'}.
#' @param response.cat vector or matrix with response categories (for each subject one row of length equal to the number of multiple measurements).
#' @param newdata (optional) data frame of new covariates and new responses. The names of the variables should correspond to the names of the
#' variables used to fit the model. By default the data on which the model was estimated is considered.
#' @param type \code{"prob"} for joint probabilities and \code{"cum.prob"} for joint cumulative probabilities.
#' @param subjectID (optional) vector specifying for which subjectIDs the predictions\cr or fitted values should be computed.
#' @param newoffset (optional) list of length equal to the number of outcomes, each element containing a vector of offsets to be considered.
#' @param ... further arguments passed to or from other methods.
#' @details
#' The row names of the output correspond to the subjectIDs.
#' @seealso \code{\link{predict.mvord}}, \code{\link{marginal_predict}}
#' @export
joint_probabilities <- function(object, response.cat, newdata = NULL, type = "prob", subjectID = NULL, newoffset = NULL,...) {
  ## checks
  if (is.null(object$rho$link$F_multi)) stop("Multivariate probabilities cannot be computed! Try marginal_predict()!")
  if(!type %in% c("prob", "cum.prob")) stop("Invalid type chosen. Only types prob and cum.prob are available.")
  ndim <- object$rho$ndim
  ## validate response.cat in BOTH the in-sample and the newdata case
  ## (previously this was only checked when newdata was supplied, so a
  ## wrong-length vector was silently recycled by matrix() further below)
  if (is.vector(response.cat) && length(response.cat) != ndim)
    stop("response.cat must have length equal to the number of outcomes.")
  if (is.null(newdata)){
    x <- object$rho$x
    y <- object$rho$y
    offset <- object$rho$offset
  } else {
    newdata <- as.data.frame(newdata)
    tmp <- prepare_newdata(object, newdata, newoffset)
    x <- tmp$x
    y <- tmp$y
    object$error.struct <- tmp$error.struct
    offset <- tmp$offset
  }
  if (!is.vector(response.cat) && nrow(response.cat) != nrow(y))
    stop("Number of rows of response.cat does not correspond to number of subjects in newdata.")
  if (is.null(subjectID)) {
    ind <- seq_len(nrow(y))
  } else {
    if(!all(subjectID %in% rownames(y))) stop("Not all subjectIDs in data!")
    ind <- match(subjectID, rownames(y))
  }
  ## get correlation/covariance matrices and marginal standard deviations
  sigma <- error_structure(object, type ="sigmas")
  stddevs <- sqrt(t(sapply(sigma, diag)))
  ###################################################
  ## a single category combination is recycled to all subjects
  if(is.vector(response.cat)) {
    response.cat <- matrix(response.cat, ncol = length(response.cat), nrow = nrow(y), byrow = TRUE)
  }
  ## coerce the requested categories to the factor levels of the fit
  ytmp <- cbind.data.frame(lapply(seq_len(ndim), function(j){
    if (!all(response.cat[,j] %in% c(NA,levels(y[,j])))) {
      stop("response.cat are different from the categories in the original data set")
    } else {
      ordered(response.cat[,j], levels = levels(y[,j]))
    }
  }))
  ## category-specific design matrices and the expanded coefficient vector
  Xcat <- make_Xcat(object, ytmp, x)
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  ## standardized upper/lower integration bounds per outcome
  pred.upper <- sapply(seq_len(object$rho$ndim), function(j) {
    th_u <- c(object$theta[[j]], object$rho$inf.value)[ytmp[, j]]
    xbeta_u <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    th_u - xbeta_u - offset[[j]]
  })/stddevs
  pred.lower <- sapply(seq_len(object$rho$ndim), function(j) {
    th_l <- c(-object$rho$inf.value, object$theta[[j]])[ytmp[, j]]
    xbeta_l <- as.double(Xcat$L[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    th_l - xbeta_l - offset[[j]]
  })/stddevs
  ## NA categories integrate over the whole real line
  pred.lower[is.na(pred.lower)] <- -object$rho$inf.value
  pred.upper[is.na(pred.upper)] <- object$rho$inf.value
  #################################
  ## cumulative probabilities: integrate from -inf.value up to the upper bound
  if(type == "cum.prob") pred.lower <- matrix(-object$rho$inf.value,
                                              ncol = ndim, nrow = nrow(pred.upper))
  prob <- object$rho$link$F_multi(U = pred.upper, L = pred.lower,
                                  list_R = lapply(sigma, cov2cor))[ind]
  names(prob) <- rownames(y)[ind]
  return(prob)
}
## Build design matrices, responses, error structure and offsets for newdata,
## mirroring the data preparation of the original fit.
##
## object:    fitted 'mvord' object (object$rho holds the fitting environment)
## newdata:   data frame in the format of the estimation data (long format for
##            function.name "mvord", wide format otherwise)
## newoffset: optional list of per-outcome offset vectors; if NULL, offsets are
##            extracted from the model formula applied to newdata
##
## Returns a list with components error.struct, y, x and offset.
prepare_newdata <- function(object, newdata, newoffset) {
  if (object$rho$function.name == "mvord") {
    ## long format: split newdata by subject/outcome index first
    if (!all(object$rho$index %in% colnames(newdata)))
      stop("Subject and outcome index do not appear in column names of newdata.")
    data.mvord <- mvord_data(newdata, object$rho$index,
                             object$rho$response.name,
                             unique(c(object$rho$x.names, object$rho$weights.name)),
                             y.levels = object$rho$levels,
                             response.names = object$rho$response.names)
    y <- data.mvord$y
    x <- lapply(seq_len(object$rho$ndim), function(j) {
      if(all(is.na(data.mvord$x[[j]]))){
        ## outcome j has no covariate information in newdata: keep the all-NA
        ## placeholder produced by mvord_data()
        tmp <- data.mvord$x[[j]]
      } else {
        ## rebuild the model matrix from the RHS of the original formula; an
        ## intercept is forced in here and stripped again below if the fitted
        ## model had none, keeping the "assign" attribute consistent
        rhs.form <- as.formula(paste(as.character(object$rho$formula[-2]), collapse = " "))
        new.rhs.form <- update(rhs.form, ~ . + 1)
        tmp <- suppressWarnings(model.matrix(new.rhs.form,
                                             model.frame(new.rhs.form, data.mvord$x[[j]], na.action = function(x) x),
                                             contrasts.arg = attr(object, "contrasts")))
        attribute <- attr(tmp, "assign")
        intercept <- ifelse(attr(terms.formula(object$rho$formula), "intercept") == 1, TRUE, FALSE)
        if (intercept == FALSE){
          attribute <- attribute[-1]
          tmp <- tmp[,-1, drop = FALSE]
        }
        ## re-align rows with the subject ordering of data.mvord (model.matrix
        ## may drop or reorder rows)
        tmp <- tmp[match(rownames(data.mvord$x[[j]]),rownames(tmp)), , drop=FALSE]
        rownames(tmp) <- rownames(data.mvord$x[[j]])
        attr(tmp, "assign") <- attribute
      }
      tmp
    })
    error.struct <- init_fun(object$error.struct, data.mvord, attr(object, "contrasts"))
  } else {
    ## wide format (MMO2): one response column per outcome
    if (!all(object$rho$y.names %in% colnames(newdata)))
      stop("Response names in newdata do not match with the outcome names in the estimated model.")
    y <- newdata[,object$rho$y.names]
    y <- do.call("cbind.data.frame", lapply(seq_len(ncol(y)), function(j)
      ordered(y[,j], levels = object$rho$levels[[j]])))
    colnames(y) <- object$rho$y.names
    x <- lapply(seq_len(object$rho$ndim), function(j) {
      ## same model-matrix reconstruction as above, applied to newdata directly
      rhs.form <- as.formula(paste(as.character(object$rho$formula[-2]), collapse = " "))
      new.rhs.form <- update(rhs.form, ~ . + 1)
      tmp <- suppressWarnings(model.matrix(new.rhs.form,
                                           model.frame(new.rhs.form, newdata, na.action = function(x) x),
                                           contrasts.arg = attr(object, "contrasts")))
      attribute <- attr(tmp, "assign")
      intercept <- ifelse(attr(terms.formula(object$rho$formula), "intercept") == 1, TRUE, FALSE)
      if (intercept == FALSE){
        attribute <- attribute[-1]
        tmp <- tmp[,-1, drop = FALSE]
      }
      attr(tmp, "assign") <- attribute
      as.data.frame(tmp)
    })
    data.mvord <- list(y = y, x = x)
    error.struct <- init_fun(object$error.struct, data.mvord,
                             attr(object, "contrasts"))
  }
  if (is.null(newoffset)) {
    ## extract offsets from the model formula; NAs in the frame are zeroed so
    ## that model.offset() does not propagate them
    newoffset <- lapply(seq_len(object$rho$ndim), function(j) {
      rhs.form <- object$rho$formula
      rhs.form[[2]] <- NULL
      newdata_tmp <- switch(object$rho$function.name,
                            "mvord" = data.mvord$x[[j]],
                            "mvord2" = newdata)
      mf <- model.frame(rhs.form, newdata_tmp,
                        na.action = function(x) x)
      mf[is.na(mf)] <- 0
      if (is.null(model.offset(mf))) {
        ofs <- double(NROW(y))
      } else {
        ofs <- model.offset(mf)
      }
      ofs
    })
  }
  return(list(error.struct = error.struct, y = y, x = x, offset = newoffset))
}
## Expand the per-outcome design matrices into category-specific blocks.
## For each outcome j, every covariate column is replicated ncat[j] times and
## zeroed out except in the column that matches the observed category of
## y[, j]. U drops the first-category column of each block (used with the
## upper thresholds), L drops the last one (lower thresholds). With no
## covariates (object$rho$p == 0) the empty placeholders are returned.
make_Xcat <- function(object, y, x) {
  ndim <- NCOL(y)
  ncat <- vapply(seq_len(ndim), function(j) nlevels(y[, j]), integer(1L))
  XcatU <- replicate(ndim, integer(), simplify = FALSE)
  XcatL <- replicate(ndim, integer(), simplify = FALSE)
  p <- object$rho$p
  if (p > 0) {
    for (j in seq_len(ndim)) {
      ## 0/1 indicator: row i has a single 1 in the column of the category
      ## observed for subject i on outcome j
      cat_ind <- 1 * (col(matrix(0, nrow(y), ncat[j])) == c(unclass(y[, j])))
      ## replicate every covariate column across categories and mask it
      blocks <- do.call("cbind", lapply(as.data.frame(x[[j]]),
                                        function(v) cat_ind * v))
      ## lower bounds: drop the first-category column of every covariate block
      XcatL[[j]] <- blocks[, -(ncat[j] * (seq_len(p) - 1) + 1), drop = FALSE]
      ## upper bounds: drop the last-category column of every covariate block
      XcatU[[j]] <- blocks[, -(ncat[j] * seq_len(p)), drop = FALSE]
    }
  }
  list(U = XcatU, L = XcatL)
}
## marginal_predict: type == "linpred"
## Standardized lower/upper linear-predictor bounds of the observed category
## of each outcome, returned for the subjects selected by ind.
marg_pred_linpred_fun <- function(object, y, x, offset, stddevs, ind) {
  Xcat <- make_Xcat(object, y, x)
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  ## upper bound: threshold above the observed category minus x'beta
  pred.upper <- sapply(seq_len(object$rho$ndim), function(j) {
    th_u <- c(object$theta[[j]], object$rho$inf.value)[y[, j]]
    xbeta_u <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    th_u - xbeta_u - offset[[j]]
  })/stddevs
  ## lower bound: threshold below the observed category minus x'beta
  pred.lower <- sapply(seq_len(object$rho$ndim), function(j) {
    th_l <- c(-object$rho$inf.value, object$theta[[j]])[y[, j]]
    xbeta_l <- as.double(Xcat$L[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    th_l - xbeta_l - offset[[j]]
  })/stddevs
  colnames(pred.lower) <- colnames(pred.upper) <- object$rho$y.names
  ## BUGFIX: label the full matrices and subset afterwards. Previously
  ## rownames(y)[ind] was assigned to the UNsubsetted matrices, which failed
  ## (dimnames length mismatch) for a proper subset ind and mislabelled the
  ## rows for a permuted ind. Sibling marg_pred_prob_fun() subsets first.
  rownames(pred.lower) <- rownames(pred.upper) <- rownames(y)
  return(list(U = pred.upper[ind, ], L = pred.lower[ind, ]))
}
## marginal_predict: type == "prob"
## Univariate fitted probability of the observed category, per outcome, for
## the subjects selected by ind (rows) and outcomes (columns).
marg_pred_prob_fun <- function(object, y, x, offset, stddevs, ind) {
  Xcat <- make_Xcat(object, y, x)
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  ndim <- object$rho$ndim
  ## standardized upper and lower bounds of the observed category
  ub <- sapply(seq_len(ndim), function(j) {
    thresh <- c(object$theta[[j]], object$rho$inf.value)[y[, j]]
    lin <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    thresh - lin - offset[[j]]
  })/stddevs
  lb <- sapply(seq_len(ndim), function(j) {
    thresh <- c(-object$rho$inf.value, object$theta[[j]])[y[, j]]
    lin <- as.double(Xcat$L[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    thresh - lin - offset[[j]]
  })/stddevs
  ## P(lower < Z <= upper) under the univariate link, restricted to ind
  prob <- (object$rho$link$F_uni(ub) - object$rho$link$F_uni(lb))[ind, ]
  colnames(prob) <- object$rho$y.names
  rownames(prob) <- rownames(y)[ind]
  return(prob)
}
## marginal_predict: type == "all.prob"
## For every outcome j, the fitted univariate probability of EACH of its
## categories, for the subjects selected by ind. Returns a list with one
## matrix per outcome (subjects in rows, categories in columns).
marg_pred_allprob_fun <- function(object, y, x, offset, stddevs, ind) {
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  probs <- lapply(seq_len(object$rho$ndim), function(j){
    pr <- sapply(seq_len(object$rho$ncat[j]), function(k){
      # pretend every subject responded with category k on outcome j
      ytmp <- y[,j]
      ytmp[seq_along(ytmp)] <- levels(ytmp)[k]
      # give the factor an (n x 1) dim attribute so that make_Xcat() can
      # treat it as a one-column response; ytmp[, 1] below recovers the factor
      dim(ytmp) <- c(length(ytmp),1)
      Xcat <- make_Xcat(object, ytmp, x[j])
      # standardized upper/lower bounds of category k
      th_u <- c(object$theta[[j]], object$rho$inf.value)[ytmp[, 1]]
      xbeta_u <- as.double(Xcat$U[[1]] %*% betatilde[object$rho$indjbeta[[j]]])
      pred.upper <- (th_u - xbeta_u - offset[[j]])/stddevs[,j]
      th_l <- c(-object$rho$inf.value, object$theta[[j]])[ytmp[, 1]]
      xbeta_l <- as.double(Xcat$L[[1]] %*% betatilde[object$rho$indjbeta[[j]]])
      pred.lower <- (th_l - xbeta_l - offset[[j]])/stddevs[, j]
      # univariate probability mass of category k
      object$rho$link$F_uni(pred.upper) - object$rho$link$F_uni(pred.lower)
    })[ind, ]
    colnames(pr) <- levels(object$rho$y[, j])
    rownames(pr) <- rownames(y)[ind]
    pr
  })
  names(probs) <- object$rho$y.names
  return(probs)
}
## marginal_predict: type == "cum.prob"
## Row-wise cumulative sums of the per-category marginal probabilities.
marg_pred_cumprob_fun <- function(object, y, x, offset, stddevs, ind) {
  all_probs <- marg_pred_allprob_fun(object, y, x, offset, stddevs, ind)
  lapply(all_probs, function(p) t(apply(p, 1, cumsum)))
}
## marginal_predict: type == "class"
## Per outcome, the category with the highest marginal probability for the
## subjects selected by ind, returned as a data frame of ordered factors.
marg_pred_class_fun <- function(object, y, x, offset, stddevs, ind) {
  ## per-category marginal probabilities, already restricted to the rows ind
  probs <- marg_pred_allprob_fun(object, y, x, offset, stddevs, ind)
  y.ord <- as.data.frame(sapply(seq_along(probs), function(j){
    apply(probs[[j]], 1, function(i) {
      class.max <- object$rho$levels[[j]][which.max(i)]
      ## which.max() returns integer(0) if every probability is NA
      ifelse(length(class.max)==0, NA, class.max)
    })
  }))
  for (j in seq_along(object$rho$levels))
    y.ord[,j] <- ordered(y.ord[, j], levels = object$rho$levels[[j]])
  colnames(y.ord) <- object$rho$y.names
  ## BUGFIX: marg_pred_allprob_fun() has already subset the rows by ind;
  ## indexing y.ord[ind, ] a second time produced NA rows (or scrambled
  ## rows) for any ind that is not simply seq_len(nrow(y))
  return(y.ord)
}
###############################
## predict.mvord: type == "prob"
## Joint probability of the observed category combination for every subject.
pred_prob_fun <- function(object, y, x, offset, stddevs, sigma) {
  Xcat <- make_Xcat(object, y, x)
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  ndim <- object$rho$ndim
  ## standardized upper and lower bounds of the observed categories
  ub <- sapply(seq_len(ndim), function(j) {
    thresh <- c(object$theta[[j]], object$rho$inf.value)[y[, j]]
    lin <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    thresh - lin - offset[[j]]
  })/stddevs
  lb <- sapply(seq_len(ndim), function(j) {
    thresh <- c(-object$rho$inf.value, object$theta[[j]])[y[, j]]
    lin <- as.double(Xcat$L[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    thresh - lin - offset[[j]]
  })/stddevs
  ## missing responses integrate over the whole real line
  ub[is.na(ub)] <- object$rho$inf.value
  lb[is.na(lb)] <- -object$rho$inf.value
  prob <- object$rho$link$F_multi(U = ub, L = lb,
                                  list_R = lapply(sigma, cov2cor))
  names(prob) <- rownames(y)
  return(prob)
}
## predict.mvord: type == "cum.prob"
## Joint cumulative probability of the observed categories: the lower
## integration bounds are pinned at -inf.value for every outcome.
pred_cumprob_fun <- function(object, y, x, offset, stddevs, sigma) {
  Xcat <- make_Xcat(object, y, x)
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  ndim <- object$rho$ndim
  ub <- sapply(seq_len(ndim), function(j) {
    thresh <- c(object$theta[[j]], object$rho$inf.value)[y[, j]]
    lin <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
    thresh - lin - offset[[j]]
  })/stddevs
  ## missing responses integrate up to +inf.value
  ub[is.na(ub)] <- object$rho$inf.value
  lb <- matrix(-object$rho$inf.value, ncol = ndim, nrow = nrow(ub))
  cum.prob <- object$rho$link$F_multi(U = ub, L = lb,
                                      list_R = lapply(sigma, cov2cor))
  names(cum.prob) <- rownames(y)
  return(cum.prob)
}
## predict.mvord: type == "class"
## Enumerate every combination of response categories, compute its joint
## probability for all subjects, and return per subject the combination
## with the highest probability (a data frame, one column per outcome).
pred_class_fun <- function(object, y, x, offset, stddevs, sigma) {
  ndim <- object$rho$ndim
  betatilde <- bdiag(object$rho$constraints) %*% object$beta
  # guard against a combinatorial explosion of category combinations
  if (prod(object$rho$ncat) > 1e6) {
    stop("Number of class combinations over 1000000. Try joint_probabilities() for desired class combinations.")
  } else {
    # all index combinations and their category labels (same row order)
    cmbn <- expand.grid(lapply(object$rho$ncat, seq_len))
    cmbn.labels <- expand.grid(object$rho$levels)
    # probs: one row per subject, one column per category combination
    probs <- sapply(seq_len(nrow(cmbn)), function(i){
      # progress output every 100 combinations
      if (i %% 100 == 0) cat('Computed probabilities for', i, 'out of', nrow(cmbn),'combinations\n')
      ###############################################
      # replicate combination i for every subject and coerce it to the
      # ordered factor levels of the fitted responses
      ytmp <- sapply(seq_len(ndim),
                     function(j) object$rho$levels[[j]][cmbn[i,j]])
      ytmp <- matrix(ytmp, ncol = ndim, nrow = nrow(y), byrow = TRUE)
      ytmp <-cbind.data.frame(lapply(seq_len(object$rho$ndim), function(j){
        if (!all(ytmp[,j] %in% c(NA, levels(y[,j])))) stop("response.cat are different from the categories in the original data set")
        else ordered(ytmp[,j], levels = levels(y[,j]))
      }))
      # standardized integration bounds for this category combination
      Xcat <- make_Xcat(object, ytmp, x)
      pred.upper <- sapply(seq_len(ndim), function(j) {
        th_u <- c(object$theta[[j]], object$rho$inf.value)[ytmp[, j]]
        xbeta_u <- as.double(Xcat$U[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
        th_u - xbeta_u - offset[[j]]
      })/stddevs
      pred.lower <- sapply(seq_len(object$rho$ndim), function(j) {
        th_l <- c(-object$rho$inf.value, object$theta[[j]])[ytmp[, j]]
        xbeta_l <- as.double(Xcat$L[[j]] %*% betatilde[object$rho$indjbeta[[j]]])
        th_l - xbeta_l - offset[[j]]
      })/stddevs
      # NA bounds (e.g. missing responses) integrate over the whole line
      pred.lower[is.na(pred.lower)] <- -object$rho$inf.value
      pred.upper[is.na(pred.upper)] <- object$rho$inf.value
      ############################################
      object$rho$link$F_multi(U = pred.upper, L = pred.lower,
                              list_R = lapply(sigma, cov2cor))
    })
    # per subject (row), pick the combination with maximal joint probability
    ind.max <- apply(probs,1,which.max)
    class <- cmbn.labels[ind.max,]
    rownames(class) <- rownames(y)
    colnames(class) <- object$rho$y.names
    return(class)
  }
}
|
5867b814af64bf007c3f26956d2526c2da83204c | c34606129bd12e811d5107b13d47234bc05bceea | /R/mOrdNbr.R | 6a09bfb23912f0ac8b2f6b12a41f50f74f5b77cd | [] | no_license | shizelong1985/SDPDmod | 901bb9e6759d56923ce5db7e0a88d116354e52ef | d6e00accbf97f8d9be18e0b8b032ec3c81bd4c85 | refs/heads/master | 2023-04-21T05:43:38.628449 | 2021-05-09T15:38:05 | 2021-05-09T15:38:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,385 | r | mOrdNbr.R | #' @name mOrdNbr
#' @title 1st to m-th order neighbours matrix
#'
#' @description Finds the neighbourhood matrix containing neighbours of order 1 up to order m.
#'
#' @param sf_pol spatial polygons object
#' @param m the order of neighbours up to which they will be included in the weights matrix, default 1
#' @param neigbs neighbours list, default NULL
#' @param listv logocal, default FALSE. If TRUE the list of neighbours should also be returned
#' @param rn logical, default FALSE. If TRUE, the weigth matrix will be row normalised
#'
#' @return
#' \describe{\emph{W}} spatial weights matrix (and list of neighbours \emph{nlist})
#'
#' @author Rozeta Simonovska
#'
#' @import spdep
#' @import methods
#'
#' @examples
#' library("rgdal")
#' ger<-readOGR(system.file(dsn="shape",package="SDPDmod"),layer="GermanyNUTS3")
#' m1thn<-mOrdNbr(ger)
#' m4thn<-mOrdNbr(ger,4)
#' mat1<-rownor(m4thn)
#' m4thn2<-mOrdNbr(ger,4,listv=TRUE,rn=TRUE)
#' mat2<-m4thn2$W
#'
#' @export
## Build the binary spatial weights matrix containing neighbours of order
## 1 through m. Either a SpatialPolygons object (sf_pol) or a ready-made
## neighbours list (neigbs, e.g. from spdep::poly2nb) must be supplied.
mOrdNbr <- function(sf_pol = NULL, m = 1, neigbs = NULL, listv = FALSE, rn = FALSE){
  ## input checks (scalar conditions use && instead of the elementwise &)
  if (is.null(sf_pol) && is.null(neigbs)) {
    stop("Missing value for sf_pol and neigbs! At least one value for sf_pol or neigbs has to be entered.")
  } else if (!is.list(neigbs) && length(neigbs) == 0) {
    if (!is(sf_pol, "SpatialPolygons")) {
      stop("Wrong entry! Value must be a spatial polygons object.")
    } else {
      neigbs <- spdep::poly2nb(sf_pol)
    }
  }
  N <- length(neigbs)
  ## first-order adjacency; poly2nb codes regions without neighbours as 0,
  ## which must not be used as a column index
  W <- matrix(0, nrow = N, ncol = N)
  for (i in seq_len(N)) {
    if (all(neigbs[[i]] != 0)) {
      W[i, neigbs[[i]]] <- 1
    }
  }
  ## nbrL[[k]] collects the k-th order neighbours of every region
  nbrL <- vector("list", m)
  nbrL[[1]] <- neigbs
  if (m > 1) {
    for (j in 2:m) {
      nbrL[[j]] <- vector("list", N)
    }
    for (k in 2:m) {
      for (i in seq_len(N)) {
        prev <- nbrL[[k - 1]]
        ## NOTE: this condition was written `all(prev[[i]]) != 0`, comparing
        ## a logical with 0; equivalent by coercion, but now written
        ## consistently with the first-order check above
        if (all(prev[[i]] != 0)) {
          ## candidates: first-order neighbours of the (k-1)-th order ones
          cand <- vector()
          for (jj in prev[[i]]) {
            cand <- c(cand, neigbs[[jj]])
          }
          cand <- unique(cand)
          cand <- cand[order(cand)]
          ## exclude the region itself and all lower-order neighbours
          lower <- c(i, unlist(lapply(seq_len(m - 1), function(l) nbrL[[l]][[i]])))
          newnb <- cand[which(!cand %in% lower)]
          nbrL[[k]][[i]] <- newnb
          if (length(newnb) != 0) {
            W[i, newnb] <- 1
          }
        }
      }
    }
  }
  ## optional row-normalisation
  if (rn) {
    W <- rownor(W)
  }
  if (listv) {
    return(list(W = W, nlist = nbrL))
  } else {
    return(W)
  }
}
|
d0e9dad40e0d11d4ac336bc2686e5a5d8d05e865 | 8bd2121b538c43f9b66b32101babdbd5c2ec739c | /man/AUC.randomForest.Rd | 695e0bca5b6db3209f0ab64e29a00ef284a84f74 | [] | no_license | magnusmunch/CoRF | 4bf90b3865d8a9ca90bc2d2925ec92f2918808f3 | 7c9346457e98f9930a3b44d86b5e789df933e349 | refs/heads/master | 2022-04-04T03:35:10.894007 | 2020-02-20T15:58:55 | 2020-02-20T15:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 445 | rd | AUC.randomForest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoRF.R
\name{AUC.randomForest}
\alias{AUC.randomForest}
\title{Calculate the oob-auc of a rfsrc object}
\usage{
AUC.randomForest(rf)
}
\arguments{
\item{rf}{A rfsrc object.}
}
\value{
Returns the oob-auc.
}
\description{
Calculate the oob-auc of a rfsrc object (from randomForestSRC) using rfsrc$predicted.oob and the binary response variable.
}
|
0c22c7a7d36fd5a88643daf6f8ef6a42425a5545 | 277dbb992966a549176e2b7f526715574b421440 | /R_training/MyCode/3주_dplyr 및 시각화/day4/2. ggplot2_lab(학습).R | 39f0c8d2e2055075cacbccdf73412659aeb8a360 | [] | no_license | BaeYS-marketing/R | 58bc7f448d7486510218035a3e09d1dd562bca4b | 03b500cb428eded36d7c65bd8b2ee3437a7f5ef1 | refs/heads/master | 2020-12-11T04:30:28.034460 | 2020-01-17T08:47:38 | 2020-01-17T08:47:38 | 227,819,378 | 0 | 0 | null | 2019-12-13T12:06:33 | 2019-12-13T10:56:18 | C++ | UTF-8 | R | false | false | 2,160 | r | 2. ggplot2_lab(학습).R | library(ggplot2)
#1
mpg
ggplot(mpg,aes(x=cty,y=hwy))+geom_point(col= 'blue')
ggsave('result1.png')
# ggplot(mpg,aes(x=cty,y=hwy,col= 'blue'))+geom_point()
#2
View(mpg)
ggplot(mpg, aes(x=class,fill=drv))+geom_bar()
ggsave('result2.png')
#3
midwest<-data.frame(midwest)
ggplot(midwest,aes(x=poptotal,y=popasian,options(scipen = 99)))+geom_point()+
coord_cartesian(xlim=c(0,500000),ylim=c(0,10000))
ggsave('result3.png')
'정수로 표현하기 : options(scipen = 99) 실행 후 그래프 생성
지수로 표현하기 : options(scipen = 0) 실행 후 그래프 생성
R 스튜디오 재실행시 옵션 원상 복구됨'
#4
#ggplot(mpg,aes(x=class[class==c('compact','subcompact','suv')],y=cty))+geom_boxplot()
#ggplot(mpg,aes(x=class[mpg$class=='compact'],y=cty))+geom_boxplot()
#방법1 인덱싱 ~ 아직도 인덱싱 이해가 부족하네
'1. 벡더가 아닌 데이터프레임에서 인덱싱
2. ggplot이해 data에 사용할 데이터를 지정하고,
그 선정된 데이터에서_ aes할당은 컬럼 하나를 선택하는 것.'
ggplot(mpg[mpg$class==c('compact','subcompact','suv'),],aes(x=class,y=cty))+geom_boxplot()
#방법2 dplyr 패키지 이용
library(dplyr)
data(mpg)
mpg2<-mpg %>% filter(class==c('compact','subcompact','suv'))
str(mpg2)
ggplot(mpg2,aes(x=class,y=cty)) +
geom_boxplot() +
coord_cartesian(ylim=c(10,35))
ggsave('result4.png')
#5.
product<-read.table('data/product_click.log',
col.names = c('ord','kind'))
str(product)
# table(product$kind) : 할 필요가 없는게 geom_bar::ggplot에서 자동으로 통계
# ggplot(product,aes(x=kind)) + geom_bar(colour=kind)
ggplot(product,aes(x=kind)) + geom_bar(aes(fill=factor(product$kind)))
ggsave('result5.png')
#6
product
time <- format(product$ord)
t<-as.numeric(time)
# t<-as.Date(t,origin='1970/01/01')
t<-as.Date(t,format='%d%b%y')
weekdays(t)
table(t)
time <- strptime(product$ord, format = '%Y%m%d%H%M')
days<-format(time, "%A")
table(days)
ggplot(product, aes(x=days))+geom_bar()
ggplot(product, aes(x=days, fill=days))+geom_bar()
ggsave('result6.png')
|
9b5e66561998fa5f3f1aab0612f68dd5cef98f3b | 172eecc1b7bf13cce50deefffa7e2688445d9a68 | /MVA/PCA_Assignment.R | 6581ce747ee7e774e3157f5be85b1d784653bc37 | [] | no_license | pritesh1082/Projects | b0ddd4dfc791129c10db0dcd2d0e884447e4c658 | a7f7676a5c91232edd5ff4cc6b961081c438ceea | refs/heads/master | 2023-05-14T07:36:06.373782 | 2020-08-07T18:29:35 | 2020-08-07T18:29:35 | 232,046,637 | 0 | 0 | null | 2023-05-01T21:25:31 | 2020-01-06T07:22:56 | R | UTF-8 | R | false | false | 8,060 | r | PCA_Assignment.R | library(data.table)#Data. table is an extension of data. frame package in R. It is widely used for fast aggregation of large datasets,
library(Hmisc)#data analysis funs
library(dplyr)
library(tidyverse)
library(ggplot2)
library(plotly)
library(GGally)
library(ggthemes)
library(psych)
library(relaimpo)
library(e1071)
# Load the Airbnb Istanbul listings; keep strings as character (not factors)
AirbnbIstanbul<- read.csv("C:/Pritesh/Rutgers/Courses/Projects/MVA/Dataset/AirbnbIstanbul.csv", stringsAsFactors=FALSE)
# Work on a copy so the raw import stays untouched (copy() is data.table's deep copy)
Istanbul <- copy(AirbnbIstanbul)
View(Istanbul)
str(Istanbul)
# Checking number of rows and columns
dim(Istanbul)
class(Istanbul)
names(Istanbul)
# NOTE(review): attach() puts the columns on the search path so later code can
# use bare names such as room_type; name masking is a known risk of attach().
attach(Istanbul)
head(Istanbul,25)
#Removing neighbourhood_group and last_review
#Creating new data table with all the quantitative columns, named Istanbul_num2
Istanbul_num2 <- Istanbul[,c("latitude","longitude","price","minimum_nights","number_of_reviews","calculated_host_listings_count","availability_365")]
# Correlation of the numeric columns (setDT converts in place to a data.table)
setDT(Istanbul_num2)
cor(Istanbul_num2)
#Very little correlation between 'Number of reviews and calculated host listing - 0.174662879' & 'calcHostlisting and availability365 - 0.173068073'
# PCA ----
# Apply PCA on the numeric columns only (PCA is not recommended for
# categorical data); scale = TRUE standardizes each variable first.
Istanbul_ip_pca <- prcomp(Istanbul_num2,scale=TRUE)
Istanbul_ip_pca
# Loadings interpretation (from the rotation matrix printed above):
#PC1--> Dominated by negative effect of calculated_host_listings_count and availability_365 and no of reviews
#PC2--> major +ve effect of latitude and negative effect of longitude
#PC3 --> Major +ve effect of minimum_nights and Price
#PC4 --> Major negative effect of minimum_nights and +ve effect of price
#PC5 --> Major negative effect of availability_365 and +ve effect of number_of_reviews
# Summary of the principal components
summary(Istanbul_ip_pca)
#As per Summary output, 'Cumulative Proportion' field, 88.97% of cumulative variance is explained by PC1, PC2,----PC6
#So we will have to include PC1 till PC6 to prevent loss of information.
Istanbul_ip_pca$sdev
# A table containing eigenvalues and %'s accounted, follows
# Eigenvalues are sdev^2
(eigen_Istanbul <- Istanbul_ip_pca$sdev^2)
names(eigen_Istanbul) <- paste("PC",1:7,sep="")## giving names PC1 to PC7
eigen_Istanbul
names(eigen_Istanbul)
# Sum of all eigenvalues
sumlambdas1 <- sum(eigen_Istanbul)
sumlambdas1 #sum of eigenvalues equals the total variance of the (scaled) dataset
propvar1 <- eigen_Istanbul/sumlambdas1
propvar1 #propvar1 gives the proportion of variance for each PC
# Percentage of total variance
percentvar <- (eigen_Istanbul/sumlambdas1) *100
percentvar
# Bar plot of percentage variance per component
barplot(percentvar, main = "Bar Plot", xlab = "Principal Component", ylab = "Percentage Variance")
#[1] 0.1833604 0.1702333 0.1473737 0.1405079 0.1322170 0.1159617 0.1103461
# Output above: no single component explains much variance, so all must be included
cumvar_Istanbul <- cumsum(propvar1)
cumvar_Istanbul #cumulative sum of the variance proportions
#PC1 to PC6 explain 88.96% of variance
# Bar plot of cumulative percentage variance
barplot(cumvar_Istanbul, main = "Bar Plot", xlab = "Principal Component", ylab = "Percentage Variance")
# Combined table: eigenvalues, proportion and cumulative proportion of variance
matlambdas <- rbind(eigen_Istanbul,propvar1,cumvar_Istanbul)
rownames(matlambdas) <- c("Eigenvalues","Prop. variance","Cum. prop. variance")
round(matlambdas,4)
summary(Istanbul_ip_pca)
#As per Summary output, 'Cumulative Proportion' field, 88.97% of cumulative variance is explained by PC1, PC2,----PC6
#So we will have to include PC1 till PC6 to prevent loss of information.
#
Istanbul_ip_pca$rotation #= print(Istanbul_ip_pca)
# output of the PCA print is the same as the rotation matrix above
print(Istanbul_ip_pca)
# Below command gives our new dataset (the principal component scores)
head(Istanbul_ip_pca$x,5) #This is our new dataset
# Scree plots (raw and log scale)
plot(eigen_Istanbul, xlab = "Component number", ylab = "Component variance", type = "l", main = "Scree diagram")
plot(log(eigen_Istanbul), xlab = "Component number",ylab = "log(Component variance)", type="l",main = "Log(eigenvalue) diagram")
#=====second part===
names(Istanbul_ip)
# NOTE(review): 'Istanbul_ip' is not defined anywhere in this script --
# presumably 'Istanbul_ip_pca' was meant; confirm before running end-to-end.
# Attach the grouping columns to the PC scores
# ('neighbourhood' and 'room_type' resolve via the attach(Istanbul) above)
Istanbultyp_pca <- cbind(data.frame(neighbourhood,room_type),Istanbul_ip_pca$x) ## Istanbul_ip_pca$x is the transformed matrix
names(Istanbultyp_pca)
#Istanbultyp_pca This is our new dataset
head(Istanbultyp_pca,5)
# Rename the principal components with descriptive labels
names(Istanbultyp_pca) <- c("Neighbourhood", "Room_Type", "calc_Review_365_Negative", "Lattitude_Positive_Longi_Negate",
                            "MinNight_Price_Positive","MinNightNegative_PricePos","availabilityNegate_Reviews_Pos",
                            "Positive_Lat_Long","CalcHostListing_Pos")
#This is our new dataset
names(Istanbultyp_pca)
#View(Istanbultyp_pca)
dim(Istanbultyp_pca)
head(Istanbultyp_pca,5)
#---
# Means of scores for all the PC's (columns 3-9), classified by room type
tabmeansPC1 <- aggregate(Istanbultyp_pca[,c(3,4,5,6,7,8,9)],by=list(room_type=Istanbul$room_type),mean)
tabmeansPC1 #Means of all the columns per Room Type
# Because of the +ve/-ve loading signs the group means differ visibly
tabmeansPC1 <- tabmeansPC1[rev(order(tabmeansPC1$room_type)),]
tabmeansPC1
tabfmeans1 <- t(tabmeansPC1[,-1]) #transpose
tabfmeans1
colnames(tabfmeans1) <- t(as.vector(tabmeansPC1[1]))
tabfmeans1 #This is means for all PCs per room Type
# Standard deviations of scores for all the PC's classified by Room Type
tabsdsPC1 <- aggregate(Istanbultyp_pca[,c(3,4,5,6,7,8,9)],by=list(room_type=Istanbul$room_type),sd)
tabfsds1 <- t(tabsdsPC1[,-1])
colnames(tabfsds1) <- t(as.vector(tabsdsPC1[1]))
tabfsds1 #This is Std Deviation for all PCs per room Type
class(tabfsds1) #changed to matrix
# Levene's tests (one-sided): halve the two-sided p-value from leveneTest
library(car)
library(carData)
names(Istanbultyp_pca)
(LTPC1 <- leveneTest(calc_Review_365_Negative~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC1_1sided1 <- LTPC1[[3]][1]/2)
(LTPC2 <- leveneTest(Lattitude_Positive_Longi_Negate~Istanbul$room_type,data=Istanbultyp_pca))
# NOTE(review): '=' used for assignment here, '<-' everywhere else -- inconsistent
(p_PC2_1sided=LTPC2[[3]][1]/2)
(LTPC3 <- leveneTest(MinNight_Price_Positive~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC3_1sided <- LTPC3[[3]][1]/2)
(LTPC4 <- leveneTest(MinNightNegative_PricePos~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC4_1sided <- LTPC4[[3]][1]/2)
(LTPC5 <- leveneTest(availabilityNegate_Reviews_Pos~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC5_1sided <- LTPC5[[3]][1]/2)
(LTPC6 <- leveneTest(Positive_Lat_Long~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC6_1sided <- LTPC6[[3]][1]/2)
(LTPC7 <- leveneTest(CalcHostListing_Pos~Istanbul$room_type,data=Istanbultyp_pca))
(p_PC7_1sided <- LTPC7[[3]][1]/2)
# Plotting the scores for the first and second components for Private Rooms
plot(Istanbultyp_pca$calc_Review_365_Negative, Istanbultyp_pca$Lattitude_Positive_Longi_Negate,
     pch=ifelse(Istanbultyp_pca$Room_Type == "Private room",1,16),xlab="PC1", ylab="PC2", main="Private rooms against values for PC1 & PC2")
abline(h=0)
abline(v=0)
legend("bottomleft", legend=c("Private Room","Other"), pch=c(1,16))
#names(Istanbul_ip_pca)
#Istanbul_ip_pca
#range(Istanbul_ip_pca$x[,1])
# Shows PCA details like center and standard deviation
View(Istanbul_ip_pca)
# Variances of the (uncorrelated) PC scores
diag(cov(Istanbul_ip_pca$x))
xlim <- range(Istanbul_ip_pca$x[,1])
#xlim
#Istanbul_ip_pca$x[,1]
#Istanbul_ip_pca$x
plot(Istanbul_ip_pca$x,xlim=xlim,ylim=xlim)
Istanbul_ip_pca$rotation[,1]
Istanbul_ip_pca$rotation
#plot(Istanbul[,-1])
# Plotting variances of the components
plot(Istanbul_ip_pca)
# Recover the scores from the original data based on the PCA rotation
center <- Istanbul_ip_pca$center
scale <- Istanbul_ip_pca$scale
new_Istanbul <- as.matrix(Istanbul[,-1])
new_Istanbul
#drop(scale(new_Istanbul,center=center, scale=scale)%*%Istanbul_ip_pca$rotation[,1])
predict(Istanbul_ip_pca)[,1]
names(Istanbul_ip_pca)
# The above two give us the same thing. predict() is a good function to know.
#out <- sapply(1:5, function(i){plot(Istanbul$room_type,Istanbul_ip_pca$x[,i],xlab=paste("PC",i,sep=""),ylab="RoomType")})
# Pairwise scatter/correlation panel of the first six PCs (first 100 rows)
pairs.panels(Istanbul_ip_pca$x[1:100,c(1,2,3,4,5,6)])
##Conclusion: As per Summary output, 'Cumulative Proportion' field, 88.97% of cumulative variance is explained by PC1, PC2,----PC6
#So we will have to include PC1 till PC6 to prevent loss of information.
|
03e078cda5cf9bec0d08a96cf85b7a988e4d55fd | ebee9629abd81143610a6352288ceb2296d111ac | /tests/17-directsion-pcf.R | 7bf6f791f6b5fe6a7ebea731066aeaf263a12e86 | [] | no_license | antiphon/Kdirectional | 76de70805b4537a5aff0636486eb387cb64069b0 | 98ab63c3491f1497d6fae8b7b096ddd58afc4b29 | refs/heads/master | 2023-02-26T02:19:41.235132 | 2023-02-12T13:07:11 | 2023-02-12T13:07:11 | 37,183,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 581 | r | 17-directsion-pcf.R | #' anisotropic pcf
library(devtools)
load_all(".")                  # load the package from the current source tree
library(rstrauss)
set.seed(1)                    # reproducible simulation
d3 <- F                        # TRUE would extend the boxes to three dimensions
comp <- 0.5                    # compression factor
# Compressed bounding box and its unit-square counterpart
bb <- cbind(c(0,comp) -comp/2, c(0,1/comp)-0.5/comp)
bbm <- cbind(0:1, 0:1)-0.5
# Linear map that undoes the compression
M <- diag(c(1/comp, comp))
if(d3) bb<-cbind(bb, 0:1)
# Simulate a (perfect) Strauss process in the compressed box
# (note: R<-0.03 assigns R in the caller as a side effect of the call)
pp0 <- rstrauss(500, .01, R<-0.03, perfect=TRUE, bbox=bb, iter=2e4)
xm <- pp0$x%*%M                # map the pattern back to the unit box
pp <- list(x=xm, bbox=bbm)
#plot(pp0$x, asp=1); points(pp$x, col=2)
#'
# Directional pair correlation functions of the original and transformed patterns
g0 <- pcf_directions(pp0, n_dir=25, r=seq(0,0.1, length=30))
g <- pcf_directions(pp, n_dir=25, r=seq(0,0.1, length=30))
#' compare:
par(mfrow=c(1,2))
flower.pcf_directions(g0)
flower(g)
|
3ec7acd14c0c08486a0d5ea456e47ecd074385d4 | 97de7ad5376d4808d1f8a2384ef030691aa4d037 | /Figure2.R | cadafb8b554d24cafcba3a09a5e0b5f81db9d787 | [] | no_license | BeijingCCB/GoldenTheory | 10ae148a3a8a4872fab8bd4d23862d2ff71a9deb | ba4fd9e1c8b7425318e9b0df06788864e001066f | refs/heads/main | 2023-06-02T14:48:42.207579 | 2021-06-19T09:04:22 | 2021-06-19T09:04:22 | 378,304,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,448 | r | Figure2.R |
# --- DJ species pair: load log-transformed growth data and build
# elementwise descriptors over a 100 x 14 grid (100 series, 14 time points).
# Naming: *.p = co-culture values, *.pi = mono-culture values.
coE <- log(read.csv("DJ_D_co.csv",header = F))[,-1]
coS <- log(read.csv("DJ_J_co.csv",header=F))[,-1]
moE <- log(read.csv("D_mo.csv",header=F))[,-1]
moS <- log(read.csv("J_mo.csv",header=F))[,-1]
dat <- list(ep.p=coE,sp.p=coS,ep.pi=moE,sp.pi=moS)
# nn: pairwise max/min ratio of the two co-culture values.
# NOTE(review): nn and nn1 are computed but never used below, and mux/muy
# from this DJ section are overwritten by the DP section before plotting —
# this whole block appears to be dead exploratory code. TODO confirm.
nn <- matrix(NA,nrow=100,ncol=14)
for(i in 1:100){
  for(j in 1:14){
    nn[i,j] <- max(c(dat$sp.p[i,j],dat$ep.p[i,j]))/min(c(dat$sp.p[i,j],dat$ep.p[i,j]))
  }
}
# nn1: same ratio for the mono-culture values.
nn1 <- matrix(NA,nrow=100,ncol=14)
for(i in 1:100){
  for(j in 1:14){
    nn1[i,j] <- max(c(dat$sp.pi[i,j],dat$ep.pi[i,j]))/min(c(dat$sp.pi[i,j],dat$ep.pi[i,j]))
  }
}
# mux ("mathematical descriptor" Z_mu): product of co-culture values divided
# by their gap, with the gap floored at 0.1 to avoid division blow-up.
mux <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    td <- (max(c(dat$sp.p[i,j],dat$ep.p[i,j]))-min(c(dat$sp.p[i,j],dat$ep.p[i,j])))
    if(td<0.1)
      td <- 0.1
    mux[i,j] <- (dat$sp.p[i,j]*dat$ep.p[i,j])/td
  }
}
# Drop extreme descriptor values (> 600) as outliers.
index <- which(mux>600)
mux[index] <- NA
# muy ("strength of mutualism" M_u): mean of the min/min and max/max
# co-culture-to-mono-culture ratios; masked at the same outlier positions.
muy <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    muy[i,j] <- (min(c(dat$sp.p[i,j],dat$ep.p[i,j]))/min(c(dat$sp.pi[i,j],dat$ep.pi[i,j]))+
                   max(c(dat$sp.p[i,j],dat$ep.p[i,j]))/max(c(dat$sp.pi[i,j],dat$ep.pi[i,j])))/2
  }
}
muy[index] <- NA
# --- Figure 2, panel A (DP species pair) ---------------------------------
# Opens the PDF device (2 x 3 layout: two species pairs x three growth
# phases: lag / linear / asymptotic) and draws the top row of scatterplots
# of descriptor Z_mu against mutualism strength M_u.
# NOTE(review): Arrows() is from the `shape` package, not loaded in this
# chunk — presumably library(shape) appears earlier in the file; confirm.
pdf("Figure_2.pdf",width=12.5,height=3.45*2,fonts=c("serif","Palatino"))
par(mar=c(2,0,0,0),oma=c(3.5,10.5,3,5))
par(mfrow=c(2,3))
#EP
# Reload data for the DP pair and rebuild mux/muy exactly as above.
coE <- log(read.csv("DP_D_co.csv",header = F))[,-1]
coS <- log(read.csv("DP_P_co.csv",header=F))[,-1]
moE <- log(read.csv("D_mo.csv",header=F))[,-1]
moS <- log(read.csv("P_mo.csv",header=F))[,-1]
dat <- list(ep.p=coE,sp.p=coS,ep.pi=moE,sp.pi=moS)
mux <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    td <- (max(c(dat$sp.p[i,j],dat$ep.p[i,j]))-min(c(dat$sp.p[i,j],dat$ep.p[i,j])))
    if(td<0.1)
      td <- 0.1
    mux[i,j] <- (dat$sp.p[i,j]*dat$ep.p[i,j])/td
  }
}
index <- which(mux>600)
mux[index] <- NA
muy <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    muy[i,j] <- (min(c(dat$sp.p[i,j],dat$ep.p[i,j]))/min(c(dat$sp.pi[i,j],dat$ep.pi[i,j]))+
                   max(c(dat$sp.p[i,j],dat$ep.p[i,j]))/max(c(dat$sp.pi[i,j],dat$ep.pi[i,j])))/2
  }
}
muy[index] <- NA
###2-1
# Split columns by growth phase: 1-2 = lag, 3-5 = linear, rest = asymptotic.
sp <- muy
es <- mux
splag <- sp[,1:2]
splin <- sp[,3:5]
spa <- sp[,-c(1:5)]
# Panel A-1 (lag phase): empty canvas, reference line at M_u = 1,
# competition/cooperation arrow, points, and a degree-1 fit.
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.76,1.62),ylim=c(-40,638))
a1 <- unlist(c(splag));b1 <- unlist(c(es[,c(1:2)]))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");#segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.8,500,1.5,500,col="#9BCD9B",code=3,arr.type="triangle")
a1 <- a1[!is.na(a1)];b1 <- b1[!is.na(b1)]
points(a1,b1,cex=2,col="#FFA07A")
a1.o <- order(a1)
a11 <- a1[a1.o]; b11 <- b1[a1.o]
#>1
ii1 <- which(a11>0)
a111 <- a11[ii1];b111 <- b11[ii1]
lo11 <- lm(b111 ~ poly(a111,1))
lines(a111,predict(lo11,data.frame(x=a111)),col="red",lwd=3)
# Hard-coded correlation annotation — computed elsewhere; TODO confirm value.
text(0.76+(1.62-0.76)*0.75,638*0.92,"r=0.182",cex=1.8,font=3)
#mtext("Lag",3,line=0.5,cex=1.6)
#mtext(expression(I["mu"]),2,line=5,cex=1.6,font=1)
axis(1,seq(0.8,1.6,0.2),rep("",5),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
axis(2,seq(0,600,100),seq(0,600,100),las=1,cex.axis=2,tck=-0.05,mgp=c(2.5,1.6,0))
text(0.92,540,"Competition",cex=1.6,font=2);text(1.25,540,"Cooperation",cex=1.6,font=2)
mtext("A",3,line=-1.2,cex=1.6,font=1,adj=-0.25)
#mtext("Mathematical Descriptor",2,line=8,cex=1.8)
#mtext(expression(Z["mu"]),2,line=7.5,cex=1.8,font=1,adj=1.32)
mtext(expression(paste("Mathematical Descriptor"," (",Z["mu"],")",sep="")),2,line=7,cex=1.8,adj=1.7)
###2-2
# Panel A-2 (linear phase): same layout, linear fit.
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.65,1.72),ylim=c(-40,638))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");
#segments(1,-10,1,10,lwd=1,col="#9BCD9B");segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.68,500,1.55,500,col="#9BCD9B",code=3,arr.type="triangle")
points(unlist(c(splin)),unlist(c(es[,c(3:5)])),cex=2,col="#FFA07A")
a2 <- unlist(c(splin));b2 <- unlist(c(es[,c(3:5)]))
a2.o <- order(a2)
a22 <- a2[a2.o]; b22 <- b2[a2.o]
a22 <- a22[!is.na(a22)];b22 <- b22[!is.na(b22)]
#b22[1] <- -200;b22[length(b22)] <- 500
#>1
ii1 <- which(a22>0)
a221 <- a22[ii1];b221 <- b22[ii1]
lo21 <- lm(b221 ~ poly(a221,1))
lines(a221,predict(lo21,data.frame(x=a221)),col="red",lwd=3)
#mtext("Linear",3,line=0.5,cex=1.6)
axis(1,seq(0.7,1.7,0.2),rep("",6),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
#mtext(expression(paste("Strength of Mutualism"," (",M[u],")",sep="")),1,line=4.1,cex=1.6)
text(0.65+(1.72-0.65)*0.75,638*0.92,"r=0.211",cex=1.8,font=3)
text(0.83,540,"Competition",cex=1.6,font=2);text(1.25,540,"Cooperation",cex=1.6,font=2)
###2-3
# Panel A-3 (asymptotic phase): quadratic (degree-2) fit.
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.62,1.52),ylim=c(-40,638))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");
#segments(1,-10,1,10,lwd=1,col="#9BCD9B");segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.65,500,1.4,500,col="#9BCD9B",code=3,arr.type="triangle")
points(unlist(c(spa)),unlist(c(es[,-c(1:5)])),cex=2,col="#FFA07A")
#mtext("Asymptotic",3,line=0.5,cex=1.6)
axis(1,seq(0.7,1.5,0.2),rep("",5),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
axis(4,seq(0,600,100),seq(0,600,100),las=1,cex.axis=2,tck=-0.05,mgp=c(2.5,1.6,0))
text(0.62+(1.52-0.62)*0.75,638*0.92,"r=0.223",cex=1.8,font=3)
a2 <- unlist(c(spa));b2 <- unlist(c(es[,-c(1:5)]))
a2.o <- order(a2)
a22 <- a2[a2.o]; b22 <- b2[a2.o]
a222 <- a22[!is.na(a22)];b222 <- b22[!is.na(b22)]
#>1
ii1 <- which(a222>0)
a2221 <- a222[ii1];b2221 <- b222[ii1]
lo21 <- lm(b2221 ~ poly(a2221,2))
lines(a2221,predict(lo21,data.frame(x=a2221)),col="red",lwd=3)
text(0.82,540,"Competition",cex=1.6,font=2);text(1.23,540,"Cooperation",cex=1.6,font=2)
# --- Figure 2, panel B (JP species pair) ---------------------------------
# Mirrors panel A: rebuild mux/muy from the JP data and draw the bottom row
# (points in purple instead of salmon), then close the PDF device.
coE <- log(read.csv("JP_J_co.csv",header = F))[,-1]
coS <- log(read.csv("JP_P_co.csv",header=F))[,-1]
moE <- log(read.csv("J_mo.csv",header=F))[,-1]
moS <- log(read.csv("P_mo.csv",header=F))[,-1]
dat <- list(ep.p=coE,sp.p=coS,ep.pi=moE,sp.pi=moS)
mux <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    td <- (max(c(dat$sp.p[i,j],dat$ep.p[i,j]))-min(c(dat$sp.p[i,j],dat$ep.p[i,j])))
    if(td<0.1)
      td <- 0.1
    mux[i,j] <- (dat$sp.p[i,j]*dat$ep.p[i,j])/td
  }
}
index <- which(mux>600)
mux[index] <- NA
muy <- matrix(NA,nrow=100,ncol=14)
for(i in c(1:100)){
  for(j in 1:14){
    muy[i,j] <- (min(c(dat$sp.p[i,j],dat$ep.p[i,j]))/min(c(dat$sp.pi[i,j],dat$ep.pi[i,j]))+
                   max(c(dat$sp.p[i,j],dat$ep.p[i,j]))/max(c(dat$sp.pi[i,j],dat$ep.pi[i,j])))/2
  }
}
muy[index] <- NA
###2-1
# Phase split as in panel A: lag (cols 1-2), linear (3-5), asymptotic (rest).
sp <- muy
es <- mux
splag <- sp[,1:2]
splin <- sp[,3:5]
spa <- sp[,-c(1:5)]
# Panel B-1 (lag phase): degree-1 fit.
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.76,1.62),ylim=c(-40,638))
a1 <- unlist(c(splag));b1 <- unlist(c(es[,c(1:2)]))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");#segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.8,500,1.5,500,col="#9BCD9B",code=3,arr.type="triangle")
a1 <- a1[!is.na(a1)];b1 <- b1[!is.na(b1)]
points(a1,b1,cex=2,col="purple")
a1.o <- order(a1)
a11 <- a1[a1.o]; b11 <- b1[a1.o]
##>1
ii1 <- which(a11>0)
a111 <- a11[ii1];b111 <- b11[ii1]
lo11 <- lm(b111 ~ poly(a111,1))
lines(a111,predict(lo11,data.frame(x=a111)),col="red",lwd=3)
text(0.76+(1.62-0.76)*0.75,638*0.92,"r=0.161",cex=1.8,font=3)
#mtext("Lag",3,line=0.5,cex=1.6)
#mtext(expression(z["mu"]),2,line=5,cex=1.6,font=1)
axis(1,seq(0.8,1.6,0.2),c(0.8,"1.0",seq(1.2,1.6,0.2)),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
axis(2,seq(0,600,100),seq(0,600,100),las=1,cex.axis=2,tck=-0.05,mgp=c(2.5,1.6,0))
text(0.92,540,"Competition",cex=1.6,font=2);text(1.25,540,"Cooperation",cex=1.6,font=2)
mtext("B",3,line=-1.2,cex=1.6,font=1,adj=-0.25)
###2-2
# Panel B-2 (linear phase): degree-1 fit, with the shared x-axis title.
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.63,1.72),ylim=c(-40,638))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");
#segments(1,-10,1,10,lwd=1,col="#9BCD9B");segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.68,500,1.55,500,col="#9BCD9B",code=3,arr.type="triangle")
points(unlist(c(splin)),unlist(c(es[,c(3:5)])),cex=2,col="purple")
a2 <- unlist(c(splin));b2 <- unlist(c(es[,c(3:5)]))
a2.o <- order(a2)
a22 <- a2[a2.o]; b22 <- b2[a2.o]
a22 <- a22[!is.na(a22)];b22 <- b22[!is.na(b22)]
#b22[1] <- -200;b22[length(b22)] <- 500
##>1
ii1 <- which(a22>0)
b221 <- b22[ii1];a221 <- a22[ii1]
lo21 <- lm(b221 ~ poly(a221,1))
lines(a221,predict(lo21,data.frame(x=a221)),col="red",lwd=3)
#mtext("Linear",3,line=0.5,cex=1.6)
axis(1,seq(0.7,1.7,0.2),seq(0.7,1.7,0.2),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
mtext(expression(paste("Strength of Mutualism"," (",M[u],")",sep="")),1,line=4.3,cex=1.6)
text(0.63+(1.72-0.63)*0.75,638*0.92,"r=0.268",cex=1.8,font=3)
text(0.83,540,"Competition",cex=1.6,font=2);text(1.25,540,"Cooperation",cex=1.6,font=2)
###2-3
# Panel B-3 (asymptotic phase): quadratic fit; note the last fitted y-value
# is manually clamped to 500 before fitting (cosmetic — keeps the curve
# inside the plot region).
plot(NA,NA,type="n",lwd=2,col="#1C86EE55",xlab=" ",
     ylab=" ",cex.lab=1.5,mgp = c(2.7, 1, 0),xaxt="n", yaxt="n",xaxs="i", yaxs="i",
     xlim=c(0.62,1.52),ylim=c(-40,638))
segments(1,-100,1,1000,lwd=1,col="#9BCD9B");
#segments(1,-10,1,10,lwd=1,col="#9BCD9B");segments(-10,1,1,1,lwd=1,col="#9BCD9B")
Arrows(0.65,500,1.4,500,col="#9BCD9B",code=3,arr.type="triangle")
points(unlist(c(spa)),unlist(c(es[,-c(1:5)])),cex=2,col="purple")
#mtext("Asymptotic",3,line=0.5,cex=1.6)
axis(1,seq(0.7,1.5,0.2),seq(0.7,1.5,0.2),las=1,cex.axis=1.7,tck=-0.05,mgp=c(2.5,1.8,0))
axis(4,seq(0,600,100),seq(0,600,100),las=1,cex.axis=2,tck=-0.05,mgp=c(2.5,1.6,0))
text(0.62+(1.52-0.62)*0.75,638*0.92,"r=0.273",cex=1.8,font=3)
a2 <- unlist(c(spa));b2 <- unlist(c(es[,-c(1:5)]))
a2.o <- order(a2)
a22 <- a2[a2.o]; b22 <- b2[a2.o]
a222 <- a22[!is.na(a22)];b222 <- b22[!is.na(b22)]
b222[length(b222)] <- 500
##>1
ii1 <- which(a222>0)
a2221 <- a222[ii1];b2221 <- b222[ii1]
lo21 <- lm(b2221 ~ poly(a2221,2))
lines(a2221,predict(lo21,data.frame(x=a2221)),col="red",lwd=3)
text(0.82,540,"Competition",cex=1.6,font=2);text(1.23,540,"Cooperation",cex=1.6,font=2)
dev.off()
dee1a2585587fd93f17db7f599c844aa0517868f | ca8484b5b7ad6f6e2e3da3ad357304d55501c27e | /man/CrfGPCM.Rd | 285316b9148e8996ee3571922b645366129f872b | [] | no_license | qpcg/MEM | 64f621c1d0cdcf4d8d596445fdbdc97661bb14a4 | d13c2565e8f2d7bcc66de45e5c5902200b99e112 | refs/heads/master | 2020-05-29T12:03:59.964735 | 2017-02-22T20:28:30 | 2017-02-22T20:28:30 | 82,587,562 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 596 | rd | CrfGPCM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CrfGPCM.R
\name{CrfGPCM}
\alias{CrfGPCM}
\title{Cumulative Response Function GPCM}
\usage{
CrfGPCM(betas, z, IRT.param = TRUE, log = FALSE,
eps = .Machine$double.eps^(1/2))
}
\arguments{
\item{betas}{This is a list of betas from the BetasGPCM function}
\item{z}{Values of the latent trait at which the cumulative response function is evaluated. (TODO: confirm exact semantics against the implementation.)}
\item{IRT.param}{Should IRT parameterization be used? TRUE/FALSE}
}
\description{
This calculates the CRF of the GPCM.
}
\keyword{#@examples}
\keyword{#@export}
\keyword{CrfGPCM()}
\keyword{add}
\keyword{here}
\keyword{one}
|
76c9d39dbccfd09e374d660dbd3dc2bd0efac759 | 581624a754bbe1b2da360f025ac3296ce41c8c6f | /TimeSeries/Ts_Time_series.R | 3621128596aced8f4906f0c84fc89fbb76c941fa | [] | no_license | zkxshg/Test_of_R | d9c4b5bf299e88fc9f4bacbc7bb2cb11c93a81c8 | b9bffe77d178cf512a16b1a0c69128c034758914 | refs/heads/master | 2022-10-12T05:50:07.961086 | 2022-09-30T10:55:35 | 2022-09-30T10:55:35 | 184,598,229 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,009 | r | Ts_Time_series.R | #自定义时间序列数据
ts(1:8,start=c(2015,2),frequency=4)
#导入火鸡数据
library(nutshell)
data(turkey.price.ts)
turkey.price.ts
#时间序列数据简单描述
start(turkey.price.ts)
end(turkey.price.ts)
frequency(turkey.price.ts)
deltat(turkey.price.ts
#绘制时间序列图
library(nutshell)
data(turkey.price.ts)
plot(turkey.price.ts)
#绘制自相关函数图
acf(turkey.price.ts)
#绘制偏相关函数图
pacf(turkey.price.ts)
#输出自相关系数
library(nutshell)
data(turkey.price.ts)
acf(turkey.price.ts,plot=FALSE)
#输出偏相关系数
pacf(turkey.price.ts,plot=FALSE)
#输出互相关系数
library(nutshell)
data(ham.price.ts)
ccf(turkey.price.ts, ham.price.ts, plot=FALSE)
#训练ar时间序列模型
library(nutshell)
data(turkey.price.ts)
turkey.price.ts.ar <- ar(turkey.price.ts)
turkey.price.ts.ar
#预测未来一年价格
predict(turkey.price.ts.ar,n.ahead=12)
#绘制预测结果
ts.plot(turkey.price.ts,predict(turkey.price.ts.ar,n.ahead=24)$pred,lty=c(1:2))
|
c8bbb084b3d08b49107e242225bfbd624156d8dd | 91eb255fe719c4be1da12a8b30f4d0e35cee01dd | /man/BCCluster.Rd | 104a5988a6fe5f0c42f845afc7f6379913d648c3 | [] | no_license | eberlejonas/TaxCI | e3220abdcb7f71441cf9862fa4a9f212c4a36c7b | d5cb9efef5c850c402da9a8a3552a7c5e11ec765 | refs/heads/master | 2021-08-17T18:08:07.276642 | 2021-07-20T07:16:42 | 2021-07-20T07:16:42 | 87,522,366 | 3 | 1 | null | 2021-07-20T07:03:21 | 2017-04-07T08:18:29 | R | UTF-8 | R | false | false | 3,208 | rd | BCCluster.Rd | \name{BCCluster}
\alias{BCCluster}
\alias{print.BCCluster}
\title{
Barcode clustering
}
\description{
Barcode clustering using \code{\link{localMinima}}\{spider\}. The first local minimum is used for clustering.
}
\usage{
BCCluster(data, x, method="spider", sp.col="sp", distmodel="K80", pairwise.deletion=FALSE, fix.threshold=NA)
## S3 method for class 'BCCluster'\cr
print.BCCluster(x, ...)
}
\arguments{
\item{data}{
If method is "spider" (the default): a matrix or a list containing the DNA sequences; this must be of class "DNAbin" (use as.DNAbin if they are stored as character) (Copied from \code{\link{dist.dna}}).
If method is "external": a data.frame with two columns, the first containing specimen labels as in the taxonomic information and the second with integers describing cluster affiliation.
}
\item{x}{
A table with taxonomic information (must contain a column with species names which is specified with sp.col). rownames(x) must exactly match rownames(alignment).\cr
For S3 method BCCluster an object of class \code{BCCluster}.
}
\item{method}{
Either "spider" to use the package spider to infer clusters or "external" if the user supplies a clustering table (see 'data').
}
\item{sp.col}{
The name or number of the column of x that contains species names
}
\item{distmodel}{
Copied from \code{\link{dist.dna}}: a character string specifying the evolutionary model to be used; must be one of "raw", "N", "TS", "TV", "JC69", "K80" (the default), "F81", "K81", "F84", "BH87", "T92", "TN93", "GG95", "logdet", "paralin", "indel", or "indelblock".
}
\item{pairwise.deletion}{passed to \code{\link{dist.dna}}: a logical indicating whether to delete the sites with missing data in a pairwise way. The default is to delete the sites with at least one missing data for all sequences (ignored if model = "indel" or "indelblock").}
\item{fix.threshold}{
If NA, the threshold is estimated by \code{\link{tclust}}. Otherwise, the distance cutoff for clustering. E.g. 0.01 (1\%).
}
\item{...}{Unused.}
}
\details{
Finds distance based barcode-clusters using spider-functions. Some summarizing and preparing is done for Taxonomic Consistency analyses.
}
\value{
A list with the following elements:
\item{clusters}{A list of clusters, each containing the respective specimen names.}
\item{sp.in.clusters}{A list of clusters, each containing the names of species that are in the respective cluster.}
\item{homogen}{A vector containing the indices of homogeneous clusters. (Only one species contained.)}
\item{heterogen}{A vector containing the indices of heterogeneous clusters. (More than one species contained.)}
\item{no.clusters}{The number of clusters inferred.}
\item{no.homogeneous}{The number of homogeneous clusters.}
\item{no.heterogeneous}{The number of homogeneous clusters.}
\item{sp.col}{User-set column of x containing species information.}
\item{threshold}{The local minimum threshold that was used or the fixed threshold.}
}
\author{
Jonas Eberle
}
\seealso{
See \code{\link{TaxCI}} for example.
Related: \code{\link{localMinima}}, \code{\link{tclust}}, \code{\link{read.dna}}, \code{\link{dist.dna}}
}
|
0297280791e434d913407b8ecbed8f2470e6891b | 0a27d372335a12ed99536c88a8f6e2e15d5e4786 | /Course Project 1.R | 10ac6b9592e9b97287d6b81aaf6e2f0e45f1091e | [] | no_license | SanduniLakshikaKK/Course-project-1-Activity-monitoring-devices | 970b1ba7b09241228686b2723092ccfd43a92974 | 5e9bcac3b242e8c041ef7f82a72a287be08081af | refs/heads/main | 2022-12-27T06:23:55.468854 | 2020-10-07T13:28:17 | 2020-10-07T13:28:17 | 302,035,981 | 0 | 0 | null | 2020-10-07T13:28:19 | 2020-10-07T12:54:48 | R | UTF-8 | R | false | false | 2,972 | r | Course Project 1.R | echo = TRUE
setwd("F:/repdata_data_activity")
activity <- NULL
activity <- read.csv("activity.csv", header = T, sep = ",")
echo = TRUE
df_summary <- NULL
su2 <- NULL
su <- NULL
mn_int <- NULL
activity2 <- NULL
mean_su2 <- NULL
median_su2 <- NULL
activity2_weekend <- NULL
activity2_weekday <- NULL
mean_activity2_weekday <- NULL
mean_activity2_weekend <- NULL
echo = TRUE
su <- tapply(activity$steps, activity$date, sum, na.rm=T)
echo = TRUE
hist(su, xlab = "sum of steps per day", main = "histogram of steps per day")
echo = TRUE
mean_su <- round(mean(su))
median_su <- round(median(su))
print(c("The mean is",mean_su))
print(c("The median is",median_su))
echo = TRUE
mn_int <- tapply(activity$steps, activity$interval, mean, na.rm=T)
plot(mn_int ~ unique(activity$interval), type="l", xlab = "5-min interval")
echo = TRUE
mn_int[which.max(mn_int)]
echo = TRUE
table(is.na(activity) == TRUE)
summary(activity)
echo = TRUE
activity2 <- activity # creation of the dataset that will have no more NAs
for (i in 1:nrow(activity)){
if(is.na(activity$steps[i])){
activity2$steps[i]<- mn_int[[as.character(activity[i, "interval"])]]
}
}
echo = TRUE
su2 <- tapply(activity2$steps, activity2$date, sum, na.rm=T)
hist(su2, xlab = "sum of steps per day", main = "histogram of steps per day")
mean_su2 <- round(mean(su2))
median_su2 <- round(median(su2))
echo = TRUE
print(c("The mean is",mean_su2))
print(c("The median is",median_su2))
echo = TRUE
df_summary <- rbind(df_summary, data.frame(mean = c(mean_su, mean_su2), median = c(median_su, median_su2)))
rownames(df_summary) <- c("with NA's", "without NA's")
print(df_summary)
echo = TRUE
summary(activity2)
echo = TRUE
activity2$weekday <- c("weekday")
activity2[weekdays(as.Date(activity2[, 2])) %in% c("Saturday", "Sunday", "samedi", "dimanche", "saturday", "sunday", "Samedi", "Dimanche"), ][4] <- c("weekend")
table(activity2$weekday == "weekend")
activity2$weekday <- factor(activity2$weekday)
echo = TRUE
activity2_weekend <- subset(activity2, activity2$weekday == "weekend")
activity2_weekday <- subset(activity2, activity2$weekday == "weekday")
mean_activity2_weekday <- tapply(activity2_weekday$steps, activity2_weekday$interval, mean)
mean_activity2_weekend <- tapply(activity2_weekend$steps, activity2_weekend$interval, mean)
echo = TRUE
library(lattice)
df_weekday <- NULL
df_weekend <- NULL
df_final <- NULL
df_weekday <- data.frame(interval = unique(activity2_weekday$interval), avg = as.numeric(mean_activity2_weekday), day = rep("weekday", length(mean_activity2_weekday)))
df_weekend <- data.frame(interval = unique(activity2_weekend$interval), avg = as.numeric(mean_activity2_weekend), day = rep("weekend", length(mean_activity2_weekend)))
df_final <- rbind(df_weekday, df_weekend)
xyplot(avg ~ interval | day, data = df_final, layout = c(1, 2),
type = "l", ylab = "Number of steps")
|
b435b887baa550b088aa65169b58113b70037da2 | 377c8851390a7e85f2cca30246e0f9f6df7d13cc | /cycle3/Boomtown/Analysis/h14.R | 184565809323369e7881d2b8dba93533cdf2fce4 | [] | no_license | GallupGovt/ngs2 | bb9eca850dc0d76b39d7aa16aeb1ef59d6d640fb | ed9443400bcd2a46907dae6701a7bd4580499772 | refs/heads/master | 2023-05-26T16:45:47.820293 | 2020-12-17T09:48:56 | 2020-12-17T09:48:56 | 147,573,574 | 4 | 4 | null | 2023-05-22T21:36:06 | 2018-09-05T20:07:10 | R | UTF-8 | R | false | false | 1,419 | r | h14.R | ## Created by Pablo Diego Rosell, PhD, for Gallup inc. in September 2019
# Formula
formula.h14.1<-as.formula("conformity~competition+pressure+grmot1+framing+complexity+timeUncertainty+tolerance+support+centralization+leaderWeight+density+(1|group)")
# Extract number of prior parameters ('ndim') to be declared
fittedGlmer <- stan_glmer(formula.h14.1, data=factorialGroup, family=gaussian(link = "identity"), iter=3, chains=1)
ndim<-length(fittedGlmer$covmat[1,])-4
# Declare priors
# Baseline priors based on DESIM
priorSD <- 0.075
# h14.0 priors (null): Intergroup Competition does not increase Conformity
h14.0 <- normal(location = c(0.0, 0.0, 0.0, rep(0, ndim)),
scale = c(priorSD, priorSD, priorSD, rep(2.5,ndim)), autoscale=FALSE)
# h14.1 priors: Intergroup Competition increases Conformity
h14.1 <- normal(location = c(-0.15, 0.0, 0.15, rep(0, ndim)),
scale = c(priorSD, priorSD, priorSD, rep(2.5,ndim)), autoscale=FALSE)
# Run models
bridge_14.0 <- bayesLmer(formula.h14.1, h14.0, factorialGroup)
bridge_14.1 <- bayesLmer(formula.h14.1, h14.1, factorialGroup)
# Calculate BFs for all comparisons
test_1_0<-bf(bridge_14.1, bridge_14.0)$bf
# Save BFs
BFs <- data.frame(14, test_1_0)
colnames(BFs) <- c("Hypothesis",
"Prediction 1 vs. Null")
write.csv(BFs, paste(od, "BFs14.csv", sep = '/'))
|
0c616bb592938708896fc0f976b7785f054080a4 | ab894c5c21c33303e42c620a3c1a76e260531e80 | /mymle.R | 7b078ba8185719bd528a0b8cac1df620bb79a381 | [] | no_license | Yuchen21/MLEMVD | 4b294ccf8b2bd8c04816438b8f3073f7744b0c6b | c0ec3674524d52cec861f3d6447fffbfbcea3b92 | refs/heads/master | 2021-01-09T06:55:44.344931 | 2017-02-06T19:26:54 | 2017-02-06T19:26:54 | 81,020,897 | 0 | 0 | null | 2017-02-05T20:44:56 | 2017-02-05T20:44:56 | null | UTF-8 | R | false | false | 1,212 | r | mymle.R | library('nloptr')
library('pracma')
mymle<-function(logdensity,x_prime,del,param0,args){
objfun <- function(param){
# Set the objective function
objfun <- -logdensity2loglik(logdensity,x_prime,del,param,args)
}
# Optimize
# nloptr.print.options()
res<- nloptr( x0=param0,
eval_f=objfun,
eval_g_ineq=eval_g_ineq,
lb = args$l,
ub = args$u,
opts = list("algorithm"="NLOPT_LN_COBYLA", "maxeval" = args$maxiter, "xtol_rel" = args$eps, "print_level"=args$print_level))
# Compute the var-cov matrix
# The derivatives are computed numerically. The Hessian may not be
# positive definite. We report the inverse[Infomation], as well as the
# robust sandwich matrix.
H <- hessian(objfun,res$solution)
InfoMatrix <- logdensity2info(logdensity,x_prime,del,res$solution,args)
Variance <- solve(InfoMatrix)
invH <- solve(H)
Variance_Robust <- invH %*% InfoMatrix %*% t(invH)
print(InfoMatrix)
res$variance <- Variance
res$variance_robust <- Variance_Robust
res$se <-sqrt(diag(Variance))
res$se_robust <- sqrt(diag(Variance_Robust))
return(res)
}
|
cb3ce901ae8bc3b0a42d2573f37766c4e6fdc2c1 | 4fddb6c9f9a8682124f1df6cd048e6de3ffcf69c | /R/ik_gg_theme.nothing.R | f7bfea496ee3cc3da627d09952ff0925eb358ccc | [] | no_license | ikashnitsky/ik | 513175ed6d5e2e397fc68e48ab33b26b6d224c32 | 043ae0eaf57e8c8fad326ea7ac84ca290ff080d3 | refs/heads/master | 2021-01-20T11:41:27.523728 | 2018-09-25T08:26:46 | 2018-09-25T08:26:46 | 81,328,087 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 770 | r | ik_gg_theme.nothing.R | # david kahle
# sept 27, 2010
ik_gg_theme.nothing <- function (base_size = 12){
structure(list(
axis.line = element_blank(),
axis.text.x = element_blank(), axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(), axis.title.y = element_blank(),
axis.ticks.length = unit(0, "lines"), axis.ticks.margin = unit(0, "lines"),
legend.position = "none",
panel.background = element_blank(), panel.border = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.margin = unit(0, "lines"),
plot.background = element_blank(),
plot.title = element_text(size = base_size * 1.2),
plot.margin = unit(c(-1, -1, -1.5, -1.5), "lines")
), class = "options")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.