blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ec6f70ce48857ab9ff0e15a3eb1e43bd3268507 | bdab686e5894eec9db3f5bfd323735ab9f1b790a | /scripts/simulations.R | 5ae0db5ce8ce5f24794fb31759ac49cfe0f3ab4c | [] | no_license | larsgr/eve_analysis | 94f76377a510485fad9da9ffcbb361779b374caa | cf7868f89d3969f38d3b2828764a99a74f2642cf | refs/heads/master | 2020-05-16T13:33:58.415241 | 2019-04-23T19:13:37 | 2019-04-23T19:13:37 | 183,078,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 117 | r | simulations.R | # This file contains functions to simulate data.
# Author: Rori Rohlfs, Lars Gronvold, John Mendoza
# Date: 3/4/2019 |
f8721c7a13a338ab1f146250e5896d47c7b0c7b2 | 9e5dcacdd3e9c77c0733b4569a46079fb6d616e1 | /R/selfing.R | d9ffa6c0c865e4e0b25c15ed196cdb8630f78803 | [] | no_license | HZaw/mpMap2 | 8c6bdc8b80f25c26a75993eaf0f690cffffcf309 | 25d5cf9c1adfb9cd235ff3ee0d5f16cf42bcf730 | refs/heads/master | 2021-01-23T16:50:13.619217 | 2017-09-05T00:28:16 | 2017-09-05T00:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 769 | r | selfing.R | #' @include pedigree-class.R
#' @include detailedPedigree-class.R
#' @export
setGeneric("selfing<-", function(object, value) standardGeneric("selfing<-"))
#' @export
setGeneric("selfing", function(object) standardGeneric("selfing"))
setMethod(f = "selfing", signature = "pedigree", definition = function(object)
{
object@selfing
})
setReplaceMethod("selfing", "detailedPedigree", function(object, value)
{
if(value != "infinite" && value != "finite")
{
stop("Selfing must be either \"finite\" or \"infinite\"")
}
object@selfing <- value
object
})
setReplaceMethod("selfing", "pedigree", function(object, value)
{
if(value != "infinite" && value != "finite")
{
stop("Selfing must be either \"finite\" or \"infinite\"")
}
object@selfing <- value
object
})
|
c257b7a64ac65f69cce12e86df40b086822781fe | 8b21432aa164f8e69a1b0adad56383e99bffb13f | /analysis/R/plot_results_by_harm.R | 4a6417e9efbe57422e0f946516df6dc7437c89b0 | [] | no_license | gilmore-lab/infant-moco-eeg | 872f7d253434db69c54003b3af323a780f5042ba | 32d7ead6b139130b64c5b63f0f5ef882a2978f3d | refs/heads/master | 2021-09-19T21:22:38.265262 | 2018-07-31T18:36:01 | 2018-07-31T18:36:01 | 105,902,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 555 | r | plot_results_by_harm.R | plot_results_by_harm <- function(df_moco, this_harm){
chan_eff <- compute_chan_effects(df_moco, this_harm)
df_chan_stats <- make_stats_df(chan_eff)
plot_channel_effects(df_chan_stats, this_harm, group=this_group)
print_stats_table(df_chan_stats, "Pattern")
print_stats_table(df_chan_stats, "Speed")
print_stats_table(df_chan_stats, "Patt*Spd")
plot_channel_magnitudes(df_chan_stats, df_moco, "Pattern")
plot_channel_magnitudes(df_chan_stats, df_moco, "Speed")
plot_channel_magnitudes(df_chan_stats, df_moco, "Patt*Spd")
"OK"
} |
04a3d66cedb178cd2c8f43f1391d1920b4d2679a | bb9db7f3138c894ed64fc3d11d883e9754001e8f | /R/p31_drawQQ.R | b95b791b0ac05ea6d2741a88f823ae9f4c2e6c05 | [] | no_license | Global19/PDB-Outlier-Analysis | ea925200afd39a4d878e088323b7421c2a748923 | 5c1518b87abd981502ddf26e9a62158474ec8f8c | refs/heads/master | 2021-09-23T23:22:52.361482 | 2018-09-29T01:04:00 | 2018-09-29T01:04:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 997 | r | p31_drawQQ.R | ## Draw normal QQ plot of each data set
drawQQ <- function(filename_item_list, data_folder="", image_folder=""){
df_items <- read.table(filename_item_list)
v_items <- as.character(df_items[,1])
for (item in v_items){
filename_data <- paste0(item, "_outlier")
filepath_data <- paste0(data_folder, filename_data)
print(filepath_data)
data <- read.table(filepath_data, header=TRUE, sep="\t")
## Draw QQ plot
filename_image = paste0(item, "_QQ.png")
filepath_image = paste0(image_folder, filename_image)
png(filepath_image, width = 800, height = 800)
par(mfrow=c(1,1),mar=c(5.1, 5.1, 4.1, 2.1))
qqnorm(data$data, cex.axis=2.5, cex.lab=2.5, cex.main=2.5, main="")
dev.off()
}
}
filename_item_list = "items.list" # File recording each variable name in a row, which is also the data file name in data folder
data_folder <- "../Data/Outlier_hopt/"
image_folder <- "../Images/"
drawQQ(filename_item_list, data_folder, image_folder)
|
16d056b2b9bc83f9a4db924e470342c17b659043 | 4e751ea99ec33a74e76a691f81fd9b51090bb22c | /man/naEvolution.Rd | df119c6f0439ad67ca9bc07c1856b7991346c6ff | [] | no_license | slkarkar/RGCCA | cd621a66b045bcc2a7f89ac065d0eb4effbd1bc6 | 0c4894e6805097459e1d8c1b083984c9845f65bc | refs/heads/master | 2023-02-03T09:39:24.266270 | 2020-04-17T12:12:18 | 2020-04-17T12:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,152 | rd | naEvolution.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/naEvolution.r
\name{naEvolution}
\alias{naEvolution}
\title{Evolution of quality of rgcca with increasing missing values}
\usage{
naEvolution(blocks, prctNA = c(0.1, 0.2, 0.3), listMethods = c("mean"),
typeNA = "block", ncomp = rep(1, length(blocks)),
sameBlockWeight = TRUE, scale = TRUE, nDatasets = 20,
tol = 1e-06, verbose = FALSE, scheme = "centroid", seed = NULL,
connection = matrix(1, length(blocks), length(blocks)) -
diag(length(blocks)), tau = rep(1, length(blocks)))
}
\arguments{
\item{blocks}{A list that contains the \eqn{J} blocks of variables \eqn{\mathbf{X_1}, \mathbf{X_2}, ..., \mathbf{X_J}}.}
\item{prctNA}{if number, percentage of missing data required for each block. If a vector with the same size as blocks, the percentage can be adapted for each block. If a list of values, this percent is calculated per variable}
\item{listMethods}{vector containing a list of methods ("mean","complete","nipals"...)
\itemize{
\item{\code{"mean"}}{ corresponds to an imputation by the colmeans}
\item{\code{"complete"}}{ corresponds to run RGCCA only on the complete subjects (subjects with missing data are removed)}
\item{\code{"nipals"}}{ corresponds to run RGCCA on all available data (NIPALS algorithm)}
\item{\code{"em"}}{ corresponds to impute the data with EM-type algorithms}
\item{\code{"sem"}}{ corresponds to impute the data with EM-type algorithms with superblock approach}
\item{\code{"knn1"}}{ corresponds to impute the data with the 1-Nearest Neighbor. 1 can be replace by another number (such as knn3) to impute with the 3-Nearest Neighbors.}}}
\item{typeNA}{structure of missing data required ("ponc" or "block" or "byVar")}
\item{ncomp}{A \eqn{1 \times J} vector that contains the numbers of components for each block (default: rep(1, length(A)), which gives one component per block.). It can be estimated by using \link{rgcca_permutation}.}
\item{sameBlockWeight}{TRUE by default : each block have the same weight in the RGCCA analysis. If FALSE, the weight of each block depends on the number of variables of the block}
\item{scale}{If scale = TRUE, each block is standardized to zero means and unit variances (default: TRUE).}
\item{nDatasets}{Number of simulated datasets}
\item{tol}{The stopping value for convergence.}
\item{verbose}{If verbose = TRUE, the progress will be report while computing (default: TRUE).}
\item{scheme}{The value is "horst", "factorial", "centroid" or the g function (default: "centroid").}
\item{seed}{NULL by default (no reproducibility). A number representing the seed (for reproducibility)}
\item{connection}{A design matrix that describes the relationships between blocks (default: complete design).}
\item{tau}{Used for type="rgcca" only. tau is either a \eqn{1 \times J} vector or a \eqn{\mathrm{max}(ncomp) \times J} matrix, and contains the values
of the regularization parameters (default: tau = 1, for each block and each dimension).
If tau = "optimal" the regularization paramaters are estimated for each block and each dimension using the Schafer and Strimmer (2005)
analytical formula . If tau is a \eqn{1\times J} numeric vector, tau[j] is identical across the dimensions of block \eqn{\mathbf{X}_j}.
If tau is a matrix, tau[k, j] is associated with \eqn{\mathbf{X}_{jk}} (\eqn{k}th residual matrix for block \eqn{j})}
}
\value{
resultComparison A list of length pNA. Each element of the list is a result of whichNAmethod (see \link{whichNAmethod})
}
\description{
Analysis of the comparison of different NA methods on RGCCA for increasing percent of missing data in each block
}
\examples{
data(Russett)
library(parallel)
X_agric =as.matrix(Russett[,c("gini","farm","rent")])
X_ind = as.matrix(Russett[,c("gnpr","labo")])
X_polit = as.matrix(Russett[ , c("demostab", "dictator")])
A = list(agri=X_agric, ind=X_ind, polit=X_polit)
#ponctual
listResults=naEvolution(blocks=A,listMethods=c("complete","nipals","mean"),
prctNA=c(0.05,0.1,0.15,0.2,0.25,0.3,0.4),typeNA="ponc",ncomp=rep(1,3),
sameBlockWeight=FALSE)
plot(listResults,output="a",bars = "stderr",ylim=c(0,0.2))
}
|
670fd688c7a955f68d8c2155e16e750309eb27ee | 811dc1779291d2ce3515b0c8e79b71c5e19f82b4 | /R/zzzz_oldfuns.R | 1b63e5f7065e0c7d3688448da81bab199dab09a9 | [] | no_license | fcampelo/CAISEr | aa61f200df4325e9816209ef862915d6018ff894 | 5d4210b6b9607898ee90bd5a48fbe6d21ce558de | refs/heads/master | 2022-12-01T05:20:40.262508 | 2022-11-25T09:56:57 | 2022-11-25T09:56:57 | 106,617,932 | 2 | 0 | null | 2019-07-03T10:44:25 | 2017-10-11T22:56:46 | R | UTF-8 | R | false | false | 31,717 | r | zzzz_oldfuns.R | #' Determine sample sizes for a set of algorithms on a single problem instance
#'
#' Iteratively calculates the required sample sizes for K algorithms
#' on a given problem instance, so that the standard errors of the estimates of
#' the pairwise differences in performance is controlled at a predefined level.
#'
#' @section Instance:
#' Parameter `instance` must be a named list containing all relevant parameters
#' that define the problem instance. This list must contain at least the field
#' `instance$FUN`, with the name of the function implementing the problem
#' instance, that is, a routine that calculates y = f(x). If the instance
#' requires additional parameters, these must also be provided as named fields.
#'
#' @section Algorithms:
#' Object `algorithms` is a list in which each component is a named
#' list containing all relevant parameters that define an algorithm to be
#' applied for solving the problem instance. In what follows `algorithm[[k]]`
#' refers to any algorithm specified in the `algorithms` list.
#'
#' `algorithm[[k]]` must contain an `algorithm[[k]]$FUN` field, which is a
#' character object with the name of the function that calls the algorithm; as
#' well as any other elements/parameters that `algorithm[[k]]$FUN` requires
#' (e.g., stop criteria, operator names and parameters, etc.).
#'
#' The function defined by the routine `algorithm[[k]]$FUN` must have the
#' following structure: supposing that the list in `algorithm[[k]]` has
#' fields `algorithm[[k]]$FUN = "myalgo"`, `algorithm[[k]]$par1 = "a"` and
#' `algorithm$par2 = 5`, then:
#'
#' \preformatted{
#' myalgo <- function(par1, par2, instance, ...){
#' # do stuff
#' # ...
#' return(results)
#' }
#' }
#'
#' That is, it must be able to run if called as:
#'
#' \preformatted{
#' # remove '$FUN' and '$alias' field from list of arguments
#' # and include the problem definition as field 'instance'
#' myargs <- algorithm[names(algorithm) != "FUN"]
#' myargs <- myargs[names(myargs) != "alias"]
#' myargs$instance <- instance
#'
#' # call function
#' do.call(algorithm$FUN,
#' args = myargs)
#' }
#'
#' The `algorithm$FUN` routine must return a list containing (at
#' least) the performance value of the final solution obtained, in a field named
#' `value` (e.g., `result$value`) after a given run.
#'
#' @section Initial Number of Observations:
#' In the **general case** the initial number of observations per algorithm
#' (`nstart`) should be relatively high. For the parametric case
#' we recommend between 10 and 20 if outliers are not expected, or between 30
#' and 50 if that assumption cannot be made. For the bootstrap approach we
#' recommend using at least 20. However, if some distributional assumptions can
#' be made - particularly low skewness of the population of algorithm results on
#' the test instances), then `nstart` can in principle be as small as 5 (if the
#' output of the algorithms were known to be normal, it could be 1).
#'
#' In general, higher sample sizes are the price to pay for abandoning
#' distributional assumptions. Use lower values of `nstart` with caution.
#'
#' @section Pairwise Differences:
#' Parameter `dif` informs the type of difference in performance to be used
#' for the estimation (\eqn{\mu_a} and \eqn{\mu_b} represent the mean
#' performance of any two algorithms on the test instance, and \eqn{mu}
#' represents the grand mean of all algorithms given in `algorithms`):
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.first"`, the estimated quantity is
#' \eqn{\phi_{1b} = (\mu_1 - \mu_b) / \mu_1 = 1 - (\mu_b / \mu_1)}.
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.all"`, the estimated quantity is
#' \eqn{\phi_{ab} = (\mu_a - \mu_b) / \mu}.
#'
#' - If `dif == "simple"` it estimates \eqn{\mu_a - \mu_b}.
#'
#' @param instance a list object containing the definitions of the problem
#' instance.
#' See Section `Instance` for details.
#' @param algorithms a list object containing the definitions of all algorithms.
#' See Section `Algorithms` for details.
#' @param se.max desired upper limit for the standard error of the estimated
#' difference between pairs of algorithms. See Section
#' `Pairwise Differences` for details.
#' @param dif type of difference to be used. Accepts "perc" (for percent
#' differences) or "simple" (for simple differences)
#' @param comparisons type of comparisons being performed. Accepts "all.vs.first"
#' (in which cases the first object in `algorithms` is considered to be
#' the reference algorithm) or "all.vs.all" (if there is no reference
#' and all pairwise comparisons are desired).
#' @param method method to use for estimating the standard errors. Accepts
#' "param" (for parametric) or "boot" (for bootstrap)
#' @param nstart initial number of algorithm runs for each algorithm.
#' See Section `Initial Number of Observations` for details.
#' @param nmax maximum **total** allowed sample size.
#' @param seed seed for the random number generator
#' @param boot.R number of bootstrap resamples to use (if `method == "boot"`)
#' @param ncpus number of cores to use
#' @param force.balanced logical flag to force the use of balanced sampling for
#' the algorithms on each instance
#' @param save.to.file logical flag: should the results be saved to a file?
#' @param load.from.file logical flag: should the results be loaded from a file?
#' @param folder directory to save/load files
#'
#'
#' @return a list object containing the following items:
#' \itemize{
#' \item \code{instance} - alias for the problem instance considered
#' \item \code{Xk} - list of observed performance values for all `algorithms`
#' \item \code{Nk} - vector of sample sizes generated for each algorithm
#' \item \code{Diffk} - data frame with point estimates, standard errors and
#' other information for all algorithm pairs of interest
#' \item \code{seed} - seed used for the PRNG
#' \item \code{dif} - type of difference used
#' \item \code{method} - method used ("param" / "boot")
#' \item \code{comparisons} - type of pairings ("all.vs.all" / "all.vs.first")
#' }
#'
#' @author Felipe Campelo (\email{fcampelo@@gmail.com})
#'
#' @references
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#' - P. Mathews.
#' Sample size calculations: Practical methods for engineers and scientists.
#' Mathews Malnar and Bailey, 2010.
#' - A.C. Davison, D.V. Hinkley:
#' Bootstrap methods and their application. Cambridge University Press (1997)
#' - E.C. Fieller:
#' Some problems in interval estimation. Journal of the Royal Statistical
#' Society. Series B (Methodological) 16(2), 175–185 (1954)
#' - V. Franz:
#' Ratios: A short guide to confidence limits and proper use (2007).
#' https://arxiv.org/pdf/0710.2024v1.pdf
#' - D.C. Montgomery, C.G. Runger:
#' Applied Statistics and Probability for Engineers, 6th ed. Wiley (2013)
#'
#' @export
#'
#' @examples
#' # Example using dummy algorithms and instances. See ?dummyalgo for details.
#' # We generate 4 dummy algorithms, with true means 15, 10, 30, 15; and true
#' # standard deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#' list(FUN = "dummyalgo",
#' alias = paste0("algo", i),
#' distribution.fun = "rnorm",
#' distribution.pars = list(mean = m, sd = s))},
#' i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#' m = c(15, 10, 30, 15),
#' s = c(2, 4, 6, 8),
#' SIMPLIFY = FALSE)
#'
#' # Make a dummy instance with a centered (zero-mean) exponential distribution:
#' instance = list(FUN = "dummyinstance", distr = "rexp", rate = 5, bias = -1/5)
#'
#' se.max = 0.05
#' dif = "perc"
#' comparisons = "all.vs.all"
#' method = "param"
#' seed = 1234
#' nstart = 20
#' nmax = 1000
#' ncpus = 1
#'
#' myreps <- calc_nreps(instance = instance, algorithms = algorithms,
#' se.max = se.max, dif = dif,
#' comparisons = comparisons, method = method,
#' nstart = nstart, nmax = nmax, seed = seed)
#' myreps$Diffk
# TESTED: OK
# calc_nreps_old <- function(instance, # instance parameters
# algorithms, # algorithm parameters
# se.max, # desired (max) standard error
# dif = "simple", # type of difference
# comparisons = "all.vs.all", # differences to consider
# method = "param", # method ("param", "boot")
# nstart = 20, # initial number of samples
# nmax = 200, # maximum allowed sample size
# seed = NULL, # seed for PRNG
# boot.R = 499, # number of bootstrap resamples
# ncpus = 1, # number of cores to use
# force.balanced = FALSE, # force balanced sampling?
# save.to.file = FALSE, # save results to tmp file?
# load.from.file = FALSE, # load results from file?
# folder = "./nreps_files") # directory to save tmp file
# {
#
# # ========== Error catching ========== #
# assertthat::assert_that(
# is.list(instance),
# assertthat::has_name(instance, "FUN"),
# is.list(algorithms),
# all(sapply(X = algorithms, FUN = is.list)),
# all(sapply(X = algorithms,
# FUN = function(x){assertthat::has_name(x, "FUN")})),
# is.numeric(se.max) && length(se.max) == 1,
# dif %in% c("simple", "perc"),
# comparisons %in% c("all.vs.all", "all.vs.first"),
# method %in% c("param", "boot"),
# assertthat::is.count(nstart),
# is.infinite(nmax) || assertthat::is.count(nmax),
# nmax >= length(algorithms) * nstart,
# is.null(seed) || seed == seed %/% 1,
# assertthat::is.count(boot.R), boot.R > 1,
# is.logical(force.balanced), length(force.balanced) == 1,
# is.logical(save.to.file), length(save.to.file) == 1,
# is.logical(load.from.file), length(load.from.file) == 1)
# # ==================================== #
#
# # set PRNG seed
# if (is.null(seed)) {
# if (!exists(".Random.seed")) stats::runif(1)
# seed <- .Random.seed #i.e., do not change anything
# } else{
# set.seed(seed)
# }
#
# # Get/set instance alias
# if (!("alias" %in% names(instance))) {
# instance$alias <- instance$FUN
# }
#
# if (load.from.file){
# # Get the filename
# filename <- paste0(folder, "/",
# instance$alias,
# ".rds")
#
# if (file.exists(filename)){
# output <- readRDS(filename)
# cat("\nSampling of instance", instance$alias, "loaded from file.")
# return(output)
# } else
# cat("\n**NOTE: Instance file", filename, "not found.**")
# }
#
# # Echo some information for the user
# cat("\nSampling algorithms on instance", instance$alias, ": ")
#
# # generate initial samples
# Nk <- rep(nstart, length(algorithms))
# Xk <- parallel::mcmapply(FUN = get_observations,
# algo = algorithms,
# n = Nk,
# MoreArgs = list(instance = instance),
# mc.cores = ncpus,
# SIMPLIFY = FALSE)
#
# # Calculate point estimates, SEs, and sample size ratios (current x optimal)
# Diffk <- calc_se(Xk = Xk,
# dif = dif,
# comparisons = comparisons,
# method = method,
# boot.R = boot.R)
#
# while(any(Diffk$SE > se.max) & (sum(Nk) < nmax)){
# # Echo something for the user
# if (!(sum(Nk) %% nstart)) cat(".")
#
# if (force.balanced) {
# # Generate a single new observation for each algorithm
# newX <- parallel::mcmapply(FUN = get_observations,
# algo = algorithms,
# n = 1,
# MoreArgs = list(instance = instance),
# mc.cores = ncpus,
# SIMPLIFY = FALSE)
#
# # Append new observation to each algo list and update sample size counters
# Xk <- mapply(FUN = c, Xk, newX,
# SIMPLIFY = FALSE)
# Nk <- Nk + 1
# } else {
# # Get pair that has the worst SE
# worst.se <- Diffk[which.max(Diffk$SE), ]
#
# # Determine algorithm that should receive a new observation
# if (worst.se$r <= worst.se$ropt){
# ind <- worst.se[1, 1]
# } else {
# ind <- worst.se[1, 2]
# }
# # Generate new observation and update Nk counter
# Xk[[ind]] <- c(Xk[[ind]],
# get_observations(algo = algorithms[[ind]],
# instance = instance,
# n = 1))
# Nk[ind] <- Nk[ind] + 1
#
# # Recalculate point estimates, SEs, and sample size ratios
# Diffk <- calc_se(Xk = Xk,
# dif = dif,
# comparisons = comparisons,
# method = method,
# boot.R = boot.R)
# }
# }
#
# # Assemble output list
# names(Nk) <- lapply(algorithms, function(x)x$alias)
# output <- list(instance = instance$alias,
# Xk = Xk,
# Nk = Nk,
# Diffk = Diffk,
# dif = dif,
# method = method,
# comparisons = comparisons,
# seed = seed)
#
# # Save to file if required
# if (save.to.file){
# # Get folder
# if(!dir.exists(folder)) dir.create(folder)
#
# # Get a unique filename
# filename <- paste0(folder, "/",
# instance$alias,
# ".rds")
#
# # save output to file
# saveRDS(output, file = filename)
# }
#
# # Return output
# return(output)
# }
#' Run a full experiment for comparing multiple algorithms using multiple
#' instances
#'
#' Design and run a full experiment - calculate the required number of
#' instances, run the algorithms on each problem instance using the iterative
#' approach based on optimal sample size ratios, and return the results of the
#' experiment. This routine builds upon [calc_instances()] and [calc_nreps()],
#' so refer to the documentation of these two functions for details.
#'
#' @section Instance List:
#' Parameter `instances` must contain a list of instance objects, where
#' each field is itself a list, as defined in the documentation of function
#' [calc_nreps()]. In short, each element of `instances` is an `instance`, i.e.,
#' a named list containing all relevant parameters that define the problem
#' instance. This list must contain at least the field `instance$FUN`, with the
#' name of the problem instance function, that is, a routine that calculates
#' y = f(x). If the instance requires additional parameters, these must also be
#' provided as named fields.
#' An additional field, "instance$alias", can be used to provide the instance
#' with a unique identifier (e.g., when using an instance generator).
#'
#' @section Algorithm List:
#' Object `algorithms` is a list in which each component is a named
#' list containing all relevant parameters that define an algorithm to be
#' applied for solving the problem instance. In what follows `algorithms[[k]]`
#' refers to any algorithm specified in the `algorithms` list.
#'
#' `algorithms[[k]]` must contain an `algorithms[[k]]$FUN` field, which is a
#' character object with the name of the function that calls the algorithm; as
#' well as any other elements/parameters that `algorithms[[k]]$FUN` requires
#' (e.g., stop criteria, operator names and parameters, etc.).
#'
#' The function defined by the routine `algorithms[[k]]$FUN` must have the
#' following structure: supposing that the list in `algorithms[[k]]` has
#' fields `algorithm[[k]]$FUN = "myalgo"`, `algorithms[[k]]$par1 = "a"` and
#' `algorithms[[k]]$par2 = 5`, then:
#'
#' \preformatted{
#' myalgo <- function(par1, par2, instance, ...){
#' #
#' # <do stuff>
#' #
#' return(results)
#' }
#' }
#'
#' That is, it must be able to run if called as:
#'
#' \preformatted{
#' # remove '$FUN' and '$alias' field from list of arguments
#' # and include the problem definition as field 'instance'
#' myargs <- algorithm[names(algorithm) != "FUN"]
#' myargs <- myargs[names(myargs) != "alias"]
#' myargs$instance <- instance
#'
#' # call function
#' do.call(algorithm$FUN,
#' args = myargs)
#' }
#'
#' The `algorithm$FUN` routine must return a list containing (at
#' least) the performance value of the final solution obtained, in a field named
#' `value` (e.g., `result$value`) after a given run. In general it is easier to
#' write a small wrapper funciton around existing implementations.
#'
#' @section Initial Number of Observations:
#' In the _general case_ the initial number of observations / algorithm /
#' instance (`nstart`) should be relatively high. For the parametric case
#' we recommend 10~15 if outliers are not expected, and 30~40 (at least) if that
#' assumption cannot be made. For the bootstrap approach we recommend using at
#' least 15 or 20. However, if some distributional assumptions can be
#' made - particularly low skewness of the population of algorithm results on
#' the test instances), then `nstart` can in principle be as small as 5 (if the
#' output of the algorithm were known to be normal, it could be 1).
#'
#' In general, higher sample sizes are the price to pay for abandoning
#' distributional assumptions. Use lower values of `nstart` with caution.
#'
#' @section Pairwise Differences:
#' Parameter `dif` informs the type of difference in performance to be used
#' for the estimation (\eqn{\mu_a} and \eqn{\mu_b} represent the mean
#' performance of any two algorithms on the test instance, and \eqn{mu}
#' represents the grand mean of all algorithms given in `algorithms`):
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.first"`, the estimated
#' quantity is:
#' \eqn{\phi_{1b} = (\mu_1 - \mu_b) / \mu_1 = 1 - (\mu_b / \mu_1)}.
#'
#' - If `dif == "perc"` and `comparisons == "all.vs.all"`, the estimated
#' quantity is:
#' \eqn{\phi_{ab} = (\mu_a - \mu_b) / \mu}.
#'
#' - If `dif == "simple"` it estimates \eqn{\mu_a - \mu_b}.
#'
#' @section Sample Sizes for Nonparametric Methods:
#' If the parameter `` is set to either `Wilcoxon` or `Binomial`, this
#' routine approximates the number of instances using the ARE of these tests
#' in relation to the paired t.test:
#' - `n.wilcox = n.ttest / 0.86 = 1.163 * n.ttest`
#' - `n.binom = n.ttest / 0.637 = 1.570 * n.ttest`
#'
#' @inheritParams calc_nreps
#' @inheritParams calc_instances
#' @param instances list object containing the definitions of the
#' _available_ instances. This list may (or may not) be exhausted in the
#' experiment. To estimate the number of required instances,
#' see [calc_instances()]. For more details, see Section `Instance List`.
#' @param power (desired) test power. See [calc_instances()] for details.
#' Any value equal to or greater than one will force the method to use all
#' available instances in `Instance.list`.
#' @param d minimally relevant effect size (MRES), expressed as a standardized
#' effect size, i.e., "deviation from H0" / "standard deviation".
#' See [calc_instances()] for details.
#' @param sig.level family-wise significance level (alpha) for the experiment.
#' See [calc_instances()] for details.
#' @param alternative type of alternative hypothesis ("two.sided" or
#' "one.sided"). See [calc_instances()] for details.
#' @param save.partial.results logical, should partial results be saved to file?
#' @param load.partial.results logical, should previously saved partial results
#' be reloaded as part of the experiment?
#'
#' @return a list object containing the following fields:
#' \itemize{
#' \item \code{Configuration} - the full input configuration (for reproducibility)
#' \item \code{data.raw} - data frame containing all observations generated
#' \item \code{data.summary} - data frame summarizing the experiment.
#' \item \code{N} - number of instances sampled
#' \item \code{N.star} - number of instances required
#' \item \code{total.runs} - total number of algorithm runs performed
#' \item \code{instances.sampled} - names of the instances sampled
#' \item \code{Underpowered} - flag: TRUE if N < N.star
#' }
#'
#' @author Felipe Campelo (\email{fcampelo@@ufmg.br},
#' \email{f.campelo@@aston.ac.uk})
#'
#' @references
#' - F. Campelo, F. Takahashi:
#' Sample size estimation for power and accuracy in the experimental
#' comparison of algorithms. Journal of Heuristics 25(2):305-338, 2019.
#' - P. Mathews.
#' Sample size calculations: Practical methods for engineers and scientists.
#' Mathews Malnar and Bailey, 2010.
#' - A.C. Davison, D.V. Hinkley:
#' Bootstrap methods and their application. Cambridge University Press (1997)
#' - E.C. Fieller:
#' Some problems in interval estimation. Journal of the Royal Statistical
#' Society. Series B (Methodological) 16(2), 175–185 (1954)
#' - V. Franz:
#' Ratios: A short guide to confidence limits and proper use (2007).
#' https://arxiv.org/pdf/0710.2024v1.pdf
#' - D.C. Montgomery, C.G. Runger:
#' Applied Statistics and Probability for Engineers, 6th ed. Wiley (2013)
#' - D.J. Sheskin:
#' Handbook of Parametric and Nonparametric Statistical Procedures,
#' 4th ed., Chapman & Hall/CRC, 1996.
#'
#'
#' @examples
#' \dontrun{
#' # Example using four dummy algorithms and 100 dummy instances.
#' # See [dummyalgo()] and [dummyinstance()] for details.
#' # Generating 4 dummy algorithms here, with means 15, 10, 30, 15 and standard
#' # deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#' list(FUN = "dummyalgo",
#' alias = paste0("algo", i),
#' distribution.fun = "rnorm",
#' distribution.pars = list(mean = m, sd = s))},
#' i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#' m = c(15, 10, 30, 15),
#' s = c(2, 4, 6, 8),
#' SIMPLIFY = FALSE)
#'
#' # Generate 100 dummy instances with centered exponential distributions
#' instances <- lapply(1:100,
#' function(i) {rate <- runif(1, 1, 10)
#' list(FUN = "dummyinstance",
#' alias = paste0("Inst.", i),
#' distr = "rexp", rate = rate,
#' bias = -1 / rate)})
#'
#' my.results <- run_experiment(instances, algorithms,
#' d = .5, se.max = .1,
#' power = .9, sig.level = .05,
#' power.target = "mean",
#' dif = "perc", comparisons = "all.vs.all",
#' seed = 1234)
#'
#' # Take a look at the results
#' summary(my.results)
#' print(my.results)
#'}
#'
# run_experiment_old <- function(instances, algorithms, d, se.max,
# power = 0.8, sig.level = 0.05,
# power.target = "mean",
# dif = "simple", comparisons = "all.vs.all",
# alternative = "two.sided", test = "t.test",
# method = "param",
# nstart = 20, nmax = 100 * length(algorithms),
# force.balanced = FALSE,
# ncpus = 2, boot.R = 499, seed = NULL,
# save.partial.results = FALSE,
# load.partial.results = FALSE,
# folder = "./nreps_files")
# {
#
# # TODO:
# # save/load.partial.results can be either a folder, a vector of
# # file names, or NULL
# # If it is a folder, then filenames are generated based on instance aliases
# #
# # The call to calc_nreps will need to be changed from lapply to mapply
#
# # ======== Most error catching to be performed by specific routines ======== #
# assertthat::assert_that(assertthat::is.count(ncpus),
# is.null(seed) || seed == seed %/% 1)
# if (alternative == "one.sided"){
# assertthat::assert_that(comparisons == "all.vs.first")
# }
#
# # Fix a common mistake
# if (tolower(dif) == "percent") dif <- "perc"
#
# # Capture input parameters
# var.input.pars <- as.list(environment())
#
# # set PRNG seed
# if (is.null(seed)) {
# if (!exists(".Random.seed")) stats::runif(1)
# seed <- .Random.seed #i.e., do not change anything
# } else {
# set.seed(seed)
# }
#
#
# # Set up parallel processing
# if ((.Platform$OS.type == "windows") & (ncpus > 1)){
# cat("\nAttention: multicore not currently available for Windows.\n
# Forcing ncpus = 1.")
# ncpus <- 1
# } else {
# available.cores <- parallel::detectCores()
# if (ncpus >= available.cores){
# cat("\nAttention: ncpus too large, we only have ", available.cores,
# " cores.\nUsing ", available.cores - 1,
# " cores for run_experiment().")
# ncpus <- available.cores - 1
# }
# }
#
# # Fill up algorithm and instance aliases if needed
# for (i in 1:length(instances)){
# if (!("alias" %in% names(instances[[i]]))) {
# instances[[i]]$alias <- instances[[i]]$FUN
# }
# }
# for (i in 1:length(algorithms)){
# if (!("alias" %in% names(algorithms[[i]]))) {
# algorithms[[i]]$alias <- algorithms[[i]]$FUN
# }
# }
#
# # Calculate N*
# n.available <- length(instances)
# n.algs <- length(algorithms)
# n.comparisons <- switch(comparisons,
# all.vs.all = n.algs * (n.algs - 1) / 2,
# all.vs.first = n.algs - 1)
#
# ss.calc <- calc_instances(ncomparisons = n.comparisons,
# d = d,
# power = power,
# sig.level = sig.level,
# alternative = alternative,
# test = test,
# power.target = power.target)
# if (power >= 1) {
# N.star <- n.available
# } else {
# N.star <- ss.calc$ninstances
# if (N.star < n.available){
# # Randomize order of presentation for available instances
# instances <- instances[sample.int(n.available)]
# }
# }
#
# # Echo some information for the user
# cat("CAISEr running")
# cat("\n-----------------------------")
# cat("\nRequired number of instances:", N.star)
# cat("\nAvailable number of instances:", n.available)
# cat("\nUsing", ncpus, "cores.")
# cat("\n-----------------------------")
#
# # Sample instances
# if(ncpus > 1){
# my.results <- pbmcapply::pbmclapply(X = instances[1:min(N.star, n.available)],
# FUN = calc_nreps,
# # Arguments for calc_nreps:
# algorithms = algorithms,
# se.max = se.max,
# dif = dif,
# comparisons = comparisons,
# method = method,
# nstart = nstart,
# nmax = nmax,
# boot.R = boot.R,
# force.balanced = force.balanced,
# load.file = NULL,
# save.file = NULL,
# # other pbmclapply arguments:
# mc.cores = ncpus)
# } else {
# my.results <- lapply(X = instances[1:min(N.star, n.available)],
# FUN = calc_nreps,
# # Arguments for calc_nreps:
# algorithms = algorithms,
# se.max = se.max,
# dif = dif,
# comparisons = comparisons,
# method = method,
# nstart = nstart,
# nmax = nmax,
# boot.R = boot.R,
# force.balanced = force.balanced,
# save.to.file = save.partial.results,
# load.from.file = load.partial.results,
# folder = folder)
# }
# # Consolidate raw data
# data.raw <- lapply(X = my.results,
# FUN = function(x){
# inst <- x$instance
# nj <- sum(x$Nk)
# data.frame(Algorithm = do.call(what = c,
# mapply(rep,
# names(x$Nk),
# x$Nk,
# SIMPLIFY = FALSE)),
# Instance = rep(inst, nj),
# Observation = do.call(c, x$Xk))})
#
# data.raw <- do.call(rbind, data.raw)
# rownames(data.raw) <- NULL
#
# # Consolidate summary data
# data.summary <- lapply(X = my.results,
# FUN = function(x){
# cbind(Instance = rep(x$instance, nrow(x$Diffk)),
# x$Diffk)})
#
# data.summary <- do.call(rbind, data.summary)
# algonames <- sapply(algorithms, function(x) x$alias)
# rownames(data.summary) <- NULL
# data.summary$Alg1 <- as.factor(algonames[data.summary$Alg1])
# data.summary$Alg2 <- as.factor(algonames[data.summary$Alg2])
#
#
# # Assemble output
# output <- list(Configuration = var.input.pars,
# data.raw = data.raw,
# data.summary = data.summary,
# N = min(N.star, n.available),
# N.star = N.star,
# total.runs = nrow(data.raw),
# instances.sampled = unique(data.raw$Instance),
# Underpowered = (N.star > n.available),
# samplesize.calc = ss.calc)
#
# class(output) <- c("CAISEr", "list")
#
# return(output)
# }
|
8066a19c9829afb11e32a04edef8c6dc6e14223f | 11770c57047327c8cd3e3885a5f3547682b8b3b8 | /plot2.R | 5f6dd8d8ff9237aa56ccde501ddbc589cec434ab | [] | no_license | AbidRaza1/Exploring-power-consumption | 7462919ec6736aa34123e47b51bf74e87c787e83 | 4043c73a3a2ffe04e468a63d43c6050976815fb5 | refs/heads/main | 2023-06-19T00:10:12.399293 | 2021-07-12T20:30:10 | 2021-07-12T20:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 405 | r | plot2.R |
## -------- Plot Global Active Power (kW) over a two-day period -------- ##
## Assumes `df1` has been prepared upstream with `Date` and `Time` columns
## whose sum yields a plottable datetime axis, plus a numeric
## `Global_active_power` column (TODO confirm: Date + Time presumably
## produces POSIXct timestamps in the upstream preparation step).

# Draw the time series as a line plot on the current graphics device.
plot(df1$Date+df1$Time, df1$Global_active_power, type ="l",xlab = "", ylab = "Global Active Power(kilowatts)")

# Copy the on-screen plot to a 480x480 pixel PNG file.
dev.copy(png,"plot2.png",width= 480, height=480)

# Close the PNG device so the file is flushed and finished.
dev.off()
|
a2f2cfd43a73f9ee34efb63c219b766d9a71176b | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/cairoGetScaledFont.Rd | f495c63e3f306a02937a15d0a143dffe5bdb1ccf | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 909 | rd | cairoGetScaledFont.Rd | \alias{cairoGetScaledFont}
\name{cairoGetScaledFont}
\title{cairoGetScaledFont}
\description{Gets the current scaled font for a \code{\link{Cairo}}.}
\usage{cairoGetScaledFont(cr)}
\arguments{\item{\verb{cr}}{[\code{\link{Cairo}}] a \code{\link{Cairo}}}}
\details{ Since 1.4}
\value{[\code{\link{CairoScaledFont}}] the current scaled font. To keep a reference to it,
use \code{\link{cairoScaledFontReference}}. This function never returns \code{NULL}. If memory cannot be allocated, a
special "nil" \code{\link{CairoScaledFont}} object will be returned on which
\code{\link{cairoScaledFontStatus}} returns \code{CAIRO_STATUS_NO_MEMORY}. Using
this nil object will cause its error state to propagate to other
objects it is passed to, (for example, calling
\code{\link{cairoSetScaledFont}} with a nil font will trigger an error that
will shutdown the \code{\link{Cairo}} object).}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
3cf11a221586a4b2175bfe2133e21ed3c77dac59 | 8c6ee4c1df82aa3d76634920c912a817fba979e6 | /Templates/R/Regression/RandomForestRegression.R | 1e88c7e4b0fccbdda004dde7c5146cf34cfd8f6b | [] | no_license | kenilshah27/Data-Science-Analytics | f83400e753b3fcd7fb0a239e9ae1efd4a9f444d0 | 2696631dc4e735b275d80e744da9b146111e5ea4 | refs/heads/master | 2020-04-18T16:14:41.172950 | 2019-08-03T21:45:24 | 2019-08-03T21:45:24 | 167,629,661 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,123 | r | RandomForestRegression.R |
dataset <- read.csv('Dataset Name')
summary(dataset) # provides a summary of the dataset
str(dataset) # gives us a structure of the dataset
names(dataset) # names of the column
library(caTools)
datasplit <- sample.split(dataset$DependentVariable,SplitRatio = 0.8)
train <- subset(dataset,datasplit = TRUE)
test <- sbuset(dataset,datasplit = FALSE)
#check for correlation
#tocheck for two columns
cor(dataset&column1,dataset&column2,method = 'pearson') # other methods can be used
#between all the variables
library(ggpubr)
ggscatter(dataset,x= 'column 1',y = 'column 2',add = 'reg.line',
conf.int = TRUE , cor.coef = TRUE , cor.method = 'pearson',
xlab = 'Column 1',ylab = 'Column 2')
#Correltion diagram
library(corrplot)
x <- cor(dataset)
corrplot(x, type="upper", order="hclust")
#Correlation Matrix
corr.test(dataset)
#Correlation of one variable will every variable
library(corrr)
dataset %>% correlate() %>% focus(columnname)
#Do scaling if needed
#Convert into categorical variable if required
#Before creating the model, you might need to bin few values . You can use mapvalue function for that
library(plyr)
train$column <- mapvalues(train$column,from = c('A','B','C'),to = c('D','E','F')) # to map A to D, B to E,c to F
test$column <- mapvalues(test$column,from = c('A','B','C'),to = c('D','E','F')) # we need to map values in test as well
#Model
library(randomForest)
model <- randomForest(x = train(without the DependentVarialbe),
y = train$DependentVariable,
ntree = 500) # can change various constraints to get different trees
#Predicting values
predictedvalues <- predict(model,test)
#coefficients of the independent variables
model$coefficients
#Summary of the model will have the coefficients value, mean square error, adjusted r square and many other metrics
summary(model)
#Residual plot to check how linear the model is
plot(model$fitted.values,model$residuals)
#Saving the output file
write.csv(predictedvalues,'Filename',sep = '\t')
|
de0857734b6a0a7ee7d7c3f968e09c8c03a8e7cd | 16456431fa3834700e777c9d3b4919de19fda7c0 | /man/plotSamplesMut.Rd | 138a4bb737968da54106d53855c2b7af2ed37d8e | [
"MIT"
] | permissive | yulab41/cDriver | 69b4985c8142b887342d7d0b66a89c73ee5b36df | aa466ae92095f877a1f68ee7d2e6136cba9b02ea | refs/heads/master | 2021-11-26T07:53:06.780293 | 2018-01-18T03:04:44 | 2018-01-18T03:04:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,588 | rd | plotSamplesMut.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plotSamplesMut}
\alias{plotSamplesMut}
\title{Plot mutations per sample distribution.}
\usage{
plotSamplesMut(sample.mutations, indels = TRUE, silent = TRUE,
fill = FALSE, Tumor_Sample_Barcode = NULL, Hugo_Symbol = NULL,
Variant_Classification = NULL, Variant_Type = NULL)
}
\arguments{
\item{sample.mutations}{data frame in MAF like format.
Columns (with exactly same names) which \code{sample.mutations} should have are:
\itemize{
\item Variant_Classification column specified by MAF format, used to distinguish between silent and nonsilent SNVs
\item Hugo_Symbol column specified by MAF format, which reports the gene for each SNV.
\item Tumor_Sample_Barcode column specified by MAF format, reporting in which patient each SNV was found.
\item Variant_Type column specified by MAF format; whether the mutation is an SNV or InDel
}}
\item{indels}{a boolean value indicating should indels be counted. By default it is True.}
\item{silent}{a boolean value indicating should silent mutations be counted. By default it is True.}
\item{fill}{a boolean value indicating should plot only represent proportion between 3 types of mutations, not counts. By default it is False.}
\item{Tumor_Sample_Barcode}{(optional) integer/numeric value indicating column in \code{sample.mutations} which have sample ids for SNVs/Indels.
Default is NULL value (in this case \code{sample.mutations} should already have this column)}
\item{Hugo_Symbol}{(optional) integer/numeric value indicating column in \code{sample.mutations} having gene names for reported SNVs/Indels.
Default is NULL value (in this case \code{sample.mutations} should already have this column)}
\item{Variant_Classification}{(optional) integer/numeric value indicating column in \code{sample.mutations} which contain classification for SNV (Silent or not).
Default is NULL value (in this case \code{sample.mutations} should already have this column)}
\item{Variant_Type}{(optional) integer/numeric value indicating column in \code{sample.mutations} which contains information on whether the mutation is an SNV or InDel.
Default is NULL value (in this case \code{sample.mutations} should already have this column)}
}
\value{
ggplot2 object
}
\description{
\code{plotSamplesMut} plots per-sample mutation counts.
}
\examples{
\donttest{
# plot sample's mutations , all of them
plotSamplesMut(sample.genes.mutect)
# plot proportion of silent and nonsilent, without indels
plotSamplesMut(sample.genes.mutect, indels=FALSE, fill=TRUE)
}
}
|
407efe35ecdb29da349a484510574d8bd07c7e56 | 4151705d873af486f359ebc60427c4e439044510 | /old_files_not_used/deseq/logfc.R | 006cb7ef5dbc1a3bd2f92bd1557ec709bf201ab3 | [] | no_license | GW-HIVE/bioxpress | 375e74221ff8841049530cf5c60eb440436d8d1f | 6ca86916958c08b88985b745a3f795c0156d85e3 | refs/heads/main | 2023-08-12T08:12:53.736967 | 2021-10-08T13:53:26 | 2021-10-08T13:53:26 | 392,076,238 | 1 | 1 | null | 2021-10-08T13:53:27 | 2021-08-02T19:51:53 | Python | UTF-8 | R | false | false | 1,062 | r | logfc.R | library(pheatmap)
# Command-line driver for a DESeq2 rlog fold-change computation.
# Expected positional arguments:
#   args[1] working directory, args[2] hit-count CSV (genes x samples),
#   args[3] sample-category CSV, args[4] path for a progress/status file.
args <- commandArgs(TRUE)
setwd(args[1])
assayFile <- args[2]
colFile <- args[3]
progressPath <- args[4]
# Connection used to report progress; each write() below opens/closes it.
progressFile <- file(progressPath)

# Read in hit counts; first CSV column supplies the gene row names.
write("Reading in hit counts (assayFile)",file = progressFile)
assay<-read.csv(assayFile, row.names=1)

# Read in the sample category (column metadata) table.
write("Reading in category table (colFile)",file = progressFile, append = TRUE)
coldata<-read.csv(colFile)

# Load the DESeq2 (Bioconductor) library.
write("Loading DESeq libarary",file = progressFile, append = TRUE)
library(DESeq2)

# Build the DESeq dataset; the design models expression by `status`
# (a column that must exist in coldata).
write("Creating deseq data set from matrix",file = progressFile, append = TRUE)
ddsMat <- DESeqDataSetFromMatrix(countData = assay,
                                 colData = coldata,
                                 design = ~status )

# Regularized-log transform to stabilize variance across counts.
rld <- rlogTransformation( ddsMat )

# Average rlog expression and rlog fold change between the first two
# samples only (columns 1 and 2) -- NOTE(review): presumably the design
# has exactly two samples/conditions; confirm before reusing.
res <- data.frame(
  assay(rld),
  avgLogExpr = ( assay(rld)[,2] + assay(rld)[,1] ) / 2,
  rLogFC = assay(rld)[,2] - assay(rld)[,1] )

# Order genes by fold change and write results to the working directory.
resOrdered <- res[ order(res$rLogFC), ]

write.csv(as.data.frame(resOrdered), file="results.csv")
616684324c05ecf012e9f99aab47cd5978ee43e6 | 92d988ba40fbc2bd1d3771a3f01faba1efb28a7d | /ui.R | ff89da8af475c4ee1251a2e4d453e44f6e97d7a2 | [] | no_license | savirhuitron/ds_capm | b9f1afbf23fab3b16f983f4acee8ba08ebe031ee | 8a5990b8bc291684cf4ef083e3a9bc19433fc00e | refs/heads/master | 2021-01-01T05:10:43.691500 | 2016-04-21T02:10:34 | 2016-04-21T02:10:34 | 56,734,505 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,103 | r | ui.R | library(shinydashboard)
library(rCharts)
library(markdown, warn = F)
library(knitr, warn = F)
library(dplyr, warn = F)
#carpeta2 <- 'O:/Users/shuitrong/Documents/CAPM'
source("CAPM_fa.R")
# shinydashboard UI for the CAPM app: a header, a three-item sidebar menu,
# and a body with one tab per menu item. Output/input IDs ("myChart",
# "niveles", "tbl1", "rf", "rm", "bar1", "pie1", "sector1", "downloadData")
# must match the server-side definitions.
header <- dashboardHeader(title = "CAPM")

# Sidebar: tabName values link each menu item to a tabItem in the body.
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem("AN INTRODUCTION", tabName = "explain", icon = icon("pencil")),
    menuItem("MODEL & GRAPHS", tabName = "test", icon = icon("bar-chart")),
    menuItem("BY SECTOR", tabName = "test1", icon = icon("bar-chart"))
  )
)

body <- dashboardBody(
  tabItems(
    # Tab 1: static introduction rendered from a markdown file.
    tabItem(tabName = "explain",
            fluidRow(
              box(title = "The Model CAPM", collapsible = TRUE, status = "primary", solidHeader = T, width = 12,
                  includeMarkdown("include.md")
              )
            )
    ),
    # Tab 2: betas chart with a beta-level filter, plus the CAPM table
    # and the two rate inputs that feed the model.
    tabItem(tabName = "test",
            fluidRow(
              box(title = "Betas", collapsible = TRUE, status = "primary",
                  solidHeader = TRUE, showOutput("myChart", "highcharts"), width = 10
              ),
              box(title = "Nivels of Beta", collapsible = TRUE, status = "primary",
                  solidHeader = TRUE, checkboxGroupInput("niveles", "",
                                                         choices = c("HIGH" = "HIGH", "MEDIA" = "MEDIA",
                                                                     "LOW" = "LOW", "MINIMUM" = "MINIMUM"), selected = "HIGH"
                  ), width = 2
              )
            ),# end of the first fluidRow
            fluidRow(
              box(title = "Table of CAPM", collapsible = TRUE, status = "primary",
                  solidHeader = TRUE, DT::dataTableOutput('tbl1'), width = 8, downloadButton("downloadData", "Download")
              ),
              box( title = "Choose a Risk Free Rate", status = "primary", solidHeader = T,
                   collapsible = T ,width = 2, numericInput("rf", "", value = 3.5)),
              box( title = "Choose a Risk Market Rate", status = "primary", solidHeader = T,
                   collapsible = T ,width = 2, numericInput("rm", "", value = 8.29))
            )#,
    ),# closing parenthesis of the tabItem
    # Tab 3: per-sector views (bar chart, pie chart) with a sector filter.
    tabItem(tabName = "test1",
            fluidRow(
              box(title = "MARKET CAP", status = "primary", solidHeader = T, collapsible = T
                  , showOutput("bar1", "highcharts") , width = 5),
              box(title = "PIE", status = "primary", solidHeader = T, collapsible = T
                  , showOutput("pie1", "highcharts"), width = 5 ),#,
              box(title = "SECTOR", status = "primary", solidHeader = T, collapsible = T,
                  checkboxGroupInput("sector1", "",
                                     choices = c("MARKET" = "MARKET", "CONSUMER GOODS" = "CONSUMER GOODS", "INDUSTRIAL GOODS" = "INDUSTRIAL GOODS",
                                                 "TECHNOLOGY" = "TECHNOLOGY", "FINANCIAL" = "FINANCIAL", "SERVICES" = "SERVICES", "ENERGY" = "ENERGY", "HEALTHCARE" = "HEALTHCARE"),
                                     selected = c("CONSUMER GOODS", "SERVICES", "FINANCIAL", "MARKET"))
                  , width = 2 )
            )
    )
  )
)

# Assemble the full dashboard page from the three components above.
ui <- dashboardPage(header, sidebar, body)
|
c8c2222a4ee8cf2feb58ce8b6dc0ee9be2c1de6d | e2acc4f6e860580d8b1bb6d2e27befc47b82be29 | /R/hyp-test.R | 7000a991e707288f249c383bfdda2c1d3d45b902 | [] | no_license | markwh/rtvalidate | a3bc2dd8c99a2c0d235012a5fbcbc6c1dec6cec2 | 676cdb023778e0e14a8e079203ad868d48be4786 | refs/heads/master | 2020-07-01T21:33:39.579875 | 2019-09-18T18:13:03 | 2019-09-18T18:13:03 | 201,308,748 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,081 | r | hyp-test.R | # Hypothesis testing
#' Test uncertainty estimates against chi-square distribution
#'
#' @param valdata A data.frame as returned by \code{rt_valdata()}
#' @param debias Remove effect of bias? Defaults to FALSE
#' @param sides 1 for 1-sided, or 2 for 2-sided hypothesis test. If 1, test is on upper tail.
#' @param log.p Report p-value as log-transformed value?
#' @importFrom dplyr group_by mutate summarize
#' @export
rt_hyptest <- function(valdata, debias = FALSE, sides = 2, log.p = FALSE) {
out <- valdata %>%
mutate(relerr = pixc_err / sigma_est) %>%
group_by(variable) %>%
mutate(meanrelerr = mean(relerr, na.rm = TRUE))
if (!debias) out[["meanrelerr"]] <- 0
out <- summarize(out,
teststat = sum((relerr - meanrelerr)^2, na.rm = TRUE),
df = sum(!is.na(relerr)) - 1)
logpval <- pchisq(out$teststat, df = out$df, log.p = TRUE, lower.tail = FALSE)
if (sides == 2) {
logpval <- log(2) + ifelse(logpval > log(0.5), log(1 - exp(logpval)), logpval)
}
out$pval <- if (log.p) logpval else exp(logpval)
out
}
|
fc59b6af38c29a29a7395915ea4efcc16954fc98 | 47563c6a77e1b3a92581d99cff4792cfb41182db | /R/handle_errors.R | d13c16da58c72ceaeb1b105b43c244de52d5d059 | [
"MIT"
] | permissive | ropensci/oai | d7422a643081b6a92364de938cfdc373550df681 | 866af429d8cd4a97735c0efa1285fdde6cccf950 | refs/heads/master | 2022-11-13T01:30:08.422265 | 2022-11-10T16:46:58 | 2022-11-10T16:46:58 | 37,342,428 | 14 | 7 | NOASSERTION | 2022-09-22T16:58:42 | 2015-06-12T20:14:33 | R | UTF-8 | R | false | false | 1,246 | r | handle_errors.R | # Look for OAI-PMH exceptions
# https://www.openarchives.org/OAI/openarchivesprotocol.html#ErrorConditions
# xml = parsed xml
# Return TRUE if OK or stop
# handle_errors <- function(xml) {
# nodeset <- xml2::xml_find_all(xml, ".//*[local-name()='error']")
# if( length(nodeset) > 0 ) {
# msgs <- sapply(nodeset, function(n)
# paste0( xml2::xml_attr(n, "code"), ": ", xml2::xml_text(n) ) )
# stop( paste0("OAI-PMH exceptions: ", paste(msgs, collapse="\n")) )
# }
#
# }
#
# Inspect a parsed OAI-PMH response for <error> elements.
# Returns TRUE when the response is error-free; otherwise raises a classed
# "oai-pmh_error" condition carrying the echoed request and the error codes.
handle_errors <- function(xml) {
  # The <request> element echoes the request that produced this response;
  # it is attached to the condition for debugging.
  request_txt <- xml2::xml_text(
    xml2::xml_find_first(xml, ".//*[local-name()='request']")
  )

  error_nodes <- xml2::xml_find_all(xml, ".//*[local-name()='error']")

  # No <error> elements: the response is fine.
  if (length(error_nodes) == 0) {
    return(TRUE)
  }

  # Collect each error's machine-readable code and human-readable message.
  errors <- lapply(error_nodes, function(node) {
    c(code = xml2::xml_attr(node, "code"),
      message = xml2::xml_text(node))
  })

  msg <- paste0(
    "OAI-PMH errors: ",
    paste(sapply(errors, paste, collapse = ": "), collapse = ",\n")
  )

  cond <- condition(
    c("oai-pmh_error", "error"),
    message = msg,
    request = request_txt,
    error_codes = sapply(errors, "[", "code")
  )
  stop(cond)
}
|
621435845139d8ea153b2a4b4bc2178f46eb0287 | e98214e129c6f336b2a380c36bb32492b635a007 | /wp-content/uploads/2012/10/henon.r | 9fbd628c3059c03e0810c5602987d28c92d92413 | [] | no_license | hewner/hewner.github.io | 7d3bb99e214c939f3e64d8070b021b828a0dc6b0 | e5a07797b809e565dc1f1a3b76c3a8e4d8451715 | refs/heads/master | 2022-06-10T07:22:30.556376 | 2022-05-18T19:46:53 | 2022-05-18T19:46:53 | 141,222,806 | 0 | 0 | null | 2021-05-19T12:07:05 | 2018-07-17T02:54:38 | TeX | UTF-8 | R | false | false | 565 | r | henon.r | generateHenon <- function()
{
numberToPlot <- 10000
startX = 0.000001
startY = 0
HenonX = rep(0,numberToPlot)
HenonY = rep(0,numberToPlot)
HenonX[1] = startX
HenonY[1] = startY
a = 1.4
b = 0.3
for (i in 2:numberToPlot) {
prev = i - 1
HenonX[i] = HenonY[prev] + 1 - a*HenonX[prev]^2
HenonY[i] = b*HenonX[prev]
}
plot(HenonX, HenonY, main="Henon Attractor",
xlab="X", ylab="Y", pch=".")
# shows how the points jump around
#lines(HenonX[90:130], HenonY[90:130])
}
generateHenon()
|
4b82c74a931344fdc3572c6de1edb0f6c961ff26 | cdbc8fc811953f15bd9587dae98c1ef98d3ac20b | /man/mytest.Rd | 2b7d4730eac86df9dd0a2ecf8cba7f0f1144b2fa | [] | no_license | hodgesse1/newFort1 | d21e20d21ccfe69c090020925a6de51b36440c1a | 6d397efc08d8f8f88632a70d78cc89120a7ea5d8 | refs/heads/main | 2023-08-30T20:16:59.581793 | 2021-10-17T04:24:52 | 2021-10-17T04:24:52 | 418,015,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 355 | rd | mytest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mytest.R
\name{mytest}
\alias{mytest}
\title{Produce the output from myone}
\usage{
mytest(n = 100)
}
\arguments{
\item{n}{An integer}
}
\value{
\code{x} which is a vector of random normal values,
\code{y} which is \code{x} + 4.
}
\description{
Produce the output from myone
}
|
ca321a51be24f83cb16b95c7ed57f675fb002c16 | 93053d8d9226645f2be4fbbc3de1fe7a444c7746 | /R/hfr_append_sources.R | bdacb90ecd55dbd2dc7b1a38e6d36aa38a327ac5 | [
"MIT"
] | permissive | USAID-OHA-SI/Wavelength | 500a55dd3c1fba630a481b0610b8a264d1fcddca | 270e88233a0316f521d64adefe70112954c9ab33 | refs/heads/main | 2023-04-04T22:50:52.704479 | 2023-03-16T15:57:57 | 2023-03-16T15:57:57 | 179,119,038 | 3 | 0 | NOASSERTION | 2023-02-10T14:27:14 | 2019-04-02T16:38:22 | R | UTF-8 | R | false | false | 8,402 | r | hfr_append_sources.R | #' #' Append HFR and DATIM Data
#'
#' @param folderpath_hfr folder path to HFR processed data
#' @param folderpath_datim folder path to DATIM extracts, see `extract_datim()`
#' @param start_date start date of HFR period, YYYY-MM-DD format
#' @param weeks number of weeks to create, default = 4
#' @param max_date cut off data at max date? default = NULL
#' @param folderpath_output folder path for saving the output
#'
#' @export
#'
hfr_append_sources <- function(folderpath_hfr,
folderpath_datim,
start_date,
weeks = 4,
max_date = TRUE,
folderpath_output){
# IMPORT ------------------------------------------------------------------
#ensure pulling the lastest HFR files
hfr_files_newest <- list.files(folderpath_hfr, full.names = TRUE) %>%
tibble::enframe(name = NULL, value = "full_path") %>%
dplyr::mutate(name = basename(full_path) %>% stringr::str_remove_all("HF(R|D)_|\\.csv")) %>%
tidyr::separate(name, c(NA, "ou", "date"), sep = "_", extra = "drop") %>%
dplyr::mutate(date = lubridate::as_date(date)) %>%
dplyr::arrange(ou) %>%
dplyr::group_by(ou) %>%
dplyr::mutate(newest = date == max(date)) %>%
dplyr::filter(newest) %>%
dplyr::pull(full_path)
#pull in HFR data
df_hfr <- purrr::map_dfr(.x = hfr_files_newest,
.f = ~ readr::read_csv(.x, col_types = c(.default = "c")))
#pull in DATIM target files
df_datim <- purrr::map_dfr(.x = list.files(folderpath_datim, full.names = TRUE),
.f = ~ readr::read_tsv(.x, col_types = c(.default = "c")))
df_datim <- df_datim %>%
dplyr::select(-dplyr::matches("Type of organisational unit"))
# EXTRACT MISSING PARTNER NAME --------------------------------------------
if(!"primepartner" %in% names(df_datim)){
#access current mechanism list posted publically to DATIM
sql_view_url <- "https://www.datim.org/api/sqlViews/fgUtV6e9YIX/data.csv"
mech_official <- readr::read_csv(sql_view_url,
col_types = readr::cols(.default = "c"))
mech_info <- mech_official %>%
dplyr::filter(agency == "USAID") %>%
dplyr::select(mech_code = code, primepartner_d = partner)
df_datim <- df_datim %>%
tibble::add_column(primepartner = as.character(NA), .before = "mech_code") %>%
dplyr::left_join(mech_info, by = "mech_code") %>%
dplyr::mutate(primepartner = primepartner_d) %>%
dplyr::select(-primepartner_d) %>%
dplyr::glimpse()
rm(sql_view_url, mech_official, mech_info)
}
# MAP MECHANISM INFORMATION -----------------------------------------------
#pull list of mechanism from DATIM targets
df_mech_map <- df_datim %>%
dplyr::distinct(mech_code, primepartner, mech_name) %>%
dplyr::rename_at(dplyr::vars(primepartner, mech_name), ~ paste0(., "_d"))
#merge on mech_code, replacing HFR with DATIM names
df_hfr <- df_hfr %>%
dplyr::left_join(df_mech_map, by = "mech_code") %>%
dplyr::mutate(primepartner = primepartner_d,
mech_name = mech_name_d) %>%
dplyr::select(-dplyr::ends_with("_d"))
rm(df_mech_map)
# MAP ORG HIERARCHY -------------------------------------------------------
#pull list of org hierarchy from DATIM targets
df_org_map <- df_datim %>%
dplyr::distinct(orgunit, orgunituid, snu1, psnu) %>%
dplyr::rename_all(~ paste0(., "_d"))
#merge for those with facility uids
df_hfr_orguids <- df_hfr %>%
dplyr::filter(!is.na(orgunituid)) %>%
dplyr::left_join(df_org_map, by = c("orgunituid" = "orgunituid_d")) %>%
dplyr::mutate(#snu1 = ifelse(!is.na(snu1_d), snu1_d, snu1),
psnu = ifelse(!is.na(psnu_d), psnu_d, psnu),
orgunit = ifelse(!is.na(orgunit_d), orgunit_d, orgunit)) %>%
dplyr::select(-dplyr::ends_with("_d"))
#merge for those without facility uids
df_org_map_missing <- dplyr::distinct(df_org_map, orgunit_d, .keep_all= TRUE)
df_hfr_orguids_missing <- df_hfr %>%
dplyr::filter(is.na(orgunituid)) %>%
dplyr::left_join(df_org_map_missing, by = c("orgunit" = "orgunit_d")) %>%
dplyr::mutate(#snu1 = ifelse(!is.na(snu1_d), snu1_d, snu1),
psnu = ifelse(!is.na(psnu_d), psnu_d, psnu),
orgunituid = orgunituid_d) %>%
dplyr::select(-dplyr::ends_with("_d"))
#append df with org hierarchy together
df_hfr <- dplyr::bind_rows(df_hfr_orguids, df_hfr_orguids_missing)
rm(df_hfr_orguids, df_hfr_orguids_missing, df_org_map, df_org_map_missing)
# REMOVE ENDING MECHANISMS ------------------------------------------------
# #access current mechanism list posted publically to DATIM
# sql_view_url <- "https://www.datim.org/api/sqlViews/fgUtV6e9YIX/data.csv"
# mech_official <- readr::read_csv(sql_view_url,
# col_types = readr::cols(.default = "c"))
#
# #rename variables to match MSD and remove mechid from mech name
# ending_mechs <- mech_official %>%
# dplyr::filter(agency == "USAID") %>%
# dplyr::mutate(enddate = lubridate::ymd(enddate)) %>%
# dplyr::select(mech_code = code, enddate) %>%
# dplyr::filter(lubridate::year(enddate) < 2020) %>%
# dplyr::pull(mech_code)
#
# df_datim <- dplyr::filter(df_datim, !mech_code %in% ending_mechs)
#
# rm(sql_view_url, mech_official, ending_mechs)
# DUPICATE TARGETS --------------------------------------------------------
#clean up age and sex
df_datim <- df_datim %>%
dplyr::mutate(agecoarse = stringr::str_remove(agecoarse, " Age"),
sex = stringr::str_remove(sex, " sex"))
#duplicate targets for each week (DATIM)
dates <- lubridate::as_date(start_date) %>% seq(by = 7, length.out = weeks)
df_datim_rpt <- purrr::map_dfr(.x = dates,
.f = ~dplyr::mutate(df_datim, date = .x)) %>%
hfr_assign_pds()
rm(df_datim, dates)
# APPEND HFR AND TARGETS --------------------------------------------------
df_hfr <- df_hfr %>%
dplyr::mutate(#date = lubridate::mdy(date),
date = as.Date(date),
fy = as.integer(fy),
hfr_pd = as.integer(hfr_pd),
sex = ifelse(sex == "Unknown", "Unspecified", sex),
operatingunit = ifelse(operatingunit == "DRC","Democratic Republic of the Congo", operatingunit),
operatingunit = ifelse(operatingunit == "Viet Nam","Vietnam", operatingunit)) %>%
#assign_pds() %>%
dplyr::bind_rows(df_datim_rpt)
#aggregate to reduce # of lines
sum_vars <- c("mer_results", "mer_targets", "targets_gap", "weekly_targets", "weekly_targets_gap", "val")
df_hfr <- df_hfr %>%
dplyr::mutate_at(dplyr::vars(sum_vars), as.numeric) %>%
dplyr::group_by_at(setdiff(names(df_hfr), c("partner", "disaggregate", sum_vars))) %>%
dplyr::summarise_at(dplyr::vars(sum_vars), sum, na.rm = TRUE) %>%
dplyr::ungroup()
rm(df_datim_rpt, sum_vars)
#cap date at last date of reporting of collection period
if(max_date == TRUE){
max <- as.Date(start_date)+(7*(weeks-1))
df_hfr <- dplyr::filter(df_hfr, date <= as.Date(max))
}
# ARRANGE COLUMNS ---------------------------------------------------------
df_hfr <- dplyr::select(df_hfr,
operatingunit, orgunit, orgunituid, date, fy, hfr_pd,
mech_code, mech_name, primepartner, fundingagency, snu1,
psnu, community, indicator, agecoarse, sex, otherdisaggregate,
val, mer_results, mer_targets, targets_gap, weekly_targets,
weekly_targets_gap, dplyr::everything())
# EXPORT ------------------------------------------------------------------
if(!is.null(folderpath_output)){
pd <- df_hfr %>%
dplyr::distinct(fy, hfr_pd) %>%
dplyr::mutate(pd = fy + (hfr_pd/100)) %>%
dplyr::pull(pd) %>%
max()
readr::write_csv(df_hfr,
file.path(folderpath_output,
paste0("HFR_",
pd, "_Global_output_",
format(Sys.time(),"%Y%m%d.%H%M"),
".csv")),
na = "")
}
invisible(df_hfr)
}
|
d66582be7c98c8559b300137ba9c9edfd8408643 | b9d37167ebba26822213ba72e330a2237977e7db | /tests/testthat/test_listspecies.R | cfcbb8fb5665c88cfbf7bdd824476831badd93e5 | [] | no_license | phylotastic/rphylotastic | 6e90f5a4704b339aad5eef401f3ac42dd007c20f | 1cd60f2d99cc31e485ef9e38f76ff7f80c7f958c | refs/heads/master | 2023-08-24T08:09:07.411924 | 2023-08-08T23:21:33 | 2023-08-08T23:21:33 | 84,949,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,927 | r | test_listspecies.R | #test_that("Inserting a list of species", {
# userid <- "abusalehmdtayeen@gmail.com"
# listObj <- list(list_extra_info="", list_description="A sublist on the bird species added", list_keywords=c("bird", "endangered species", "Everglades"),list_curator="HD Laughinghouse", list_origin="webapp", list_curation_date="02-24-2016", list_source="des", list_focal_clade="Aves", list_title="Bird Species List", list_author=c("Bass", "O. & Cunningham", "R."), list_date_published="01-01-2017", is_list_public=TRUE, list_species=list(list(family="",scientific_name="Aix sponsa",scientific_name_authorship="", vernacular_name="Wood Duck",phylum="",nomenclature_code="ICZN",order="Anseriformes",class=""), list(family="",scientific_name="Anas strepera",scientific_name_authorship="", vernacular_name="Gadwall",phylum="",nomenclature_code="ICZN",order="Anseriformes",class="") ))
# result <- insert_species_in_list(userid, listObj)
# list_id <- result$list_id
# msg<-result$message
# expect_equal(msg, "Success")
#})
#test_that("Replacing a list of species", {
# userid <- "abusalehmdtayeen@gmail.com"
# access_token <- "ya29..zQLmLjbyujJjwV6RVSM2sy-mkeaKu-9"
# list_id <- 12
# speciesObj <- list( list(family="",scientific_name="Aix sponsa",scientific_name_authorship="", vernacular_name="Wood Duck",phylum="",nomenclature_code="ICZN",order="Anseriformes",class="") )
# result <- replace_species_in_list(userid, access_token, list_id, speciesObj)
# msg<-result$message
# expect_equal(msg, "Success")
#})
#test_that("Updating metadata of a list of species", {
# userid <- "abusalehmdtayeen@gmail.com"
# access_token <- "ya29..zQLmLjbyujJjwV6RVSM2sy-mkeaKu-9"
# list_id <- 12
# listObj <- list(list_description="A sublist on the bird species", list_keywords=c("bird","Everglades"))
# result <- update_species_in_list(userid, access_token, list_id, listObj)
# msg <-result$message
# expect_equal(msg, "Success")
#})
|
ba6b0778f84a3da9fc235fa3c89c058d87f76fb1 | 485fbaa39d76f76eef450cbd2ff34376a471f571 | /functions/new_partitioning.R | 9205176922d5da979b25452c111ab8baeec98af2 | [] | no_license | stinenyhus/Bachelor_deluxe | 00790ed5d572ec81a96f6de98579d7cba024f09f | edd0eb7b72978de187dab83211b97cee6ee5158c | refs/heads/main | 2023-07-11T18:00:21.283612 | 2021-08-18T11:50:49 | 2021-08-18T11:50:49 | 343,839,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,421 | r | new_partitioning.R | ###Only males partitioning
##Danish partitioning
partition_dk <- function(demo, features, hold_size = 0.2){
set.seed(1234)
#Using demoset to partition participants, then splitting data from the feature dataframe
#How many participants should go into hold-out set from each subgroup (gender/diagnosis)
n = round((nrow(demo) * hold_size)/2,0)
#Males
male_asd <- demo %>%
filter(Diagnosis == "ASD") %>% sample_n(n)
male_td <- demo %>%
filter(Diagnosis == "TD") %>% sample_n(n)
hold_out_1 <- rbind(male_asd,male_td)
hold_out_2 <- features[features$ID %in% hold_out_1$ID,]
train <- features[!(features$ID %in% hold_out_1$ID),]
return(list(hold_out_2,train)) #hold out first, train second
}
##Danish partitioning
partition_us <- function(demo, features, hold_size = 0.2){
set.seed(1234)
#Using demoset to partition participants, then splitting data from the feature dataframe
#How many participants should go into hold-out set from each subgroup (gender/diagnosis)
n = round((nrow(demo) * hold_size)/2,0)
#Males
male_asd <- demo %>%
filter(Diagnosis == "ASD") %>% sample_n(n)
male_td <- demo %>%
filter(Diagnosis == "TD") %>% sample_n(n)
hold_out_1 <- rbind(male_asd,male_td)
hold_out_2 <- features[features$ID %in% hold_out_1$ID,]
train <- features[!(features$ID %in% hold_out_1$ID),]
return(list(hold_out_2,train)) #hold out first, train second
} |
259b9dd76c258e58a1adbc64706613bcecd2844f | 1912ddbbd7b67235ca7b3a0f06ad8533a4538022 | /cachematrix.R | 636db80f7fda96e079f3ac793ae93919347bfd40 | [] | no_license | riccicc/ProgrammingAssignment2 | 715da2c61cb97acc155d2dc14b09515914a1ad63 | ca13dd5fbe4b6709ac1d421ebefaee91b1d82d72 | refs/heads/master | 2021-01-18T07:19:15.107791 | 2014-05-25T22:14:36 | 2014-05-25T22:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,500 | r | cachematrix.R | ## ------------------------------------------------------------------------------
## R Programming Course
## John Hopkins University
## May 2014
##
## Programming Assignment 2
##
## To avoid costly computational time, these functions will compute
## the inverse of a square matrix and will cache the inverse of a matrix. Then
## either the cached version will be returned or the inverse calculation can be
## performed.
## ------------------------------------------------------------------------------
## "This function creates a special 'matrix' object that can cache its inverse."
makeCacheMatrix <- function(x = matrix()) {
seq1 <<- x
mat1 <<- matrix(seq1, 3,3)
inverseMatrix <<- solve(mat1[3,3])
} ## end of makeCacheMatrix function.
## ----------------------------------------------------------------------------
## If needed, this function computes the inverse of the special 'matrix'
## returned by the above matrix. If the inverse has already been calculated and
## the matrix has not changed, then this function will retrieve the inverse from
## the cache and display a message.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
testInput <- x
if(testInput == seq1) {
message("...getting cached data.")
return(inverseMatrix)
}
inverseMatrix <- solve(testInput[3,3])
inverseMatrix
} ## end of cacheSolve function.
|
8e12edb2d11e9eaeb62be477af59e412b1760d03 | 1db06f2f5819f01ea9683cda921c01634776974d | /exercicio2/source.R | 19f81bdf4885dea8b4341bc6bd91d24ae4e2d2b3 | [] | no_license | juliomaister/IAA_2019_R_trabalho | b70a1f0a3bedb29b259c1eb066c01464ad7891f2 | 7f5a082794ad0975a51b9f161f40b6c8c86e02fe | refs/heads/master | 2020-06-04T12:15:47.803324 | 2019-06-15T02:24:56 | 2019-06-15T02:24:56 | 192,017,267 | 0 | 0 | null | 2019-06-14T23:48:12 | 2019-06-14T23:48:12 | null | UTF-8 | R | false | false | 4,146 | r | source.R | library("caret")
set.seed(7)
# -----------------------------------------------------------------------------
# CARREGAMENTO E CONSTRUÇÃO DOS DADOS
# -----------------------------------------------------------------------------
volumes = read.csv("Volumes.csv", sep = ";", dec = ",")
dataset = subset(volumes, select = -NR)
indices <- createDataPartition(dataset$VOL, p=0.80, list = FALSE)
treinamento <- dataset[indices,]
teste <- dataset[-indices,]
# -----------------------------------------------------------------------------
# TREINAMENTOS
# -----------------------------------------------------------------------------
# Random Forest
rf <- train(VOL~., data=treinamento, method="rf")
predicoes.rf <- predict(rf, teste)
cor.rf <- cor(teste$VOL, predicoes.rf)
# Svm Radial
svm <-train(VOL~., data=treinamento, method="svmRadial")
predicoes.svm <- predict(svm, teste)
cor.svm <- cor(teste$VOL, predicoes.svm)
# Neural Network
nnet <- train(VOL~., data = treinamento, method = "nnet", trace = F, linout=T,
tuneGrid=expand.grid(size = 10, decay = 0.1))
predicoes.nnet <- predict(nnet, teste)
cor.nnet <- cor(teste$VOL, predicoes.nnet)[1]
# Alometric Spurr
alom <- nls(VOL ~ b0 + b1*(DAP*DAP)*HT, treinamento, start = list(b0=0.5,b1=0.5))
predicoes.alom <- predict(alom, teste)
cor.alom <- cor(teste$VOL, predicoes.alom)
# -----------------------------------------------------------------------------
# ESCOLHA E TREINAMENTO DO MELHOR MODELO
# -----------------------------------------------------------------------------
# Accuracy of models
correlacoes <- as.data.frame(list(rf=cor.rf, nnet=cor.nnet, svmRadial=cor.svm, alom=cor.alom))
melhor_metodo <- colnames(correlacoes)[which(correlacoes == max(correlacoes))]
# Treinamento do melhor modelo
if (melhor_metodo == "rf" || melhor_metodo == "svmRadial"){
melhor_modelo <- train(VOL~., data=dataset, method=melhor_metodo)
} else if(melhor_metodo == "nnet"){
melhor_modelo <- train(VOL~., data = dataset, method = "nnet", trace = F, linout=T,
tuneGrid=expand.grid(size = 10, decay = 0.1))
} else if (melhor_metodo == "alom"){
melhor_modelo <- nls(VOL ~ b0 + b1*(DAP*DAP)*HT, dataset, start = list(b0=0.5,b1=0.5))
}
save(melhor_modelo, file = "melhor_modelo_atual.RData")
melhor_predicao <- predict(melhor_modelo, dataset)
melhor_cor <- cor(dataset$VOL, melhor_predicao)
# -----------------------------------------------------------------------------
# GRÁFICOS
# -----------------------------------------------------------------------------
maior_predicao = max(predicoes.rf)
# Gráfico de comparação das correlações dos métodos
plot(teste$VOL, pch = 19, cex = 0.5, ylab="Predições", main = "Predições dos métodos")
with(dataset, points(x = predicoes.rf, pch = 19, cex = 0.5, col="red"))
with(dataset, points(x = predicoes.alom, pch = 19, cex = 0.5, col="green"))
with(dataset, points(x = predicoes.svm, pch = 19, cex = 0.5, col = "blue"))
with(dataset, points(x = predicoes.nnet, pch = 19, cex = 0.5, col="purple"))
dev.copy(png,'metodos.png')
dev.off()
# Gráfico de comparação das correlações do melhor método atual
xgrid = expand.grid(X1=volumes$NR,X2=seq(0, max(c(max(melhor_predicao),max(dataset$VOL))), 0.01))
plot(xgrid, pch = 20, cex = 0.1, col = "grey", main = paste("Comparação entre amostra e predições para", melhor_metodo), ylab="Volume", xlab = "Distribuição de amostras")
points(dataset$VOL, pch = 19, cex=0.3)
points(melhor_predicao, pch = 19, col = "red", cex=0.3)
dev.copy(png,'metodo_com_melhor_correlacao.png')
dev.off()
# -----------------------------------------------------------------------------
# OUTPUTS
# -----------------------------------------------------------------------------
sink("output.txt")
print("Comparação de correlações entre os métodos")
print(" - Random Forest")
print(" - Support Vector Machine Radial")
print(" - Neural Network")
print("Correlações dos métodos utilizados")
print(correlacoes)
print("Método com melhor correlação:")
print(melhor_metodo)
print("Correlação do método escolhido com toda a amostra de dados")
print(melhor_cor)
sink() |
3158b229f984835fc0a0b23a34dae5ed6b5bb37a | eeff7caad6623a2bb401cf02544ee4bb0654a716 | /Gene-Expression/diff_gene_expr.R | b408058ff635f56e61a9ae259372d32e2c048624 | [] | no_license | Boyle-Lab/TE-Driven-CTCF-Loop-Evol | f17209ccfa96123e278e8da13d3becea5f1c0e53 | 96db32b3d6afb3d778ece40fe41172341245566f | refs/heads/master | 2021-07-10T23:40:17.861086 | 2021-06-29T15:45:57 | 2021-06-29T15:45:57 | 132,179,189 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 21,522 | r | diff_gene_expr.R | # R pipeline to produce histone mod and gene expression plots in Figure 7 and Sup. Fig. 13.
require("heatmap2")
# Read in data on loop annotations.
load("../TE-Loop-Intersection/loop_intersection.Rdata")
# Read in gene expression data.
gene_expr_data = read.table("../data/gene_exp/rna_seq/normCountsComplete.tab", sep="\t", header=TRUE, stringsAsFactors=FALSE)[,c(1,2,4:6)]
colnames(gene_expr_data) = c("gene_mm9", "gene_hg19", "CH12", "K562", "GM12878")
# Load up functions
source("add_bigwig_annotation.R")
source("annotate_enhancer_promoters.R")
# Annotate loop data with histone modifications at anchor loci.
tmp = add_all_histmods(annotated_loop_data, bigwig_path="../data/histmods", mods=c("H3K4me3", "H3K4me1", "H3K27me3", "H3K27ac"))
# Apply min-max normalization to histmod annotations
hist_annotated_loop_data = norm_all_histmods(tmp, mods=c("H3K4me3", "H3K4me1", "H3K27me3", "H3K27ac"))
# Produce histone mod plots for enhancer-enhancer, promoter-enhancer, and
# promoter-promoter interactions. (Sup. Fig. 13A)
dat = hist_annotated_loop_data[which(hist_annotated_loop_data$cell_q == "GM12878" & ((hist_annotated_loop_data$H3K4me3.max.l >= 5 & hist_annotated_loop_data$H3K4me1.max.r >= 7) | (hist_annotated_loop_data$H3K4me3.max.l >= 5 & hist_annotated_loop_data$H3K4me1.max.r >= 7) | (hist_annotated_loop_data$H3K4me1.max.l >= 7 & hist_annotated_loop_data$H3K4me1.max.r >= 7) | (hist_annotated_loop_data$H3K4me3.max.l >= 5 & hist_annotated_loop_data$H3K4me3.max.r >= 5)) ),]
for (i in 1:nrow(dat)) { if (dat[i,47] > dat[i,46]) { dat[i,c(46,48,47,49)] = dat[i,c(47,49,46,48)] }}
pdf("eh-histmods.GM12878.pdf", height=10, width=5)
heatmap.2(as.matrix(dat[,c(46,48,47,49)]), trace="none", dendrogram="none", Colv=FALSE, col=brewer.pal(9,"Reds"), cexCol=1)
dev.off()
dat = hist_annotated_loop_data[which(hist_annotated_loop_data$cell_q == "K562" & ((hist_annotated_loop_data$H3K4me3.max.l >= 30 & hist_annotated_loop_data$H3K4me1.max.r >= 9) | (hist_annotated_loop_data$H3K4me3.max.l >= 30 & hist_annotated_loop_data$H3K4me1.max.r >= 9) | (hist_annotated_loop_data$H3K4me1.max.l >= 9 & hist_annotated_loop_data$H3K4me1.max.r >= 9) | (hist_annotated_loop_data$H3K4me3.max.l >= 30 & hist_annotated_loop_data$H3K4me3.max.r >= 30)) & hist_annotated_loop_data$H3K4me3.max.l <= 165 & hist_annotated_loop_data$H3K4me3.max.r <= 165),]
for (i in 1:nrow(dat)) { if (dat[i,47] > dat[i,46]) { dat[i,c(46,48,47,49)] = dat[i,c(47,49,46,48)] }}
pdf("eh-histmods.K562.pdf", height=10, width=5)
heatmap.2(as.matrix(dat[,c(46,48,47,49)]), trace="none", dendrogram="none", Colv=FALSE, col=brewer.pal(9,"Reds"), cexCol=1)
dev.off()
dat = hist_annotated_loop_data[which(hist_annotated_loop_data$cell_q == "CH12" & ((hist_annotated_loop_data$H3K4me3.max.l >= 12 & hist_annotated_loop_data$H3K4me1.max.r >= 2) | (hist_annotated_loop_data$H3K4me3.max.l >= 12 & hist_annotated_loop_data$H3K4me1.max.r >= 2) | (hist_annotated_loop_data$H3K4me1.max.l >= 2 & hist_annotated_loop_data$H3K4me1.max.r >= 2) | (hist_annotated_loop_data$H3K4me3.max.l >= 12 & hist_annotated_loop_data$H3K4me3.max.r >= 12)) & hist_annotated_loop_data$H3K4me1.max.l <= 7 & hist_annotated_loop_data$H3K4me1.max.r <= 7 & hist_annotated_loop_data$H3K4me3.max.l <= 200 & hist_annotated_loop_data$H3K4me3.max.r <= 200),]
for (i in 1:nrow(dat)) { if (dat[i,47] > dat[i,46]) { dat[i,c(46,48,47,49)] = dat[i,c(47,49,46,48)] }}
pdf("eh-histmods.CH12.pdf", height=10, width=5)
heatmap.2(as.matrix(dat[,c(46,48,47,49)]), trace="none", dendrogram="none", Colv=FALSE, col=brewer.pal(9,"Reds"), cexCol=1)
dev.off()
# Annotate loop anchors with their nearest TSS.
gene_hist_annotated_loop_data = annotate_anchor_tss(hist_annotated_loop_data)
# Test for differences in delta-TPM between conserved and nonconserved loops
# for each pair of cell types.
# GM12878 to K562
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "GM12878" & gene_hist_annotated_loop_data$class_t2 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "GM12878" & gene_hist_annotated_loop_data$class_t2 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
# Expected stats:
#nrow(tmp.n)
#[1] 2976
#nrow(tmp.c)
#[1] 1436
#
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.000 0.101 1.373 20.305 14.397 2055.215
#
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 0.0000 0.0499 0.7218 14.5510 8.5328 2055.2150 1
#
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
##W = 2325213, p-value = 8.055e-07
#alternative hypothesis: true location shift is greater than 0
# GM12878 to CH12
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "GM12878" & gene_hist_annotated_loop_data$class_t1 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "GM12878" & gene_hist_annotated_loop_data$class_t1 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
# Expected stats:
#length(delta_exp.c)
#[1] 63
#length(delta_exp.n)
#[1] 4349
#
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 0.000 0.124 1.282 29.523 14.743 4084.146 1
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.00000 0.04133 0.29874 8.26716 5.64290 134.48700
#
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
#W = 160216, p-value = 0.01019
#alternative hypothesis: true location shift is greater than 0
# K562 to GM12878
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "K562" & gene_hist_annotated_loop_data$class_t2 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "K562" & gene_hist_annotated_loop_data$class_t2 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
# Expected stats:
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.0000 0.0699 1.0109 18.3965 10.8295 2055.2150
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 0.0000 0.0510 0.7312 14.8593 8.6922 2055.2150 1
#length(delta_exp.c)
#[1] 1438
#length(delta_exp.n)
#[1] 1353
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
#W = 1011256, p-value = 0.03279
#alternative hypothesis: true location shift is greater than 0
# K562 to CH12
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "K562" & gene_hist_annotated_loop_data$class_t1 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "K562" & gene_hist_annotated_loop_data$class_t1 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
# Expected stats:
#length(delta_exp.c)
#[1] 45
#length(delta_exp.n)
#[1] 2746
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 0.000 0.082 1.205 25.942 15.457 3947.808 1
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.00000 0.04127 0.32399 17.46553 7.09201 296.44700
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
#W = 70671, p-value = 0.04806
#alternative hypothesis: true location shift is greater than 0
# CH12 to GM12878
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "CH12" & gene_hist_annotated_loop_data$class_t1 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "CH12" & gene_hist_annotated_loop_data$class_t1 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
# Expected stats:
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.0000 0.1969 1.3902 24.0994 13.6744 935.9437
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.00000 0.05141 0.51529 8.36820 6.31739 134.48700
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
#W = 7963, p-value = 0.01471
#alternative hypothesis: true location shift is greater than 0
# CH12 to K562
tmp.n = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "CH12" & gene_hist_annotated_loop_data$class_t2 != "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
tmp.c = gene_hist_annotated_loop_data[which(gene_hist_annotated_loop_data$cell_q == "CH12" & gene_hist_annotated_loop_data$class_t2 == "C" & ( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) )) ,]
delta_exp.n = unlist(apply(tmp.n, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
delta_exp.c = unlist(apply(tmp.c, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
# Expected stats:
#summary(delta_exp.n)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.0000 0.1587 2.1077 26.1118 22.0336 895.8701
#summary(delta_exp.c)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.0000 0.0445 0.3811 19.1285 11.1703 296.4470
#wilcox.test(delta_exp.n, delta_exp.c, alt="g")
# Wilcoxon rank sum test with continuity correction
#data: delta_exp.n and delta_exp.c
#W = 6743, p-value = 0.02512
#alternative hypothesis: true location shift is greater than 0
# Full delta-TPM comparison graphs for Sup. Figs 13B-C
dat = gene_hist_annotated_loop_data[which( (abs(gene_hist_annotated_loop_data$tss_dist.l) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.r) >= 5000) | (abs(gene_hist_annotated_loop_data$tss_dist.r) <= 1000 & abs(gene_hist_annotated_loop_data$tss_dist.l) >= 5000) ),]
dat$delta_exp.t1 = unlist(apply(dat, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t1.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t1.r"])) ) } } ))
dat$cons.t1 = unlist(apply(dat, 1, function(x) { if (x["class_t1"] == "C") { return("C") } else { return("N") } } ))
dat$delta_exp.t2 = unlist(apply(dat, 1, function(x) { if (x["tss_dist.l"] < x["tss_dist.r"]) { return( abs(as.numeric(x["expr_q.l"]) - as.numeric(x["expr_t2.l"])) ) } else { return( abs(as.numeric(x["expr_q.r"]) - as.numeric(x["expr_t2.r"])) ) } } ))
dat$cons.t2 = unlist(apply(dat, 1, function(x) { if (x["class_t2"] == "C") { return("C") } else { return("N") } } ))
# Compile into a long-format table
pdat = data.frame("cell_q"=dat$cell_q, "cell_t"=dat$cell_t1, "cons"=dat$cons.t1, "delta_exp"=dat$delta_exp.t1, "te_derived"=dat$te_derived, stringsAsFactors=FALSE)
pdat = rbind(pdat, data.frame("cell_q"=dat$cell_q, "cell_t"=dat$cell_t2, "cons"=dat$cons.t2, "delta_exp"=dat$delta_exp.t2, "te_derived"=dat$te_derived, stringsAsFactors=FALSE))
pdat$comp = paste(pdat$cell_q, pdat$cell_t, sep="-to-")
# Sup. Fig. 13B
pdf("delta_exp_cons-v-noncons.full.te.pdf")
x = ggplot(pdat[which(pdat$te_derived==TRUE),], aes(comp))
x = x + geom_bar(aes(y=delta_exp, fill=cons), position="dodge", stat="summary", fun.y="mean", na.rm=TRUE)
x
dev.off()
# Sup. Fig. 13C
pdf("delta_exp_cons-v-noncons.full.non-te.pdf")
x = ggplot(pdat[which(pdat$te_derived==FALSE),], aes(comp))
x = x + geom_bar(aes(y=delta_exp, fill=cons), position="dodge", stat="summary", fun.y="mean", na.rm=TRUE)
x
dev.off()
# Combined plot for Fig. 7A (Created in two parts)
pdat$sp_comp = apply(pdat, 1, function(x, species_idx=c("GM12878"="Human", "K562"="Human", "CH12"="Mouse")) { return(paste(species_idx[x["cell_q"]], species_idx[x["cell_t"]], sep="-")) } )
# Further simplify
pdat[which(pdat$sp_comp == "Human-Mouse"),"sp_comp"] = "Mouse-Human"
# TE-derived panel
pdf("delta_exp_cons-v-noncons.summary.te.pdf")
x = ggplot(pdat[which(pdat$te_derived==TRUE),], aes(sp_comp))
x = x + geom_bar(aes(y=delta_exp, fill=cons), position="dodge", stat="summary", fun.y="mean", na.rm=TRUE)
x + geom_point(data=pdat[which(pdat$te_derived==TRUE & pdat$cons=="C" & pdat$sp_comp=="Mouse-Human"),], aes(sp_comp, delta_exp, color=cons), position=position_jitter(width=0.02, height=0.02))
dev.off()
# Non-TE panel
pdf("delta_exp_cons-v-noncons.summary.non-te.pdf")
x = ggplot(pdat[which(pdat$te_derived==FALSE),], aes(sp_comp))
x = x + geom_bar(aes(y=delta_exp, fill=cons), position="dodge", stat="summary", fun.y="mean", na.rm=TRUE)
x
dev.off()
# Significance tests between sets with expected results and
# observation count.
# Human-Human Non-TE
#wilcox.test(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Human-Human" & pdat$cons=="N"),"delta_exp"], pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Human-Human" & pdat$cons=="C"),"delta_exp"], alt="g")
# Wilcoxon rank sum test with continuity correction
#data: pdat[which(pdat$te_derived == FALSE & pdat$sp_comp == "Human-Human" &# and pdat[which(pdat$te_derived == FALSE & pdat$sp_comp == "Human-Human" & # pdat$cons == "N"), "delta_exp"] and pdat$cons == "C"), "delta_exp"]
#W = 4351314, p-value = 2.849e-06
#alternative hypothesis: true location shift is greater than 0
#nrow(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Human-Human" & pdat$cons=="N"),])
#[1] 3356
#nrow(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Human-Human" & pdat$cons=="C"),])
#[1] 2424
# Human-Human TE-derived
#wilcox.test(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Human-Human" & pdat$cons=="N"),"delta_exp"], pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Human-Human" & pdat$cons=="C"),"delta_exp"], alt="g")
# Wilcoxon rank sum test with continuity correction
#data: pdat[which(pdat$te_derived == TRUE & pdat$sp_comp == "Human-Human" & # and pdat[which(pdat$te_derived == TRUE & pdat$sp_comp == "Human-Human" & # pdat$cons == "N"), "delta_exp"] and pdat$cons == "C"), "delta_exp"]
#W = 243500, p-value = 0.0001866
#alternative hypothesis: true location shift is greater than 0
#nrow(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Human-Human" & pdat$cons=="C"),])
#[1] 450
#nrow(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Human-Human" & pdat$cons=="N"),])
#[1] 973
# Mouse-Human Non-TE
#wilcox.test(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="N"),"delta_exp"], pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="C"),"delta_exp"], alt="g")
# Wilcoxon rank sum test with continuity correction
#data: pdat[which(pdat$te_derived == FALSE & pdat$sp_comp == "Mouse-Human" &# and pdat[which(pdat$te_derived == FALSE & pdat$sp_comp == "Mouse-Human" & # pdat$cons == "N"), "delta_exp"] and pdat$cons == "C"), "delta_exp"]
#W = 755044, p-value = 6.449e-05
#alternative hypothesis: true location shift is greater than 0
#nrow(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="C"),])
#[1] 221
#nrow(pdat[which(pdat$te_derived==FALSE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="N"),])
#[1] 5935
# Mouse-Human TE-derived
#wilcox.test(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="N"),"delta_exp"], pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="C"),"delta_exp"], alt="g")
# Wilcoxon rank sum test with continuity correction]
#data: pdat[which(pdat$te_derived == TRUE & pdat$sp_comp == "Mouse-Human" & # and pdat[which(pdat$te_derived == TRUE & pdat$sp_comp == "Mouse-Human" & # pdat$cons == "N"), "delta_exp"] and pdat$cons == "C"), "delta_exp"]
#W = 8306.5, p-value = 0.1811
#alternative hypothesis: true location shift is greater than 0
nrow(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="N"),])
#[1] 1572
nrow(pdat[which(pdat$te_derived==TRUE & pdat$sp_comp=="Mouse-Human" & pdat$cons=="C"),])
#[1] 9
# Heat map of CAMK2D expression values. (Fig. 7B)
pdf("gene_exp_vals.CAM2KD.pdf")
heatmap.2(as.matrix(cbind(rep(0,3),t(gene_expr_data[which(gene_expr_data$gene_hg19 == "CAMK2D"),c(3,5,4)]))), trace="none", dendrogram="none", Colv=FALSE, col=colorRampPalette(c("white","red")), cexCol=1)
dev.off()
|
30bbd0488f80e8b5da8d38fa18bd38b6b28afe9f | 8744ef81426158f46ad0774cade48ea9362a668e | /CatalogApp_Location/global.R | 8b35463007350c92a7f9245e7d25411d3cd6894c | [
"BSD-3-Clause"
] | permissive | WSWCWaterDataExchange/Apps | 19d794e720e05ba4015af04efd49405b8156e08a | 1809bbfd3d3f8422f7a64372a381c0f1acb28b8a | refs/heads/master | 2020-04-24T16:25:03.032510 | 2019-02-22T17:48:28 | 2019-02-22T17:48:28 | 98,689,871 | 0 | 1 | null | 2017-08-14T06:45:45 | 2017-07-28T21:33:28 | R | UTF-8 | R | false | false | 313 | r | global.R | #Load packages
library(shiny)
library(leaflet)
#Load local data
# Each .RData/.Rdata file restores pre-built objects for the catalog app
# into the global environment; the object names are defined inside the
# files themselves.
load('data/CustomRU_available.Rdata')
load('data/HUC_available.RData')
load('data/CO_available.RData')
load('data/CA.RData')
load('data/DAU.RData')
load('data/HR.RData')
load('data/WA_Cust.RData')
load('data/PA.RData')
load('data/allstates.RData')
|
c0d25eca6a1d5321a56f7ec67d93f4e562fb9b5c | a28d9fa98ac838b82fc7e12c83c8a744990b6645 | /filing_docs/scrape_filing_docs.R | 918d5aaca2485a7f38be3db6d504b540dd2b749b | [] | no_license | vadim-cherepanov/edgar | 4c2937d1ccb7fb5feb9f34cfe75aeb0916f880e0 | bde77940bb1888ea24fdf36d6c2837c447bfd24e | refs/heads/master | 2023-07-15T03:54:08.916039 | 2021-02-13T16:10:58 | 2021-02-13T16:11:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,730 | r | scrape_filing_docs.R | #!/usr/bin/env Rscript
library(dplyr, warn.conflicts = FALSE)
library(DBI)
library(parallel)
# Scrape SEC EDGAR filing documents and append them, in batches of up to
# 1000 filings, to the edgar.filing_docs Postgres table.
target_schema <- "edgar"
target_table <- "filing_docs"
source("filing_docs/scrape_filing_docs_functions.R")
# Connect and point the session at the 'edgar' schema; extra work_mem
# helps the anti-join below.
pg <- dbConnect(RPostgres::Postgres())
rs <- dbExecute(pg, "SET search_path TO edgar, public")
rs <- dbExecute(pg, "SET work_mem = '5GB'")
filings <- tbl(pg, "filings")
# Lazy query: file names of all filings on record.
file_names <-
    filings %>%
    select(file_name)
# On the first run the target table does not exist yet; otherwise only
# filings that have not been scraped already are processed.
new_table <- !dbExistsTable(pg, "filing_docs")
if (!new_table) {
    filing_docs <- tbl(pg, "filing_docs")
    def14_a <- file_names %>% anti_join(filing_docs, by = "file_name")
} else {
    def14_a <- file_names
}
# Pull the next batch of up to 1000 unprocessed file names into memory.
# NOTE(review): collect() re-runs the lazy query; when the table is newly
# created def14_a has no anti-join, so the same 1000 names could be
# returned on every iteration -- confirm the loop terminates in that case.
get_file_names <- function() {
    def14_a %>%
        collect(n = 1000)
}
batch <- 0
new <- lubridate::now()
# Scrape batches until no file names remain: each batch is fetched in
# parallel (8 workers) via filing_docs_df() and appended to filing_docs.
while(nrow(file_names <- get_file_names()) > 0) {
    batch <- batch + 1
    cat("Processing batch", batch, "\n")
    temp <- mclapply(file_names$file_name, filing_docs_df, mc.cores = 8)
    if (length(temp) > 0) {
        df <- bind_rows(temp)
        if (nrow(df) > 0) {
            cat("Writing data ...\n")
            dbWriteTable(pg, "filing_docs",
                         df, append = TRUE, row.names = FALSE)
        } else {
            cat("No data ...\n")
        }
    }
    # Report per-batch wall time.
    old <- new; new <- lubridate::now()
    cat(difftime(new, old, units = "secs"), "seconds\n")
    temp <- unlist(temp)
}
# First-time setup: index, ownership and read grants on the new table.
if (new_table) {
    pg <- dbConnect(RPostgres::Postgres())
    rs <- dbExecute(pg, "SET search_path TO edgar, public")
    rs <- dbExecute(pg, "CREATE INDEX ON filing_docs (file_name)")
    rs <- dbExecute(pg, "ALTER TABLE filing_docs OWNER TO edgar")
    rs <- dbExecute(pg, "GRANT SELECT ON TABLE filing_docs TO edgar_access")
    rs <- dbDisconnect(pg)
}
|
bfecc27b31c25eb7f326dbd1c8468d49b5265912 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.management/man/opsworkscm_delete_server.Rd | bf6f0e23a39f6d9941e6ffcb19ce9050bc5cbb16 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 876 | rd | opsworkscm_delete_server.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opsworkscm_operations.R
\name{opsworkscm_delete_server}
\alias{opsworkscm_delete_server}
\title{Deletes the server and the underlying AWS CloudFormation stacks
(including the server's EC2 instance)}
\usage{
opsworkscm_delete_server(ServerName)
}
\arguments{
\item{ServerName}{[required] The ID of the server to delete.}
}
\description{
Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance). When you run this command, the server state is updated to \code{DELETING}. After the server is deleted, it is no longer returned by \code{DescribeServer} requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
See \url{https://www.paws-r-sdk.com/docs/opsworkscm_delete_server/} for full documentation.
}
\keyword{internal}
|
02f5591166a25262a5fc23f06015499f26e42a16 | 9c2f40ae8269413feb32cffa6d581dfe9f931dd0 | /R/split_data.R | 4101829ea4324be8488e56fd057b3ae5b3b6a1a8 | [
"MIT"
] | permissive | tpetricek/datadiff | ed5ca6cdfe8129ed947c24a42c62ea265aad86ef | 8941269b483da9abcacde804b7f6b6e0a122a57a | refs/heads/master | 2020-07-31T19:09:25.118489 | 2019-09-25T23:55:01 | 2019-09-25T23:55:01 | 210,723,004 | 0 | 0 | MIT | 2019-09-25T00:39:49 | 2019-09-25T00:39:49 | null | UTF-8 | R | false | false | 424 | r | split_data.R | #' Split a data frame into two pieces
#'
#' The data frame is partitioned at random: a fraction \code{split} of the
#' rows (rounded down) goes into the first piece and the remaining rows go
#' into the second. Row order within each piece follows the original order.
#'
#' @param df
#' A data frame.
#' @param split
#' A number in the unit interval specifying the splitting ratio.
#'
#' @return A list containing two data frames: the first with
#' \code{floor(split * nrow(df))} rows, the second with the rest.
#'
#' @export
#'
#' @examples
#' split_data(head(mtcars), split = 1/2)
#'
split_data <- function(df, split) {
  stopifnot(is.numeric(split), length(split) == 1, split >= 0, split <= 1)
  rows <- sort(sample.int(nrow(df), size = split * nrow(df), replace = FALSE))
  ## Negative indexing with an empty index drops *all* rows
  ## (df[-integer(0), ] selects nothing), so handle that case explicitly.
  ## drop = FALSE keeps single-column data frames as data frames.
  if (length(rows) == 0) {
    list(df[rows, , drop = FALSE], df)
  } else {
    list(df[rows, , drop = FALSE], df[-rows, , drop = FALSE])
  }
}
|
e47b8c9eab31c353c6082a5d9115634bf4e94174 | f54870a84073a98c44461fb81c8782a015413823 | /cachematrix.R | 59f85ddbac455c3f981c8b7628a6f36a840e7f7f | [] | no_license | dmungas/ProgrammingAssignment2 | 626ed88f45c859589eb196e429a9000807835088 | 9c7f6bc194beeac1bbf61cf8d7b7c04bdde7ff29 | refs/heads/master | 2021-01-16T00:18:06.605235 | 2015-05-24T18:33:01 | 2015-05-24T18:33:01 | 36,185,814 | 0 | 0 | null | 2015-05-24T18:13:52 | 2015-05-24T18:13:51 | null | UTF-8 | R | false | false | 2,192 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
# These two functions enable calculation and caching of the inverse of an
# invertable square matrix. The inverse of the input matrix is cached the
# first time that the inverse of the matrix is calculated. Subsequent calls for
# the inverse of the matrix retrieve the cached value, so that repeated
# calculation of the inverse is not required.
# To use these functions:
# 1) The input matrix is initiated and stored by the command:
# x <- makeCacheMatrix(<input_matrix_specification>)
# 2) The inverse of the input matrix is obtained by:
# x_inv <- cacheSolve(x)
## Write a short comment describing this function
# This function creates a special matrix with an associated a list of functions that:
# 1) set the value of the matrix, 2) get the value of the matrix,
# 3) set the inverse of the matrix, 4) get the inverse of the matrix
# This special matrix is initialized by:
# x <- makeCacheMatrix()
# Special matrix functions are called using the $ operator,
# e.g. x$set(<matrix_label>)
makeCacheMatrix <- function(x = matrix()) {
        ## Build a "special matrix": a list of closures sharing the stored
        ## matrix 'x' and its cached inverse 'minv'. The default value for
        ## 'x' allows the documented no-argument call makeCacheMatrix()
        ## (see the usage comment above), which previously errored.
        minv <- NULL
        ## Replace the stored matrix and invalidate any cached inverse.
        set <- function(y) {
                x <<- y
                minv <<- NULL
        }
        ## Return the stored matrix.
        get <- function() x
        ## Store a computed inverse in the cache.
        setinv <- function(solve) minv <<- solve
        ## Return the cached inverse (NULL until one has been stored).
        getinv <- function() minv
        list(set = set, get = get,
             setinv = setinv,
             getinv = getinv)
}
## Write a short comment describing this function
# This function inputs a matrix and checks to see if the inverse of the matrix
# has previously be calculated and stored. It uses functions from makeCacheMatrix().
# If the inverse has been calculated and stored, it uses getinv() to retrive
# the inverse and prints the message "getting cached data".
# If the inverse has not been calculated, the inverse is calculated,
# saved using setinv(), and returned.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x', a special matrix
        ## created by makeCacheMatrix(). The inverse is computed at most
        ## once; subsequent calls reuse the cached copy and announce it.
        inv <- x$getinv()
        if (is.null(inv)) {
                ## Cache miss: invert the stored matrix and remember it.
                inv <- solve(x$get(), ...)
                x$setinv(inv)
        } else {
                message("getting cached data")
        }
        inv
}
|
5895a1d7b1b89a73b0fff208947edabfb43a340e | e5a5ba3c849ee058a51a1f5a217e3bca5c1824c1 | /plot_map.R | c981fc931cd9b475e40d64919a4b2d680d0d59cd | [] | no_license | kamarambi/utilities | ebca2832633e38e1073b33e0325d573a40c8df72 | c03cfb2140ff58c47c3367bc7c453477850bc061 | refs/heads/master | 2021-09-13T11:46:54.920589 | 2018-04-29T10:10:11 | 2018-04-29T10:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,845 | r | plot_map.R | plot_map <- function( arr, lev, file=NA, positive=TRUE, toplefttext=NA, toprighttext=NA, color=NA, minval=NA, maxval=NA ){
  ## plot_map body: draw a global map of a half-degree (720x360) or
  ## one-degree (360x180) lon-lat array next to a vertical color key,
  ## optionally writing the figure to a PDF ('file').
  ## NOTE(review): no lon/lat is defined for any other array size, so a
  ## differently shaped 'arr' fails at image() -- confirm inputs upstream.
  require( ncdf4, quietly = TRUE )
  require( fields, quietly = TRUE )
  require( sp, quietly = TRUE )
  require( maptools, quietly = TRUE )
  require( dplyr, quietly = TRUE )
  ## Grid-cell center coordinates matching the array resolution.
  if ( dim(arr)[1]==720 && dim(arr)[2]==360 ){
    ## half degree resolution
    lon <- seq(-179.75, 179.75, 0.5)
    lat <- seq(-89.75, 89.75, 0.5)
  } else if ( dim(arr)[1]==360 && dim(arr)[2]==180 ){
    ## one degree resolution
    lon <- seq(-179.5, 179.5, 1.0 )
    lat <- seq(-89.5, 89.5, 1.0 )
  }
  ## Two-panel layout: wide map panel plus a narrow color-key panel.
  magn <- 4
  ncols <- 2
  nrows <- 1
  widths <- rep(1.6*magn,ncols)
  widths[2] <- 0.15*widths[1]
  heights <- rep(magn,nrows)
  order <- matrix( c(1,2), nrows, ncols, byrow=FALSE)
  ylim <- c(-60,85)
  ## Axis tick positions: labelled major ticks and short minor ticks.
  lat.labels <- seq(-90, 90, 30)
  lat.short <- seq(-90, 90, 10)
  lon.labels <- seq(-180, 180, 60)
  lon.short <- seq(-180, 180, 10)
  ## Degree-symbol axis labels built as plotmath expressions.
  a <- sapply( lat.labels, function(x) bquote(.(x)*degree ~ N) )
  b <- sapply( lon.labels, function(x) bquote(.(x)*degree ~ E) )
  if (!is.na(file)) pdf( file, width=sum(widths), height=sum(heights) )
  panel <- layout(
    order,
    widths=widths,
    heights=heights,
    TRUE
  )
  # layout.show( panel )
  ## Color key
  ## NOTE(review): is.na(color) on a vector yields a vector condition;
  ## this only behaves as intended when 'color' is the scalar NA default.
  if (is.na(color)){
    ## use default colors
    if (positive){
      color <- c( "wheat", "tomato2", "tomato4" )
    } else {
      color <- c( "royalblue4", "royalblue2", "wheat", "tomato2", "tomato4" )
    }
  }
  ## mycolorbar() (defined elsewhere in this project) maps levels 'lev'
  ## onto color bins; plot=FALSE only computes the bins here.
  out.mycolorbar <- mycolorbar( color, lev, orient="v", plot=FALSE, minval=minval, maxval=maxval )
  par( mar=c(3,3,1,1),xaxs="i", yaxs="i",las=1)
  image(
    lon, lat,
    arr,
    ylim=c(-60,85),
    # zlim=range(lev),
    yaxt="n", xaxt="n",
    col=out.mycolorbar$colors, breaks=out.mycolorbar$margins,
    xlab="", ylab=""
  )
  ## Coastlines plus custom axes on all four sides of the map panel.
  map( add=TRUE, interior=FALSE, resolution=0, lwd=0.5 )
  axis( 2, at=lat.labels, lab=do.call(expression,a), cex.axis=0.7, lwd=1.5 )
  axis( 2, at=lat.short, lab=F, lwd=1, tck=-0.01 )
  axis( 4, at=lat.labels, lab=F, lwd=1.5 )
  axis( 4, at=lat.short, lab=F, lwd=1, tck=-0.01 )
  axis( 1, at=lon.labels, lab=do.call(expression,b), cex.axis=0.7, lwd=1.5 )
  axis( 1, at=lon.short, lab=F, lwd=1, tck=-0.01 )
  axis( 3, at=lon.labels, lab=F, lwd=1.5 )
  axis( 3, at=lon.short, lab=F, lwd=1, tck=-0.01 )
  ## Optional corner annotations.
  if (!is.na(toplefttext)) mtext( toplefttext, line=1, adj=0, font=2 )
  if (!is.na(toprighttext)) mtext( toprighttext, line=1, adj=1 )
  ## Color key
  par( mar=c(3,3,1,1),xaxs="i", yaxs="i",las=1)
  out.mycolorbar <- mycolorbar( color, lev, orient="v", plot=TRUE, maxval=1 )
  if (!is.na(file)) dev.off()
}
ea0f5f5921cb85e6d75f99a69fe28a3e0632ab1e | 2cd4292520d1376bbd909c030ae9b4e1ffcf9014 | /Hillot_to_jam.R | e4faf9db08acfc75723bac4b69675da9b16a67b0 | [] | no_license | Sirke/IODS-project | 29b06e6688d12814f28779ba53d53fe85e878c46 | 94163a4c36242d6cb029f1b8aafb298ffb22adf5 | refs/heads/master | 2020-04-02T16:43:14.904162 | 2018-12-06T19:43:18 | 2018-12-06T19:43:18 | 154,625,518 | 0 | 0 | null | 2018-10-25T07:07:01 | 2018-10-25T07:07:01 | null | WINDOWS-1252 | R | false | false | 255 | r | Hillot_to_jam.R | cashflow<-read.csv(file="Sirkenhillot.csv",header=TRUE,sep=";")
# Extract the date and amount columns (Finnish headers: Pvm, Määrä from
# the rename() calls below) and write them out with English names.
library(dplyr)  # needed for select()/rename(); was never attached here
columns_in<-c("Pvm","Määrä")
# all_of() selects columns named by an external character vector
# explicitly (bare external vectors in select() are deprecated).
money_out<-select(cashflow,all_of(columns_in))
money_out<-rename(money_out,amount=Määrä)
money_out<-rename(money_out,date=Pvm)
write.csv(money_out,"Sirkesjam.csv")
|
99ca1d9ff70d5bdaae3379bd266b80d96baf4228 | 459749b2629cf7697105c3e1289b2b875a1d5822 | /R/teams.R | 9485c4ad96b638dcfad6206fada8930e13b15b00 | [] | no_license | GitBrianLaw/rcrunch | 52d0be58a552c9609fb487fdb8de408d081d9577 | f75deb52282175b6f9d4c69db954ee7ea8b89a9b | refs/heads/master | 2021-01-09T08:05:51.008417 | 2018-01-25T22:08:16 | 2018-01-25T22:08:16 | 48,709,208 | 0 | 0 | null | 2016-02-10T19:20:37 | 2015-12-28T19:37:57 | R | UTF-8 | R | false | false | 3,048 | r | teams.R | #' Teams
#'
#' Teams contain users and datasets. You can share a dataset with a group of
#' users by sharing the dataset with a team. You can also share a set of
#' datasets with a user all at once by adding the user to a team that contains those
#' datasets.
#'
#' These methods allow you to work with teams. Find your teams with the
#' [getTeams()] function, which returns your `TeamCatalog`. You can extract an individual team by name,
#' or create a team by assigning into the function. To create a team by assignment, assign a list
#' to `teams("myteam") <- value_list`, the `value_list` can either empty (to just create a team
#' with that name), or can contain a "members" element with the emails or URLs of
#' users to add to the team. Users can be also be added later with the `members<-`
#' method.
#'
#' @param x a `CrunchTeam`
#' @param value for `members<-`, a character vector of emails or URLs of
#' users to add to the team.
#' @return `members` returns a
#' `MemberCatalog`, which has references to the users that are members
#' of the team. `members<-` returns `x` with the given users added
#' to the members catalog.
#' @aliases members members<-
#' @seealso [`getTeams`]
#' @name teams
NULL
#' Retrieve your teams
#'
#' @return A `TeamCatalog`. Extract an individual team by name. Create
#' a team by assigning in with a new name.
#' @seealso [`teams`]
#' @export
getTeams <- function () {
    ## Resolve the session's "teams" endpoint, GET it, and wrap the
    ## response in a TeamCatalog object.
    teams_url <- sessionURL("teams")
    TeamCatalog(crGET(teams_url))
}
#' @rdname catalog-extract
#' @export
## Extract the i-th team from a TeamCatalog as a CrunchTeam entity.
setMethod("[[", c("TeamCatalog", "numeric"), function (x, i, ...) {
    getEntity(x, i, CrunchTeam, ...)
})
#' @rdname catalog-extract
#' @export
## Assigning a list into a TeamCatalog by name creates a new team on the
## server (POST) and optionally seeds its members; modifying an existing
## team's attributes this way is not yet supported.
setMethod("[[<-", c("TeamCatalog", "character", "missing", "list"),
    function (x, i, j, value) {
        if (i %in% names(x)) {
            ## TODO: update team attributes
            halt("Cannot (yet) modify team attributes")
        } else {
            ## Creating a new team
            u <- crPOST(self(x), body=toJSON(list(name=i)))
            x <- refresh(x)
            ## Add members to team, if given
            if (!is.null(value[["members"]]))
                members(x[[i]]) <- value[["members"]]
            return(x)
        }
    })
#' @rdname catalog-extract
#' @export
## Assigning a CrunchTeam back into the catalog is a no-op: the entity's
## state is assumed to have been persisted already by the operations that
## modified it.
setMethod("[[<-", c("TeamCatalog", "character", "missing", "CrunchTeam"),
    function (x, i, j, value) {
        ## TODO: something
        ## For now, assuming that modifications have already been persisted
        ## by other operations on the team entity (like members<-)
        return(x)
    })
#' @rdname teams
#' @export
## Fetch the catalog of users belonging to this team from its
## "members" sub-catalog URL.
setMethod("members", "CrunchTeam", function (x) {
    MemberCatalog(crGET(shojiURL(x, "catalogs", "members")))
})
#' @rdname delete
#' @export
## Delete a team on the server after interactive confirmation; the cached
## parent catalog (one level up from the team URL) is dropped so the
## deletion is visible on the next read. Returns the DELETE response,
## invisibly.
setMethod("delete", "CrunchTeam", function (x, ...) {
    prompt <- paste0("Really delete team ", dQuote(name(x)), "? ",
        "This cannot be undone.")
    if (!askForPermission(prompt)) {
        halt("Must confirm deleting team")
    }
    u <- self(x)
    out <- crDELETE(u)
    dropCache(absoluteURL("../", u))
    invisible(out)
})
|
fcaf9902ec5a439c7e80a08ad477268e0f7dfd93 | 4a0350925583ac1cec0a424ae06e0f35af75ac75 | /misc/annotation.R | 6864a33102d549e6c35e2918edb2641c5383599c | [] | no_license | apatil1/MethylAnalyser | 225b80acb33e993ec337c4b1c713d485043d798c | cd437bc37d91201a2a27a9225744203bc27216db | refs/heads/master | 2021-01-19T21:28:03.022713 | 2015-07-06T14:42:44 | 2015-07-06T14:42:44 | 38,624,366 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,248 | r | annotation.R | ############################################################################################################################################################
#ANNOTATION DATA
#Illumina 450K probe annotation using IlluminaHumanMethylation450k.db
############################################################################################################################################################
library(IlluminaHumanMethylation450k.db)
# Classify 450K probes by CpG-island context (island / shelf / shore) and
# by position relative to gene structure, then build probe-to-annotation
# maps used downstream. All lookups come from the Bioconductor annotation
# package loaded above.
probe.loc.in.gene <- as.list(IlluminaHumanMethylation450kPROBELOCATION) #Map a probe ID with its location in the gene
CpGloc <- as.list(IlluminaHumanMethylation450kCPGILOCATION) #IlluminaHumanMethylation450kCPGILOCATION maps between Illumina probe IDs and the UCSC CpG island features, if any, with which they are associated
island.probes <- unlist(sapply(c(1:length(CpGloc)), FUN=function(i) {return(grep("island" , CpGloc[i], ignore.case=T, value=T))})) #gets island probes
shelf.probes <- unlist(sapply(c(1:length(CpGloc)), FUN=function(i) {return(grep("shelf" , CpGloc[i], ignore.case=T, value=T))})) #gets shelf probes
shore.probes <- unlist(sapply(c(1:length(CpGloc)), FUN=function(i) {return(grep("shore" , CpGloc[i], ignore.case=T, value=T))})) #gets shore probes
# Get position of probes relative to gene structure
unique(sapply(probe.loc.in.gene, FUN=function(i){
  strsplit(i, ":" )[[1]][2]
}))# gets the various positional information relative to the gene structure for the probes in the annotation data.
tss.probes <- unlist(sapply(c(1:length(probe.loc.in.gene)), FUN=function(i) {return(grep("TSS" , probe.loc.in.gene[i], ignore.case=T, value=T))})) #gets probes that are near TSS (200 or 1500 bp near TSS)
body.probes <- unlist(sapply(c(1:length(probe.loc.in.gene)), FUN=function(i) {return(grep("Body" , probe.loc.in.gene[i], ignore.case=T, value=T))})) #gets probes that are in the body of gene
island.tss.probes <- intersect(names(island.probes), names(tss.probes)) #gets probes near TSS and in CpG island
names(island.tss.probes) <- island.tss.probes # this is done because various plot function below require the input as named vectors.
length(names(island.probes)); length(names(tss.probes)); length(island.tss.probes)
# Map infinium probe names to gene symbols
x <- IlluminaHumanMethylation450kSYMBOL
mapped_probes <- mappedkeys(x)
probe.to.genesymbol <- as.list(x[mapped_probes])
head(probe.to.genesymbol)
# Map infinium probes to starting position of the gene (is this TSS?)
x <- IlluminaHumanMethylation450kCHRLOC
mapped_probes <- mappedkeys(x)
probe.to.GeneStart <- as.list(x[mapped_probes])
head(probe.to.GeneStart)
# Map infinium probe names to CpG coordinate
x <- IlluminaHumanMethylation450kCPGCOORDINATE
mapped_probes <- mappedkeys(x)
probe.to.CpG_coordinate <- as.list(x[mapped_probes])
head(probe.to.CpG_coordinate)
# Map infinium probe names to chromosomes
x <- IlluminaHumanMethylation450kCHR37
mapped_probes <- mappedkeys(x)
probe.to.chr <- as.data.frame(x[mapped_probes])
head(probe.to.chr)
# Remove X, Y and MULTI chromosome probes
probe.to.chr <- probe.to.chr[ !probe.to.chr$Chromosome_37 %in% c("X", "Y", ""), ] #removes probes corresponding to X, Y, MULTI and "" chromosomes
unique(probe.to.chr$Chromosome_37)
#save image
# Persist every object built above for reuse by later analysis scripts.
save.image("annotation.RData")
|
15f34dd453f9047da93a28828382cbe4c78a63a0 | 699216784cf1879e7ea3a82d1f6c2a9ccf357d7d | /R/sf.r | f972bac75ccc2d53b57ab0be1f5b10a920784c2d | [] | no_license | cran/vows | 61ce84cebbc7abb489f48aa3f76cdefb16617034 | 3900bad7378e6122c08f7595301d82f37652b681 | refs/heads/master | 2020-12-25T17:34:10.506115 | 2016-08-21T12:22:51 | 2016-08-21T12:22:51 | 17,700,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,101 | r | sf.r | #' Defining smooth functions in semiparametric model formulae
#'
#' This function is called by \code{\link{semipar.mp}} to define B-spline
#' smooths.
#'
#'
#' @param argvals a vector or matrix of covariates.
#' @param effect predictor whose effect varies with respect to \code{argvals}.
#' E.g., if the effect of \code{diagnosis} varies with \code{age}, use
#' \code{sf(age, effect = diagnosis)}. Similar to argument \code{by} in
#' \code{\link[mgcv]{s}}.
#' @param k number of B-spline basis functions.
#' @param norder order of B-splines: the default, \code{4}, gives cubic
#' B-splines.
#' @param pen.order order of the penalty, i.e., of the derivative defining the
#' penalty.
#' @param range.basis a numeric vector of length 2 defining the interval over
#' which the B-spline basis is created. If \code{NULL}, set to the range of the
#' variable.
#' @param knots knots placement method for B-spline smoothing. The default,
#' "quantile", places the knots at equally spaced quantiles of the data;
#' "equispaced" gives equally spaced knots.
#' @author Yin-Hsiu Chen \email{enjoychen0701@@gmail.com} and Philip Reiss
#' \email{phil.reiss@@nyumc.org}
#' @export
sf <- function(argvals, effect=NULL, k = 10, norder = 4, pen.order = 2, range.basis = NULL, knots = "quantile") {
    ## Validate the knot-placement method up front (partial matching is
    ## also accepted); previously an unrecognized value silently left
    ## 'basis' undefined and failed later with a confusing error.
    knots <- match.arg(knots, c("quantile", "equispaced"))
    if (is.null(range.basis)) range.basis = range(argvals)
    ## B-spline basis with knots at data quantiles or equally spaced.
    if (knots == "quantile") basis = create.bspline.basis(range.basis, breaks = quantile(argvals, seq(0,1,length.out=k-norder+2)), norder=norder)
    else basis = create.bspline.basis(range.basis, norder=norder, nbasis = k)
    ## Model matrix: basis functions evaluated at the covariate values; a
    ## varying-coefficient term scales each row by the 'effect' predictor.
    modmat = eval.basis(argvals, basis)
    if (!is.null(effect)) modmat = diag(effect) %*% modmat
    ## Roughness penalty matrix of the requested derivative order.
    penmat = getbasispenalty(basis, pen.order)
    ## Identifiability constraint (column sums) only for a plain smooth.
    constraint = if (is.null(effect)) colSums(modmat) else NULL
    sf.out = list(basis = basis, modmat = modmat, penmat = penmat,
                  constraint = constraint, argvals = argvals, effect = effect, k = k,
                  norder = norder, pen.order = pen.order)
    class(sf.out) = "sf"
    sf.out
}
|
6fa46aafd9242b783129b652dd819ee703ffe53a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/netrankr/examples/exact_rank_prob.Rd.R | 4043c128bd60096c02f1917e7aa9db77a0cf6478 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 385 | r | exact_rank_prob.Rd.R | library(netrankr)
### Name: exact_rank_prob
### Title: Probabilistic centrality rankings
### Aliases: exact_rank_prob
### ** Examples
# Small 5x5 dominance matrix built explicitly (remaining entries zero).
P <- matrix(c(0,0,1,1,1,0,0,0,1,0,0,0,0,0,1,rep(0,10)),5,5,byrow=TRUE)
P
res <- exact_rank_prob(P)
#a warning is displayed if only one ranking is possible
# Larger input: neighborhood-inclusion matrix of a random threshold graph.
tg <- threshold_graph(20,0.2)
P <- neighborhood_inclusion(tg)
res <- exact_rank_prob(P)
|
faf2479049a15a68e834ddb2dea45a252126ee37 | 97e9c55dc2cf1a99b42e5f87aaab5f4378b272cf | /BE/LP1/Data Analytics/Assignment 2. Naive Bayes/R/diabetes.r | a27f9a68686f9e317a10dfe8dc8d2f9f40ef80f6 | [] | no_license | riamittal8/Engineering-Assignments | 1f133971ecedf301fe0416427631436675959f21 | c62a55eaa1abec9854e3facf6743ee168f3c6ab0 | refs/heads/master | 2022-08-11T22:18:15.733246 | 2022-07-20T06:44:11 | 2022-07-20T06:44:11 | 216,047,821 | 0 | 0 | null | 2022-07-20T06:44:12 | 2019-10-18T14:56:09 | Java | UTF-8 | R | false | false | 1,577 | r | diabetes.r | # Download Pima Indians Diabetes dataset.
# Use Naive Bayes Algorithm for classification
# Load the data from CSV file and split it into training and test datasets.
# summarize the properties in the training dataset so that we can calculate probabilities and make predictions.
# Classify samples from a test dataset and a summarized training dataset.
library(e1071) #library required for the naiveBayes function
library(caTools) #library required for the sample.split function
library(caret)  # NOTE(review): caret is loaded but no caret function appears below
setwd("C:\\Users\\DELL\\Desktop\\Sem 7 submissions\\Lab Practise 1\\Data Analytics\\Assignment 2 - naive bayes\\R")
diabetes <- read.csv("PimaIndiansDiabetes.csv")
head(diabetes)
# NOTE(review): no set.seed(), so the split (and all results) differ
# between runs; also sample.split is given the whole data frame rather
# than the outcome column -- verify against caTools::sample.split docs.
diabetes_split <- sample.split(diabetes, SplitRatio = 0.9) #splitting it into training and test data in ratio 0.9
diabetes_train <- subset(diabetes, diabetes_split == TRUE)
diabetes_test <- subset(diabetes, diabetes_split == FALSE)
nb_default <- naiveBayes(Class~.,data = diabetes_train) #training the model with training data to predict the class label taking all other attributes into consideration
nb_predict <- predict(nb_default, newdata = diabetes_test, "raw") #applying the trained model on test data
highest_prob <- as.factor(colnames(nb_predict)[apply(nb_predict, 1, which.max)]) #applying appropriate label based on predicted values
table(highest_prob, diabetes_test[,9]) #displaying table of predicted outcome vs actual outcome
table(highest_prob)
table(diabetes_test[,9])
# Pie chart of predicted class counts, written to disk.
# NOTE(review): the labels assume the factor levels of highest_prob are
# ordered (no diabetes, diabetes) -- confirm.
png(file = "pie90-10.png")
labs <- c("No diabetes","Diabetes")
pie(table(highest_prob),labels = labs)
dev.off()
|
a4f925b61513745f3da92a25636f6d3fdbb1ec81 | 3e7665fc13f600759c95aae56b52bdb8384fe369 | /analysis/multiSpp-singleSea/src/initialize.R | ad10c2783a8a07b0b74664d672e5596bc322fdf6 | [] | no_license | lponisio/hierarchical | 988aa7d02c5c32656a35c13cddb856ab21efd278 | 128fd223bea2af65dab18bbb57df81d777acd916 | refs/heads/master | 2023-01-22T13:53:45.120057 | 2023-01-18T20:31:38 | 2023-01-18T20:31:38 | 66,016,092 | 8 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,742 | r | initialize.R | args <- commandArgs(trailingOnly=TRUE)
if(length(args) == 0){
    ## No command-line arguments: interactive defaults.
    run.models <- FALSE
    make.comp.plots <- FALSE
    mcmc.scale <- 2e2
} else{
    ## NOTE(review): commandArgs() returns character strings, so
    ## run.models / make.comp.plots hold "TRUE"/"FALSE" text here rather
    ## than logicals -- confirm downstream code handles that.
    run.models <- args[1]
    make.comp.plots <- args[2]
    mcmc.scale <- as.numeric(args[3])
}
library(nimble)
library(igraph)
library(reshape)
## Shared plotting/setup helpers and model definitions for this analysis.
source("../all/plotting.R")
source("src/plotting.R")
source("src/setup.R")
source("src/multispeciesOcc.R")
source("src/models.R")
source("src/customSamplerSpec.R")
source("../all/misc.R")
save.dir <- "../../../hierarchical_saved/multiSpp-singleSea/saved"
## Raw multi-species, single-season occupancy inputs.
survey.data <- read.csv("data/occupancy_data.csv")
species.groups <- read.csv("data/species_groups.csv")
survey.dates <- read.csv("data/survey_dates.csv")
habitat <- read.csv("data/habitat.csv")
## mcmc settings
## Burn-in and total iterations scale with mcmc.scale (default 2e2).
burnin <- 1e2*mcmc.scale
niter <- (1e3)*mcmc.scale
runAllMCMC <- function(i, input1, niter, burnin, latent,
                       hyper.param, MCMCdefs){
    ## Run one MCMC comparison for sampler 'i' on the assembled model
    ## input and save the raw samples under a name encoding the model
    ## variant ("hyperparam<...>_latent<...>_sampler<i>.Rdata") inside
    ## the global save.dir.
    print(sprintf("hyperparam%s_latent%s_sampler%s",
                  hyper.param,
                  latent, i))
    ## Scalar set-membership test rather than elementwise '|' in an if()
    ## condition. 'nimble' and 'jags' are built-in MCMCs; anything else
    ## is looked up in the user-supplied MCMCdefs list.
    if(i %in% c("nimble", "jags")){
        ms.ss.samples <- compareMCMCs(input1,
                                      MCMCs=i,
                                      niter=niter,
                                      burnin = burnin,
                                      summary=FALSE,
                                      check=FALSE)
    } else{
        ms.ss.samples <- compareMCMCs(input1,
                                      MCMCs=i,
                                      MCMCdefs = MCMCdefs[i],
                                      niter=niter,
                                      burnin = burnin,
                                      summary=FALSE,
                                      check=FALSE)
    }
    save(ms.ss.samples,
         file=file.path(save.dir,
                        sprintf("hyperparam%s_latent%s_sampler%s.Rdata",
                                hyper.param,
                                latent, i)))
}
runAllModels <- function(latent, hyper.param, niter, burnin,
                         MCMCs, MCMCdefs){
    ## Assemble the occupancy data and model code for this variant, then
    ## run every requested sampler on it via runAllMCMC().
    data.input <- prepMutiSpData(survey.data,
                                 survey.dates,
                                 species.groups,
                                 habitat,
                                 n.zeroes =0, ## don't augment data
                                 remove.zs=!latent,
                                 hyper.param=hyper.param)
    occ.code <- makeModel(latent, hyper.param)
    full.input <- c(code=occ.code, data.input)
    ## One compareMCMCs run (and one saved .Rdata file) per sampler.
    lapply(MCMCs, runAllMCMC, full.input, niter, burnin, latent,
           hyper.param, MCMCdefs)
}
|
98816fd0ce0f7e6281ef1a29828c0dc943bbeb41 | e7473a6b1850f94f0901cf20c89ac0151db9029f | /topnet_data_service/TOPNET_Function/RCODE/get_USGS_streamflow.r | 6bdafa382221633c1446d86c263d93704e13a889 | [] | no_license | prasanna310/hydrods-dev | d3da8c4490d60d4702137f857638c78130d892db | d5e18cb69aa9054b170c7100744a8f2c7cc178b1 | refs/heads/master | 2021-09-06T17:39:14.772404 | 2018-02-09T03:50:27 | 2018-02-09T03:50:27 | 111,299,830 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,174 | r | get_USGS_streamflow.r | args<-commandArgs(TRUE)
# args <- c(USGS_gage, startYear, EndYear, output_directory, output_fname)
# args <- c('10109001', '2010', '2011', '.', 'streamflow_calibration.dat')
print ('Progress --> About to execute get_USGS_streamflow.r ')
print (args)
require(zoo)
require(XML)
require(RCurl)
require(dataRetrieval)
#Get daily data
setwd(args[4])
siteNumber=args[1]
# NOTE(review): as.Date() requires full dates such as "2010-01-01"; the
# bare years shown in the example comment above would fail here --
# confirm the expected argument format.
dates=seq(as.Date(args[2]), as.Date(args[3]), "day")
print("start Downloading")
# Daily mean discharge (USGS parameter code 00060) for the gauge.
streamflow_Daily = readNWISdv(siteNumber,"00060",args[2],args[3]) # THIS command/function seem to have changed!
streamflow=data.frame(streamflow_Daily )
print(streamflow)
daily_streamflow=streamflow[,4]*0.0283168466 # convert to cfs to m3/s
# Align downloaded days to the requested date sequence (gaps become NA).
match_index=match(dates,streamflow_Daily$Date)
match_flow=data.frame(daily_streamflow[match_index])
print("Finish Downloading")
##there are some missing data match_index=match(dates,streamflow_Daily$Date)
# Format numeric flows to fixed width; missing days are flagged as -999.
match_flow[] <- lapply(match_flow, function(.col){ if (is.numeric(.col)) return(sprintf("%8.2f",.col))else return(.col)})
streamflow=data.matrix(match_flow)
streamflow[is.na(streamflow)] <- -999
# Dates as YYYYMMDD plus a constant 240000 (24:00:00) hour column.
strDates= as.character(dates)
gh=gsub("-","", strDates, fixed=TRUE)
dss=data.frame(time=gh)
hr=rep.int(240000, nrow(dss))
observed_flow=data.frame(streamflow,dss[,1],hr)
print("start Writing")
filename=args[5]
# Write the header lines followed by the flow table to the output file.
# NOTE(review): sink(filename) plus file=filename, append=TRUE on every
# cat() is redundant -- the explicit file= targets make the sink a no-op
# for these writes; confirm which mechanism is intended.
sink(filename)
cat(sprintf("This file provides mean daily values of streamflow"),file=filename,append=TRUE)
cat("\n", file=filename, append=TRUE)
cat(sprintf("Flow values are provided in m3/sec"),file=filename,append=TRUE)
cat("\n", file=filename, append=TRUE)
cat(sprintf(paste("USGS gauge number",as.character(siteNumber),sep="")),file=filename,append=TRUE)
cat("\n", file=filename, append=TRUE)
sites=seq(1,length(siteNumber),1)
cat(sprintf("%s %d ", "ver2",length(siteNumber)),file=filename,append=TRUE)
cat(sprintf( "%d", sites),(sprintf( "%s", "Date Hour","\n")),file=filename,append=TRUE)
cat("\n", file=filename, append=TRUE)
write.table(observed_flow, file = filename,row.names=FALSE,col.names=FALSE,quote=FALSE,append=TRUE)
sink()
print("Finish Writing")
#
#
# args<-commandArgs(TRUE)
#
# require(zoo)
# require(XML)
# require(RCurl)
# require(dataRetrieval)
#
# #Get daily data
# setwd(args[4])
# siteNumber=args[1]
# dates=seq(as.Date(args[2]), as.Date(args[3]), "day")
# print("start Downloading")
# streamflow_Daily = readNWISdv(siteNumber,"00060",args[2],args[3])
# streamflow=data.frame(streamflow_Daily )
# print(streamflow)
# daily_streamflow=streamflow[,4]*0.0283168466 # convert to cfs to m3/s
# match_index=match(dates,streamflow_Daily$Date)
# match_flow=data.frame(daily_streamflow[match_index])
#
# print("Finish Downloading")
# ##there are some missing data match_index=match(dates,streamflow_Daily$Date)
# match_flow[] <- lapply(match_flow, function(.col){ if (is.numeric(.col)) return(sprintf("%8.2f",.col))else return(.col)})
# streamflow=data.matrix(match_flow)
# streamflow[is.na(streamflow)] <- -999
#
# strDates= as.character(dates)
# gh=gsub("-","", strDates, fixed=TRUE)
# dss=data.frame(time=gh)
# hr=rep.int(240000, nrow(dss))
# observed_flow=data.frame(streamflow,dss[,1],hr)
# print("start Writing")
#
# sink('streamflow_calibration.dat')
# cat(sprintf("This file provides mean daily values of streamflow"),file='streamflow_calibration.dat',append=TRUE)
# cat("\n", file="streamflow_calibration.dat", append=TRUE)
# cat(sprintf("Flow values are provided in m3/sec"),file='streamflow_calibration.dat',append=TRUE)
# cat("\n", file="streamflow_calibration.dat", append=TRUE)
# cat(sprintf(paste("USGS gauge number",as.character(siteNumber),sep="")),file='streamflow_calibration.dat',append=TRUE)
# cat("\n", file="streamflow_calibration.dat", append=TRUE)
#
# sites=seq(1,length(siteNumber),1)
# cat(sprintf("%s %d ", "ver2",length(siteNumber)),file='streamflow_calibration.dat',append=TRUE)
# cat(sprintf( "%d", sites),(sprintf( "%s", "Date Hour","\n")),file='streamflow_calibration.dat',append=TRUE)
# cat("\n", file="streamflow_calibration.dat", append=TRUE)
# write.table(observed_flow, file = "streamflow_calibration.dat",row.names=FALSE,col.names=FALSE,quote=FALSE,append=TRUE)
# sink()
#
# print("Finish Writing") |
4362829dc60c28556c36af3e33d85742a72a584c | adb0614b2ec762471e5961cc61a191bca31b9006 | /R/similarity.R | ce2eeb187499b7ed84265a647f130af1601e1504 | [] | no_license | cran/multigroup | 1bc148965caa412353bd1e40e8546a7b82308953 | 32302a6735e330c0c9ae90ded31a3fb84e8aa1a4 | refs/heads/master | 2021-01-25T08:54:27.665530 | 2020-02-23T16:50:05 | 2020-02-23T16:50:05 | 17,697,738 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 795 | r | similarity.R | #' @title similarity function
#' @description
#' To calculate similarity among common and group loadings
#' @param loadings_matrices a list of loading matrices (one per group),
#' all with the same number of columns
#' @param NAMES group names used to label the rows and columns of each
#' similarity matrix
#' @return a list of length \code{ncol(loadings_matrices[[1]])}; element
#' \code{h} is the pairwise group-similarity matrix based on the first
#' \code{h} components
#' @export
#' @keywords internal
similarity_function<-function(loadings_matrices, NAMES){
  nb = length(loadings_matrices)
  H = ncol(loadings_matrices[[1]])
  sim = vector("list", H)
  for(h in 1:H){
    ## Diagonal entries start at h so that MM/h has a unit diagonal.
    MM = matrix(h, nrow=nb, ncol=nb)
    ## Guard: with a single group, 1:(nb-1) would run backwards (1:0)
    ## and index a non-existent second matrix.
    if (nb > 1) {
      for(aa in 1:(nb-1)){
        for(bb in (aa+1):nb){
          ## Accumulate |<a_i, b_i>| over the first h components.
          cc = 0
          for(i in 1:h){
            cc = cc + abs(as.numeric(t(loadings_matrices[[aa]][,i]) %*% loadings_matrices[[bb]][,i]))
          }
          MM[aa,bb] = cc
          MM[bb,aa] = MM[aa,bb]
        }
      }
    }
    ## Average over components and label rows/columns by group name.
    MM = MM/h
    colnames(MM) = NAMES
    rownames(MM) = NAMES
    sim[[h]] = round(MM, 3)
  }
  return(sim)
}
|
7eaac7c1a34275a06df82812922bb3b260325685 | ce96086358022c0469d69cf49e6689869c25699a | /programs/merge_inertial_signals.R | fec8d82be052c78f7bde0429a20d149120798950 | [] | no_license | coursera-ricardor/getdata-009 | b0b730bfff1fd691a597b65de7f3cd5364c7201b | 5157ad1a66f689d3188a9085717603b60d75d31e | refs/heads/master | 2021-01-10T21:05:53.227776 | 2014-11-26T20:05:11 | 2014-11-26T20:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,322 | r | merge_inertial_signals.R | # Program: merge_inertial_signals.R
# Author: Ricardo Rodriguez 2014/11
#
# Code to merge the files contained in the
# directories:
# Basic string with generic value (XXXXXXXX) changable to: train, test
# "UCI HAR Dataset/XXXXXXXX/Inertial Signals/body_acc_x_XXXXXXXX.txt"
#
# =================================================================================
# For every "Inertial Signals" file pattern in vMerge1, read the test and
# train variants, prepend subject/activity id columns, and write out one
# merged file per signal.
# NOTE(review): vMerge1, dDataDir, dWorkingDir, I_temp and writeMyndf()
# must be supplied by the calling environment -- they are not defined in
# this script.
for ( eachFileName in vMerge1[grep("Inertial Signals",vMerge1)] ) {
    # Assemble the File Names
    #
    # Notation:
    #      window[1..128].inertial_signal.[filename]
    field_names_file <- paste0( ".inertial_signal.",
                make.names( basename( gsub("_XXXXXXXX.txt","",eachFileName) ) ) )
    field_names_file <- paste0("window_",c(1:128),field_names_file)
    #
    # load Data Frames Subject / Activity per group
    #
    df_merge <- NULL
    test_group <- c("test","train")
    for ( ch_Origin in test_group ) {
        #
        # 4 next versions ( improve this readings) - load once use many -
        #
        cFile <- paste0(dDataDir,"/","UCI HAR Dataset/",ch_Origin,"/subject_",ch_Origin,".txt")
        df_subject <- read.table(cFile, sep = "", col.names = c("subject_id"))
        cFile <- paste0(dDataDir,"/","UCI HAR Dataset/",ch_Origin,"/y_",ch_Origin,".txt")
        df_activity <- read.table(cFile, sep = "", col.names = c("activity_id"))
        #
        # Read first file to merge
        #
        vt1File <- paste0( dDataDir,"/",gsub("XXXXXXXX",ch_Origin,eachFileName))
        if (file.exists( vt1File ) ) {
            df_File <- read.table(vt1File, sep = "", col.names = field_names_file)
            #
            # Creates the Data Frame (rows of both groups stacked)
            #
            df_merge <- rbind(df_merge, cbind(df_subject,df_activity,df_File) )
        } # end if file.exist
    }
    if (! is.null(df_merge ) ) {
        #
        # Hardcode pattern _XXXXXXXX defines the result merging filename
        #
        vtMFile <- paste0( dWorkingDir,I_temp,"/",gsub("XXXXXXXX","merged",eachFileName))
        writeMyndf(df_merge,vtMFile)
    } # end if df_merge
} # end for merging
# Clean up loop temporaries from the global environment.
rm("cFile","df_File","df_merge","vtMFile","ch_Origin")
rm("vt1File","df_activity","df_subject")
rm("field_names_file","test_group")
rm("eachFileName","eachDirName")
|
749912246396a1be890a4dab731cc7830b97924e | 094ba2cef85a502521fb4d3d7d2c09127591c906 | /3_vector_array.R | af872267b9160e63227fa1b1c929616b9675d282 | [] | no_license | SaadAhmedSalim/R-programming-Language | 931ba9069620fef449025f59f6e6a8ca972b44cb | 4745c444d2756b9b1c0c5d1e916c3cc5695d6f09 | refs/heads/main | 2023-04-19T03:41:57.677803 | 2021-04-25T07:12:06 | 2021-04-25T07:12:06 | 359,699,237 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,440 | r | 3_vector_array.R | myFirstVector <- c(3, 45,22, 345)
## Beginner tutorial script: vectors, sequences, replication, indexing,
## loops vs vectorized arithmetic, and assorted built-ins.
## (myFirstVector is created on the preceding line of this file.)
myFirstVector
is.numeric(myFirstVector)
is.integer(myFirstVector)
is.double(myFirstVector)
# The L suffix makes integer (not double) values.
mysecondVector <- c(3L, 23L, 321L)
is.numeric(mysecondVector)
is.integer(mysecondVector)
is.double(mysecondVector)
mythirdVector <- c("o","432","Hello","Mr.White")
mythirdVector
is.character(mythirdVector)
is.numeric(mythirdVector)
# Mixing types in c() coerces everything to character.
myforthVector <- c("Po",9)
myforthVector
# NOTE(review): the bare rep() calls below error when executed; they read
# as prompts to inspect each function interactively (e.g. ?rep).
seq()
rep()
seq(1,15)
1:15
seq(1,15,2)
z <- seq(1,15,4)
z
rep(3,50)
d <- rep(3,50)
rep("a",5)
x <- c(80,20)
y <- rep(x,10)
y
x <- c(1,123,432,34,5) #combine
y <- seq(201,250,11) #sequence
z <- rep("Hi!",3) #replicate
# Vector indexing: positive, negative, ranges, out-of-range indices.
w <- c("a","b","c")
w
w[1]
w[2]
w[-1]
w[-2]
v <- w[-3]
w[1:3]
w[c(1,3,5)]
w[c(-2,-4)]
w[1:2]
x <- rnorm(5)
x
# R-specific programming loop
for(i in x){
  print(i)
}
print(x[1])
#conventional programming loop
for(j in 1:5){
  print(x[j])
}
#------------
N <- 1000
a <- rnorm(N)
b <- rnorm(N)
#vectorized approach
# NOTE(review): assigning to 'c' shadows base::c for the rest of the
# session.
c <- a*b
#de-vectorized approach
d <- rep(NA,N)
for(i in 1:N){
  d[i] <- a[i] * b[i]
}
#----------- function
rnorm()
rnorm(n=5, sd=8)
c()
seq()
seq(from=10, to=20, length.out=100)
rep()
x <- c("a","b","c")
seq(from=10, to=20, along.with=x)
rep(x, times=10)
A <-c(1,2,3,4,5)
B <- sqrt(A)
B
print()
is.numeric()
is.integer()
is.double()
is.character()
typeof()
sqrt()
paste()
ad860f55b6e6db5c3b2caf8e0299acf2f89f97b0 | 5eee695aa266ae3c54dda11e2731082295f82213 | /plot2.R | 2076cd83aa445fb0f4b6b9f913e58eff9dbb3696 | [] | no_license | B87/ExData_Plotting1 | 59e8c8271264ba83c81a6fa39e3f30b23eae9f3f | b08911b7a44832dbb671596f85736a3cf06fa5ac | refs/heads/master | 2021-01-15T08:37:49.737284 | 2015-07-09T15:55:36 | 2015-07-09T15:55:36 | 38,692,352 | 0 | 0 | null | 2015-07-07T14:17:35 | 2015-07-07T14:17:35 | null | UTF-8 | R | false | false | 1,274 | r | plot2.R | ## GETTING THE TIDY DATA
## Exploratory Data Analysis course project: load the household power data
## and prepare the Date column before subsetting below.
##set directory
setwd("C:/Users/Bernat/Desktop/MOOCS/Exploratory_Data_Analysis/exdata-data-household_power_consumption")
#read data
# NOTE(review): missing values in this file are presumably coded as "?";
# na.strings is not set, so the numeric conversions further down rely on
# as.numeric() turning them into NA (with warnings) -- confirm against data.
rawdata<-read.table("household_power_consumption.txt", header=TRUE, sep=";"
                    ,stringsAsFactors=FALSE)
#transform Date into date class
rawdata$Date<-as.Date(rawdata$Date, format = "%d/%m/%Y")
## Subset from 1/2/2007 to 2/2/2007
# Return the rows of the global `rawdata` whose Date lies in [x, y]
# (both endpoints inclusive). x and y are Date objects.
subseting <- function(x, y) {
  in_window <- rawdata$Date >= x & rawdata$Date <= y
  rawdata[in_window, ]
}
# Query window: 1-2 Feb 2007, inclusive.
x<- as.Date("2007-02-01")
y<- as.Date("2007-02-02")
tidydata<-subseting(x,y)
##transform Time into a time class
# Combine the Date and Time columns into a full POSIXct timestamp.
tidydata$Time<-as.POSIXct(paste(tidydata$Date,as.character(tidydata$Time)))
## Transform factor variables to numeric ones
# (With stringsAsFactors=FALSE above these are character, not factor;
# non-numeric entries such as "?" become NA with a warning.)
tidydata$Global_active_power<-as.numeric(as.character(tidydata$Global_active_power))
tidydata$Sub_metering_1<-as.numeric(tidydata$Sub_metering_1)
tidydata$Sub_metering_2<-as.numeric(tidydata$Sub_metering_2)
tidydata$Sub_metering_3<-as.numeric(tidydata$Sub_metering_3)
tidydata$Global_reactive_power<-as.numeric(tidydata$Global_reactive_power)
## PLOT
# English locale so the weekday axis labels are not localised.
Sys.setlocale("LC_TIME", "English")
plot(tidydata$Time,tidydata$Global_active_power,type="l", xlab="", ylab="Global Active Power (kilowatts)")
## Create a png
# Copy the on-screen plot to plot2.png and close the png device.
dev.copy(png,file="plot2.png")
dev.off()
|
93aba3c9a7b4b850fce5fe49dda7fc2d3483a767 | 1d853247a544ddbba2e48a04706221f99af3640c | /man/geom_relations.Rd | 62c5020e86ec89486e13ac6bd0eb81bb2b767d1a | [
"MIT"
] | permissive | junkka/histmaps | 14353a891aa9cb4c313f2521a261381ed209c637 | bd7c3f876cd2b13542da6db83eca1eeb4f50bbf6 | refs/heads/master | 2022-04-28T18:38:08.170827 | 2022-04-14T12:37:19 | 2022-04-14T12:37:19 | 44,126,092 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 544 | rd | geom_relations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{geom_relations}
\alias{geom_relations}
\title{Geographical relatives of units}
\format{
A \code{tibble} with 19958 rows and 5 columns
\describe{
\item{g1}{geom_id of unit 1}
\item{g2}{geom_id of unit 2}
\item{relation}{Type of relation "pre" or "succ"}
\item{year}{Year of relationship}
\item{type_id}{Unit type id}
}
}
\usage{
data(geom_relations)
}
\description{
Relations between units, succeeding and preceding units.
}
|
78b7455ff7fecfa878fb2149bd97d38229240692 | ac79d0e10669802dc128caf6f1c935a543b71a72 | /scripts/rhizoFuncs/man/make.datetimes.Rd | d6feba4447ac84aecaa3c8f4b58f1031c7e4d170 | [] | no_license | infotroph/efrhizo | 2ab0f931a8fe9cbcbede794aca539fc9120526e9 | 8783e81d50aa2d7a09fff7cd3456c7728161653b | refs/heads/master | 2020-12-26T04:15:27.176785 | 2017-06-27T03:21:58 | 2017-06-27T03:21:58 | 46,886,036 | 1 | 1 | null | 2016-08-05T06:56:26 | 2015-11-25T20:52:08 | Max | UTF-8 | R | false | false | 2,032 | rd | make.datetimes.Rd | \name{make.datetimes}
\alias{make.datetimes}
\title{
Mash WinRhizo dates and times together into a POSIXct datetime
}
\description{
Given a dataframe with a Date ("2014.07.01") and a Time ("121505") in WinRhizo's weird formats, paste them together into a POSIXct timestamp.
}
\usage{
make.datetimes(df)
}
\arguments{
\item{df}{A dataframe produced from a WinRhizo datafile.}
}
\details{
The current implementation performs the following conversions:
Coerces column "Time" to character, then pads times <10:00 so that \code{strptime} has a consistent string length to work from.
Coerces columns "Date" and "MeasDate" to type \code{Date}.
Creates new columns "DateTime" and "MeasDateTime" of type \code{POSIXct}.
Column "MeasTime" is neither coerced to a time object nor checked for correct formatting -- this should probably be changed eventually.
}
\value{
Dataframe, changed from the input dataframe in the following ways:
\item{Time}{Converted to character and leading zeroes added where previously missing.}
\item{Date, MeasDate}{Converted to POSIXct.}
\item{DateTime, MeasDateTime}{New columns, POSIXct.}
}
\author{
Chris Black \email{chris@bodger.org}
}
\note{
This function only works on freshly read dataframes. It will fail if called on an object of its own creation, because Date and MeasDate formats have changed!
}
\seealso{
\code{\link{DateTimeClasses}}
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (df)
{
within(df, {
Time = as.character(Time)
Time = ifelse(nchar(Time) < 6, paste0(0, Time), Time)
DateTime = as.POSIXct(paste(Date, Time), format = "\%Y.\%m.\%d \%H\%M\%S")
Date = as.Date(Date, format = "\%Y.\%m.\%d")
MeasDateTime = as.POSIXct(paste(MeasDate, MeasTime),
format = "\%m/\%d/\%Y \%H:\%M:\%S")
MeasDate = as.Date(MeasDate, format = "\%m/\%d/\%Y")
})
}
}
|
c82144f1785e0e1cf2d913bf136157ccc5ace16b | ecb7a5e998f523fbeec64d5430aa64dffb9d55fc | /R/csbc_app_server.R | 83aad675ae500db49215e6660da0ebdce12c273f | [
"MIT"
] | permissive | jaybee84/projectlive.modules | 96037952553afcdf2663d466f41658666f5260e2 | b9d56f1a6938cae1528e730323da93bf7dbab646 | refs/heads/main | 2023-08-02T14:26:00.139505 | 2021-04-23T14:38:37 | 2021-04-23T14:38:37 | 361,023,102 | 0 | 0 | NOASSERTION | 2021-04-23T22:50:29 | 2021-04-23T22:50:29 | null | UTF-8 | R | false | false | 564 | r | csbc_app_server.R | csbc_server <- function(input, output, session) {
  # Shiny server for the CSBC dashboard: fetch the CSBC data once as a
  # reactive, then wire it into the three display modules, each paired with
  # its own reactive configuration.
  data <- shiny::reactive(get_csbc_data())
  # Summary snapshot module.
  summary_snapshot_module_server(
    id = "summary_snapshot_module",
    data = data,
    config = shiny::reactive(get_csbc_summary_snapshot_config())
  )
  # Publication status module.
  publication_status_module_server(
    id = "publication_status_module",
    data = data,
    config = shiny::reactive(get_csbc_publication_status_config())
  )
  # Per-study summary module.
  study_summary_module_server(
    id = "study_summary_module",
    data = data,
    config = shiny::reactive(get_csbc_study_summary_config())
  )
}
|
2804eeff2df3b2d5b71367593ead7090609a850f | 8ae43084c27f20cd73a20946755a55d075997bd6 | /using-dplyr.R | 080159a784d6728734dfef4754fd09429cef7c66 | [] | no_license | davidenoma/R-Language-scripts | 290f991e6b6719ede8b936143991ed8322bf8872 | 5034f108ad22db84ee78e9c3d14f7df83c9285df | refs/heads/master | 2023-02-21T01:20:24.238515 | 2023-02-15T18:52:02 | 2023-02-15T18:52:02 | 240,453,728 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 819 | r | using-dplyr.R |
# Walkthrough of the core dplyr single-table verbs (select, filter, arrange,
# rename, mutate) on the Chicago air-pollution dataset, following the
# bookdown chapter linked at the end of the file. Assumes dplyr is attached.
chicago <- readRDS('chicago.rds')
# Inspect dimensions, structure and column names.
dim(chicago)
str(chicago)
names(chicago)
# select(): keep the columns from city through date.
# NOTE(review): assigning to `subset` masks base R's subset() function.
subset <- select(chicago, city:date)
head(subset)
# filter(): rows with PM2.5 above 30, then additionally temperature above 80.
chic.f <- filter(chicago,pm25tmean2 > 30)
str(chic.f)
chic.f <- filter(chicago, pm25tmean2 > 30 & tmpd > 80)
select(chic.f, date, tmpd, pm25tmean2)
# arrange(): sort rows by date (ascending by default).
chicago <- arrange(chicago, date)
head(select(chicago, date, pm25tmean2), 3)
#sorting the dates in descending order
chicago <- arrange(chicago, desc(date))
# rename(): new_name = old_name.
chicago <- rename(chicago, dewpoint = dptp, pm25 = pm25tmean2)
head(chicago[, 1:5], 3)
#with air pollution data, we often want to detrend the data by subtracting the mean from the data.
chicago <- mutate(chicago, pm25detrend = pm25 - mean(pm25, na.rm = TRUE))
#https://bookdown.org/rdpeng/rprogdatascience/managing-data-frames-with-the-dplyr-package.html |
3b13f98a103bad6f0016aca64f987abbe5424ae8 | a177e3e6a34df54f6b72b18f07d4225234fed520 | /CodeR_1.R | 3640c4289b943f3a9b2a18472a1a00eec22e562f | [] | no_license | githubfun/pms | 81787bebd3c0d50c192b05a30fe788505b908af9 | b2de3cb85c37c29b40aab3196dcb551a7a3d2c89 | refs/heads/master | 2017-12-03T02:04:20.450005 | 2016-02-18T07:03:55 | 2016-02-18T07:03:55 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,251 | r | CodeR_1.R | setwd("~/MyFile") #Dossier emplacement à définir
# Mean-variance portfolio optimisation set-up for CAC40 (and optionally DAX)
# returns: load the return and sector data, build the covariance matrix and
# the constraint matrices/bounds consumed by NAG's e04nf QP solver below.
PathCAC="returns1.csv"
PathDAX="returns DAX.csv"
SectorCAC="CAC40 sectors.csv"
SectorDAX="DAX30 sectors.csv"
returns=read.csv(file=PathCAC, header=TRUE ,sep=";")
A=read.csv(file=SectorCAC, header=TRUE ,sep=";")
X=as.matrix(returns)
n=ncol(returns)
# M scales the position bounds; bigbnd plays the role of an "infinite" bound.
M=20*10^6
bigbnd=1*10^6
# Regularisation exponents: the covariance is scaled by 10^lambda in the loop.
list_lambda=seq(-5,5,0.25)
# Result matrices: one column per lambda; rows hold return, risk, objective,
# then the n optimised weights.
R=matrix(0,ncol=length(list_lambda),nrow=3+n)
R_2=matrix(0,ncol=length(list_lambda),nrow=3+n)
i=0
# Sector-exposure bounds of -1/5 .. +1/5, used in the constrained variant.
ll=-1/5
uu=1/5
COR=cov(returns) #Our covariance matrix
H=as.matrix(COR)
B=as.matrix(cor(returns))
#is.positive.definite(H)
#det(H)
# Linear constraint matrix: the transposed sector table (A without its first
# column) plus a final all-ones row.
# NOTE(review): the nrow/ncol arguments passed to as.matrix() here are
# silently ignored -- the shape comes from rbind().
aa=as.matrix(rbind(as.matrix(t(A[-1])),matrix(1,nrow=1,ncol=n)),nrow=11,ncol=40)
a=matrix(1,nrow=1,ncol=n)
# Bounds: first n entries bound the weights, the next 10 bound the sector
# exposures (bl2/bu2 only), and the final 0 pins the all-ones row, i.e. the
# weights sum to 0 (a dollar-neutral portfolio).
bl2=matrix(rbind(matrix(-bigbnd/M,nrow=n,ncol=1),matrix(ll,nrow=10,ncol=1),0),nrow=n+11,ncol=1)
bu2=matrix(rbind(matrix(bigbnd/M,nrow=n,ncol=1),matrix(uu,nrow=10,ncol=1),0),nrow=n+11,ncol=1)
bl=matrix(rbind(matrix(-bigbnd/M,nrow=n,ncol=1),0),nrow=n+1,ncol=1)
bu=matrix(rbind(matrix(bigbnd/M,nrow=n,ncol=1),0),nrow=n+1,ncol=1)
istate=as.matrix(mat.or.vec(n+1,1))
x=matrix(0,nrow=n,ncol=1)
#We determine some options to the optimizer
optlist=list("Feasibility Phase Iteration Limit"=300,
             "Optimality Phase Iteration Limit"=300,"Iteration Limit"=300)
#We add a control function qphess in case of a singular covariance matrix
# Hessian-product callback for NAG's e04nf QP solver, robust to a singular
# covariance matrix. The solver calls it to compute H %*% x.
#
# Args (signature fixed by the e04nf callback contract):
#   n      - number of variables (length of x).
#   jthcol - column index requested by the solver (unused: the full product
#            is always returned).
#   H      - the Hessian / covariance matrix, or a factor of it.
#   x      - current point, an n x 1 matrix.
#   iwsav  - NAG integer save array; element 365 apparently encodes how the
#            Hessian was supplied (see the e04nf docs) -- TODO confirm.
#
# Returns list(HX = n x 1 product matrix, IWSAV = the save array).
qphess <- function(n, jthcol, H, x, iwsav) {
  hessian_form <- iwsav[365]
  if (hessian_form %in% c(3, 4)) {
    # H holds the Hessian directly.
    hx <- H %*% x
  } else if (hessian_form %in% c(5, 6)) {
    # H holds a factor F of the Hessian, so the Hessian is t(F) %*% F.
    hx <- t(H) %*% H %*% x
  } else {
    # Unknown form: fall back to a zero vector of the right shape.
    hx <- as.matrix(mat.or.vec(n, 1))
  }
  list(HX = as.matrix(hx), IWSAV = as.matrix(iwsav))
}
# Sweep the regularisation parameter lambda: for each value, rescale the
# covariance matrix and solve the two QP variants with NAG's e04nf, storing
# the optimal return, risk, objective and weights in R (weight bounds only,
# bl/bu) and R_2 (weight + sector bounds, bl2/bu2).
# The fit variables replace the original names T/T_2, which masked base R's
# TRUE alias T -- a classic footgun.
for(lambda in list_lambda)
{
  Cov <- 10^(lambda) * H
  cvec <- as.matrix(-t(X[1, ]))
  fit1 <- e04nf(aa, bl, bu, cvec, Cov, qphess, istate, x, optlist)
  fit2 <- e04nf(aa, bl2, bu2, cvec, Cov, qphess, istate, x, optlist)
  i <- i + 1
  # Row 1: portfolio return; row 2: portfolio variance w'Hw; row 3: the QP
  # objective; rows 4..(n+3): the optimised weight vector.
  R[1, i] <- X[1, ] %*% fit1$X
  R[2, i] <- t(fit1$X) %*% H %*% fit1$X
  R[3, i] <- fit1$OBJ
  R[4:(n + 3), i] <- fit1$X
  R_2[1, i] <- X[1, ] %*% fit2$X
  R_2[2, i] <- t(fit2$X) %*% H %*% fit2$X
  R_2[3, i] <- fit2$OBJ
  R_2[4:(n + 3), i] <- fit2$X
  # Square line-end style for the plots drawn later.
  par(lend = 2)
}
|
5e8a49d72f80f886b77c3d22fe30bf00637b1980 | de80221f25adcb6112d21af5b429aa7bcbc9212e | /plots_exp_model.R | d862f4eda4e16202a17e8c7f06499e84669c6321 | [] | no_license | jameshay218/virosolver_ons | bba1bd4619a3ca63363d2940cb8901bb46eb460b | e2e44cbc93322500eacb770a076765a7ad8a2fa4 | refs/heads/main | 2023-03-31T08:59:39.936614 | 2021-03-26T17:04:11 | 2021-03-26T17:04:11 | 320,556,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,245 | r | plots_exp_model.R |
# For each observation time point, load the (parallel-tempered) MCMC chains
# fitted by the exponential-growth Ct model, keeping both the full chain
# (res) and the posterior draws of the growth-rate parameter beta.
res <- NULL
beta_ests <- NULL
for(i in seq_along(obs_times)){
  timepoint <- obs_times[i]
  # Chains for this time point live in their own subdirectory.
  chainwd_tmp <- paste0(chainwd_exp,"/",timepoint)
  chain <- lazymcmc::load_mcmc_chains(chainwd_tmp, parTab,FALSE,1,mcmcPars_ct["adaptive_period"],
                                      multi=TRUE,chainNo=TRUE,PTchain = TRUE)$chain
  chain <- as.data.frame(chain)
  beta_ests[[i]] <- tibble(beta=chain$beta,t=timepoint)
  res[[i]] <- chain
}
# One long tibble of beta draws across all time points (for the violin plot).
beta_ests_combined <- do.call("bind_rows",beta_ests)
# For each time point, post-process its chain: plot parameter traces, draw
# posterior incidence trajectories and derived growth rates, simulate
# viral-load trajectories, and save the figures under plot_wd.
for(i in seq_along(obs_times)){
  timepoint <- obs_times[i]
  # NOTE(review): duplicated assignment -- one `runname_use <-` is redundant.
  runname_use <- runname_use <- paste0(run_name,"_time_",timepoint)
  obs_dat_tmp <- obs_dat_use <- obs_dat1 %>% filter(t == timepoint)
  ## Observation times
  # When a maximum age is set, shift times so the window ends at max_age.
  if(!is.na(max_age)){
    obs_dat_use <- obs_dat_use %>% mutate(t = t - min(t), t = t + max_age)
  }
  ages <- 1:max(obs_dat_use$t)
  times <- 0:max(obs_dat_use$t)
  chain <- res[[i]]
  chain_comb <- chain
  chain_comb$sampno <- 1:nrow(chain_comb)
  chain1 <- chain
  chain_comb <- chain_comb[,colnames(chain_comb) != "chain"]
  # Trace plot of the free (fixed == 0) parameters, one panel per parameter,
  # coloured by chain.
  p_trace <- chain1[,c("sampno",unique(parTab[which(parTab$fixed == 0),"names"]),"chain")] %>%
    mutate(chain = as.factor(chain)) %>%
    pivot_longer(-c(sampno,chain)) %>%
    ggplot() +
    geom_line(aes(x=sampno,y=value,col=chain)) +
    facet_wrap(~name,scales="free_y")+
    scale_x_continuous(breaks=seq(min(chain$sampno),max(chain$sampno),length.out=5)) +
    export_theme
  ## Get smoothed growth rates
  # Posterior incidence trajectories for n_samp random posterior draws,
  # floored at a tiny positive value so the log below is defined.
  samps <- sample(unique(chain_comb$sampno),n_samp)
  trajs <- matrix(0, nrow=n_samp,ncol=length(times))
  for(ii in seq_along(samps)){
    trajs[ii,] <- pmax(inc_func_use(get_index_pars(chain_comb, samps[ii]),times),0.0000001)
  }
  # Daily log growth rate log(I[t+1]/I[t]) per trajectory, then 95% interval.
  trajs1 <- t(apply(trajs, 1, function(x) log(x[2:length(x)]/x[1:(length(x)-1)])))
  trajs1_quants <- t(apply(trajs1, 2, function(x) quantile(x,c(0.025,0.5,0.975))))
  trajs1_quants <- as.data.frame(trajs1_quants)
  trajs1_quants$t <- 1:nrow(trajs1_quants)
  colnames(trajs1_quants) <- c("lower","median","upper","t")
  ## Growth rate plot
  p_gr <- ggplot(trajs1_quants) + geom_ribbon(aes(x=t,ymin=lower,ymax=upper),alpha=0.25) +
    geom_line(aes(x=t,y=median)) +
    coord_cartesian(ylim=c(-0.5,0.5))
  trajs_quants <- t(apply(trajs, 2, function(x) quantile(x,c(0.025,0.25,0.5,0.75,0.975))))
  trajs_quants <- as.data.frame(trajs_quants)
  trajs_quants$t <- 1:nrow(trajs_quants)
  colnames(trajs_quants) <- c("lower","mid_lower","median","mid_upper","upper","t")
  ## Incidence plot (50% and 95% ribbons around the median)
  p_inc <- ggplot(trajs_quants) +
    geom_ribbon(aes(x=t,ymin=lower,ymax=upper),alpha=0.25) +
    geom_ribbon(aes(x=t,ymin=mid_lower,ymax=mid_upper),alpha=0.5) +
    geom_line(aes(x=t,y=median)) +
    export_theme +
    ylab("Per capita incidence") +
    xlab("Days since start") +
    coord_cartesian(ylim=c(0,0.03))
  # Simulated viral-load trajectories with Gumbel observation noise.
  vl_trajs <- matrix(0, nrow=n_samp,ncol=length(ages))
  for(ii in 1:n_samp){
    tmp_pars <- get_index_pars(chain_comb, samps[ii])
    tmp <- viral_load_func(tmp_pars,ages,FALSE)
    tmp1 <- extraDistr::rgumbel(length(tmp),tmp, tmp_pars["obs_sd"])
    vl_trajs[ii,] <- tmp1
  }
  vl_trajs1_quants <- t(apply(vl_trajs, 2, function(x) quantile(x,c(0.025,0.5,0.975))))
  vl_trajs1_quants <- as.data.frame(vl_trajs1_quants)
  vl_trajs1_quants$t <- 1:nrow(vl_trajs1_quants)
  colnames(vl_trajs1_quants) <- c("lower","median","upper","t")
  ## Viral-load plot
  # NOTE(review): p_vl is built but never saved below.
  p_vl <- ggplot(vl_trajs1_quants) + geom_ribbon(aes(x=t,ymin=lower,ymax=upper),alpha=0.25) +
    geom_line(aes(x=t,y=median))
  # Output directories (dir.create warns if a directory already exists).
  dir.create(paste0(plot_wd,"/traces/"),recursive = TRUE)
  dir.create(paste0(plot_wd,"/predictions/"),recursive = TRUE)
  dir.create(paste0(plot_wd,"/grs/"),recursive = TRUE)
  # NOTE(review): p_dat is assumed to be defined earlier in the script.
  ggsave(paste0(plot_wd,"/traces/",runname_use,"_trace.png"),p_trace,width=7,height=4)
  ggsave(paste0(plot_wd,"/predictions/",runname_use,"_predictions.png"),p_dat/p_inc,width=7,height=7)
  ggsave(paste0(plot_wd,"/grs/",runname_use,"_grs.png"),p_gr,width=7,height=4)
}
# Violin plot of the posterior beta (growth-rate) draws at each observation
# time; quantile lines at 2.5%, 50% and 97.5%, dashed line marks zero growth.
p_exp_betas <- beta_ests_combined %>%
  ggplot() + geom_violin(aes(x=t,y=beta,group=t),draw_quantiles=c(0.025,0.5,0.975),fill="grey70") +
  geom_hline(yintercept=0,linetype="dashed") +
  ylab("35-day growth rate using exponential model") +
  xlab("Time") +
  export_theme
|
e111d2820d79f89bad0265bba33349f014686c2c | 00cae1c8d9039c1177da75d2233b8903bce98b53 | /R/taxizedb-package.R | 686b1614031a9be95fe1510b42f5e15ac507b59f | [
"MIT"
] | permissive | ropensci/taxizedb | 9bb20cc44f8bab80592d6cb5e2cc8a5877c9eb9d | 3a78dd442680101d581e131010f0e15aab83a9ae | refs/heads/master | 2023-05-22T14:11:25.952182 | 2023-04-24T07:52:39 | 2023-04-24T07:52:39 | 53,961,466 | 25 | 12 | NOASSERTION | 2023-04-24T07:52:40 | 2016-03-15T16:40:20 | R | UTF-8 | R | false | false | 4,671 | r | taxizedb-package.R | #' @title taxizedb
#' @description Taxonomic databases interface
#'
#' @importFrom DBI dbConnect dbDisconnect dbSendQuery
#' @importFrom RSQLite SQLite
#' @importFrom dplyr tbl sql collect n
#' @importFrom dbplyr src_dbi
#' @importFrom rlang .data
#' @importFrom utils tail
#' @importFrom curl curl_download
#' @importFrom magrittr %>%
#' @name taxizedb-package
#' @aliases taxizedb
#' @docType package
#' @keywords package
#'
#' @section Supported data sources and database structure:
#' All are using SQLite as the database
#'
#' - NCBI: text files are provided by NCBI, which we stitch into a sqlite db
#' - ITIS: they provide a sqlite dump, which we use here
#' - The PlantList: created from stitching together csv files. this
#' source is no longer updated as far as we can tell. they say they've
#' moved focus to the World Flora Online
#' - Catalogue of Life: created from Darwin Core Archive dump. Using the
#' latest monthly edition via
#' http://www.catalogueoflife.org/DCA_Export/archive.php
#' - GBIF: created from Darwin Core Archive dump. right now we only have
#' the taxonomy table (called gbif), but will add the other tables in the
#' darwin core archive later
#' - Wikidata: aggregated taxonomy of Open Tree of Life, GLoBI and Wikidata.
#' On Zenodo, created by Joritt Poelen of GLOBI.
#' - World Flora Online: http://www.worldfloraonline.org/
#'
#' @section Update schedule for databases:
#'
#' - NCBI: since `db_download_ncbi` creates the database when the function
#' is called, it's updated whenever you run the function
#' - ITIS: since ITIS provides the sqlite database as a download, you can
#' delete the old file and run `db_download_itis` to get a new dump;
#'   they appear to update the dumps every month or so
#' - The PlantList: no longer updated, so you shouldn't need to download
#' this after the first download
#' - Catalogue of Life: a GitHub Actions job runs once a day at 00:00 UTC,
#'   building the latest COL data into a SQLite database that's hosted on
#' Amazon S3
#' - GBIF: a GitHub Actions job runs once a day at 00:00 UTC,
#'   building the latest GBIF data into a SQLite database that's hosted on
#' Amazon S3
#' - Wikidata: last updated April 6, 2018. Scripts are available to
#' update the data if you prefer to do it yourself.
#' - World Flora Online: since `db_download_wfo` creates the database when
#' the function is called, it's updated whenever you run the function
#'
#' @section Links:
#'
#' - NCBI: ftp://ftp.ncbi.nih.gov/pub/taxonomy/
#' - ITIS: https://www.itis.gov/downloads/index.html
#' - The PlantList - http://www.theplantlist.org/
#' - Catalogue of Life:
#' via http://www.catalogueoflife.org/content/annual-checklist-archive
#' - GBIF: http://rs.gbif.org/datasets/backbone/
#' - Wikidata: https://zenodo.org/record/1213477
#' - World Flora Online: http://www.worldfloraonline.org/
#'
#' @examples \dontrun{
#' library(dplyr)
#'
#' # data source: NCBI
#' db_download_ncbi()
#' src <- src_ncbi()
#' df <- tbl(src, "names")
#' filter(df, name_class == "scientific name")
#'
#' # data source: ITIS
#' ## download ITIS database
#' db_download_itis()
#' ## connect to the ITIS database
#' src <- src_itis()
#' ## use SQL syntax
#' sql_collect(src, "select * from hierarchy limit 5")
#' ### or pipe the src to sql_collect
#' src %>% sql_collect("select * from hierarchy limit 5")
#' ## use dplyr verbs
#' src %>%
#' tbl("hierarchy") %>%
#' filter(ChildrenCount > 1000)
#' ## or create tbl object for repeated use
#' hiers <- src %>% tbl("hierarchy")
#' hiers %>% select(TSN, level)
#'
#' # data source: The PlantList
#' ## download tpl database
#' db_download_tpl()
#' ## connect to the tpl database
#' src <- src_tpl()
#' ## do queries
#' tpl <- tbl(src, "tpl")
#' filter(tpl, Family == "Pinaceae")
#'
#' # data source: Catalogue of Life
#' ## download col database
#' db_download_col()
#' ## connect to the col database
#' src <- src_col()
#' ## do queries
#' names <- tbl(src, "taxa")
#' select(names, taxonID, scientificName)
#'
#' # data source: GBIF
#' ## download gbif database
#' db_download_gbif()
#' ## connect to the gbif database
#' src <- src_gbif()
#' ## do queries
#' df <- tbl(src, "gbif")
#' select(df, taxonID, scientificName)
#'
#' # data source: Wikidata
#' db_download_wikidata()
#' src <- src_wikidata()
#' df <- tbl(src, "wikidata")
#' filter(df, rank_id == "Q7432")
#'
#' # data source: World Flora Online
#' db_download_wfo()
#' src <- src_wfo()
#' df <- tbl(src, "wfo")
#' filter(df, taxonID == "wfo-0000000010")
#' }
# Needed for use of . in magrittr pipelines
# Registering these non-standard-evaluation names silences the
# "no visible binding for global variable" NOTEs from R CMD check.
utils::globalVariables(c(".", "rank_id", "rank_name", "kingdom_id",
  "name", "id", "references", "desc"))
# NULL anchors the package-level roxygen documentation block above.
NULL
|
8fb64b1fd8994152583dfe7037a35604b3caaaba | de39504064981200eac1ea08cf2005a1d8b73512 | /R/9-global.R | e9232b0d2b8717192cf155a659569592f2cd242d | [] | no_license | tpq/balance | 2dabf69fd127b84730feec722c0891a0221aee5a | b8a46924c5d1c14a50ccb8908b074aff6a5a30b4 | refs/heads/master | 2021-08-02T20:46:38.013874 | 2021-08-02T01:53:12 | 2021-08-02T01:53:12 | 142,627,965 | 7 | 2 | null | null | null | null | UTF-8 | R | false | false | 140 | r | 9-global.R | #' Example Compositional Data
#'
#' Taken from the wonderful \code{robCompositions} package.
#'
#' @usage data(expenditures)
#' @docType data
#' @keywords datasets
# The quoted name below is roxygen2's convention for documenting a dataset:
# the documentation above attaches to the `expenditures` object.
"expenditures"
|
a5b33fc8df5258d7b2caa607b2305f6537c06ed4 | 1d25525f865d342f635ed3a07af4e313b76c15d8 | /R/readCounts_functions.R | a1955dcb00520e9d3025836745a8efc832e24e86 | [] | no_license | estepi/ASpli | 94da5e8eed8b0f9d008da1876dcb419627492c28 | aeef195b211a657bfee9fe12a3076228dbf7a3b5 | refs/heads/master | 2021-01-20T17:02:46.312350 | 2017-02-25T13:43:59 | 2017-02-25T13:43:59 | 60,658,011 | 7 | 2 | null | null | null | null | UTF-8 | R | false | false | 12,794 | r | readCounts_functions.R | #funciones para read counts
# Count, for every gene, the reads overlapping any of its exons, and build
# the per-gene count table.
#
# reads:   named list of read alignments, one element per sample/BAM.
# feature: GRangesList with one element per gene (its exon ranges), carrying
#          gene_coordinates, locus_overlap and symbol metadata columns.
# cores:   NULL = serial lapply; otherwise number of cores for mclapply
#          (requires a parallel backend to be available).
#
# Returns a data.frame, one row per gene: symbol, locus_overlap,
# gene_coordinates, start, end, length, effective_length (sum of exon
# widths), followed by one count column per sample.
.counterGenes <-
  function(reads, feature, cores=NULL)
  {
    if (is.null(cores) )
      # Serial path: plain lapply, no parallel library required.
    {
      hits <- lapply(reads, function(x){countOverlaps(feature, x,
                                                      ignore.strand = TRUE) })
    }
    else
    {
      hits <- mclapply(reads, mc.cores=cores, function(x){countOverlaps(feature, x,
                                                                        ignore.strand = TRUE)})
    }
    # NOTE(review): hits.ul is built but never used afterwards.
    hits.ul <- do.call(cbind.data.frame, hits)
    # Effective gene length = total width of its exons.
    wi <- width(feature)
    wit <- sum(wi)
    # Collapse each gene's exon ranges to one genomic interval.
    geneStarts <- sapply(start(feature),min)
    geneEnds <- sapply(end(feature),max)
    geneChr <- sapply(seqnames(feature),unique)
    # Strand factor codes 1/2 are mapped to "+"/"-" below.
    strand <- min(strand(feature))
    strand[strand==1] <- "+"
    strand[strand==2] <- "-"
    genes <- GRanges(seqnames=geneChr,
                     strand=strand,
                     ranges=IRanges(geneStarts,geneEnds),
                     effective_length=wit)
    names(genes) <- names(feature)
    # Carry the annotation metadata over from the input features.
    gene_coordinates <- feature@elementMetadata$gene_coordinates
    mcols(genes) <- append(mcols(genes), DataFrame(gene_coordinates=gene_coordinates))
    locus_overlap <- feature@elementMetadata$locus_overlap
    mcols(genes) <- append(mcols(genes), DataFrame(locus_overlap=locus_overlap))
    symbol <- feature@elementMetadata$symbol #symbol
    mcols(genes) <- append(mcols(genes), DataFrame(symbol=symbol))
    # Assemble the final table: annotation columns, coordinates, then counts.
    aa <- data.frame(as.data.frame(genes@elementMetadata$symbol),
                     as.data.frame(genes@elementMetadata$locus_overlap),
                     as.data.frame(genes@elementMetadata$gene_coordinates),
                     as.data.frame(genes@ranges),
                     effective_length=as.data.frame(genes@elementMetadata$effective_length),
                     hits)
    colnames(aa)[1:8] <- c("symbol","locus_overlap","gene_coordinates",
                           "start","end", "length","name","effective_length" )
    # Drop the redundant "name" column (the row names already hold it).
    aa$name <- NULL
    return(aa)
  }
#########################################################
# Count reads overlapping each bin (sub-gene feature) and assemble the
# per-bin count table.
#
# reads:   named list of read alignments, one element per sample/BAM.
# feature: GRanges of bins with feature/event/locus/locus_overlap/symbol
#          metadata columns.
# genes:   per-gene table (as built by .counterGenes) used to look up each
#          bin's gene_coordinates via its locus.
# cores:   NULL = serial lapply; otherwise number of cores for mclapply.
#
# Returns a data.frame, one row per bin: annotation columns, start/end/
# length, then one count column per sample.
.counterBin <-
  function(reads, feature, genes, cores=NULL)
  {
    if (is.null(cores) )# serial path: no parallel library required
    {
      hits <- lapply(reads,function(x){countOverlaps(feature, x,
                                                     ignore.strand = TRUE)})#OK
    }
    else
    {
      hits <- mclapply(reads,mc.cores=cores,function(x){countOverlaps(feature, x,
                                                                      ignore.strand = TRUE)})#OK
    }
    # NOTE(review): hits.ul is built but never used afterwards.
    hits.ul <- do.call(cbind.data.frame, hits)
    # Map each bin's locus to its row in the gene table to recover the
    # gene_coordinates string.
    te <- match(feature@elementMetadata$locus, rownames(genes))
    gene_coordinates <- genes$gene_coordinates[te]
    aa <- data.frame(as.data.frame(feature@elementMetadata$feature),
                     as.data.frame(feature@elementMetadata$event),
                     as.data.frame(feature@elementMetadata$locus),
                     as.data.frame(feature@elementMetadata$locus_overlap),
                     as.data.frame(feature@elementMetadata$symbol),
                     gene_coordinates,
                     as.data.frame(feature@ranges), hits)
    aa$names <- NULL
    colnames(aa)[1:9] <- c("feature","event","locus","locus_overlap","symbol",
                           "gene_coordinates","start","end","length")
    return(aa)
  }
#########################################################
# Count ungapped reads fully covering each junction bin. Like .counterBin,
# but only reads without junctions (no gaps) are counted, and a read must
# overlap the bin by at least l bases.
#
# reads:   named list of read alignments, one element per sample/BAM.
# feature: GRanges of junction bins with event/locus/locus_overlap/symbol
#          metadata columns.
# genes:   per-gene table used to look up gene_coordinates via the locus.
# cores:   NULL = serial lapply; otherwise number of cores for mclapply.
# l:       minimum overlap (read length requirement) for a hit.
#
# Returns a data.frame, one row per junction bin: annotation columns,
# start/end/length, then one count column per sample.
.counterJbin <-
  function(reads, feature, genes, cores=NULL, l)
  {
    ungapped <- lapply(reads, function(x) {x[njunc(x)==0,]}) # keep only reads without gaps (no junctions)
    if (is.null(cores) )# serial path: no parallel library required
    {
      hits <- lapply(ungapped,function(x){countOverlaps(feature, x,
                                                        ignore.strand = TRUE, minoverlap = l)})#OK
    }
    else
    {
      hits <- mclapply(ungapped,mc.cores=cores,function(x){countOverlaps(feature, x,
                                                                         ignore.strand = TRUE, minoverlap = l)})#OK
    }
    # NOTE(review): hits.ul is built but never used afterwards.
    hits.ul <- do.call(cbind.data.frame, hits)
    # Map each bin's locus to its row in the gene table to recover the
    # gene_coordinates string.
    te <- match(feature@elementMetadata$locus, rownames(genes))
    gene_coordinates <- genes$gene_coordinates[te]
    aa <- data.frame(as.data.frame(feature@elementMetadata$event),
                     as.data.frame(feature@elementMetadata$locus),
                     as.data.frame(feature@elementMetadata$locus_overlap),
                     as.data.frame(feature@elementMetadata$symbol),
                     gene_coordinates,
                     as.data.frame(feature@ranges),
                     hits)
    aa$names <- NULL
    colnames(aa)[1:8] <- c("event","locus","locus_overlap","symbol",
                           "gene_coordinates","start","end","length")
    return(aa)
  }
###############################################################
# Annotate experimental junctions against the genome annotation: for each
# junction in jranges, record the matching annotated junction (if any), the
# containing gene and its strand/coordinates/symbol, whether the gene
# assignment is ambiguous, which exon bins the junction spans, and whether
# it falls entirely within an exon bin. Results are added as metadata
# columns on jranges, which is returned.
.ovBinJunction<-function(features, jranges)
{
  annJunctions <- featuresj(features)
  # Per-junction metadata vectors, initialised to "-" (= no information).
  # NOTE(review): jname is filled below but never stored in the metadata.
  jname <- rep("-", length(jranges))
  hitBin <- rep("-", length(jranges))
  hitGen <- rep("-", length(jranges))
  hitGenStrand <- rep("*", length(jranges))
  gene_coordinates <- rep("-", length(jranges))
  ambiguos <- rep("-", length(jranges))
  j_within_bin <- rep("-", length(jranges))
  ############## new: collapse genes to single ranges #####################
  feature <- featuresg(features)
  geneStarts <- sapply(start(feature),min)
  geneEnds <- sapply(end(feature),max)
  geneChr<-sapply(seqnames(feature),unique)
  # Strand factor codes 1/2 are mapped to "+"/"-" below.
  strand <- min(strand(feature))
  strand[strand==1] <- "+"
  strand[strand==2] <- "-"
  genes <- GRanges(seqnames=geneChr,
                   strand=strand,
                   ranges=IRanges(geneStarts,geneEnds),
                   gene_coordinates=feature@elementMetadata$gene_coordinates,
                   symbol=feature@elementMetadata$symbol)
  names(genes) <- names(feature)
  # Junctions fully contained in a gene.
  overGene <- findOverlaps(jranges, genes, type="within")
  overGeneDF <- as.data.frame(overGene)#get a df
  posJrange <- overGeneDF$queryHits
  #replace index numbers by names
  posGene <- overGeneDF$subjectHits
  #replace index numbers by names; posGene[1:10]
  overGeneDF$queryHits <- names(jranges)[as.numeric(overGeneDF$queryHits)]
  overGeneDF$subjectHits <- names(genes)[as.numeric(overGeneDF$subjectHits)]
  # NOTE(review): this local `table` masks base::table for the rest of the
  # function body.
  table <- table(overGeneDF$queryHits)
  # Collapse multiple gene hits per junction into a ";"-separated string and
  # tag junctions hitting more than one gene as ambiguous ("yes").
  ttG <- data.frame(aggregate(subjectHits ~ queryHits, data = overGeneDF, paste, collapse=";"))
  dd0 <- match(ttG$queryHits,names(jranges))
  hitGen[dd0] <- ttG$subjectHits
  dd <- match(ttG$queryHits,names(table))
  ttG$undef <- table[dd]
  ttG$tag <- rep("-",nrow(ttG))
  ttG$tag[ttG$undef>1] <- "yes"
  ambiguos[dd0] <- ttG$tag
  ################################################
  hitGen[posJrange] <- names(genes[posGene])
  # Replace, by junction index, the name of the containing gene; junctions
  # with no containing gene are marked "noHit".
  # NOTE(review): if posJrange is empty, -posJrange selects ALL positions,
  # marking every junction "noHit" -- TODO confirm this edge case is intended.
  hitGen[-posJrange] <- "noHit"
  hitGenStrand[posJrange] <- as.character(strand(genes)[posGene])
  gene_coordinates[posJrange] <- genes@elementMetadata$gene_coordinates[posGene]
  # The short coordinate string comes from the genes object.
  #######################################################################
  overJ <- findOverlaps(jranges, annJunctions, type="equal") #identify annotated junctions
  overJDF <- as.data.frame(overJ) #get a df
  namesJ <- as.numeric(overJDF[,1]) #indices of jranges that hit annJunctions
  namesAnnJ <- as.numeric(overJDF[,2]) #indices of annJunctions that hit jranges
  jname[namesJ] <- names(jranges[namesJ])
  hitBin[namesJ] <- names(annJunctions[namesAnnJ]) #ok, metadata vector
  hitBin[-namesJ] <- "noHit" #ok, metadata vector.
  # Name of the annotated junction stored at the position of the
  # experimental one. Next, identify which gene contains the junction:
  # when there is no hit against an annotated junction, this is a very
  # useful piece of information.
  ########## spanning exon bins
  # Record which exon bins each junction spans (any kind of hit counts).
  # NOTE (original author): "aca esta el problema" -- this was flagged as
  # problematic in the original code.
  exonsBins <- featuresb(features)[featuresb(features)@elementMetadata$feature=="E",]
  over <- findOverlaps(jranges, exonsBins)
  overDF <- as.data.frame(over)
  namesJ <- as.numeric(overDF[,1])
  overDF[,1] <- names(jranges[namesJ])
  namesBins <- as.numeric(overDF[,2])
  overDF[,2] <- names(exonsBins[namesBins])
  # Collapse the spanned bins per junction into a ";"-separated string.
  tt <- data.frame(aggregate(subjectHits ~ queryHits, data = overDF, paste, collapse=";"))
  span <- rep("-", length(jranges))
  te <- match(names(jranges), tt$queryHits) #ok
  span <- tt$subjectHits[te]
  #####################################################################
  # Junctions fully contained inside a single exon bin.
  overJunctionWithinBins <- findOverlaps(jranges, exonsBins, type="within")
  overJunctionWithinBinsDF <- as.data.frame(overJunctionWithinBins)
  namesJ <- as.numeric(overJunctionWithinBinsDF[,1])
  namesB <- as.numeric(overJunctionWithinBinsDF[,2])
  overJunctionWithinBinsDF[,1] <- names(jranges[namesJ])
  overJunctionWithinBinsDF[,2] <- names(exonsBins[namesB])
  agtt <- data.frame(aggregate(subjectHits ~ queryHits,
                               data = overJunctionWithinBinsDF, paste, collapse=";"))
  tw <- match(names(jranges), agtt$queryHits) #ok;
  j_within_bin <- agtt$subjectHits[tw]
  # Gene symbol for junctions contained in a gene.
  symbol <- rep("-", length(jranges))
  symbol[posJrange] <- as.character(genes@elementMetadata$symbol[posGene])
  # Attach everything as metadata columns on the junction ranges.
  mcols(jranges) <- append(mcols(jranges), DataFrame(hitBin=hitBin,
                                                     hitGen=hitGen,
                                                     hitGenStrand=hitGenStrand,
                                                     gene_coordinates=gene_coordinates,
                                                     undef=ambiguos,
                                                     bin_spanned=span,
                                                     j_within_bin=j_within_bin,
                                                     symbol=symbol))
  return(jranges)
}
##########################################################
# Discover and count splice junctions across samples. Junctions are pulled
# from each BAM, widened by one base on each side, deduplicated across all
# samples, filtered by maximum intron size, counted per sample with
# countMatches, and annotated via .ovBinJunction.
#
# features: the annotation object consumed by .ovBinJunction.
# bam:      named list of read alignments, one element per sample/BAM.
# cores:    NULL = serial lapply; otherwise number of cores for mclapply.
# maxISize: maximum allowed junction (intron) width.
#
# Returns a data.frame keyed by "chr.start.end" with annotation columns
# (junction, gene, strand, multipleHit, symbol, gene_coordinates,
# bin_spanned, j_within_bin) followed by one count column per sample.
.counterJunctions <-
  function(features, bam, cores, maxISize)
  {
    if (is.null(cores) )# serial path: no parallel library required
    {
      ujunctions <- lapply (bam, function(x) {
        junctions <- unlist(junctions(x) )
        strand(junctions) <- "*"
        # Widen by one base on each side so the range covers the flanking
        # exon positions.
        start(junctions) <- start(junctions)-1
        end(junctions) <- end(junctions)+1
        ujunctions <- unique(junctions)
        return(ujunctions)
      } )
    }
    else
    {
      ujunctions <- mclapply(bam, mc.cores=cores, function(x) {
        junctions <- unlist(junctions(x) )
        strand(junctions)<- "*"
        start(junctions) <- start(junctions)-1
        end(junctions) <- end(junctions)+1
        ujunctions <- unique(junctions)
        return(ujunctions)
      })
    }
    # Unique junctions pooled over all the samples.
    jranges <- unique(unlist(GRangesList(unlist(ujunctions))))
    # NOTE(review): maxWidth is computed but the filter below uses maxISize;
    # width(jranges) <= maxWidth was presumably intended (the +2 accounts
    # for the one-base widening on each side) -- TODO confirm.
    maxWidth <- maxISize+2
    jranges <- jranges[width(jranges)<= maxISize]
    # Name each junction "chr.start.end" so hits can be summarised by name.
    fcoord <- paste(seqnames(jranges),
                    start(jranges),
                    end(jranges) , sep="." )
    jranges@ranges@NAMES <- fcoord
    #########################################
    # Per-sample counts: how many of the sample's (widened) junctions match
    # each pooled junction exactly.
    jcounts<-lapply(bam, function(x)
    {
      junctions <- unlist(junctions(x) )
      strand(junctions)<- "*"
      start(junctions) <- start(junctions)-1
      end(junctions) <- end(junctions)+1
      count <- countMatches(jranges, junctions)
      jc <- data.frame(row.names=names(jranges), count)
      return(jc)
    })
    # NOTE(review): the trailing head(df) has no effect here.
    df <- do.call("cbind", jcounts); head(df)
    colnames(df) <- names(jcounts)
    # From here the annotation branch: the critical part.
    jranges <- .ovBinJunction(features, jranges)
    jrdf <- data.frame(
      as.data.frame(jranges@elementMetadata$hitBin),
      as.data.frame(jranges@elementMetadata$hitGen),
      as.data.frame(jranges@elementMetadata$hitGenStrand),
      as.data.frame(jranges@elementMetadata$undef),
      as.data.frame(jranges@elementMetadata$symbol),
      as.data.frame(jranges@elementMetadata$gene_coordinates),
      as.data.frame(jranges@elementMetadata$bin_spanned),
      as.data.frame(jranges@elementMetadata$j_within_bin),
      row.names=names(jranges) )
    colnames(jrdf) <- c("junction",
                        "gene",
                        "strand",
                        "multipleHit",
                        "symbol",
                        "gene_coordinates",
                        "bin_spanned",
                        "j_within_bin")
    rownames(jrdf) <- names(jranges)
    # Merge annotation with the per-sample counts by junction name.
    aa <- merge(jrdf, df, by.x="row.names", by.y="row.names", sort=FALSE)
    # NOTE(review): rnames is computed but never used.
    rnames <- paste(start(jranges)-1,end(jranges)+1, sep="-" )
    rownames(aa) <- fcoord
    aa$Row.names <- NULL
    return(aa)
  }
################################################################
|
25eb7e62a842aede82f402ed72bb4bb35970ce5b | f0489c47853fc78a49bfbc28ca3cf39798b17431 | /man/show-NMFns-method.Rd | 65635152829709af9ecf8d0e01e6600eb77a0ce3 | [] | no_license | pooranis/NMF | a7de482922ea433a4d4037d817886ac39032018e | c9db15c9f54df320635066779ad1fb466bf73217 | refs/heads/master | 2021-01-17T17:11:00.727502 | 2019-06-26T07:00:09 | 2019-06-26T07:00:09 | 53,220,016 | 0 | 0 | null | 2016-03-05T19:46:24 | 2016-03-05T19:46:24 | null | UTF-8 | R | false | true | 318 | rd | show-NMFns-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFns-class.R
\docType{methods}
\name{show,NMFns-method}
\alias{show,NMFns-method}
\title{Show method for objects of class \code{NMFns}}
\usage{
\S4method{show}{NMFns}(object)
}
\description{
Show method for objects of class \code{NMFns}
}
|
96e1090d671dd31d31e521f54f277ff21ec31a36 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/dtwclust/R/UTILS-reinterpolate.R | eaf4c31d6ace609072a43313429f39fd6474a99e | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,780 | r | UTILS-reinterpolate.R | #' Wrapper for simple linear reinterpolation
#'
#' This function is just a wrapper for the native function [stats::approx()] to do simple linear
#' reinterpolation. It also supports matrices, data frames, and lists of time series.
#'
#' @export
#' @importFrom stats approx
#'
#' @param x Data to reinterpolate. Either a vector, a matrix/data.frame where each row is to be
#' reinterpolated, or a list of vectors/matrices.
#' @param new.length Desired length of the output series.
#' @param multivariate Is `x` a multivariate time series? It will be detected automatically if a
#' list is provided in `x`.
#'
#' @details
#'
#' Multivariate series must have time spanning the rows and variables spanning the columns.
#'
#' @return Reinterpolated time series
#'
#' @examples
#'
#' data(uciCT)
#'
#' # list of univariate series
#' series <- reinterpolate(CharTraj, 205L)
#'
#' # list of multivariate series
#' series <- reinterpolate(CharTrajMV, 205L)
#'
#' # single multivariate series
#' series <- reinterpolate(CharTrajMV[[1L]], 205L, TRUE)
#'
reinterpolate <- function(x, new.length, multivariate = FALSE) {
if (is.list(x) && !is.data.frame(x)) {
x <- lapply(x, reinterpolate, new.length = new.length, multivariate = is_multivariate(x))
}
else if (!multivariate && (is.matrix(x) || is.data.frame(x))) {
x <- t(apply(x, 1L, reinterpolate, new.length = new.length))
}
else {
if (is.data.frame(x)) x <- base::as.matrix(x)
check_consistency(x, "ts")
if (multivariate && !is.null(dim(x)))
x <- apply(x, 2L, reinterpolate, new.length = new.length)
else
x <- stats::approx(x, method = "linear", n = new.length)$y
}
# return
x
}
|
39e25d3e6d3adb597d89b6edaf80fda960a3adbc | 6fbc03d70486eda587971ff9aa2a1ff06c0b8cf8 | /Simulation_code/stochastic_sim.R | 64d4ca0ba490f831b1ecd4df6927be42eaf4aa55 | [] | no_license | apsicle/Network-Models-Thesis | 8eaf03a4fed02606c2cb201eebbf04447ab78dde | ccfe0268ef06c44ee0811fb4fc6a136eddec36ac | refs/heads/master | 2021-06-15T16:04:59.115164 | 2017-02-22T23:10:16 | 2017-02-22T23:10:16 | 50,934,199 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,873 | r | stochastic_sim.R | #Stochastic implementation of 3-d transport system simulation
#P = n-dimensional transition matrix
#s_0 = n by 1 column vector of population in whole numbered values
#r_0 = rate of logistic growth
#t = amount of time to run simulation for
#N = number of individuals in initial population
#b = birth rate
#d = death rate
#v = movement rate
stochastic_sim <- function(P, s_0, r_0, t, N) {
results <- list()
event <- 0
time = 0
s <- N * (s_0 / sum(s_0))
while(time < t) {
#Generate timestep to next event
rnum <- runif(1)
timestep <- (1 / sum(s))*(log(1 / rnum))
r <- sum(s)*rnum
#Logistic growth during timestep. Growth is bounded to N.
#Fractional growth is removed (floor function)
s <- floor((exp(r_0*timestep)*s) / (1 + s*((exp(r_0*timestep) - 1) / N)))
#See where the individual moves out of at the event
if(r < s[1]) {
#Moves out from population 1
rates <- P[,1]
rand <- runif(1)
if(rand < rates[1]) {
#Nothing happens because 1 transfers to self
}
else if(rand < rates[1] + rates[2]) {
#1 transfers to 2
s[2] <- s[2] + 1
s[1] <- s[1] - 1
}
else {
#1 transfers to 3
s[3] <- s[3] + 1
s[1] <- s[1] - 1
}
}
else if(r < s[1] + s[2]) {
#Moves out from population 2
rates <- P[,2]
rand <- runif(1)
if(rand < rates[1]) {
s[1] <- s[1] + 1
s[2] <- s[2] - 1
}
else if(rand < rates[1] + rates[2]) {
#Nothing happens because 2 transfers to self
}
else {
#2 transfers to 3
s[3] <- s[3] + 1
s[2] <- s[2] - 1
}
}
else {
#Moves out from population 3
rates <- P[,3]
rand <- runif(1)
if(rand < rates[1]) {
#3 transfers to 1
s[1] <- s[1] + 1
s[3] <- s[3] - 1
}
else if(rand < rates[1] + rates[2]) {
#3 transfers to 2
s[2] <- s[2] + 1
s[3] <- s[3] - 1
}
else {
#Nothing happens because 3 transfers to self
}
}
#Update everything
time = time + timestep
event = event + 1
results[[event]] <- c(s, time, event)
}
results <- data.frame(matrix(unlist(results), nrow=length(results), byrow=T),
stringsAsFactors=FALSE)
names(results) <- (c("N1", "N2", "N3", "Time", "Step"))
results
}
|
aa1f3292def1c79231e93b16a448b16883c1414f | cf606e7a3f06c0666e0ca38e32247fef9f090778 | /test/integration/example-models/ARM/Ch.9/electric_grade1_supp.data.R | a32c3a3b25004df568f73803140101ee429b96e2 | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | nhuurre/stanc3 | 32599a71d5f82c759fd6768b8b699fb5f2b2d072 | 5612b357c1cd5a08cf2a57db97ce0e789bb87018 | refs/heads/master | 2023-07-05T02:27:08.083259 | 2020-11-12T15:37:42 | 2020-11-12T15:37:42 | 222,684,189 | 0 | 0 | BSD-3-Clause | 2019-11-19T11:50:39 | 2019-11-19T11:50:38 | null | UTF-8 | R | false | false | 512 | r | electric_grade1_supp.data.R | N <- 21
post_test <-
c(48.9, 70.5, 89.7, 44.2, 77.5, 84.7, 78.9, 86.8, 60.8, 75.7,
95.1, 60.6, 55.5, 84.8, 84.9, 101.9, 70.6, 78.4, 84.2, 108.6, 76.6)
pre_test <-
c(13.8, 16.5, 18.5, 8.8, 15.3, 15, 19.4, 15, 11.8, 16.4, 16.2, 12,
12.3, 17.2, 14.6, 18.9, 15.3, 16.6, 16, 20.1, 16.4)
grade <-
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
treatment <-
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
supp <-
c(1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1)
|
74caea6650ba0b01d86be1957ee45a3925ce9e7f | 36c2c3d60ecf9dfc52e023bea0fba9b1bfc825bb | /cachematrix.R | edfa15b4b9cfcc1740f3019ea0f397816f184cd2 | [] | no_license | conghuanxu/ProgrammingAssignment2 | 512191177bd033076276484ec9078d811f31a90d | 3c8a4462025ce45976619d9f483661d69f2a4392 | refs/heads/master | 2021-01-24T10:05:57.944323 | 2015-04-26T02:48:44 | 2015-04-26T02:48:44 | 31,111,704 | 0 | 0 | null | 2015-02-21T02:25:37 | 2015-02-21T02:25:37 | null | UTF-8 | R | false | false | 1,254 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## This function have two goals, one is give the inverse of a matrix,
## the other goal is if we have computed the inverse once, and if we
## need compute the inverse again we can read from the cache
## Write a short comment describing this function
## This function is to make the cache of the inverse of the matrix.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
## if the cached data is null, we solve the inverse and make cache
## otherwise we read data from the cached data.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinv(inv)
inv
}
|
6face2d58b550d4dc793f5da1a0caa79c9327198 | 6523d59b661f2abbad08ddad83b03dc39b85dd13 | /Scripts/global_nutrient_application.R | ca2b417e119a08eeb8dd5fdadc8a02582aeefce9 | [] | no_license | hansronald/flex-crops | fe83f1652831938aa97e847070dcfdc3379849b7 | 07c3afe951d8f3a3801ce711175d2cc5825f1efd | refs/heads/master | 2020-07-19T19:05:54.575593 | 2020-04-10T13:44:19 | 2020-04-10T13:44:19 | 206,498,029 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,996 | r | global_nutrient_application.R | # library
source("~/Google Drive/Skola/SRC/Thesis/Code/Scripts/common_settings.R")
#library(ggplot2)
nutrient_input_average_path = here("Data", "Crop data", "nutrient_input_average.csv")
nutrient_input_average = read_csv(nutrient_input_average_path) %>%
gather("Nutrient", "value", -Crop) %>%
clean_names()
nutrient_input_global_path = here("Data", "Crop data", "nutrient_input_global.csv")
nutrient_input_global = read_csv(nutrient_input_global_path) %>%
gather("Nutrient", "value", -Crop) %>%
clean_names() %>%
mutate(value = value * 1e6)
# # Plot input in kg per hectare
# nutrient_input_global %>%
# mutate(nutrient = as.factor(nutrient)) %>%
# mutate(nutrient = fct_relevel(nutrient, c("N", "P", "K", "NPK"))) %>%
# mutate(crop = as.factor(crop)) %>%
# mutate(crop = fct_relevel(crop, c("Maize", "Oil Palm", "Soybeans", "Sugar crops", "Rice", "Wheat"))) %>%
# group_by(crop) %>%
# ggplot(aes(fill=nutrient, y=value, x=crop)) +
# geom_bar(position="dodge", stat="identity") +
# labs(title = "Global total nutrient application per crop", y = "tonnes",x = "", fill = "Nutrient") +
# scale_y_continuous(labels = scaleFUN)
#
# ggsave(here("Output images", "nutrient_input_global.png"))
#
# # Plot input in kg per hectare
# nutrient_input_average %>%
# mutate(nutrient = as.factor(nutrient)) %>%
# mutate(nutrient = fct_relevel(nutrient, c("N", "P", "K", "NPK"))) %>%
# mutate(crop = as.factor(crop)) %>%
# mutate(crop = fct_relevel(crop, c("Maize", "Oil Palm", "Soybeans", "Sugar crops", "Rice", "Wheat"))) %>%
# group_by(crop) %>%
# ggplot(aes(fill=nutrient, y=value, x=crop)) +
# geom_bar(position="dodge", stat="identity") +
# labs(title = "Average nutrient application per crop", y = "Kilograms per hectare",x = "")
#
# ggsave(here("Output images", "nutrient_input_average.png"))
# Plot input in kg per hectare
# Find crop plot order based on total NPK input
crop_order = nutrient_input_global %>%
filter(nutrient == "NPK") %>%
arrange(desc(value)) %>%
pull(crop)
nutrient_input_global %>%
filter(nutrient != "NPK") %>%
mutate(nutrient = as.factor(nutrient)) %>%
mutate(nutrient = fct_relevel(nutrient, c("N", "P", "K"))) %>%
mutate(crop = as.factor(crop)) %>%
mutate(crop = fct_relevel(crop, rev(crop_order))) %>%
group_by(crop) %>%
ggplot(aes(fill=nutrient, y=value, x=crop)) +
geom_bar(position="stack", stat="identity") +
labs(title = "Global total nutrient application per crop", y = "tonnes",x = "", fill = "Nutrient", tag = "a)") +
coord_flip() +
scale_y_continuous(labels = scaleFUN) +
theme_classic(base_size = 7) +
scale_fill_brewer(palette="Paired")
ggsave(here("Output images", "nutrient_input_global.png"), height = 2, width = 3, dpi = 200)
# Plot input in kg per hectare
# Find the order based on total nutrient input
crop_order = nutrient_input_average %>%
filter(nutrient == "NPK") %>%
arrange(desc(value)) %>%
pull(crop)
nutrient_input_average %>%
filter(nutrient != "NPK") %>%
mutate(nutrient = as.factor(nutrient)) %>%
mutate(nutrient = fct_relevel(nutrient, c("N", "P", "K"))) %>%
mutate(crop = as.factor(crop)) %>%
mutate(crop = fct_relevel(crop, rev(crop_order))) %>%
group_by(crop) %>%
ggplot(aes(fill=nutrient, y=value, x=crop)) +
geom_bar(position="stack", stat="identity") +
labs(title = "Average nutrient application per crop", y = "Kilograms per hectare",x = "", tag = "b)") +
coord_flip() +
theme_classic(base_size = 7) +
scale_fill_brewer(palette="Paired")
ggsave(here("Output images", "nutrient_input_average.png"), height = 2, width = 3, dpi = 300)
global_fertiliser_per_category = read_csv(here("Data", "Input", "Global fertilisers use aggregated by category.csv")) %>%
clean_names()
global_fertiliser_per_category %>%
select(crop_category, starts_with("share")) %>%
filter(crop_category != "Fruits/Vegetables") %>%
gather(year, percent, -crop_category) %>%
mutate(year = case_when(year == "share_percent_2007_08" ~ "2007-08",
year == "share_percent_2010_11" ~ "2010-11",
year == "share_percent_2014_15" ~ "2014-15")) %>%
mutate(crop_category = as.factor(crop_category)) %>%
mutate(crop_category = fct_relevel(crop_category,
c("Wheat", "Rice", "Maize", "Soybean", "Oil Palm" ,"Sugar Crops",
"Other Oilseeds", "Fibre Crops", "Other Cereals", "Roots & Tubers",
"Fruits", "Vegetables", "Grassland","Residual"))) %>%
filter(!crop_category %in% c("Roots & Tubers", "Fruits", "Vegetables", "Grassland", "Residual", "Other crops")) %>%
mutate(percent = as.numeric(percent)/100) %>%
ggplot(aes(x = crop_category, y = percent, fill = year)) +
geom_bar(position="dodge", stat = "identity") +
theme_classic(base_size = 6) +
scale_y_continuous(labels = scales::percent_format(accuracy = 1)) +
labs(x = "", fill = "", title = "Changes in global fertilizer crop contribution 2007-2014/15") +
theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1),
legend.key.size = unit(0.5,"line"),
legend.position = "top") +
scale_fill_brewer(palette="Set2")
ggsave(here("Output images", "global_fertiliser_contribution_change_per_crop.png"),
height = 2, width = 3, dpi = 300)
# Total global NPK per crop
global_NPK_use_per_category = read_csv(here("Data", "Input", "Global NPK use aggregated by category.csv")) %>%
clean_names()
global_NPK_use_per_category_processed = global_NPK_use_per_category %>%
select(crop_category, fertiliser_type, quantity_mt_2014_2015) %>%
group_by(crop_category) %>%
mutate(total_fertiliser = sum(quantity_mt_2014_2015)) %>%
arrange(desc(total_fertiliser)) %>%
ungroup()
global_NPK_use_per_category$crop_category
global_NPK_use_per_category_processed %>%
mutate(fertiliser_type = as.factor(fertiliser_type)) %>%
mutate(fertiliser_type = fct_relevel(fertiliser_type, c("N", "P", "K"))) %>%
mutate(crop_category = as.factor(crop_category)) %>%
mutate(crop_category = fct_relevel(crop_category,
c("Wheat", "Rice", "Maize", "Soybean", "Oil Palm" ,"Sugar Crops",
"Other Oilseeds", "Fibre Crops", "Other Cereals", "Roots & Tubers",
"Fruits", "Vegetables", "Grassland","Residual"))) %>%
ggplot(aes(x = crop_category, y = quantity_mt_2014_2015, fill = fertiliser_type)) +
geom_bar(position = "stack", stat = "identity") +
coord_flip() +
scale_fill_brewer(palette="Paired") +
labs(title = "Total global NPK use per crop (Mt nutrients)", y = "quantity (Mt)", x = "", fill = "Fertiliser type") +
theme_classic(base_size = 6) +
theme(legend.key.size = unit(0.5,"line"),
legend.position="top")
ggsave(here("Output images","global_NPK_use_per_category.png"), height = 2, width = 3, dpi = 300)
|
421f6b817495efeea27794b6096f6e946cb29250 | 10aafda5ec7d972394bbfe3e380aeca0f484df99 | /3-1-3.R | 17a06bbf0ecdcc7ec74cdbd8b5a073758b2114ce | [] | no_license | hdohms/Linear-Regression | 7150fcaafa4ab6385b72313c68df53e4259990f7 | 799f50e9d20cf94dcec3788cb757094cb8804e5b | refs/heads/master | 2021-06-24T13:11:12.769218 | 2020-11-20T13:12:19 | 2020-11-20T13:12:19 | 159,203,884 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | 3-1-3.R | data("GaltonFamilies")
GaltonFamilies %>%
filter(childNum == 1 & gender == "male") %>%
select(father, childHeight) %>%
rename(son = childHeight) %>%
do(tidy(lm(father ~ son, data = .))) |
5928d0410425af782583ba1bd449cd692b4aad4e | c04c8604c6f5fce5328b138658ab651dc0d0a139 | /task01_main_B2_up2.R | 3c663952ba46fd3143e01fa9a432c3e515c993fb | [] | no_license | realypz/MVE440-Home-Exam | 90859e33aec4f7a1bff75c70965fb4085f5d28b5 | 9ea94b135c5236e0cf827b7a8fc5d9f7f93f263d | refs/heads/master | 2022-04-05T01:26:27.781325 | 2020-02-16T18:48:02 | 2020-02-16T18:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,151 | r | task01_main_B2_up2.R | library(datadr)
library(dplyr)
library(ggplot2)
library(glmnet)
library(extraDistr)
library(foreach)
load("exercise1.RData")
##### ----------------------- Define X, y, n, p, m, s, r -----------------------
X <- as.data.frame(XB2) %>% as.matrix %>% scale
y <- as.data.frame(yB2) %>% as.matrix %>% as.numeric()
colnames(y) <- "y"
n <- dim(X)[1] # Total num of sample points
p <- dim(X)[2] # num of features
data_all <- cbind(y, X) %>% as.data.frame()
##### ----------------------- Fit a linear model on the scaled all features -----------------------
## --------- fit a linear model use lm ---------
fit.all <- lm(y~., data=data_all)
# ---------------------- Feature selection by a subsample of B2, using Lasso ----------------
sub_indice <- sample.int(n, 10000, replace = F)
Lasso_cv.subInd <- cv.glmnet(X[sub_indice, ], y[sub_indice], alpha = 1,
type.measure = "mse",
## K = 10 is the default.
nfold = 10,## prevalidated array is returned
keep = TRUE)
Lasso_cv.subInd$lambda.1se
Coef_lasso.subInd <- coef(Lasso_cv.subInd, s=Lasso_cv.subInd$lambda.1se)[-1] %>% as.numeric()
(features_by_lasso.subInd <- which(Coef_lasso.subInd!=0))
#####################################################################################
# ----------------------- Methods of feature selection -----------------------
# ####################################################################################
##### ----------------------- Feature selection by lasso -----------------------
Lasso_cv <- cv.glmnet(X, y, alpha = 1,
type.measure = "mse",
## K = 10 is the default.
nfold = 10,## prevalidated array is returned
keep = TRUE)
Lasso_cv$lambda.1se
Coef_lasso <- coef(Lasso_cv, s=Lasso_cv$lambda.1se)[-1] %>% as.numeric()
(features_by_lasso <- which(Coef_lasso!=0))
# the selected features by lasso
X_selected <- X[,features_by_lasso]
data_selected <- cbind(y, X[,features_by_lasso]) %>% as.data.frame()
##### ----------------------- Fit a linear model from data_selected by lasso -----------------------
system.time({
fit.selected <- lm(y~., data=data_selected)
})
##### ----------------------- Use package vita -----------------------
# library(vita)
# library("randomForest")
# reg.rf= randomForest(X_selected,y,mtry = 3,ntree=100,
# importance=TRUE,keep.inbag = TRUE)
##### ----------------------- Stepwise feature selection, using AIC -----------------------
library(caret)
train.control <- trainControl(method = "cv", number = 20)
# http://topepo.github.io/caret/train-models-by-tag.html#generalized-linear-model
regfit.fwd <- caret::train(X_selected, as.numeric(y), method = "lmStepAIC", # "leapForward", "leapSeq"
trControl = train.control, intercept = FALSE)
summary(regfit.fwd)
##### ----------------------- Random forest variable importance -----------------------
# library( randomForest )
# model <- randomForest(X, y, importance=T)
# varImpPlot(model)
library( randomForest )
library( iterators )
library(foreach)
library(doParallel)
registerDoParallel(numCores) # use multicore, set to the number of our cores
### --------- parallel computing -------------
num_cores <- detectCores(all.tests = FALSE, logical = TRUE)
cl <- makeCluster(num_cores)
registerDoParallel(cl)
system.time({
para.data <- foreach (i=1:200, .combine=cbind, .packages='randomForest') %dopar% {
rand.indice <- sample.int(n, 10000, replace = F)
X.rfsub <- X[rand.indice,features_by_lasso]
y.rfsub <- y[rand.indice]
model <- randomForest(X.rfsub, #
y.rfsub, ntree=7, importance=T,
keep.forest=F, replace = T,
sampsize = 0.5*nrow(X.rfsub))
model$importance[,1]
}
avg_imp_rf <- rowMeans(para.data) %>% as.data.frame()
})
stopCluster(cl)
### --------- A single random forest ---------------
model <- randomForest(X[,features_by_lasso], y, ntree=80, importance=T,
keep.forest=T, replace = F,
sampsize = 0.05*nrow(X))
plot(model)
varImpPlot(model, type=1, main = "% of increasing in MSE, Dataset B1[, selected_by_Lasso]") # Optional arg: n.var = 100,
### ----------- For loop ----------------
system.time({
imp_table <- NULL
for (i in (1:2)){
model <- randomForest(X[,features_by_lasso], y, ntree=80, importance=T,
keep.forest=T, replace = F,
sampsize = 0.05*nrow(X))
imp_table <- cbind(imp_table, model$importance[,1])
}
})
avg_imp_rf <- rowMeans(imp_table) %>% as.data.frame()
# Compute the average VI by random forest
o <- order(avg_imp_rf,decreasing = T)
avg_imp_rf_ranking <- avg_imp_rf[order(avg_imp_rf, decreasing = T),] %>% as.data.frame()
##### ----------------------- Calculate Relative Importance, by "lmg" -----------------------
# -------- Due to so many features, lmg is super slow on ALL features.
# Then I tried LMG with SELECTED features by lasso.
library(relaimpo)
metrics <- calc.relimp(fit.selected,type=c("lmg"), rela=T)
lmg_imp <- metrics$lmg %>% as.data.frame()
##### ----------------------- Final model -----------------------
(coef.final <- coef(fit.selected))
# --------- fit a linear model use bag of little bootstrap ---------
rrkc <- datadr::divide(
data_selected, by = datadr::rrDiv(500), update = TRUE)
system.time({
kcBLB <- rrkc %>% datadr::addTransform(function(x) {
drBLB(
x,
statistic = function(x, weights) {
coef(glm(y ~ ., data = x, family = "gaussian", weights = weights))
},
metric = function(x) {
quantile(x, c(0.025, 0.975))
},
R = 100,
n = nrow(rrkc))
})
coefs <- datadr::recombine(kcBLB, datadr::combMean)
matrix(coefs, ncol = 2, byrow = TRUE)
})
|
02547d03028006c8303845293f9a00daa47886d6 | 0fcb9e49dc934221da0cf91847eef0d14a9cd4bb | /inf0611_trabalho3.R | baf89e0b936871499f4c0aec09a6d01e34b71d8a | [] | no_license | diogomotta/inf-0611 | 160e28e9aaf1f780b7a8020cd6ee9890a5df2598 | d5299f434f18829f4ca2c1055ee2843273af22a2 | refs/heads/master | 2021-04-18T11:02:10.196672 | 2020-03-23T20:34:12 | 2020-03-23T20:34:12 | 249,537,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,065 | r | inf0611_trabalho3.R | #------------------------------------------------#
# INF-0611 Recuperacao de Informacao #
# #
# Trabalho Avaliativo 3 #
#------------------------------------------------#
# Nome COMPLETO Aluna (o) 1: #
# #
# Nome COMPLETO Aluna (o) 2: #
# #
# Nome COMPLETO Aluna (o) 3: #
# #
# Nome COMPLETO Aluna (o) 4:                    #
#------------------------------------------------#
library(IM)
library(e1071)
library(wvtool)
library(ecodist)
library(imager)
library(ggplot2)
#------------------------------------------------#
# Configuracao dos arquivos auxiliares #
#------------------------------------------------#
setwd("/home/diogo/MDC/inf-0611/trabalho 3") # configure o caminho antes de descomentar essa linha
source("./ranking_metrics.R")
source("./trabalho3_base.R")
# caminho da pasta de imagens
path_soccer = './soccer/'
#------------------------------------------------#
# Read the image collection                      #
#------------------------------------------------#
imagens <- read_images(path_soccer)

#------------------------------------------------#
# Get the class of each image                    #
#------------------------------------------------#
nome_classes <- get_classes(path_soccer)

#------------------------------------------------#
# Define the relevant class                      #
#------------------------------------------------#
classe_relevante <- 'barcelona'

#------------------------------------------------#
# Build the ground truth (1 = relevant class)    #
#------------------------------------------------#
ground_truth <- get_ground_truth(nome_classes, classe_relevante)

#------------------------------------------------#
# Define the queries of the relevant class       #
#------------------------------------------------#
# BUG FIX: the original line was a dangling assignment
# ("consulta <- #comment"), so the R parser consumed the NEXT expression
# ("top <- 20") as the right-hand side, silently setting consulta to 20
# and entangling the two statements. Use the same 'barcelona' queries as
# Trabalho 2 (indices 13 and 14, matching q1/q2 used further below).
consulta <- c(13, 14)

#------------------------------------------------#
# Size of the analysed top-K                     #
#------------------------------------------------#
top <- 20
#################################################
#################################################
#------------------------------------------------#
# Questao 1 #
#------------------------------------------------#
# Extracts color features (per-channel intensity histograms).
features_color <- function(imagens){
  # Input:  list of loaded images, each an array of dim [x, y, 1, channels]
  #         (as returned by read_images / imager).
  # Output: matrix of color features with one row per image; each row is the
  #         concatenation of the 255-bin intensity histograms of every channel.
  features <- NULL
  for (cnt in seq_along(imagens)) {
    print(sprintf("Processando imagem %d de %d", cnt, length(imagens)))
    img_ <- imagens[[cnt]]
    channels <- dim(img_[])
    img_features <- c()
    for (color in seq_len(channels[4])) {
      # select one color channel to process
      img <- img_[ , , 1, color]
      # rescale pixel intensities to the range [0, 255]
      min_v <- min(img)
      max_v <- max(img)
      if (max_v > min_v) {
        img <- ((img - min_v) / (max_v - min_v)) * 255
      } else {
        # BUG FIX: a constant channel made the original divide by zero,
        # producing NaN and crashing hist(); map every pixel to 0 instead
        img <- img * 0
      }
      # 255-bin histogram over [0, 255]
      h <- hist(img, plot = FALSE, breaks = 0:255)$counts
      # concatenate the channel histograms into a single feature vector
      img_features <- c(img_features, h)
    }
    # append a new row to the feature matrix (one row per image)
    features <- rbind(features, img_features)
  }
  return(features)
}
#------------------------------------------------#
# Extracts texture features (grey-level co-occurrence statistics).
features_texture <- function(imagens){
  # Input:  list of loaded images.
  # Output: matrix of texture features with one row per image.
  # Images that are not greyscale are converted before processing.
  features <- NULL
  for (cnt in seq_along(imagens)) {
    print(sprintf("Processando imagem %d de %d", cnt, length(imagens)))
    # convert to greyscale and drop singleton dimensions
    img <- drop(grayscale(imagens[[cnt]][]))
    # rescale pixel intensities to the range [0, 255]
    min_v <- min(img)
    max_v <- max(img)
    if (max_v > min_v) {
      img <- ((img - min_v) / (max_v - min_v)) * 255
    } else {
      # BUG FIX: a constant image made the original divide by zero and
      # produce an all-NaN matrix; map every pixel to 0 instead
      img <- img * 0
    }
    # co-occurrence features for offset (1, 0) (horizontal neighbours);
    # helper sourced from an auxiliary file
    values <- criarMatrizCoocorrencia(img, c(1, 0))
    # append a new row to the feature matrix (one row per image)
    features <- rbind(features, values)
  }
  return(features)
}
#------------------------------------------------#
# Extracts shape features (2-D image moments).
features_shape <- function(imagens){
  # Input:  list of loaded images.
  # Output: matrix of shape features with one row per image; each row holds
  #         the 121 centred moments of orders (i, j), i and j in 0..10,
  #         computed on the greyscale version of the image.
  total <- length(imagens)
  linhas <- vector("list", total)
  for (idx in seq_len(total)) {
    print(sprintf("Processando imagem %d de %d", idx, total))
    # work on the greyscale image, without singleton dimensions
    cinza <- drop(grayscale(imagens[[idx]][]))
    momentos <- c()
    for (ordem_i in 0:10) {
      for (ordem_j in 0:10) {
        # centred moment of order (ordem_i, ordem_j) as one feature
        momentos <- c(momentos, moment(cinza, order = c(ordem_i, ordem_j), center = TRUE))
      }
    }
    linhas[[idx]] <- momentos
  }
  # stack one row per image
  do.call(rbind, linhas)
}
# Extract the three feature matrices (one row per image in 'imagens')
features_c <- features_color(imagens)    # color: concatenated channel histograms
features_t <- features_texture(imagens)  # texture: co-occurrence features
features_s <- features_shape(imagens)    # shape: centred moments, orders 0..10
#################################################
#################################################
#------------------------------------------------#
# Questao 2 #
#------------------------------------------------#
# Rank aggregation by score value.
# Two queries of the 'barcelona' class are chosen (positions 11 to 20 of the
# ground_truth vector belong to that class); for each query, the three
# aggregated rankings (combmin / combmax / combsum) are built below.
q1 <- 13  # index of the first query image
q2 <- 14  # index of the second query image
# Distances of every image to a single query image.
generate_distances <- function(features, query){
  # Input:  'features' - feature matrix used for the distance computation
  #         (one row per image); 'query' - row index of the query image.
  # Output: unordered numeric vector with the Euclidean distance of every
  #         image (row) to the query; the element at position 'query' is 0.
  #
  # IMPROVEMENT: the original built the full n x n pairwise matrix with
  # full(ecodist::distance(features, "euclidean")) just to read one column;
  # computing only the distances to the query row is O(n * p) instead of
  # O(n^2) and needs no extra package.
  feats <- as.matrix(features)
  # per-row difference to the query feature vector
  diffs <- sweep(feats, 2, feats[query, ], "-")
  distancias <- as.numeric(sqrt(rowSums(diffs * diffs)))
  return(distancias)
}
# distances from every image to query 1, one vector per feature family
distancia_c1 <- generate_distances(features_c, q1)
distancia_t1 <- generate_distances(features_t, q1)
distancia_s1 <- generate_distances(features_s, q1)
# distances from every image to query 2
distancia_c2 <- generate_distances(features_c, q2)
distancia_t2 <- generate_distances(features_t, q2)
distancia_s2 <- generate_distances(features_s, q2)
## For each query, build one aggregated ranking per combination rule.
## combmin / combmax / combsum are helpers defined elsewhere in the project
## (presumably fusing the three distance vectors by min / max / sum before
## ranking -- confirm against the helper file).
## rankings aggregated with COMBMIN
ranking_combmin1 <- combmin(distancia_c1, distancia_t1, distancia_s1)
ranking_combmin2 <- combmin(distancia_c2, distancia_t2, distancia_s2)
## rankings aggregated with COMBMAX
ranking_combmax1 <- combmax(distancia_c1, distancia_t1, distancia_s1)
ranking_combmax2 <- combmax(distancia_c2, distancia_t2, distancia_s2)
## rankings aggregated with COMBSUM
ranking_combsum1 <- combsum(distancia_c1, distancia_t1, distancia_s1)
ranking_combsum2 <- combsum(distancia_c2, distancia_t2, distancia_s2)
#################################################
#################################################
# Comparing the aggregated rankings.
# Uses precision() from ranking_metrics.R with the ground_truth vector and
# each aggregated ranking as the prediction.
# precision at top-k (k = 1..top) for the CombMin rankings; `top`,
# `precision` and `ground_truth` are defined earlier in the file (not
# visible in this chunk)
p_combmin1 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmin1))
p_combmin2 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmin2))
# precision at top-k for the CombMax rankings
p_combmax1 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmax1))
p_combmax2 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmax2))
# precision at top-k for the CombSum rankings
p_combsum1 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combsum1))
p_combsum2 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combsum2))
# Empty placeholder data frames used only as the `data` argument of the
# ggplot calls below; every aesthetic is supplied as an external vector.
pr1 <- data.frame(precision = NULL, recall = NULL)
pr2 <- data.frame(precision = NULL, recall = NULL)
## Precision x top-k chart for each query, with one curve per aggregation
## method (CombMin red, CombMax blue, CombSum green).
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = p_combmin1), color = 'red') +
geom_line(aes(y = p_combmin1), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_combmax1), color = 'blue') +
geom_line(aes(y = p_combmax1), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMax"), vjust= -0.3, color = 'blue') +
geom_point(aes(y = p_combsum1),color = 'green') +
geom_line(aes(y = p_combsum1),color = 'green') +
geom_text(aes(0, 0.8, label = "CombSum"), vjust= -0.3, color = 'green') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = p_combmin2), color = 'red') +
geom_line(aes(y = p_combmin2), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_combmax2), color = 'blue') +
geom_line(aes(y = p_combmax2), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMax"), vjust= -0.3, color = 'blue') +
geom_point(aes(y = p_combsum2),color = 'green') +
geom_line(aes(y = p_combsum2),color = 'green') +
geom_text(aes(0, 0.8, label = "CombSum"), vjust= -0.3, color = 'green') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
## Recall x top-k chart for each query, same curves as above but using
## recall() from ranking_metrics.R.
# recall at top-k for the CombMin rankings
r_combmin1 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmin1))
r_combmin2 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmin2))
# recall at top-k for the CombMax rankings
r_combmax1 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmax1))
r_combmax2 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combmax2))
# recall at top-k for the CombSum rankings
r_combsum1 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combsum1))
r_combsum2 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_combsum2))
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = r_combmin1), color = 'red') +
geom_line(aes(y = r_combmin1), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_combmax1), color = 'blue') +
geom_line(aes(y = r_combmax1), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMax"), vjust= -0.3, color = 'blue') +
geom_point(aes(y = r_combsum1),color = 'green') +
geom_line(aes(y = r_combsum1),color = 'green') +
geom_text(aes(0, 0.8, label = "CombSum"), vjust= -0.3, color = 'green') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = r_combmin2), color = 'red') +
geom_line(aes(y = r_combmin2), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_combmax2), color = 'blue') +
geom_line(aes(y = r_combmax2), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMax"), vjust= -0.3, color = 'blue') +
geom_point(aes(y = r_combsum2),color = 'green') +
geom_line(aes(y = r_combsum2),color = 'green') +
geom_text(aes(0, 0.8, label = "CombSum"), vjust= -0.3, color = 'green') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
#################################################
#################################################
##NAO SE ESQUECA DO RELATORIO
#################################################
#################################################
#------------------------------------------------#
# Questao 3 #
#------------------------------------------------#
# agregando rankings por posicao
#escolha duas consultas da classe barcelona (posicoes de 11 a 20 do vetor ground_truth)
#construa os rankings para cada metodo de agregacao e para cada consulta (3 rankings para cada consulta)
#utilize a funcao da questão anterior generate_distance seguida pela generate_ranking para
#cada vetor de caracteristicas individualmente
generate_rankings <- function(distancias) {
# Input: unordered vector of distances from every image to the query
#        (as produced by generate_distances)
# Output: image indices sorted from nearest (smallest distance) to farthest
order(distancias)
}
## For each query: build a per-feature-family ranking, then aggregate the
## three rankings by position with the BORDA count method.
## bordacount is a helper defined elsewhere in the project.
### QUERY 1 ###
ranking_c1 <- generate_rankings(distancia_c1)
ranking_t1 <- generate_rankings(distancia_t1)
ranking_s1 <- generate_rankings(distancia_s1)
ranking_borda1 <- bordacount(ranking_c1, ranking_t1, ranking_s1)
### QUERY 2 ###
ranking_c2 <- generate_rankings(distancia_c2)
ranking_t2 <- generate_rankings(distancia_t2)
ranking_s2 <- generate_rankings(distancia_s2)
ranking_borda2 <- bordacount(ranking_c2, ranking_t2, ranking_s2)
#################################################
#################################################
# comparando rankings
##FAZER
## utilize as funções do arquivo ranking_metrics.R para calcular a precisao e revocacao utilizando o ground_truth e os rankings obtidos (como o parametro predictions)
## gere um grafico de precisao X topK para cada consulta (contendo as curvas dos rankings gerados pelo BORDA e pelo COMB escolhido)
# precisao no topk para bordacount
# precision at top-k for the Borda rankings, k = 1..top
p_borda1 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_borda1))
p_borda2 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_borda2))
## Precision x top-k chart per query comparing Borda against the chosen
## Comb* aggregation (CombMin for query 1, CombSum for query 2).
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = p_combmin1), color = 'red') +
geom_line(aes(y = p_combmin1), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_borda1), color = 'blue') +
geom_line(aes(y = p_borda1), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = p_combsum2), color = 'red') +
geom_line(aes(y = p_combsum2), color = 'red') +
geom_text(aes(0, 1,label = "CombSum"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_borda2), color = 'blue') +
geom_line(aes(y = p_borda2), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
## Recall x top-k chart per query for the same Borda vs Comb* comparison.
# recall at top-k for the Borda rankings
r_borda1 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_borda1))
r_borda2 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_borda2))
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = r_combmin1), color = 'red') +
geom_line(aes(y = r_combmin1), color = 'red') +
geom_text(aes(0, 1,label = "CombMin"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_borda1), color = 'blue') +
geom_line(aes(y = r_borda1), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = r_combsum2), color = 'red') +
geom_line(aes(y = r_combsum2), color = 'red') +
geom_text(aes(0, 1,label = "CombSum"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_borda2), color = 'blue') +
geom_line(aes(y = r_borda2), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
#################################################
#################################################
##NAO SE ESQUECA DO RELATORIO
#################################################
#################################################
#------------------------------------------------#
# Questao 4 #
#------------------------------------------------#
# concatenando caracteristicas
## FAZER -- pode ser utilizado mesmo metodo do trabalho anterior
## obter vetores finais de caracteristicas pela concatenação de cada tipo de caracteristica (forma, cor, textura):
## - dos 3
# Final feature vectors: concatenation of all three families (color,
# texture, shape) column-wise -- one combined row per image.
features_cts <- cbind(features_c, features_t, features_s)
## Reuse generate_distances (question 2) followed by generate_rankings
## (question 3) on the concatenated features, with the same two queries.
# distances to each query in the concatenated feature space
distancia_cts1 <- generate_distances(features_cts, q1)
distancia_cts2 <- generate_distances(features_cts, q2)
# rankings (nearest first) for each query
ranking_cts1 <- generate_rankings(distancia_cts1)
ranking_cts2 <- generate_rankings(distancia_cts2)
#################################################
#################################################
# comparando rankings
##FAZER
## utilize as funções do arquivo ranking_metrics.R para calcular a precisao e revocacao utilizando o ground_truth e os rankings obtidos (como o parametro predictions)
## gere um grafico de precisao X topK para cada consulta (contendo as curvas dos rankings da agregacao escolhida e da concatenacao de caracteristicas)
# precision at top-k for the concatenated-features ranking
# precision at top-k for the concatenated-features rankings, k = 1..top
p_concat1 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_cts1))
p_concat2 <- mapply(precision, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_cts2))
## Precision x top-k chart per query comparing feature concatenation against
## the previously chosen aggregation (CombMin for query 1, Borda for query 2).
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = p_concat1), color = 'red') +
geom_line(aes(y = p_concat1), color = 'red') +
geom_text(aes(0, 1,label = "Concatenação"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_combmin1), color = 'blue') +
geom_line(aes(y = p_combmin1), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMin"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = p_concat2), color = 'red') +
geom_line(aes(y = p_concat2), color = 'red') +
geom_text(aes(0, 1,label = "Concatenação"), vjust= -0.3, color = 'red') +
geom_point(aes(y = p_borda2), color = 'blue') +
geom_line(aes(y = p_borda2), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Precisão x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Precisão", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
## Recall x top-k chart per query for the same concatenation vs aggregation
## comparison.
r_concat1 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_cts1))
r_concat2 <- mapply(recall, 1:top,
MoreArgs = list(ground_truth = ground_truth,
prediction = ranking_cts2))
### QUERY 1 ###
ggplot(pr1, aes(x = 1:top)) +
geom_point(aes(y = r_concat1), color = 'red') +
geom_line(aes(y = r_concat1), color = 'red') +
geom_text(aes(0, 1,label = "Concatenação"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_combmin1), color = 'blue') +
geom_line(aes(y = r_combmin1), color = 'blue') +
geom_text(aes(0, 0.9,label = "CombMin"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 1") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
### QUERY 2 ###
ggplot(pr2, aes(x = 1:top)) +
geom_point(aes(y = r_concat2), color = 'red') +
geom_line(aes(y = r_concat2), color = 'red') +
geom_text(aes(0, 1,label = "Concatenação"), vjust= -0.3, color = 'red') +
geom_point(aes(y = r_borda2), color = 'blue') +
geom_line(aes(y = r_borda2), color = 'blue') +
geom_text(aes(0, 0.9,label = "Borda"), vjust= -0.3, color = 'blue') +
theme_light() +
labs(colour = element_blank(), title = "Revocação x TopK - Consulta 2") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_y_continuous(name = "Revocação", limits = c(0, 1),
breaks = 0:10 * 0.1,
minor_breaks = NULL) +
scale_x_continuous(name = "TopK", limits = c(0, top),
breaks = 0:top,
minor_breaks = NULL)
## serao entao um total de 4 graficos (dois para cada consulta)
#################################################
#################################################
##NAO SE ESQUECA DO RELATORIO
#################################################
#################################################
|
f340437ca6c8947ceda42715174d55a64463bb48 | c2c760f3803dfafad79755f7233089c899bec60c | /man/plotPeak.Rd | 10c47417a02f8aee68446d5831d053cc9575fcfe | [
"Apache-2.0"
] | permissive | scottwalmsley/wSIMCity | 6f05b6b2a36cf25e2ce66346e7753207f1f28e30 | 98ea258fc5ff9af33af94a1a4ce2d56f507da134 | refs/heads/master | 2021-06-14T14:45:15.543418 | 2021-04-28T18:51:03 | 2021-04-28T18:51:03 | 184,795,602 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 506 | rd | plotPeak.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_plot.R
\name{plotPeak}
\alias{plotPeak}
\title{Plots the spectrum of precursor and aglycones}
\usage{
plotPeak(spectrum, mz, ppm, main = NULL, col = 1)
}
\arguments{
\item{spectrum}{matrix of mz and intensities}
\item{mz}{target mz value}
\item{ppm}{tolerance window in ppm}
\item{main}{character vector for the plot title}
\item{col}{integer color value}
}
\description{
Plots the spectrum of precursor and aglycones
}
|
474a5c90536211fd0f3612801af54b4f056d70b7 | fa79ee0e432e7b56f494f553bc7a577fe3df2576 | /lecture3/q2/server.R | 68f2fc2d7677dd1feb565e94cfbb4b0fa1dcc385 | [] | no_license | maxwagner/608 | 503c59c417732d4f9f5be558213b80f81b528ab5 | 7f2d036c63ddb18dfe1ceba2eccc4a245783ae76 | refs/heads/master | 2021-01-14T08:39:28.147005 | 2017-10-14T01:25:06 | 2017-10-14T01:25:06 | 68,726,565 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,703 | r | server.R | library(shiny)
library(googleVis)
library(plyr)
library(dplyr)
# read in data
mortality <- read.csv('https://raw.githubusercontent.com/maxwagner/608/master/lecture3/q1/cleaned-cdc-mortality-1999-2010.csv')
# rem some columns, rename remaining
mortality <- mortality[, c(3,5,7,10,11)]
colnames(mortality) <- c("Chapter", "State", "Year", "Population", "Rate")
# shiny server
shinyServer(function(input, output) {
# sr = state rate, nr = national rate
mortalityReactive <- reactive({
srFilt <- filter(mortality, Chapter == input$chapter)
srGroup <- group_by(srFilt, State)
states <- data.frame(State = unique(srGroup$State))
Change <- srGroup$Rate[srGroup$Year == 1999] - srGroup$Rate[srGroup$Year == 2010]
stateChange <- cbind(states, Change)
nrFilt <- filter(mortality, Chapter == input$chapter)
nrDdply <- ddply(nrFilt, ~Year, summarise, Rate=mean(Rate))
nrChange <- nrDdply$Rate[nrDdply$Year == 1999] - nrDdply$Rate[nrDdply$Year == 2010]
stateChange$NationalChange <- nrChange
stateChange$Delta <- stateChange$Change - stateChange$NationalChange
forGraph <- select(stateChange, State, Delta)
arrange(forGraph, desc(Delta))
})
output$gvisplot <- renderGvis({
gvisBarChart(mortalityReactive(),
options = list(
title = "Mortality Rate Difference From National",
backgroundColor = "#CFD8DC",
backgroundColor.stroke = "black",
backgroundColor.strokeWidth = 10,
height = 1000,
width = 1000,
chartArea = "{width: '60%', height: '95%'}"
))
})
}) |
68699ec55467e8ae52388af3033081885082fbcc | 29585dff702209dd446c0ab52ceea046c58e384e | /goric/R/orgls.R | f24029d72f79c939e649d61c163c2ad9bd67a6a1 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,840 | r | orgls.R | orgls <-
function(formula, data, constr, rhs, nec, weights=NULL, correlation=NULL, control=orlmcontrol()){
UseMethod("orgls")
}
orgls.formula <-
function(formula, data, constr, rhs, nec, weights=NULL, correlation=NULL, control=orlmcontrol()){
cl <- match.call()
if (!is.null(correlation)){
groups <- getGroupsFormula(correlation)
} else {
groups <- NULL
}
glsSt <- glsStruct(corStruct = correlation, varStruct = varFunc(weights))
model <- terms(formula, data = data)
mfArgs <- list(formula = asOneFormula(formula(glsSt), formula, groups), data = data, na.action = na.fail)
dataMod <- do.call("model.frame", mfArgs)
origOrder <- row.names(dataMod)
if (!is.null(groups)) {
groups <- eval(parse(text = paste("~1", deparse(groups[[2]]), sep = "|")))
grps <- getGroups(dataMod, groups, level = length(getGroupsFormula(groups, asList = TRUE)))
ord <- order(grps)
grps <- grps[ord]
dataMod <- dataMod[ord, , drop = FALSE]
revOrder <- match(origOrder, row.names(dataMod))
} else {
grps <- NULL
}
X <- model.frame(model, dataMod)
contr <- lapply(X, function(el) if (inherits(el, "factor")) contrasts(el))
contr <- contr[!unlist(lapply(contr, is.null))]
x <- model.matrix(model, X)
y <- eval(model[[2]], dataMod)
if (is.numeric(constr)) constr <- rbind(constr)
if (!is.matrix(constr)) stop("constr needs to be a matrix.")
if (ncol(x) != ncol(constr)) stop(paste("constr has not correct dimensions.\nNumber of columns (",ncol(constr),") should equal the number of parameters: ", ncol(x), sep=""))
if (length(rhs) != nrow(constr)) stop(paste("rhs has a different number of elements than there are numbers of rows in constr (",length(rhs), " != ", nrow(constr), ")", sep=""))
if (is.numeric(nec) & length(nec) != 1) stop("nec needs to be single a numeric value or a logical vector with the same length as the number of constraints.")
if (is.logical(nec) & length(nec) != length(rhs)) stop("nec needs to be single a numeric value or a logical vector with the same length as the number of constraints.")
if (is.logical(nec)){
ord <- order(nec, decreasing=TRUE)
constr <- constr[ord,,drop=FALSE]
rhs <- rhs[ord]
nec <- sum(nec)
}
if (nec < 0) stop("nec needs to be positive")
if (nec > length(rhs)) stop(paste("nec is larger than the number of constraints. (",nec," > ",length(rhs),")", sep=""))
########################
## unconstrained linear model
unc <- gls(formula, data=dataMod, weights=weights, correlation=correlation, method="ML")
## extracting the variance-covariance structure
if (is.null(unc$modelStruct$varStruct)){
V <- diag(nrow(x))
} else {
V <- diag(attr(unc$modelStruct$varStruct, "weights"))
}
if (is.null(unc$modelStruct$corStruct)){
crr <- diag(nrow(x))
} else {
cr <- corMatrix(unc$modelStruct$corStruct)
crr <- if (is.matrix(cr)) cr else as.matrix(bdiag(cr))
}
W <- V %*% crr %*% V
tBeta <- lm.gls(formula, data = dataMod, W=W)$coefficients
# taken from lm.gls in package MASS
# transforming X and y into a classical linear model framework
eW <- eigen(W, TRUE)
d <- eW$values
if (any(d <= 0)) stop("'W' is not positive definite")
eWv <- eW$vector
A <- diag(sqrt(d)) %*% t(eWv)
Ainv <- eWv %*% diag(1/sqrt(d))
X <- A %*% x
Y <- as.vector(A %*% y)
res <- Y - X %*% tBeta
Sigma <- as.vector(t(res) %*% (res))/(nrow(x))
############################
## lin model with order restrictions
orsolve <- function(tBeta, X, Y, Constr, RHS, NEC){
yVx <- t(X) %*% Y
dvec <- 2*yVx
Dmat <- 2*(t(X) %*% X)
Amat <- t(Constr)
bvec <- RHS
solve.QP(Dmat,dvec,Amat,bvec=bvec, meq=NEC)
}
orBeta <- tBeta
val <- 0
for (i in 1:control$maxiter){
sqp <- orsolve(orBeta, X, Y, constr, rhs, nec)
orBeta <- sqp$solution
if (abs(sqp$value - val) <= control$absval) break else val <- sqp$value
}
if (i == control$maxiter & abs(sqp$value - val) > control$absval) warning("Maximum number of iterations reached without convergence!")
ores <- (Y - X %*% orBeta)
orSigma <- as.vector(t(ores) %*% (ores))/(nrow(x))
Aores <- Ainv %*% (Y - X %*% orBeta)
AorSigma <- as.vector(t(Aores) %*% diag(diag(W)) %*% (Aores))/(nrow(x))
p <- unc$dims$p
N <- unc$dims$N
Np <- N - p
loglik <- (-N/2)*log(2*pi) + (-1/2)*(nrow(x)*log(AorSigma) + determinant(W, logarithm=TRUE)$modulus) - (1/2)*N + sum(log(diag(W)))
names(orBeta) <- colnames(x)
out <- list(call=cl, X=X, XW=x, y=Y, unccoefficients=tBeta, coefficients=orBeta, fitted=Ainv %*% (X %*% orBeta), residuals=Ainv %*% (Y - X %*% orBeta), sigma=Sigma, orSigma=orSigma, logLik=loglik, constr=constr, rhs=rhs, nec=nec, Niter=i, iact=sqp$iact, extrap=length(coef(unc[["modelStruct"]])), modelStruct=unc$modelStruct, W=W)
class(out) <- "orgls"
return(out)
}
|
cb32006355c4524eddec90dc3fa6d9be2a0d33d6 | 97515ba84e0c105203baa1f7fd24134cc7bffe38 | /week 7/homework/w07-hw/Question 3.R | 6772b06a08f555c1e2f3cfd62f169fe8c4483ded | [] | no_license | memasanz/stats420 | 7055c68cdd069164382eb9b7e2016678c20cdca7 | 0adb47fce9eac823637126dece815d2f75c91434 | refs/heads/master | 2023-07-09T16:10:50.175503 | 2021-08-02T16:12:20 | 2021-08-02T16:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 470 | r | Question 3.R | #Question 3
fish = read_csv("fish.csv")
fish_smaller = lm(Weight ~ Length1 + HeightPct * WidthPct, data = fish)
summary(fish_smaller)
fish_int = lm(Weight ~ Length1 + HeightPct + WidthPct + Length1:HeightPct + Length1:WidthPct + HeightPct:WidthPct + Length1:HeightPct:WidthPct, data = fish)
summary(fish_int)
fish_int = lm(Weight ~ Length1 + HeightPct + WidthPct + Length1:HeightPct + Length1:WidthPct + HeightPct:WidthPct + Length1:HeightPct:WidthPct, data = fish)
|
79ebfb0c3805ee1005e60d96dc91c8f069756785 | 4eba831feeba02ea01ae63cf7c13eaa36d980c01 | /app17.R | e8ce83ccff4d80b3d6e70b2700198275e84b38cb | [] | no_license | hiltoncsj/R | d63d122e02271289bdfc02ec73a119e4bf38ea2c | 97107f6e94b274196191a1a4182f5348ea802ec8 | refs/heads/master | 2023-08-20T11:51:31.398139 | 2021-10-16T16:40:46 | 2021-10-16T16:40:46 | 416,733,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 313 | r | app17.R | library(shiny)
library("shinyalert")
ui = fluidPage(
useShinyalert(), # Set up shinyalert
actionButton("btn", "Mostrar mensagem")
)
server = function(input, output) {
observeEvent(input$btn, {
shinyalert(title = "Mensagem de Erro", type="error") #error #default #warning
})
}
shinyApp(ui, server) |
c776c6c8a5d9ded6471a4b8e3f83bce327de3088 | 48a30f16d7863912e91d247ad7a374c106438352 | /plot4.R | 5d87b2d2aa402c27b28aa4df8fab59b7ff9790f9 | [] | no_license | Jubijub/ExData_Plotting1 | 187fa2c2a4fb92a8c4cdd037ce888ed1c5ae1f81 | 01141a7d698fdae0fc912542a26e437d284d358c | refs/heads/master | 2021-01-12T15:09:08.098407 | 2016-10-23T20:13:07 | 2016-10-23T20:13:07 | 71,714,894 | 0 | 0 | null | 2016-10-23T16:25:11 | 2016-10-23T16:25:11 | null | UTF-8 | R | false | false | 2,190 | r | plot4.R | ##### COMMON BOILERPLATE FOR ALL FILES #####
## Downloading the source data (skipped if the archive is already present).
rawDataSource <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFile <- "./household_power_consumption.zip"
if (!file.exists(zipFile)){download.file(rawDataSource, zipFile)}
message("Source File downloaded")
if(file.exists(zipFile)){unzip(zipfile=zipFile, exdir=".")}
message("ZIP content extracted")
## Reading the data into a dataframe
# As per the exercise parameters, load only lines between 2007-02-1 and 2007-02-02.
# NOTE(review): skip/nrow are hard-coded line offsets into the raw text file and
# assume its exact layout never changes -- TODO confirm against the source data.
hpc <- read.table("./household_power_consumption.txt",
                  header = TRUE,
                  sep = ";",
                  skip=66636,
                  nrow=69517-66637)
# Setting names manually as skip removes the head line
names(hpc) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2", "Sub_metering_3")
# Converting dates to proper dates, and building a combined date-time column
# used as the x axis in all four panels.
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
hpc$DateTime <- as.POSIXct(paste(hpc$Date,hpc$Time))
message("Household power consumption data loaded and cleaned")
###################################################
# Forces locale to English so that date labels (weekday names) are in English
Sys.setlocale(category = "LC_ALL", locale = "english")
# Produce the PNG file with a 2x2 grid of panels
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
# top-left graph: global active power over time
with(hpc, plot(DateTime, Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power (kilowatts)"))
# top-right graph: voltage over time
with(hpc, plot(DateTime, Voltage, type="l", col="black", xlab="datetime"))
# bottom-left graph: the three sub-metering series overlaid, with a legend
with(hpc, plot(DateTime, Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering"))
with(hpc, lines(DateTime, Sub_metering_2, col="red"))
with(hpc, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lwd=2, bty="n")
# bottom-right graph: global reactive power over time
with(hpc, plot(DateTime, Global_reactive_power, type="l", col="black", xlab="datetime"))
dev.off()
message("plot4.png file created")
e4a63e3df719432a934ce0a1cfe64c4244579f29 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8989_0/rinput.R | a7856b0ab543e1258b5241ce71a65882a55cb9e0 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the phylogenetic tree, remove its root, and write the unrooted tree out.
testtree <- read.tree("8989_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8989_0_unrooted.txt")
f25945bb780a21f3496f29eec7733e19aadb3085 | 7e1d9c9dc85c3036d101bafeffe494f5408b44cc | /man/mss.Rd | 33594664cd14af60566c940f89a622756cb5dde4 | [] | no_license | kvnkuang/rmimp | c31283f0a4de5bcb3e27f0a761e4ea298319a500 | 9db33a5e1bd929022ec365d227eb77ab11b8cfa2 | refs/heads/master | 2020-04-05T23:10:40.368463 | 2017-12-08T15:02:38 | 2017-12-08T15:02:38 | 62,658,628 | 1 | 2 | null | 2017-05-29T21:27:56 | 2016-07-05T18:07:13 | R | UTF-8 | R | false | true | 759 | rd | mss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwm-functions.r
\name{mss}
\alias{mss}
\title{Compute matrix similarity score as described in MATCH algorithm}
\usage{
mss(seqs, pwm, na_rm = F, ignore_cent = T, kinase.domain = T)
}
\arguments{
\item{seqs}{Sequences to be scored}
\item{pwm}{Position weight matrix}
\item{na_rm}{Remove NA scores?}
\item{ignore_cent}{If TRUE, central residue is ignore from scoring.}
\item{kinase.domain}{Whether the domain to be trained is a kinase domain.}
}
\description{
Computes matrix similarity score of a PWM with a k-mer.
Score ranges from 0-1, as described in [PMID: 12824369]
}
\examples{
# No Examples
}
\keyword{internal}
\keyword{match}
\keyword{mss}
\keyword{pwm}
\keyword{tfbs}
|
34a71195242bd37fb62f01b820948418b366d2b2 | 7aad69c67ed152472b3f6c40bc410ef8139e0eb1 | /Code/man/prepare.tree.Rd | bd05b74b964b72202db51ac1d93b1c3007f3af91 | [] | no_license | carlos-alberto-silva/slashGY | de432f7e9fb71abd01bca05ac4256cf3bbf6e6e4 | 3f787fc909c6abb476e0766cc842bb9fdc819e67 | refs/heads/master | 2022-01-19T05:53:34.066757 | 2019-04-29T20:16:57 | 2019-04-29T20:16:57 | 286,616,659 | 1 | 0 | null | 2020-08-11T01:33:30 | 2020-08-11T01:33:29 | null | UTF-8 | R | false | true | 2,260 | rd | prepare.tree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare.tree.r
\name{prepare.tree}
\alias{prepare.tree}
\title{Checks and prepares tree-level data from a single plot and calculates some stand-level parameters.}
\usage{
prepare.tree(TREEID = NA, DBH = NA, HT = NA, AREA = NA, AGE = NA,
methodHT = 2)
}
\arguments{
\item{TREEID}{Vector of unique tree identification.}
\item{DBH}{Vector of diameter at breast height (DBH, in). Must be complete and have the same size and order as TREEID.}
\item{HT}{Vector of total height (ft). Must be of the same size and order as TREEID.}
\item{AREA}{Numeric value of area of the inventory plot (ft2).}
\item{AGE}{Numeric value of stand age (years). Required if method = 1.}
\item{methodHT}{Numeric value that identifies the method to estimate missing heights. 1: parametrized DBH-height model,
2: fits a simple DBH-height model from available measurements. Default method = 2.}
}
\value{
A list containing the following:
\itemize{
\item \code{BA} Basal Area (ft2/acre).
\item \code{N} Number of trees per acre.
\item \code{HDOM} Dominant Height (ft).
\item \code{tree.table} Data frame with all tree data and observed heights (for the ones provided) and estimated heights
(for those missing). The data frame contains the columns: TREEID, DBH and HT.
}
}
\description{
\code{prepare.tree} Checks and prepares tree-level data from a single plot and then calculates stand-level
parameters such as basal area, number of trees per acre and dominant height. The provided vector of total heights
can have missing information. If there are missing tree heights, there are two methods to use: 1) Estimates heights according
to a parametrized DBH-height model, or 2) Estimates heights by fitting a simple DBH-height model that requires at least
10 measurements. Missing values are identified as 'NA'.
}
\examples{
# Example - Stand-level information from inventory data
treedata <- subset(treedata, is.na(DBH)==FALSE)
TREEID <- treedata$TREEID
DBH <- treedata$DBH
HT <- treedata$HT
prepare.tree(TREEID=TREEID, DBH=DBH, HT=HT, AREA=3240, AGE=5, methodHT=2)
}
\seealso{
\code{\link{tree.HT}}
}
\author{
Priscila Someda-Dias, Salvador A. Gezan
}
|
ff7f52b9a256abce23f14f9108f0a9dc35e19e7c | 9b07478e8c8a3806f2cc897c5cbecd5980357dc3 | /10_Classification/src/naiveBayes.r | caaee43752e70f9446e3b14e152001fdc338343f | [] | no_license | jimyungkoh/BigDataAnalysis | 577450e1ae33afc287d3e30bca0af8bb204d05e8 | da72f7b54563cf9b14b6aa6ce5fdbd1acf17ff6f | refs/heads/master | 2023-05-08T00:41:06.839480 | 2021-06-05T14:38:45 | 2021-06-05T14:38:45 | 356,225,232 | 0 | 0 | null | 2021-06-05T13:26:15 | 2021-04-09T10:07:50 | HTML | UTF-8 | R | false | false | 847 | r | naiveBayes.r | #########################
## Naive Bayes Classifier
#########################
# Install (once) and load the e1071 package, which provides naiveBayes().
#install.packages("e1071")
??e1071
library(e1071)
# Data set: mushroom data from the UCI repository (class column "type").
mushroom <- read.csv("data/mushrooms.csv")
str(mushroom)
?naiveBayes
# Split the data:
# training set (6500 random rows), test set (the remaining rows).
n <- nrow(mushroom);
tr_idx <- sample(1:n, 6500);
# Build the training set, test set and their class-label vectors;
# column 1 ("type") is the class, so it is dropped from the predictors.
train <- mushroom[tr_idx, -1];
test <- mushroom[-tr_idx, -1];
train_class <- mushroom[tr_idx, "type"];
test_class <- mushroom[-tr_idx, "type"];
# Fit the Naive Bayes model / classifier.
m_cl <- naiveBayes(train, train_class)
# Predict the classes of the test data.
m_pred <- predict(m_cl, test)
# Confusion matrix: actual vs. predicted classes.
table(test_class, m_pred)
# Performance evaluation: accuracy on the held-out test set.
sum(test_class==m_pred)/(n-6500)
6e918f6a4b54709e84c6973a32fef20ccfe1f885 | 73d3258e393c4857cf6e1a17400bc891a5cbf88a | /Old/EDA.R | d13401df2e2b89e496e441b9c39835b6914512d6 | [] | no_license | Junjie-Dylan-Yang/Building-Energy-Consumptions-Classification | 7e9d42c1d9768647b689fe8aede82c17f2090a11 | 6a636145d0e0a8828730f9fa7ca29acac08f67de | refs/heads/main | 2023-02-03T20:04:11.312493 | 2020-12-10T18:19:30 | 2020-12-10T18:19:30 | 314,711,061 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,604 | r | EDA.R | library(tidyverse)
library(lubridate)
library(here)

# Load the raw tables; here() resolves paths relative to the project root.
# (read_csv and %>% come from the tidyverse, loaded above.)
building_metadata <- read_csv(here("data", "building_metadata.csv"))
building_meter <- read_csv(here("data", "train.csv"))
weather <- read_csv(here("data", "weather_train.csv"))

# One row per (building, meter, timestamp) with building and weather attributes.
building <- building_meter %>%
  left_join(building_metadata, by = "building_id") %>%
  left_join(weather, by = c("site_id", "timestamp"))

# Average weekly profile (day-of-week x hour) for a single meter type, faceted
# by primary use. The synthetic "2020-11-<day> <hour>" date only serves to lay
# the 7x24 grid out on a continuous x axis. Meter codes 0-3 are assumed to be
# the four energy types of the data set -- TODO confirm with the data dictionary.
plot_weekly_profile <- function(data, meter_type) {
  data %>%
    filter(meter == meter_type) %>%
    mutate(day = wday(timestamp), hour = hour(timestamp)) %>%
    group_by(primary_use, day, hour) %>%
    summarize(avg_meter_reading = mean(meter_reading)) %>%
    mutate(date = ymd_h(paste0("2020-11-", day, " ", hour))) %>%
    ggplot(aes(x = date, y = avg_meter_reading)) +
    geom_path() +
    facet_wrap(~primary_use)
}

# One plot per meter type; top-level calls auto-print each ggplot, matching the
# behavior of the original four copy-pasted pipelines.
plot_weekly_profile(building, 0)
plot_weekly_profile(building, 1)
plot_weekly_profile(building, 2)
plot_weekly_profile(building, 3)

#-------------------------------------------------------------------------------
# Education buildings, meter 0: month-resolved weekly profile.
building %>%
  filter(meter == 0, primary_use == "Education") %>%
  mutate(day = wday(timestamp), hour = hour(timestamp), month = month(timestamp)) %>%
  group_by(month, day, hour) %>%
  summarize(avg_meter_reading = mean(meter_reading)) %>%
  mutate(date = ymd_h(paste0("2020-", month, "-", day, " ", hour))) %>%
  ggplot(aes(x = date, y = avg_meter_reading)) +
  geom_path()

# Religious-worship buildings, meter 0: weekly profile.
building %>%
  filter(meter == 0, primary_use == "Religious worship") %>%
  mutate(day = wday(timestamp), hour = hour(timestamp)) %>%
  group_by(primary_use, day, hour) %>%
  summarize(avg_meter_reading = mean(meter_reading)) %>%
  mutate(date = ymd_h(paste0("2020-11-", day, " ", hour))) %>%
  ggplot(aes(x = date, y = avg_meter_reading)) +
  geom_path() +
  facet_wrap(~primary_use)
|
1ab81a17da556ee6597d5cd8cafa9385db9f9f9b | 3e8a948e1d49911eb041c6be61fa02125293ce80 | /statistical_analysis/wilcoxon_signed_rank_test.R | 7b3333050865a102ba8e7616807ca73ceee431a4 | [] | no_license | MatsudaYoshio/study | 66fea10ada4920763cab9d56ff38aad4eec8a6f3 | 604efb487ccced2d93ca1e93dc1b0a9559d0ba9b | refs/heads/master | 2022-11-16T15:51:11.022117 | 2022-11-12T12:49:46 | 2022-11-12T12:49:46 | 101,472,509 | 0 | 0 | null | null | null | null | SHIFT_JIS | R | false | false | 1,158 | r | wilcoxon_signed_rank_test.R | # ウィルコクソンの符号順位検定
# Two groups (two methods), paired samples, non-parametric.
# Typical use: Likert-scale questionnaire answers compared between two methods.
library(openxlsx)
library(stringr)
library(exactRankTests)
# NOTE(review): `file.path` shadows base::file.path in this script; harmless
# here, but worth renaming if the script grows.
file.path <- './wilcoxon_signed_rank_test_sample.xlsx'
all.data <- read.xlsx(file.path, 1)
item.num <- (ncol(all.data)-1)/2 # number of questionnaire items
item.name <- str_sub(colnames(all.data[2:(2+item.num-1)]), start = 1, end = -4)
data1 <- all.data[2:(2+item.num-1)]
data2 <- all.data[(2+item.num):(2*item.num+1)]
for (i in 1:item.num){
  cat(sprintf('-------------- %s --------------\n', item.name[i]))
  # Run a normality test (Shapiro-Wilk) on all the data of this item and
  # confirm that the null hypothesis (sample follows a normal distribution)
  # is rejected, which justifies the non-parametric test below.
  all.vector <- append(as.vector(t(data1[i])), as.vector(t(data2[i])))
  print(shapiro.test(x=all.vector))
  # Run the paired Wilcoxon signed-rank test for this item.
  print(wilcox.exact(x=t(data1[i]), y=t(data2[i]), paired=T))
  cat(sprintf('--------------------------------------------------\n'))
}
9d58a943a76947c1d03b21cac10fe7f348bdc4a9 | 96c383f3534dfc283f9e8e57af14c7053e282714 | /run_analysis.R | a79b91d8badb289c78aa5ca92a217cd15896bcbc | [] | no_license | awalva/smartphones-project | f1a8339c6f6ee1d19df37c8c10dbb20bbe0bc4e5 | ad378774db0d23865dc38a0d67897a11475a5e13 | refs/heads/master | 2021-01-20T05:55:01.870223 | 2014-06-22T04:16:22 | 2014-06-22T04:16:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,882 | r | run_analysis.R | # You should create one R script called run_analysis.R that does the
# Build one tidy data set from the UCI HAR smartphones data:
# 1. Merge the training and the test sets to create one data set.
# 2. Extract only the measurements on the mean and standard deviation.
# 3. Use descriptive activity names to name the activities.
# 4. Appropriately label the data set with descriptive variable names.
# 5. Create a second tidy data set with the average of each variable for each
#    activity and each subject, written to "tidy.txt".

# melt()/dcast() come from reshape2 and ddply() from plyr; load them explicitly
# so the script runs in a fresh R session.
library(reshape2)
library(plyr)

# Read in features and activity information
activity <- read.table("activity_labels.txt", stringsAsFactors=F)
features <- read.table("features.txt", stringsAsFactors=F)

# Read in test/train data, labels, and subjects
test <- read.table("./test/X_test.txt", stringsAsFactors=F)
test_lab <- read.table("./test/y_test.txt", stringsAsFactors=F)
subject_test<- read.table("./test/subject_test.txt")

train <- read.table("./train/X_train.txt", stringsAsFactors=F)
train_lab <- read.table("./train/y_train.txt", stringsAsFactors=F)
subject_train<- read.table("./train/subject_train.txt")

# Create a character vector of syntactically valid column names (acceptable to
# R) based on the feature names
vars <- make.names(features$V2)

# Combine test and train data sets, and rename columns using the char vector
test_train <- rbind(test, train)
names(test_train) <- vars

# Combine labels (for activity) and subjects for test and train, then append
# them to the data set. `V1` = activity label, column 563 = subject.
label <- rbind(test_lab, train_lab)
subject <- rbind(subject_test, subject_train)
test_train_lab <- cbind(test_train, label, subject)
colnames(test_train_lab)[563] <- "subject"

# Merge with the activity names. This reorders the data; `V2` holds the name.
test_train_act <- merge(test_train_lab, activity)

# Subset the combined data set to include only mean and std columns,
# then re-append subject and activity labels and move them to the front.
columns <- grep("[Mm]ean|std", names(test_train_act))
test_train_act_sub <- test_train_act[, columns]
test_train_act_sub$subject <- test_train_act$subject
test_train_act_sub$activity <- test_train_act$V2
test_train_act_sub <- test_train_act_sub[, c(87, 88, 1:86)]

# Step 5: tidy data with the average of each variable per subject/activity.
# BUG FIX: the melted data frame is named `molten`; the original script passed
# an undefined object `mx` to ddply(), which made the script fail here.
molten <- melt(test_train_act_sub, id.vars = c("subject", "activity"))
d <- ddply(molten, .(subject, activity, variable), summarize, mean = mean(value))
tidy5 <- dcast(d, subject + activity ~ variable, value.var = "mean")

# Write the tidy data set to a txt file called `tidy.txt`
write.table(tidy5, file = "tidy.txt")
|
59646dd394b66add673f56800352e02d1e92dd4b | 41f88e136b0da2ce1a26b823b2f2ec26d88ddd35 | /cachematrix.R | 98258e58cc8acb3d01d44a59a01127238c893851 | [] | no_license | roberthortonii/ProgrammingAssignment2 | 2b69410d48c720ae995f5ceb55dc9663bb660ae8 | 959678a27ee193be0ebfad6db7b1ff778f1eeb66 | refs/heads/master | 2021-05-19T14:17:13.483575 | 2020-04-01T14:54:48 | 2020-04-01T14:54:48 | 251,752,497 | 0 | 0 | null | 2020-03-31T21:58:50 | 2020-03-31T21:58:49 | null | UTF-8 | R | false | false | 1,178 | r | cachematrix.R | ## Since calcualting the inverse of amatrix is costly,
## makeCacheMatrix: wrap a matrix in an object that can memoise its inverse.
## Because computing an inverse is costly, the inverse is stored once computed.
## Returns a list of four closures sharing one environment:
##   set(y)        -- replace the stored matrix and clear any cached inverse
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix())
{
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## retrieves teh inverse of the stored matrix
## if the cahed inverse is null, calculates and
## atores the inverse of the stored matrix
cacheSolve <- function(x, ...)
{
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
9c7bca026352844de4cbe81d49eba960ea210c75 | b8841bf56594f65fe13115b7f8318b97c9dddb1f | /ch_inference_foundations_oi_biostat/figures/pValueTuna/pValueTuna.R | 802e06d6ff9e2539e51d945567dcb61391a6967f | [] | no_license | OI-Biostat/oi_biostat_text | 5cdab8191714beef6a67d61d1f69982e4d8c13aa | a333893cffe830b8ce1e31d9226a9a50906605e4 | refs/heads/master | 2023-05-28T12:33:51.243348 | 2022-11-30T20:59:44 | 2022-11-30T20:59:44 | 281,164,408 | 7 | 7 | null | 2022-11-30T21:02:43 | 2020-07-20T16:06:49 | TeX | UTF-8 | R | false | false | 642 | r | pValueTuna.R | library(openintro)
data(COL)
# Open the PDF device (6 x 2.4 in) with tight margins and compact axis labels.
myPDF('pValueTuna.pdf', 6, 2.4,
      mar = c(2, 0, 0.5, 0),
      mgp = c(3, 0.65, 0))
# Shade the two tails beyond |t| = 0.599 (the observed test statistic).
normTail(L = -0.599,
         U = 0.599,
         col = COL[1],
         axes = FALSE)
# Mark the observed statistic on the x axis.
labels <- expression('t = 0.599')
axis(1, at = 0.599, labels = labels, cex.axis = 0.87)
par(mgp = c(3, 0.77, 0))
# Label the center of the distribution (mu = 0); the outer 0s are placeholders.
at <- c(-15, 0, 5)
labels <- expression(0, ''*mu*' = 0 ', 0)
axis(1, at, labels, cex.axis = 0.87)
# Overlay a second curve showing the +/-1.96 rejection region in a faded color.
par(new=TRUE)
normTail(L = -1.96,
         U = 1.96,
         curveColor = fadeColor(3, fade = "00"),
         border = fadeColor(2, fade = "33"),
         col = fadeColor(2, fade = "33"),
         axes = FALSE)
dev.off()
|
6ea3f3b8b3712141a6d97bbbcc8b03e63b5f367b | c77069c2dc6dbf3f9449a44e06d70b540a1912b5 | /R/fixed.parameters0.R | 638af7d1026d6d60581ce213e7a822d972c30017 | [] | no_license | cran/phenology | 62b323a9231c3701568de58c57a804e043abe6a2 | 991d2c35dcbcf1fcff23cbcc0c2f82b74a868dfb | refs/heads/master | 2023-04-15T03:37:51.464388 | 2023-04-01T09:10:02 | 2023-04-01T09:10:02 | 17,698,504 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | fixed.parameters0.R | #' fixed.parameters0 generates a set of fixed parameters for series with only 0 counts
#' @title Generate a set of fixed parameters for series with only 0 counts
#' @author Marc Girondot
#' @return A named numeric vector of fixed parameters (Max_<series> = 0), or NULL when no series has only 0 counts
#' @param series Set of series generated with add_phenology()
#' @description This function generates a set of fixed parameters for series with only 0 counts.\cr
#' The parameter series must be a result from add_phenology().
#' @examples
#' \dontrun{
#' refdate <- as.Date("2001-01-01")
#' data_Gratiot <- add_phenology(Gratiot, name="Complete", 
#' 		reference=refdate, format="%d/%m/%Y")
#' pfixed <- fixed.parameters0(data_Gratiot)
#' }
#' @export

fixed.parameters0 <-
  function(series=stop("A result from add_phenology() must be provided.")) {
    pfixed <- NULL
    # Total observed count per series; vapply is type-stable, unlike sapply,
    # which could change its return type on an empty input list.
    sumSeries <- vapply(series, function(x) sum(x$nombre), numeric(1))
    # Names of the series whose counts are all zero.
    sumSeries0 <- names(sumSeries[sumSeries == 0])
    if (length(sumSeries0) > 0) {
      # For each empty series, fix its Max_ parameter at 0.
      p0 <- rep(0, length(sumSeries0))
      names(p0) <- paste0("Max_", sumSeries0)
      pfixed <- c(pfixed, p0)
    }
    return(pfixed)
  }
|
620ac697ef2dbf255ae33527a93bab13e9de0af1 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.ec2/man/delete_flow_logs.Rd | 989cf8454837d0a4f0ece35e5c400de05352c368 | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 770 | rd | delete_flow_logs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{delete_flow_logs}
\alias{delete_flow_logs}
\title{Deletes one or more flow logs}
\usage{
delete_flow_logs(DryRun = NULL, FlowLogIds)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
\item{FlowLogIds}{[required] One or more flow log IDs.}
}
\description{
Deletes one or more flow logs.
}
\section{Accepted Parameters}{
\preformatted{delete_flow_logs(
DryRun = TRUE|FALSE,
FlowLogIds = list(
"string"
)
)
}
}
|
827467c3d361c6e95a4af8ce360a01c933b26bdf | 3d3add09244aa3f9d325d2ca56642355baf1dda6 | /DSwRproject.R | 8949f793faf1660c0116389a82889fc8229ca58e | [] | no_license | AlejandroGarciaTorrado/DSwR | 77a3a6970bdf813f3198de6ae626f6754471a6a4 | b463ffd3463b289ca7bfeec66792cc7c8d012b3a | refs/heads/master | 2022-11-13T07:38:11.795043 | 2020-07-09T09:02:47 | 2020-07-09T09:02:47 | 278,300,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23 | r | DSwRproject.R | #DSwR
# Sanity check: inspect the structure of the built-in mtcars data set.
str(mtcars)
# ok
2746177ef123f01b0a00c49123f928ee762a12b9 | 230fabadbc7881e514a9e3288be18743026f7cc3 | /man/Plot.Sobol.Rd | 8b4dd571ca2ac5b374ef41698365f65aa8ccdb09 | [] | no_license | cran/rrepast | d15e5f00a973c569957c26671c3e9002a1c51ccf | 46ca64781419e5c475521e0ade9e9786b6427cd1 | refs/heads/master | 2021-05-04T11:21:56.188742 | 2020-02-19T05:00:05 | 2020-02-19T05:00:05 | 48,087,733 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 464 | rd | Plot.Sobol.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrepast-plots.R
\name{Plot.Sobol}
\alias{Plot.Sobol}
\title{Plot of Sobol output}
\usage{
Plot.Sobol(obj, type, title = NULL)
}
\arguments{
\item{obj}{An instance of Sobol Object \code{\link{AoE.Sobol}}}
\item{type}{The chart type}
\item{title}{Chart title, may be null}
}
\value{
The resulting ggplot2 plot object
}
\description{
Generate plot for Sobol's GSA
}
|
e1c31217c310266bc6ae68cf3602a3a3c55ed7ed | c76a3e135789e3f721e00886552eb0651cb3f887 | /PathwayFoldChange.R | 99be24f7e1128c51c80f6916bdd31f494c5197f9 | [] | no_license | amberr098/StageRadboud | e67d510e0d3f3949a5f4d0d0df3dcc1456d7e2b5 | 954a41accd9317b07656ba71939c8c97e9249b97 | refs/heads/master | 2021-04-27T15:04:15.351091 | 2018-02-22T10:13:08 | 2018-02-22T10:13:08 | 122,462,340 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,571 | r | PathwayFoldChange.R | # Een dataframe gegenereerd waarin de twee gekozen condities door elkaar gedeeld worden. kolomnamen zijn de compounds,
# Build a one-row data frame (row name "Ratio", compound names as columns)
# holding condition1 / condition2 for each compound, then pass it to getLog2()
# which publishes the log2-transformed result as a global (by side effect).
# NOTE(review): when nrow(CondMatrix) != 2 the local `ratio` is never assigned
# and the getLog2() call below errors -- the function assumes two conditions.
getRatio <- function(CondMatrix){
  preRatioDataframe <- list()
  # This must hold: exactly two conditions are being compared.
  if(nrow(CondMatrix) == 2){
    # Extract the value rows of the two conditions.
    cond1 <- as.numeric(unlist(CondMatrix[1,]))
    cond2 <- as.numeric(unlist(CondMatrix[2,]))
    # Per-compound ratio of the two conditions.
    ratio <- cond1/cond2
    preRatioDataframe <- rbind(ratio)
  }
  ratioDataframe <- as.data.frame(preRatioDataframe)
  colnames(ratioDataframe) <- colnames(CondMatrix)
  rownames(ratioDataframe) <- "Ratio"
  getLog2(ratioDataframe, ratio)
}
# Compute log2 of the ratios of the two chosen conditions and publish the
# result as the GLOBAL data frame `log2Dataframe`. Note the `<<-` assignments:
# this function works by side effect rather than by return value.
getLog2 <- function(ratioDataframe, ratio){
  preLog2Dataframe <- list()
  # There is exactly one row, holding the ratios.
  if(nrow(ratioDataframe) == 1){
    preLog2Dataframe <- rbind(log2(ratio))
  }
  log2Dataframe <<- as.data.frame(preLog2Dataframe)
  colnames(log2Dataframe) <<- colnames(ratioDataframe)
  rownames(log2Dataframe) <<- "log2"
}
# Divide every 13C-labelled column by its matching unlabelled (12C) column and
# return the ratios as a numeric matrix (rows = samples, columns = compounds).
getRatioTime <- function(dataConditionNumeric){
  isotope_pattern <- "13C.{1,2}-"
  col_names <- colnames(dataConditionNumeric)
  # Indices of all 13C-labelled columns.
  labelled_idx <- grep(isotope_pattern, col_names)
  ratio_columns <- vector("list", length(labelled_idx))
  ratio_names <- character(length(labelled_idx))
  for (k in seq_along(labelled_idx)) {
    idx13 <- labelled_idx[k]
    # The matching 12C column carries the same name minus the isotope prefix.
    base_name <- gsub(isotope_pattern, "", col_names[idx13])
    idx12 <- which(col_names == base_name)
    # Element-wise 13C / 12C ratio for this compound.
    ratio_columns[[k]] <- dataConditionNumeric[, idx13] / dataConditionNumeric[, idx12]
    # Clean the 12C name for use as a column label.
    ratio_names[k] <- gsub("-", "_", gsub(" Results", "", col_names[idx12]))
  }
  ratios <- do.call(cbind, ratio_columns)
  rownames(ratios) <- rownames(dataConditionNumeric)
  colnames(ratios) <- ratio_names
  return(ratios)
}
# Compute per-time-point fold changes between the two conditions and return the
# log2 of those fold changes with NAs replaced by 0.
# Assumes row names look like "<sample>_<time>" and that each time point has
# exactly two rows (one per condition) -- TODO confirm with callers.
getFoldChangeTime <- function(average_ratios){
  rown <- c()
  new_col <- c()
  # Split sample and time apart so that rows with the same time point can
  # later be divided by each other.
  for(name in rownames(average_ratios)){
    sample_time <- unlist(strsplit(name, "_"))
    sample <- sample_time[1]
    time <- sample_time[2]
    new_col <- c(new_col, time)
    rown <- c(rown, sample)
  }
  # Add the time column and convert the data frame to a character matrix.
  average_ratios$Time <- new_col
  average_ratios_matrix <- matrix(as.character(unlist(average_ratios)),nrow=nrow(average_ratios))
  colnames(average_ratios_matrix) <- colnames(average_ratios)
  rownames(average_ratios_matrix) <- rown
  # Divide the pairs of rows that share the same time point.
  index_col_Time <- which(colnames(average_ratios_matrix) == "Time")
  unique_times <- unique(average_ratios_matrix[,index_col_Time])
  preFoldChanges <- list()
  for(time in unique_times){
    # There are always two row indexes per time point, because exactly two
    # conditions are being compared.
    indexes_time <- which(average_ratios_matrix[,index_col_Time] == time, arr.ind = T)
    index_row1 <- indexes_time[1]
    row1 <- average_ratios_matrix[index_row1,]
    index_row2 <- indexes_time[2]
    row2 <- average_ratios_matrix[index_row2,]
    # Drop the trailing Time value (last element of each row), keep the data.
    values1 <- as.numeric(row1[-length(row1)])
    values2 <- as.numeric(row2[-length(row2)])
    FoldChange <- values1/values2
    preFoldChanges <- rbind(preFoldChanges, FoldChange)
  }
  FoldChanges <- as.data.frame(preFoldChanges)
  rownames(FoldChanges) <- unique_times
  colnames(FoldChanges) <- colnames(average_ratios[-length(average_ratios)])
  # Take log2 of the fold changes; the value of the final assignment is the
  # (invisible) return value of this function.
  log2 <- getLog2Time(FoldChanges)
  log2 <- replace(log2, is.na(log2), 0)
}
# Compute log2 of every value in the time-indexed fold-change data frame.
# The local variable `log2` shadows base::log2 only within this scope.
getLog2Time <- function(FoldChanges){
  preLog2 <- list()
  for(index_row in 1:nrow(FoldChanges)){
    values <- log2(unlist(FoldChanges[index_row,]))
    preLog2 <- rbind(preLog2, values)
  }
  # NOTE(review): preLog2 starts as a list, so rbind appears to build a
  # list-mode matrix and as.matrix() keeps that mode rather than producing a
  # numeric matrix -- confirm whether callers expect numeric output.
  log2 <- as.matrix(preLog2)
  colnames(log2) <- colnames(FoldChanges)
  rownames(log2) <- rownames(FoldChanges)
  return(log2)
}
bb9f88d20810f1869eb482893c904eae25318e7c | e7708a1f5245d117acfcdb0113db5f6ff2f021ce | /binomial/man/bin_cumulative.Rd | c4aa907e341e0146a457d76536d8356b11bcfe34 | [] | no_license | stat133-sp19/hw-stat133-J-V-H | d0701984ae1ce64b479f7181fd2397697fca5d53 | 1170bcd2378ec5c0a48c0ded4acec62911f46892 | refs/heads/master | 2020-04-28T18:53:45.693129 | 2019-05-03T22:23:42 | 2019-05-03T22:23:42 | 175,492,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 417 | rd | bin_cumulative.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Binomial Cumulative}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{trials}
\item{prob}{probability}
}
\value{
bincum
}
\description{
Calculates a distribution given a probability of success and given number of trials
}
\examples{
bin_cumulative(5, 0.5)
}
|
eaaaf1d798b2bf495fb6ae9dea6095d9aeacbe55 | d2f39a2258dbe6253bc28fd00717a67b131751f4 | /man/GMRF_basis.Rd | 9810787a6d4c0fd0297446fb5da007abc596d5b5 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | andrewzm/MVST | 6e5d9d5c84ba0d28e38fdb69b12cfa8ba1bcc45f | 2bf0835e66e04e120f78fe8673afe3dd9d6f42c0 | refs/heads/master | 2022-09-29T23:40:39.048820 | 2022-09-15T21:37:50 | 2022-09-15T21:37:50 | 20,478,703 | 10 | 9 | null | 2018-10-18T14:50:36 | 2014-06-04T10:13:03 | R | UTF-8 | R | false | true | 803 | rd | GMRF_basis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllConstructor.R
\name{GMRF_basis}
\alias{GMRF_basis}
\title{GMRF function over basis}
\usage{
GMRF_basis(G = new("GMRF"), Basis = new("Basis", pars = list(vars =
data.frame(x = c(1, 2)))))
}
\arguments{
\item{G}{an object of class \code{GMRF}}
\item{Basis}{an object of class \code{Basis}}
}
\value{
Object of class \code{GMRF_basis} (which inherits from class \code{process} and is thus also a process block)
}
\description{
This function initialises an object of class \code{GMRF_basis} which defines a GMRF over a set of basis functions.
}
\examples{
G <- GMRF_RW(n=9)
Basis <- initGRBFbasis(x = c(0,1), y = c(0,1), std=0.1,nx=9,ny=1)
print(GMRF_basis(G,Basis))
}
\keyword{GMRF,}
\keyword{basis}
\keyword{functions}
|
e22fdce44553d76af1f156a33969d823544481bb | 862c4bca74786b462929176b28f2f54c4021c5ec | /man/storeNormalized.Rd | c7ac0cda9c6ee9104907c350131ee5c9123746ab | [] | no_license | iaconogi/bigSCale2 | 1d94d232781f08e28ee2a0c43214798a10cc9301 | e47f0cd4b6374e5bcc52d99f4c50d0671aada811 | refs/heads/master | 2023-07-06T21:52:31.163393 | 2020-07-12T08:52:34 | 2020-07-12T08:52:34 | 169,756,139 | 109 | 42 | null | 2023-07-03T12:18:59 | 2019-02-08T15:31:16 | R | UTF-8 | R | false | true | 653 | rd | storeNormalized.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SingleCellMethods.R
\name{storeNormalized}
\alias{storeNormalized}
\title{storeNormalized}
\usage{
storeNormalized(object, memory.save = TRUE)
}
\arguments{
\item{memory.save}{enables a series of tricks to reduce RAM memory usage. Aware of one case (in linux) in which this option causes irreversible error.}
\item{sce}{object of the SingleCellExperiment class.}
}
\value{
object of the SingleCellExperiment class with normalized counts saved inside (in the virtual memory)
}
\description{
Calculates and stores the normalized counts
}
\examples{
sce=storeNormalized(sce)
}
|
8278c6684156f070bda17547d1358e1f2a2e49c6 | e0a963242ba158c812eb3fe687c1c905493531c1 | /cachematrix.R | 2c25e55946968dee6420889c9c35e0c628cc1402 | [] | no_license | SivaMalladi/ProgrammingAssignment2 | f4ce737a8fe4aabcc86543eb16336854b27f319f | d3e7b9c51148a8465fbfc75b7d8522692ad13df7 | refs/heads/master | 2021-01-12T13:48:02.091961 | 2016-03-29T10:55:14 | 2016-03-29T10:55:14 | 54,943,130 | 0 | 0 | null | 2016-03-29T03:29:55 | 2016-03-29T03:29:54 | null | UTF-8 | R | false | false | 2,762 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## 1. A special function makeCacheMatrix that contains a list of four functions: set, get, setinverse, getinverse
## the argument for the above function is a matrix
## For set and setinverse a matrix should be passed as argument.
## For get and getinverse no argument is needed; they return the existing matrix and its inverse
## These four functions are called individually
## 2. A second function cacheSolve is called by passing an object of makeCacheMatrix class as an argument
## This function is used for either calculating the inverse of the matrix or returning the inverse if it already exists
## Constructor for a cache-aware matrix: returns a list of four closures
## (set, get, setinverse, getinverse) sharing one environment that holds the
## matrix `x` and its lazily cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  memo <- NULL
  set <- function(y) {
    x <<- y
    memo <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) {
    memo <<- inverse
  }
  getinverse <- function() memo
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" object `x` created
## by makeCacheMatrix. The inverse is computed with solve() on the first call
## and cached via x$setinverse(); subsequent calls return the cached copy.
## BUG FIX: the `...` arguments accepted by the signature are now actually
## forwarded to solve() instead of being silently dropped.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
## Instructions for running the above functions.
## 1. Define matrix mat1 and mat2 as 3x3 matrix
## mat1<-matrix(rnorm(9),nrow = 3, ncol = 3)
## mat2<-matrix(1:9, nrow = 3, ncol = 3)
## 2. Check whether the above matrices are invertible or not.
## If det(matrixname) returns a non-zero value, the matrix is invertible; otherwise it is not invertible
## det(mat1) returns non-zero (solve(mat1) returns the inverse of mat1)
## det(mat2) returns 0 (solve(mat2) returns an error)
## 3. create an object a of makeCacheMatrix class without any arguments
## a<-makeCacheMatrix()
## 4. use a$set function for assigning mat1
## a$set(mat1)
## 5. use a$get function to return the matrix mat1
## a$get()
## 6. To calculate the inverser of the matrix mat1 run execute cacheSolve function by passing the object a as argument
## cacheSolve(a)
## 7. Get the inverse of mat1 by executing the function a$getinverse
## a$getinverse()
## 8. Use solve funtion to validate output(The outputs from steps 6.7 and 8 should be same)
|
dfc39431643aec0483a732a3a6c206bf32a7a1da | 8f045a1293610a0bb955da7022418beea0d80c6f | /ChicagoChlamydiaModel/Scripts/ChicagoChlamydiaStats.R | f7d012a03c2a42e52c4e0371bc5ac0ee81ff6021 | [] | no_license | chrisporras/Chicago-chlamydia-spatialsim | 11fbca7369c1ec26d237df06d6a0547d49f481a0 | 0256b142178c902750e0fd5874d8711300a73e64 | refs/heads/master | 2020-03-23T08:16:30.350473 | 2018-08-07T16:20:39 | 2018-08-07T16:20:39 | 141,317,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 119 | r | ChicagoChlamydiaStats.R | #######Takes Realizations and Return Statisics for Difference from Actual Data
####Put into List of Fract Infecteds
|
99ec8d6a87d46cd7a2766de2a13402b89085f759 | 477b91511c1c345f583a53339c649587b142451d | /plot2.R | 9fc25c5dff2911d64f002a215064645d5689d2ac | [] | no_license | nasraf/ExData_Plotting1 | 6612a6bc16635647200d2ec5e373b3ad0963d7a5 | f68e6a04afa4d90f986ddfc10fc504c680f14c78 | refs/heads/master | 2020-12-25T15:40:52.994894 | 2014-09-06T15:33:17 | 2014-09-06T15:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 816 | r | plot2.R | ## read household power consumption file
df<-read.table("household_power_consumption.txt", sep=";",na.strings="?",header=TRUE)
## convert Date varible using as.Date() function
df$Date<-as.Date(df$Date,format="%d/%m/%Y")
datafile<-subset(df, Date==as.Date("2007-02-01")| Date==as.Date("2007-02-02"))
##convert attributes starting at the 3rd colunm to numeric
i=3
while (i <= 9)
{
datafile[,i]<-as.numeric(as.character(datafile[,i]))
i<-i+1
}
## convert the Date and Time variables to Date/Time classes in R
Ddate=paste(as.character(datafile$Date),as.character(datafile$Time),sep=" ")
Dtime=strptime(Ddate,format="%Y-%m-%d %H:%M")
datafile$Time=Dtime
## construct the plot #2
png("plot2.png")
plot(datafile$Time,datafile$Global_active_power,ylab="Global Active Power (kilowatts)",type="l",xlab="")
dev.off()
|
d1a232d12a1421529b1b19b1694d7e34d050fb30 | 0ad0e9e2e7b6aa6d12d897940a00a3d3966a7b0e | /assignment10/ui.R | d0ab0aabbc4e20353eebaea2116ed256358ab01b | [] | no_license | jenniferzj/DSTools | 9b4bbeb020eb28d9b38c226ca610b06b3db8a757 | 4011970020f3498ef48dcf9e732c87ee1815eb82 | refs/heads/master | 2021-04-27T12:33:02.995637 | 2018-05-03T01:21:57 | 2018-05-03T01:21:57 | 122,420,864 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,143 | r | ui.R | library(shiny)
ui <- fluidPage(
titlePanel("Predicting Probabilities of Each Species"),
sidebarLayout(
sidebarPanel(
sliderInput("Sepal.Length",label = "Sepal.Length:", value = 5
, min = 4.3, max = 7.9, step = .1)
, numericInput("Sepal.Width", "Sepal.Width:", value = 3
, min = 2, max = 4.4, step = .1)
, numericInput("Petal.Length", "Petal.Length:", value = 3,
min = 1, max = 6.9, step = .1)
,sliderInput("Petal.Width",label = "Petal.Width:", value = 1
, min = 0.1, max = 2.5, step = .1)
),
mainPanel(
tabsetPanel(type = 'tabs',
tabPanel("Prediction Table", tableOutput("pred_table")),
tabPanel("Scatter plot", plotOutput("distPlot")),
tabPanel("Density plot for Sepal Length", plotOutput("plot1")),
tabPanel("Density plot for Sepal Width", plotOutput("plot2")),
tabPanel("Density plot for Petal Length", plotOutput("plot3")),
tabPanel("Density plot for Petal Width", plotOutput("plot4"))
)
)
)
)
|
3d1bb63585dc38dd8d383c7ad98f89c6316f47b6 | 8564df84659380bcf825d61b38caaf1d2e05f8ff | /scripts/assignment_9_example.R | b91f57386d7111098cfecc51ae38f4e77e767b85 | [] | no_license | edquant/edh7916_student | e4365a801502327c0ceaf705466c746edd901730 | 8af9d96fe043935813a43faaaee9b698461ccc02 | refs/heads/main | 2023-04-16T03:59:55.586719 | 2023-04-11T21:08:27 | 2023-04-11T21:08:27 | 232,349,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,696 | r | assignment_9_example.R | ################################################################################
##
## <PROJ> EDH7916: Functional programming
## <FILE> assignment_9_example.R
## <INIT> 18 February 2020
## <AUTH> Benjamin Skinner (GitHub/Twitter: @btskinner)
##
################################################################################
## ---------------------------
## libraries
## ---------------------------
library(tidyverse)
## ---------------------------
## directory paths
## ---------------------------
## assume we're running this script from the ./scripts subdirectory
dat_dir <- file.path("..", "data")
sch_dir <- file.path(dat_dir, "sch_test")
bys_dir <- file.path(sch_dir, "by_school")
## -----------------------------------------------------------------------------
## Answer assignment 8 questions
## -----------------------------------------------------------------------------
## ------------
## (1)
## ------------
## get files
files <- list.files(bys_dir, "bend_gate|niagara", full.names = TRUE)
## init list
df_list <- list()
## loop
for (i in 1:length(files)) {
## read in each file to list, adding column along the way
df_list[[i]] <- read_csv(files[i]) %>%
## add column
mutate(relative_path = files[i])
}
## bind together
df <- bind_rows(df_list)
## ------------
## (2)
## ------------
## get files
files <- list.files(bys_dir, "bend_gate|niagara", full.names = TRUE)
## using purrr::map()
df <- map(files,
~ read_csv(.x) %>%
## add column WITHIN map() function
mutate(relative_path = .x)) %>%
## bind everything together AFTER map() function
bind_rows
## ------------
## (3)
## ------------
## (1)
## set up fix_missing() function (from class)
fix_missing <- function(x, miss_val) {
x <- ifelse(x %in% miss_val,
NA,
x)
return(x)
}
## read in hsls data
df <- read_csv(file.path(dat_dir, "hsls_small.csv"))
## show missing
df %>%
## count x1ses unique values
count(x1ses) %>%
## arrage descending
arrange(desc(n))
## NOTE: We know that x1ses is continuous and standard normal, so most
## values should be close to 0, have multiple decimal places, and not
## be repeated that often. When we take the count and show the the
## value that occurs the most at the top, we see that -8 is the most
## often by far and breaks out other rules. We'll use this to compare
## our fix
## remove missing in x1ses using fix_missing()
df %>%
mutate(x1ses = fix_missing(x1ses, -8)) %>%
## count x1ses unique values
count(x1ses) %>%
## arrage descending
arrange(desc(n))
## (2.1)
test_scr <- df %>%
filter(row_number() <= 50) %>%
pull(x1txmtscor)
## (2.2)
for (i in 1:length(test_scr)) {
if (test_scr[i] == -8) {
print(i)
}
}
## (2.3)
for (i in 1:length(test_scr)) {
if (test_scr[i] == -8) {
print(i)
} else {
print(test_scr[i])
}
}
## (2.4)
for (i in 1:length(test_scr)) {
if (test_scr[i] == -8) {
print("Flag: missing value")
} else if (test_scr[i] < 40){
print("Flag: low score")
} else {
print(test_scr[i])
}
}
## (3)
## version 1: this one doesn't not account for missing values
return_higher <- function(value_1, value_2){
ifelse(value_1 > value_2, # is value_1 bigger than value_2?
value_1, # YES: return value_1
value_2) # NO: return value_2
}
df %>%
## remove missing using our fix_missing() function
mutate(x1stuedexpct = fix_missing(x1stuedexpct, -8),
x1paredexpct = fix_missing(x1paredexpct, -8)) %>%
## filter our missing since our function can't account for them
filter(!is.na(x1stuedexpct), !is.na(x1paredexpct)) %>%
## use our function to get higher value
mutate(high_expct = return_higher(x1stuedexpct, x1paredexpct)) %>%
## select key vars to show
select(x1stuedexpct, x1paredexpct, high_expct)
## BONUS / MORE ADVANCED
## I don't expect that you will have done it this way (or know how),
## but I do want you to see a more sophisticated version.
## version 2: this one handles missing values and returns non-missing
return_higher <- function(value_1, value_2){
## case_when() is a more sophisticated way to do ifelse() statements
## without having to nest a bunch. It evaluates on the LHS of the ~
## and then, if TRUE, returns what's on the RHS of the ~
##
## Two notes:
##
## (1) It can't return just NA, but has to know which type of NA;
## since the other values are numbers, I've given it NA_real_
## (2) The final TRUE just means "everything else"; I know the various
## options well enough to know that if it isn't the first 4, then
## it has to be the last value.
case_when(
(is.na(value_1) & is.na(value_2)) ~ NA_real_,
(is.na(value_1) & !is.na(value_2)) ~ value_2,
(!is.na(value_1) & is.na(value_2)) ~ value_1,
(value_1 > value_2) ~ value_1,
TRUE ~ value_2
)
}
df %>%
## remove missing using our fix_missing() function
mutate(x1stuedexpct = fix_missing(x1stuedexpct, -8),
x1paredexpct = fix_missing(x1paredexpct, -8)) %>%
## use our function to get higher value
mutate(high_expct = return_higher(x1stuedexpct, x1paredexpct)) %>%
## select key vars to show
select(x1stuedexpct, x1paredexpct, high_expct) %>%
## show more than 10 so can see an NA
head(20)
## -----------------------------------------------------------------------------
## END SCRIPT
################################################################################
|
ff2765d052f288c1833c96baa66a76fbf7b3b367 | 251df421cec78612cbf56db7a0cbf2078b205dcd | /DNA_analysis/meta-workflow2.0.R | 9c34506c26db5f134046c9a4fc575952875bc463 | [
"MIT"
] | permissive | deponent-verb/popgen.analysis.pipeline | 7987d6e12f3b57ea70ce62dc0d3987e6d02eeb05 | ae482e915c7b2baca87242717cb6a0f19ca08792 | refs/heads/master | 2021-08-19T04:57:27.080926 | 2021-07-03T04:04:17 | 2021-07-03T04:04:17 | 213,124,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,732 | r | meta-workflow2.0.R | #version 2 of workflow for chunkybit 1
pacman::p_load(tidyverse,vip)
library(tidymodels)
#load data from cleaning script
genomes = read_csv("~/Documents/GitHub/popgen.analysis.pipeline/data/cleaned_chunky1_data.csv")
genomes = subset(genomes,
select = -c(ID))
genomes$sweep <- ifelse(genomes$sweep=="hard",1,0)
genomes$sweep <- as.factor(genomes$sweep)
#Partition dataset----
set.seed(1066)
genome_split<-initial_split(genomes,prop=0.8)
genome_train = training (genome_split)
genome_test = testing (genome_split)
cv_splits<-rsample::vfold_cv(genome_train,v=10,strata="sweep")
#we don't standardise haplotype stats because they are bounded by 0,1
hap_cols <- colnames(genomes)[which(colnames(genomes)=="h1_1"):which(colnames(genomes)=="h123_11")]
std_recipe <- recipe(sweep ~., data=genome_train) %>% #set sweep as response variable. everything else is a predictor.
update_role(demography, new_role = 'demography') %>% #remove demography as a predictor
update_role(s_coef, new_role = 'demography') %>% #remove s_coef as predictor
update_role(severity, new_role = 'demography') %>% #remove severity as a predictor
add_role(all_of(hap_cols), new_role = 'haplotype') %>%
step_corr(all_predictors(),threshold = 0.8) %>% #remove all highly correlated predictors
step_normalize(all_predictors(), -has_role("haplotype")) %>% #normalize all predictors, except haplotype stats
prep()
#transform dataset for pdp
baked_data = bake(std_recipe, new_data = genome_train)
trans_data = baked_data[,which(colnames(baked_data)=="H_1"):which(colnames(baked_data)=="h2_11")] %>%
cbind(sweep = baked_data$sweep)
#Logistical regression with L1 regularisation ----
genome_lr = logistic_reg(
mode="classification",
penalty = tune(),
mixture= 1
) %>%
set_engine("glmnet")
#Create set of tuning parameters
lr_grid = grid_regular(penalty(range=c(0,0.1)) ,
levels=10,
original = F)
#fit model
doParallel::registerDoParallel()
lr_t1 = Sys.time()
lr_wkfl = workflow() %>%
add_recipe(std_recipe) %>%
add_model(genome_lr)
lr_tune = lr_wkfl %>%
tune_grid(
resamples = cv_splits,
grid = lr_grid
)
lr_t2 = Sys.time()
#3.860783 mins for 10 levels
#LR CV surface plots
lr_cv = lr_tune %>%
tune::collect_metrics() %>%
dplyr::filter(.metric == "accuracy")
ggplot(data = lr_cv,
aes(x = penalty, y = mean)) +
geom_point() +
geom_errorbar( aes(ymax = mean + std_err, ymin = mean - std_err))+
ylab("cv accuracy") +
xlab("lambda")
#finalise LR
best_lr <- lr_tune %>%
tune::select_best(metric = "accuracy")
lr_final = lr_wkfl %>%
tune::finalize_workflow(best_lr) %>%
parsnip::fit(data = genome_train)
#vip
# lr_final %>%
# pull_workflow_fit() %>%
# vip(method = "firm", train = bake(std_recipe,genome_train), type = "classification")
#
# lr_final %>%
# pull_workflow_fit() %>%
# vip(method = "firm", train = bake(std_recipe,genome_train),
# type = "classification")
#
# lr_fit = lr_final %>%
# pull_workflow_fit()
#
# vip(lr_fit, lamda = lr_fit$fit$lambda[100], method = "firm",
# train = bake(std_recipe,genome_train), type = "classification",
# new_data = NULL,
# pred_wrapper = glmnet::predict.glmnet,
# arg = "nonzero")
#not ideal. Inserted best param here. Couldn't get vip to work with glmnet.
caret_data = bake(std_recipe,genome_train)
caret_data = subset(caret_data,
select = -c(s_coef,demography,severity))
caret_data$sweep = ifelse(caret_data$sweep==1,"hard","neutral") %>% as.factor()
lr_caret = caret::train(
sweep~. ,
data = caret_data,
method = 'glmnet',
trControl = caret::trainControl(method = "none", classProbs = TRUE),
tuneGrid = data.frame(alpha = 1 ,lambda = 0),
metric = "accuracy"
)
#vip(lr_caret)
lr_imp = vip(lr_caret, method = "firm")
lr_imp +
ggtitle("Logistic Regression")
#pdp
features = c("w_max_5","w_max_3","w_max_7")
lr_pdp = list()
for(f in 1:length(features)){
lr_pdp[[f]] = pdp::partial(lr_caret, pred.var = features[f],
plot = TRUE, type = "classification")
}
grid.arrange(grobs = lr_pdp, ncol = 3)
#ice example
ice_t1 = Sys.time()
lr_ice = pdp::partial(lr_caret, pred.var = "w_max_5",
ice = TRUE, type = "classification", plot = TRUE)
ice_t2 = Sys.time()
# ice_curves <- lapply(features, FUN = function(feature) {
# ice <- pdp::partial(caret_model, pred.var = feature, ice = TRUE)
# autoplot(ice, alpha = 0.1) +
# theme_light()
# })
# pdp::partial(lr_caret, pred.var = "w_max_5",
# plot = TRUE, type = "classification")
#RDA ----
library("discrim")
genome_rda <- discrim::discrim_regularized(
mode = 'classification',
frac_common_cov = tune(), #lambda
frac_identity = tune() #gamma
) %>%
set_engine("klaR")
#ref https://rdrr.io/cran/klaR/man/rda.html, https://discrim.tidymodels.org/reference/discrim_regularized.html
rda_grid <- grid_regular(frac_common_cov=discrim::frac_common_cov(range=c(0,1)),
discrim::frac_identity(range=c(0,1)),
levels=5)
names(rda_grid)[1] <- "frac_identity" #hack for weird bug
#fit rda
rda_wkfl = workflow() %>%
add_recipe(std_recipe) %>%
add_model(genome_rda)
rda_t1 = Sys.time()
rda_tune = rda_wkfl %>%
tune_grid(
resamples = cv_splits,
grid = rda_grid
)
rda_t2 = Sys.time()
#15.87438 mins for 25, 0.635 mins/model
rda_cv = rda_tune %>%
tune::collect_metrics() %>%
dplyr::filter(.metric == "accuracy")
ggplot(data = rda_cv,
aes( x = frac_common_cov, y = mean, color = factor(frac_identity))) +
geom_point() +
geom_errorbar( aes(ymax = mean + std_err, ymin = mean - std_err)) +
xlab("lambda") +
labs(color = "gamma") +
ylab("cv accuracy")
#finalise rda
best_rda <- rda_tune %>%
tune::select_best(metric = "accuracy")
rda_final = rda_wkfl %>%
tune::finalize_workflow(best_rda) %>%
parsnip::fit(data = genome_train)
#vip, caret version (not ideal)
trans_data1 = trans_data
trans_data1$sweep = ifelse(trans_data1$sweep==1,"hard","neutral")
rda_caret = caret::train(sweep~.,
data = trans_data1,
method = "rda",
trControl = caret::trainControl(method = "none", classProbs = TRUE),
tuneGrid = data.frame(gamma = best_rda$frac_identity,
lambda = best_rda$frac_common_cov))
rda_imp = vip(rda_caret, method = "firm")
saveRDS(rda_imp,file = "./results/Chunky1/rda_firm.rds")
rda_imp +
ggtitle("RDA")
features = c("D_6","H_6","D_7")
rda_pdp = list()
for(f in 1:length(features)){
rda_pdp[[f]] = pdp::partial(rda_caret, pred.var = features[f],
plot = TRUE, type = "classification")
}
grid.arrange(grobs = rda_pdp, ncol = 3)
#vip
#does not work. Will have to use caret.
# rda_imp = rda_final %>%
# pull_workflow_fit() %>%
# vip(method = "firm", train = bake(std_recipe, genome_train),
# features_names = c("w_max_6","w_max_5"))
#
# rda_final %>%
# pull_workflow_fit() %>%
# .$fit %>%
# vip(method = "firm",train = bake(std_recipe, genome_train),
# features_names = c("w_max_6","w_max_5"))
#Random Forest, test code with small hyperparams ---
#get number of predictors after applying recipe
num_terms = which(std_recipe$term_info$role == "predictor") %>% length()
genome_rf = rand_forest(
mode = "classification",
mtry = tune(),
min_n = tune(),
trees = 100
) %>%
set_engine("ranger")
rf_wkfl = workflow() %>%
add_recipe(std_recipe) %>%
add_model(genome_rf)
rf_grid<-grid_regular(mtry(range=c(10,num_terms)),
min_n(range=c(100,1000)),levels=4)
rf_t1 = Sys.time()
rf_tune = rf_wkfl %>%
tune_grid(
resamples = cv_splits,
grid = rf_grid
)
rf_t2 = Sys.time()
#takes 0.2 hours for one model
rf_cv = rf_tune %>%
collect_metrics() %>%
dplyr::filter(.metric == "accuracy")
write_csv(rf_cv, file = "./results/Chunky1/rf_res.csv")
ggplot(data = rf_cv,
aes( x = mtry, y = mean, color = cut(min_n, breaks = length(min_n) ))) +
geom_point() +
geom_errorbar( aes(ymax = mean + std_err, ymin = mean - std_err)) +
ylab("cv accuracy") +
labs(color = "min_n")
ggplot(data = rf_cv,
aes( x = mtry, y = mean, color = factor(min_n))) +
geom_point() +
geom_errorbar( aes(ymax = mean + std_err, ymin = mean - std_err)) +
ylab("cv accuracy") +
labs(color = "min_n")
#finalise rf
best_rf <- rf_tune %>%
tune::select_best(metric = "accuracy")
rf_final = rf_wkfl %>%
tune::finalize_workflow(best_rf) %>%
parsnip::fit(data = genome_train)
saveRDS(rf_final, file = "./results/Chunky1/rf_model.rds")
#vip
rf_imp = rf_final %>%
pull_workflow_fit() %>%
vip(method = "firm", target = "sweep", metric = "accuracy",
pred_wrapper = ranger::predictions,
train = bake(std_recipe,genome_train),
new_data = NULL)
rf_imp +
ggtitle("Random Forest")
saveRDS(rf_imp, file = "./results/Chunky1/rf_firm.rds")
features = c("D_6", "D_5", "D_4")
rf_pdp = list()
t1 = Sys.time()
for (f in 1:length(features)){
rf_pdp[[f]] = rf_final %>%
pull_workflow_fit() %>%
.$fit %>%
pdp::partial(train = bake(std_recipe,genome_train),
pred_wrapper = ranger::predictions,
pred.var = features[f],
plot = TRUE,
type = "classification",
new_data = NULL,
which.class = 2)
}
grid.arrange(grobs = rf_pdp, ncol = 3)
t2 = Sys.time()
t1= Sys.time()
rf_final %>%
pull_workflow_fit() %>%
.$fit %>%
pdp::partial(train = bake(std_recipe,genome_train),
pred_wrapper = ranger::predictions,
pred.var = "D_7",
plot = TRUE,
type = "classification",
new_data = NULL,
which.class = 2)
t2 = Sys.time()
#MARS ----
genome_mars <- mars(
mode = "classification",
prod_degree = tune(),
num_terms = tune(),
prune_method = "forward" #find default
) %>%
set_engine("earth")
#get number of predictors after applying recipe
num_terms = which(std_recipe$term_info$role == "predictor") %>% length()
n = 5
mars_grid = grid_regular(num_terms(range=c(1,num_terms)), levels = n) %>%
cbind(prod_degree = c(rep(1,n),rep(2,n)))
mars_t1 = Sys.time()
mars_wkfl = workflow() %>%
add_recipe(std_recipe) %>%
add_model(genome_mars)
mars_tune = mars_wkfl %>%
tune_grid(
resamples = cv_splits,
grid = mars_grid
)
mars_t2 = Sys.time()
#avg 0.847 mins for one model
#CV plots
mars_cv = mars_tune %>%
collect_metrics() %>%
dplyr::filter(.metric == "accuracy")
write_csv(mars_cv, file = "./results/Chunky1/mars_res.csv")
ggplot(data = mars_cv,
aes( x = num_terms, y = mean, color = prod_degree)) +
geom_point() +
geom_errorbar( aes(ymax = mean + std_err, ymin = mean - std_err)) +
ylab("cv accuracy") +
labs(color = "degree")
#finalise mars
best_mars <- mars_tune %>%
tune::select_best(metric = "accuracy")
mars_final = mars_wkfl %>%
tune::finalize_workflow(best_mars) %>%
parsnip::fit(data = genome_train)
saveRDS(mars_final, file = "./results/Chunky1/mars_model.rds")
#mars vip
mars_imp = mars_final %>%
pull_workflow_fit() %>%
vip(method = "firm", train = bake(std_recipe,genome_train))
saveRDS(mars_imp, file = "./results/Chunky1/mars_firm.rds")
mars_imp +
ggtitle("MARS")
#mars pdp
features = c("H_6", "h1_6", "H_5")
mars_pdp = list()
for(f in 1:length(features)){
mars_pdp[[f]] = mars_final %>%
#pull parsnip model
pull_workflow_fit() %>%
#pull out MARS model since pdp does not have support for parsnip
.$fit %>%
pdp::partial(train = trans_data, pred.var = features[f],
plot = TRUE, type = "classification")
}
grid.arrange(grobs = mars_pdp, ncol = 3)
ggplot(trans_data, aes(factor(sweep), D_6)) +
geom_boxplot()
#plot all firm scores
firm_plots = list(lr_imp,rda_imp,rf_imp,mars_imp)
grid.arrange(grobs = list(lr_imp + ggtitle("Logistic Regression"),
rda_imp + ggtitle("RDA"),
rf_imp + ggtitle("Random Forest"),
mars_imp + ggtitle("MARS")),
ncol = 2)
#overall AUC for each model
preds <- predict(list(lr_final,rda_final,rf_final,mars_final), genome_test, type = 'prob')
truth <- as.factor(genome_test$sweep)
roc_auc(tibble(preds,truth), truth = truth, .pred_0)
models = list(lr_final,rda_final,rf_final,mars_final)
for (m in models){
preds <- predict(m, genome_test, type = 'prob')
truth <- as.factor(genome_test$sweep)
print(roc_auc(tibble(preds,truth), truth = truth, .pred_0)
)
}
#AUC for each severity
source("./Model_comparison/model_performance.R")
# model_performance(fitted_model = mars_final,
# test_data = genome_test,recipe = std_recipe)
models = list(lr_final,rda_final,rf_final,mars_final)
model_names = c("Logistic Regression", "RDA","Random Forest","MARS")
#check the performance of each model by mapping the model_performance()
model_robustness <- map(.x = models,
.f = model_performance,
test_data = genome_test,
recipe = std_recipe)
#attach names for each AUC tibble
for( i in 1:length(models)){
#add names to each list
names(model_robustness)[i] <- model_names[i]
#add the ML method used for each AUC tibble
model_robustness[[i]] <- model_robustness[[i]] %>%
mutate(method = names(model_robustness)[i])
}
#bind all the AUC tibbles into the one dataframe
robustness_df <- do.call(rbind, model_robustness)
#auc plot across bottleneck severities
ggplot(data = robustness_df,
aes(x = severity+1, y = .estimate, color = method)) + #+1 to offset severity 0
geom_point() +
scale_x_log10() +
ylab("AUC") +
xlab("severity") +
theme(axis.text=element_text(size=15),
axis.title=element_text(size=15,face="bold"))
#AUC for s_coef
source("./Model_comparison/auc_scoef.R")
model_scoef <- map(.x = models,
.f = auc_scoef,
test_data = genome_test,
recipe = std_recipe)
#attach names for each AUC tibble
for( i in 1:length(models)){
#add names to each list
names(model_scoef)[i] <- model_names[i]
#add the ML method used for each AUC tibble
model_scoef[[i]] <- model_scoef[[i]] %>%
mutate(method = names(model_scoef)[i])
}
#bind all the AUC tibbles into the one dataframe
scoef_df <- do.call(rbind, model_scoef)
ggplot(data = scoef_df,
aes(x = s_coef, y = .estimate, color = method)) +
geom_point() +
scale_x_log10() +
ylab("AUC") +
xlab("selection coefficient") +
theme(axis.text=element_text(size=15),
axis.title=element_text(size=15,face="bold"))
|
5786b130ed54c6c0a6820341c832a2dfd62b17a3 | 7359c3690ffebafb2bde0b2869d06c9c4a0212ec | /R/xy.scale.R | 40ade4e68a8b5c924838610b65b63b5e5a389fe8 | [] | no_license | jfpalomeque/pandora | ad1bac6846da9ca99048dcedd69f286c5777f2ca | 8dc7c3a4c1d023406c9fbe79d566110be11c5cf5 | refs/heads/master | 2021-01-17T04:40:28.306193 | 2020-11-03T20:16:55 | 2020-11-03T20:16:55 | 43,978,220 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 375 | r | xy.scale.R | xy.scale <-
function(){#Open new window whit image, and take XY coordinates of scale.
windows(6,6); plot(seq(0, dim(specimen)[2], length.out = 10), seq(0, dim(specimen)[1], length.out = 10), type = "n", xlab = "x", ylab = "y", asp = 1, tck = 0, xaxt = "n", yaxt = "n")
rasterImage(specimen, 1, 1, dim(specimen)[2], dim(specimen)[1])
xy_scale<<-locator(n=2, type="l")
}
|
9e867364d65a9fbdb5a41147c375510fe828ff5e | 29585dff702209dd446c0ab52ceea046c58e384e | /internetarchive/tests/testthat/test-files-download.R | 448d108f50b91d7bc2f1b7d4c2dfcb3ae51fd4ab | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,238 | r | test-files-download.R | context("Files, metadata, and downloading")
library(dplyr, warn.conflicts = FALSE)
dir <- tempdir()
items <- ia_get_items("TheLifeOfFatherHecker", silence = TRUE)
meta <- ia_metadata(items)
files <- ia_files(items) %>%
filter(type == "txt")
downloads <- ia_download(files, dir, silence = TRUE)
test_that("ia_downloads() downloads a file", {
expect_equal_to_reference(readLines(downloads$local_file[1]),
"hecker_txt.rds")
})
test_that("ia_files() returns a data frame", {
expect_is(files, c("data.frame", "tbl_df"))
expect_named(files, c("id", "file", "type"))
})
test_that("ia_downloads() returns a data frame", {
expect_is(downloads, c("data.frame", "tbl_df"))
expect_named(downloads, c("id", "file", "type", "url",
"local_file", "downloaded"))
})
test_that("ia_metadata() returns a data frame", {
expect_is(meta, c("data.frame", "tbl_df"))
expect_named(meta, c("id", "field", "value"))
expect_equal_to_reference(meta, "hecker_meta.rds")
})
test_that("ia_get_item() returns a list", {
expect_is(items, "list")
expect_named(items, )
})
test_that("ia_item_id() returns item ids", {
expect_equal(ia_item_id(items), "TheLifeOfFatherHecker")
}) |
cdb284d678672f259b43ba33b719bf94048bbb13 | 951b65be142a703bfdd258bec4194c309868ccef | /simple_assessment/report.R | 3e0e878ef40990d0a821ec0436ecb11ad624ad6c | [] | no_license | colinpmillar/FLa4a.tests.taf | ef9ee4a52122904c01201b5f756dd1c6b90e3140 | 03185f36f41458eb182a52ae415589921e029323 | refs/heads/master | 2021-01-05T06:22:50.489977 | 2020-03-01T11:31:32 | 2020-03-01T11:31:32 | 240,913,362 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 253 | r | report.R | ## Prepare plots and tables for report
## Before:
## After:
library(icesTAF)
library(rmarkdown)
mkdir("report")
render("report.Rmd")
cp("report.md", "report", move = TRUE)
if (dir.exists("report_files"))
cp("report_files", "report", move = TRUE)
|
ae35ea0bbeb1ed081b7a59779a3a38b955c98c97 | 052e53c266c74e9a20f037cfca5df2b9c25c59be | /server.R | 889361cd89098100f61a6e03be9cc0be4e5e8aa4 | [] | no_license | kongchakbun/Course-Project-Shiny-Application-and-Reproducible-Pitch | 2d11e486863ef9fe56562bf17b6ee8f52699534c | 7720bd171528ba20d074a3f0eb21fdd068716c83 | refs/heads/master | 2022-02-22T11:37:48.140821 | 2019-09-17T07:16:36 | 2019-09-17T07:16:36 | 208,983,984 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,886 | r | server.R | library(shiny)
library(quantmod)
library(dplyr)
library(ggplot2)
shinyServer(function(input, output) {
#download the data
from.dat <- as.Date("09/01/09", format = "%m/%d/%y")
to.dat <- as.Date("08/31/19", format = "%m/%d/%y")
suppressWarnings(getSymbols("^HSI", src = "yahoo", from = from.dat, to =to.dat))
suppressWarnings(getSymbols("^DJI", src = "yahoo", from = from.dat, to =to.dat))
suppressWarnings(mHSI <- to.monthly(HSI))
suppressWarnings(mDJI <- to.monthly(DJI))
HSIClose <- Cl(mHSI)
DJIClose <- Cl(mDJI)
#create time series
ts1 <- ts(HSIClose, frequency = 12)
ts2 <- ts(DJIClose, frequency = 12)
#combine the time series data
stocks <- cbind(as.data.frame(ts1), as.data.frame(ts2))
#calculate the monthly return of the HSI and DJI
HSIMReturn <- NULL
DJIMReturn <- NULL
for (i in 2:nrow(stocks)){HSIMReturn[i-1] <- (stocks[i,1]- stocks[i-1, 1])/stocks[i-1,1]}
for (i in 2:nrow(stocks)){DJIMReturn[i-1] <- (stocks[i,2]- stocks[i-1, 2])/stocks[i-1,2]}
#calculate the standard deviations
HSIVol <- sd(HSIMReturn)*sqrt(12)
DJIVol <- sd(DJIMReturn)*sqrt(12)
#change the column names
colnames(stocks) <- c("HSI", "DJI")
#add the time period in the data frame.
stocks <- mutate(stocks, time = rep(as.Date(time(mHSI))))
output$distPlot <- renderPlot({
#check the slider input
yearNo <- input$yearNo
#create data starting period
yearData <- nrow(stocks) - yearNo * 12 + 1
#select the data based on the slider input
stocks <- stocks[yearData:nrow(stocks), ]
#plot the graph
g <- ggplot(stocks, aes(time))
g <- g + geom_line(aes(y=HSI, colour = "HSI")) + geom_line(aes(y = DJI, color = "DJI"))
g <- g + xlab("Year") + ylab("HSI and DJI Index Level") + ggtitle("HSI and DJI Index Level")
g
})
output$HSIVol <- renderText({
if(input$showHSIVol){
HSIVol}
})
output$DJIVol <- renderText({
if(input$showDJIVol){
DJIVol}
})
output$correlation <- renderText({
#check the slider input
yearok <- input$yearok
#create data starting period
yearData1 <- nrow(stocks) - yearok * 12 + 1
#select the data based on the slider input
stocks <- stocks[yearData1:nrow(stocks), ]
correlation <- cor(stocks[, 1], stocks[, 2])
})
}) |
31b7c9e2a5c2d0703f7889c15d850a7688f11f88 | 655326c8b219d49b20bf0673b25ab7ef8277476f | /classCode.R | 00e1a99ef01afe7bf96f178e14cae6d1f97fe53e | [] | no_license | Jay-Bektasevic/temp | 915da187ffd81472e7f4f91614e07bcce3d2527f | ccc009a4dbc910beb278a568e587cab5df98bbaa | refs/heads/master | 2021-01-11T21:58:04.380198 | 2018-01-26T01:24:55 | 2018-01-26T01:24:55 | 78,885,479 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 944 | r | classCode.R | ap <- AirPassengers
diff(ap) # lag = 2
# diff(ap, lag = 2)
ap_diff <- as.numeric()
for (i in length(ap)){
ap_diff[i-1] <- ap[i] - ap[(i-1)]
}
ap_diff == diff(ap)
# create an acf() function manually
acf_ap <- acf(ap)
cent <- ap - mean(ap)
n <- length(cent)
ACF <- 0
z <- sum(cent * cent)/n
ACF[1] <- z/z
for (i in 1:24) {
lag <- cent[-c(1:i)]
clipped <- cent[1:length(lag)]
ACF[i +1] <- (sum(clipped * lag)/n)/z
}
round(ACF, 6)[1:22] == round(as.vector(acf_ap$acf), 6)
# test wheather the time series is stationary
require(tseries)
# Augmented Dickey–Fuller Test
adf.test(rnorm(100)) # stationary b/c p-value is small
adf.test(diffinv(rnorm(100))) # not stationary data becasue the p-value is large
# residual diagnostics
forecast::checkresiduals(rnorm(100))
View(cov)
|
8dc918345db8f3208bae372ae3e5348414821b50 | 4f1fd976d00219383f19cdd95ed376adf5ebddba | /RApplication-Visualization.R | 92364460f4336f64e285683ea78834a1ca61531d | [] | no_license | piotrsalkowski5/R-and-Shiny-Applications | c952265a661398db98325980c22144d38d525721 | 2736704fd69217330e86059b36c167b93b18e2e0 | refs/heads/master | 2021-01-13T16:56:34.187418 | 2016-12-30T18:17:52 | 2016-12-30T18:17:52 | 77,702,695 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,741 | r | RApplication-Visualization.R |
dane <- read.csv(file = "C:/PIOTREK/Praca_Dane_R.csv")
library(shinythemes)
library(ggplot2)
library(shiny)
library(leaflet)
library(RColorBrewer)
ui<-fluidPage(title = "Zarobki",theme = shinytheme("cerulean"),
headerPanel("Zależności zarobkowe i zawodowe na podstawie informacji ofert pracy znalezionych na stronie www.goldenline.pl"),
sidebarPanel(sliderInput(inputId = "slider",label = "Choose number of Id",min = 1,max=length(dane[,1]),step = 1,value = 1)),
selectInput(inputId = "select",label = ("Choose Value"),choices = c("Zawód" = 1,"Firma" = 2,"Lokalizacja" = 3,"Wynagrodzenie" = 4)),
mainPanel(
radioButtons(inputId = "radiob",label = "Choose style",choices = c(1,2,3)),
tabsetPanel(
tabPanel(title = "Dane",
fluidRow(column(12, verbatimTextOutput("value")))),
tabPanel("Histogram",plotOutput("histt")),
tabPanel("BoxPlot",plotOutput("box"))
)
)
)
server<-function(input,output)
{
output$value<-renderPrint(dane[input$slider,as.numeric(input$select)])
Legenda <- dane[,3]
h<-ggplot(data = dane,aes(x=dane[,4]))
h2<-h + geom_histogram(binwidth = 300,aes(fill=Legenda),colour = "black") + xlab(colnames(dane)[4]) + ylab("Zlicz")
output$histt<-renderPlot(h2)
m<-ggplot(data = dane,aes(x=dane[,3],y=dane[,4],size=dane[,4],colour=dane[,3]))
m2<-m+geom_boxplot(aes(fill=Legenda),colour = "black") + xlab(colnames(dane)[3]) + ylab(colnames(dane)[4])+scale_color_gradient(low = "white", high = "black")
output$box <- renderPlot(m2)
}
shinyApp(ui,server)
|
860d05b66c009c60833279c863678faaf03f01b0 | ad3f3f1b14282d0169a7b383e5c51ee9aca2d8a2 | /Session_5/scr_R/data_simulation.R | 14e791ed176bf77376a15e3ca95bd28cdaf01d38 | [] | no_license | rohit-21/SS_JGU_21_ML | 207f644f1db55fad35b73fb23776ef2d5f757234 | f03db7b4f92401b328b5e53ea2a060a6063a442e | refs/heads/main | 2023-08-12T20:01:16.300327 | 2021-10-08T15:18:23 | 2021-10-08T15:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,808 | r | data_simulation.R | #@author = Marcel Schubert
####Change parameters here as needed
#remainder-error rnorm(N, 1, eta)
#eta <- 0.3
#individual-specific error rnorm(N,1,sigma)
#sigma <- 0.3
#type vector
# type 1: 0 + err; #freerider
# type 2: conditional up till 10, reverse conditional thereafter; ##hump-shaped
# type 3: conditional + err;
# type 4: unconditionally high;
# type 5: far-sighted free-rider: behaves like conditional cooperator until period p, and free-rides thereafter
#if more types are added one has to adapt gen_strategy_tables() and gen_time_dep_strategy()
#types <- c(1,2,3,4,5)
#groupsize
#grpsize <- 4
#number of rounds to play
#numrounds <- 10
#endowment
#endowment <-20
#random contribution in first round or contribution as laid out in the strategy functions
#rand_first <- FALSE
#draw random subset from all possible group compositions; values are TRUE, FALSE or string 'specific'
#if 'specific' the arguement group_compositions must be given as a list and number of players in group must equal those in the composition vector
#if random_subset == FALSE, then n is the times each possible group compositions is in the data set
#random_subset <- FALSE
#if specific specify compositions here, otherwise arguement is ignored and does not need to be passed to function
#group_compositions <- list(c(4,4,4,5))#, c(1,2,3,4), c(5,5,5,5))
#otherwise n is the number of groups in the random subset; it must be n>=2
#n <- 5
#do if want to add errors
#add_errors <- FALSE
#individual specific error
#sigma <- 0.3
#remainder error
#eta <- 0.3
#seed - every time seed is used it is a function depending on this value; value itself is not reused
#seed <- 12345
##### flexible data generation solution
gen_all_combinations <- function(types = c(1,2,3,4,5), grpsize){
  #generate all possible combinations, return as list
  # All unordered group compositions of size `grpsize` drawn from `types`
  # (combinations with replacement), via the iterpc package.  Returns a
  # list with one numeric vector per possible composition.
  require(iterpc)
  I = iterpc(n=length(types), r=grpsize, labels = types, ordered = FALSE, replace = TRUE)
  # getall() yields one combination per row; transposing and passing through
  # data.frame() turns each row into its own list element
  res = as.list(data.frame(t((getall(I)))))
  rm(I)
  return(res)
}
gen_strategy_tables <- function(endowment, grpsize){
  # Lookup tables mapping the (rounded) average contribution of the other
  # group members (0..endowment) to this player's own contribution.
  # Element 1 of every table is the fixed first-round contribution; element
  # k + 2 is the response to an observed average of k.
  # The list position equals the numeric type id; names ending in "_f" mark
  # time-independent strategies, "_t" marks time-dependent ones (their
  # per-round schedule comes from gen_time_dep_strategy()).
  # `grpsize` is accepted for interface symmetry but not used here.
  avg_grid <- 0:endowment
  half     <- round(endowment/2)
  quarter  <- round(endowment/4)
  list(
    # type 1: always contributes nothing
    "freerider_f"            = rep(0, length(avg_grid)+1),
    # type 2: matches the others' average up to half the endowment, then
    # mirrors downward (hump shape); starts at a quarter of the endowment
    "hump-shaped_f"          = c(quarter, ifelse(avg_grid <= half, avg_grid, endowment-avg_grid)),
    # type 3: conditional cooperator, matches the others' average
    "conditional_f"          = c(half, avg_grid),
    # type 4: always contributes the full endowment
    "unconditional_f"        = rep(endowment, length(0:endowment)+1),
    # type 5: far-sighted free-rider entry (time-dependent; this fixed
    # table only supplies its first-round contribution)
    "farsighted-freerider_t" = c(half, rep(0, length(1:endowment)))
  )
}
gen_time_dep_strategy <- function(endowment, grpsize, numrounds){
  # Period-by-period type schedule for the time-dependent strategies.
  # The far-sighted free-rider behaves as type 3 (conditional cooperator)
  # for the first numrounds - floor(numrounds/2) rounds and as type 1
  # (free-rider) for the final floor(numrounds/2) rounds.
  # `endowment` and `grpsize` are kept for interface symmetry but unused.
  n_freeride <- floor(numrounds/2)
  n_conditional <- numrounds - n_freeride
  list("farsighted-freerider_t" = c(rep(3, n_conditional), rep(1, n_freeride)))
}
gen_contribution_rules <- function(endowment, grpsize, numrounds){
  # Bundle both strategy families into one named list:
  #   $time_independent -- per-type contribution lookup tables
  #   $time_dependent   -- per-round type schedules for time-dependent types
  list(
    time_independent = gen_strategy_tables(endowment, grpsize),
    time_dependent   = gen_time_dep_strategy(endowment, grpsize, numrounds)
  )
}
draw_groups <- function(n, combinations, random_subset = FALSE, seed = 12345){
  # Select the group compositions that appear in the simulated data.
  #   random_subset == TRUE : draw n compositions at random, with replacement
  #   otherwise (FALSE or the string 'specific'): replicate every element of
  #     `combinations` exactly n times
  # The RNG seed is set here so the draw is reproducible.
  set.seed(seed)
  if(random_subset == TRUE){
    picks <- sample(1:length(combinations), size = n, replace = TRUE)
  } else {
    picks <- rep(1:length(combinations), n)
  }
  combinations[picks]
}
chunk2 <- function(x,n){
  # Split vector x into n consecutive, roughly equal-sized chunks.
  # cut() assigns every position an integer bin 1..n; split() then groups
  # the values by bin, so the result is a list named "1".."n".
  bins <- cut(seq_along(x), n, labels = FALSE)
  split(x, bins)
}
make_errors <- function(sigma, eta, numgroups, grpsize, rounds, add_errors = TRUE, seed=12345){
  # Draw the noise added to every contribution decision.
  # Returns a nested list: one element per group; each group element is a
  # list with one element per round; each round element is a numeric vector
  # with one error per group member.  Every entry is the sum of a fixed
  # individual-specific error (rnorm mean 0, sd sigma) and a round-specific
  # remainder error (rnorm mean 0, sd eta).  With add_errors = FALSE the
  # same structure is returned filled with zeros, so callers can add it
  # unconditionally.
  set.seed(seed*2)
  ##individualspecific errors
  errors <- chunk2(rnorm(numgroups*grpsize, 0, sigma), numgroups)
  ##roundspecific errors/residual error
  errors_round <- lapply(chunk2(rnorm(numgroups*grpsize*rounds, 0, eta), numgroups), chunk2, n=rounds)
  ##add remainder
  ##do for every list of errors by group
  # mapply pairs each group's individual-error vector with its per-round
  # remainder list; as.list(data.frame(...)) converts the matrix-like mapply
  # result back into a plain list of per-group round lists
  errors <- as.list(data.frame(mapply(function(eind, eround){
    ##here we have the individual specific errors in vector with #elements == #members of group
    ##the remainder are in a list with #elements == #rounds and each element is vector with #elements of vector == #members of group
    lapply(eround, function(eroun, ein){
      eroun + ein
    }, ein= eind)
  }, eind=errors, eround=errors_round)))
  if(!add_errors){
    # keep the nesting but zero out every error vector
    errors <- lapply(errors, function(x){lapply(x, function(y){y <- rep(0, length(y))})})
  }
  return(errors)
}
create_id <- function(marker, gid){
  # Unique member id: the group id digits followed by the member's
  # position digits, e.g. group 12, member 3 -> 123.
  as.numeric(paste0(gid, marker))
}
calc_contrib_others <- function(uid, contribution){
  # Rounded leave-one-out mean: for every uid, the average contribution of
  # all OTHER members.  Expects one uid/contribution pair per member of a
  # single group in a single period.
  leave_one_out <- vapply(uid, function(member){
    sum(contribution[uid != member]) / (length(contribution) - 1)
  }, numeric(1))
  round(leave_one_out)
}
make_group_wise_contributions <- function(group, errors, group_id, endowment, numrounds, strategy_f, strategy_t, rand_first){
  #function makes the contributions on individual group-level and returns them
  # `group` is the vector of player types; `errors` is this group's list of
  # per-round error vectors.  Returns a long data frame with one row per
  # member x period: uid, group_id, contribution, group composition string,
  # avg_others_l (lagged average of the others' contributions, -1 in
  # period 1), type and period.
  #uid
  # unique id = group id digits followed by the member's position digits
  uid <- sapply(1:length(group), create_id, gid=group_id)
  #make first round contribution
  #if random first round contrib
  if(rand_first){
    # NOTE(review): the range 0:20 and draw size 4 are hard coded; they
    # ignore `endowment` and the actual group size -- TODO confirm intended
    contr <- sample(0:20, 4)+errors[[1]]
    # clamp to the feasible range [0, endowment]
    contr <- ifelse(contr <= endowment, ifelse(contr >= 0, contr, 0), endowment)
  }
  #if fixed first round contrib from strategy table
  # element 1 of each strategy table holds the first-round contribution
  else{
    contr <- mapply(function(individ, error){
      contr <- round(strategy_f[[individ]][1]+error)
      #check that contribution is within limits
      contr <- ifelse(contr <= endowment, ifelse(contr >= 0, contr, 0), endowment)
    }, individ = group, error = errors[[1]])
  }
  contr <- round(contr)
  #make vector with croup composition so that every member of group has the same entry
  grp_cmpstn <- rep(paste(group, collapse = ' '),length(group))
  # period-1 rows; avg_others_l is -1 because there is no previous period
  df <- data.frame(uid = uid, group_id = rep(group_id, length(group)), contribution =contr, grp_cmpstn = grp_cmpstn,
                   avg_others_l = rep(-1, length(group)), type = group, period = rep(1, length(group)))
  ##calculate contributions of other rounds
  for(i in 2:numrounds){
    #for each individual in group
    for(ind in uid){
      #position in the uid vector is the same as the position of individual in the group/type vector
      #cmd <- sprintf('period %s', i)
      #print(eval(cmd))
      pos = which(ind == uid)[[1]]
      tp <- group[pos]
      # rounded average of the OTHER members' contributions last period
      avg_others <- round(sum(df[df$period == i-1 & df$uid != ind,]$contribution)/(length(group)-1))
      #get name of type
      nom <- names(strategy_f[tp])[1]
      #retrieve contribution from strategy table
      ##if time dependent
      # a '_t' suffix marks a time-dependent type: strategy_t maps the
      # current period to the type id whose fixed table applies this round
      if(grepl('_t', nom, fixed = TRUE)){
        # +2 skips the first-round slot, converting an average of 0 to
        # table index 2
        contr <- strategy_f[[strategy_t[[nom]][i]]][avg_others+2]
        #print(contr)
      }
      #if fixed strategy
      else{
        contr <- strategy_f[[tp]][avg_others+2]
      }
      ##add random error to contribution
      contr <- round(contr + errors[[i]][pos])
      # clamp to [0, endowment] again after adding noise
      contr <- ifelse(contr <= endowment, ifelse(contr >= 0, contr, 0), endowment)
      tmp <- data.frame(uid = ind, group_id = group_id, contribution = contr, grp_cmpstn = grp_cmpstn[1],
                        avg_others_l = avg_others, type = tp, period = i)
      df <- rbind(df, tmp)
      #if(i >3){return(df)}
    }
  }
  return(df)
}
make_contributions <- function(endowment, errors, groups, numrounds, strategy_f,
                               strategy_t, rand_first = FALSE, seed = 12345){
  ##function to make the contributions and concat the datarame correctly
  # Runs make_group_wise_contributions() for every group and row-binds the
  # results into one long data frame (one row per member x period).
  # strategy_f / strategy_t are the lookup tables from
  # gen_contribution_rules(); `errors` must be grouped like `groups`.
  require(dplyr)
  set.seed(floor(seed/3))
  # mapply yields a matrix-like object: one column per group, rows named by
  # the columns of each per-group data frame -> accessed as ['name', col]
  contrib_matrix <- mapply(make_group_wise_contributions, group=groups, errors=errors, group_id = 1:length(groups),
                           MoreArgs = c(endowment=endowment, numrounds = numrounds, strategy_f = list(strategy_f), strategy_t = list(strategy_t),
                                        rand_first = rand_first))
  #rearrange as data frame from matrix-like
  ##make df with first entry then loop; row and cols are named but col-names are not necessarily unique -> index access
  df <- data.frame(uid = contrib_matrix['uid', 1][[1]], group_id = contrib_matrix['group_id', 1][[1]],
                   contribution= contrib_matrix['contribution', 1][[1]], grp_cmpstn=contrib_matrix['grp_cmpstn', 1][[1]],
                   avg_others_l=contrib_matrix['avg_others_l', 1][[1]], type = contrib_matrix['type', 1][[1]],
                   period=contrib_matrix['period', 1][[1]])
  # append the remaining groups one at a time
  # NOTE(review): 2:ncol assumes at least two groups; with a single group
  # 2:1 counts downwards -- TODO confirm callers always pass n >= 2 groups
  for(i in 2:(dim(contrib_matrix)[2])){
    tmp <- data.frame(uid = contrib_matrix['uid', i][[1]],
                      group_id = contrib_matrix['group_id', i][[1]],
                      contribution= contrib_matrix['contribution', i][[1]],
                      grp_cmpstn=contrib_matrix['grp_cmpstn', i][[1]],
                      avg_others_l=contrib_matrix['avg_others_l', i][[1]],
                      type = contrib_matrix['type', i][[1]],
                      period=contrib_matrix['period', i][[1]])
    df <- rbind(df, tmp)
  }
  rm(contrib_matrix)
  # leftover (disabled) dplyr recomputation of the lagged group averages
  #df %>% group_by(group_id, period) %>%
  #  mutate(avg_others = calc_contrib_others(uid, contribution))
  #df %>% group_by(group_id, uid) %>%
  #  mutate(avg_others_l = c(-1, avg_others[1:(length(avg_others)-1)]))
  return(df)
}
generate_data <- function(types, grpsize, numrounds, endowment, rand_first, n, random_subset,
                          add_errors, sigma = 0.3, eta = 0.3, seed = 12345, group_compositions = NA, normalize = TRUE){
  #function generates all data; call only this function
  # Top-level entry point: simulates a full repeated public-goods data set
  # and appends derived measures (ratio, diff), optionally min-max
  # normalised.  `random_subset` may be TRUE, FALSE or the string
  # 'specific'; in 'specific' mode `group_compositions` must supply the
  # list of group type vectors.
  comb_lookup <- group_compositions
  #generate lookup-list with all combinations if not 'specific' otherwise do not execute
  if(paste(random_subset) != 'specific'){
    # print('generating all possible group compositions...')
    comb_lookup <- gen_all_combinations(types, grpsize)
  }
  # guard: 'specific' mode without group_compositions leaves the NA default
  stopifnot(!is.na(comb_lookup))
  #generate all strategies [[1]] are the fixed ones [[2]] are the time-dependent ones
  strategies <- gen_contribution_rules(endowment, grpsize, numrounds)
  #generate groups that played from lookup table
  # print('draw groups in data...')
  groups <- draw_groups(n, comb_lookup, random_subset, seed)
  rm(comb_lookup)
  #generate errors; if add_errors == FALSE, the a list full of zeros and nothing will be added
  # print('generate necessary errors...')
  errors <- make_errors(sigma, eta, length(groups), grpsize, numrounds, add_errors, seed)
  #generate data
  # print('generate data...')
  df <- make_contributions(endowment,errors, groups, numrounds, strategies[['time_independent']],
                           strategies[['time_dependent']], rand_first, seed)
  # print('finished generating')
  ################
  # add measures #
  ################
  #ratio
  # own contribution relative to the lagged average of the others; +1 on
  # both sides guards against zero contributions.  Period-1 rows have
  # avg_others_l == -1, so the denominator is 0 and the ratio is Inf,
  # which the next line replaces with the largest finite ratio.
  df$ratio<-(df$contribution+1)/((df$avg_others_l+1))
  df$ratio[!is.finite(df$ratio)] <- max(df$ratio[is.finite(df$ratio)])
  #difference
  df$diff<- df$contribution - df$avg_others_l
  #check if normalization should happen
  if(normalize){
    # min-max scaling: contribution/avg_others_l to [0, 1] via the
    # endowment, ratio via its observed range over periods > 1 (period-1
    # ratios then scaled by the same range, NAs set to 0), diff via the
    # [-endowment, endowment] range
    df$contribution <- (df$contribution - 0)/(endowment - 0)
    df$avg_others_l <- (df$avg_others_l - 0)/(endowment - 0)
    sb <- df$ratio[df$period != 1]
    df$ratio <- (df$ratio - min(sb))/(max(sb)-min(sb))
    df$ratio[is.na(df$ratio)] <- 0
    ##slightly above zero to avoid complications
    #df$ratio <- df$ratio + 0.0001
    df$diff <- (df$diff - (-endowment))/(endowment - (-endowment))
  }
  return(df)
}
#group_compositions <- list(c(1,2,3,4))
#df <- generate_data(types,grpsize, numrounds, endowment, rand_first, n, random_subset='specific',
# add_errors, sigma, eta, seed, group_compositions)
|
596b2cb7e58efb2baa170ccac07c98e4b9248d5f | 66f34eae780c7e8cdf001090c22698ea9c3e545f | /heatmap.R | b6042ab6cb723f3b595e7522ec1a0640b6fcce59 | [] | no_license | aringhosh/acorn | 46511b8bff21cf321e986b9c079b1d78c30c7583 | 8f744a6c2a1a2c063c98d984e05cb2b728a4668b | refs/heads/master | 2021-06-08T17:15:37.522194 | 2017-01-06T20:16:22 | 2017-01-06T20:16:22 | 63,098,732 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,159 | r | heatmap.R | # Tartu housing density map ###
# Kernel-density "heat" overlay of point locations on a Google terrain
# basemap (ggmap).  Expects locations.csv with `lon` and `lat` columns.
# NOTE(review): despite the file's Tartu title, the map is centred on
# Salina, KS at zoom 4 (continental-US view) -- confirm intended.
library(ggplot2)
library(ggmap)
data <- read.csv("locations.csv")
# change parameters
tartu_map_g_str <- get_map(location="Salina,KS", zoom = 4, maptype = "terrain")
# black density contour lines plus blue-to-red filled density polygons whose
# opacity scales with the density level (alpha legend suppressed)
map <- ggmap(tartu_map_g_str, extent='device') + geom_density2d(data=data, aes(x=lon, y=lat), size=.2, colour = "black") + stat_density2d(data=data, aes(x=lon, y=lat, fill = ..level.., alpha = ..level..), size = 0.01, geom = 'polygon')+ scale_fill_gradient(low = "blue", high = "red") + scale_alpha(range = c(0, 0.30), guide = FALSE)
map
#pin method 1
# location <-geocode("1410 North Scott Street , Apt 645ArlingtonVirginia")
# lon <- location['lon']
# lat <- location['lat']
# map <- map + geom_point(aes(x = lon, y = lat, size = 2), data = location, alpha = .5)
#pin method 2
# #Using GGPLOT, plot the Base World Map
# mp <- NULL
# mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
# mp <- ggplot() + mapWorld
#
# #Now Layer the cities on top
# mp <- mp+ geom_point(aes(x=visit.x, y=visit.y) ,color="blue", size=3)
# mp
#summarize
#library('dplyr')
#name <- group_by (data, lat,lon)
#p <- summarise(name, n()) |
0f85ca1690cd762a6163a6383949c805e5cf878b | 0df97eb5c4b292c490cacc2bd0e6ff56dd2321a6 | /Lab07/07-outliers-data.R | c5848820ad17fba9281552960a29601b243fe504 | [] | no_license | ZuxAlone/Adm-Informacion | b5db5ba8d71f12b927d3674a6591fd808b4d1bb4 | adaffa98ffc20579230036665463d7e7dafe6df5 | refs/heads/main | 2023-08-15T06:43:37.185907 | 2021-10-09T22:34:08 | 2021-10-09T22:34:08 | 397,649,940 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,847 | r | 07-outliers-data.R | ozone.data <- read.csv("Data/ozone.csv", stringsAsFactors = FALSE)
# Exploratory look at the ozone data and the outliers of pressure_height
# (ozone.data is read from Data/ozone.csv just above this chunk).
View(ozone.data)
# boxplot()$out returns the observations the boxplot rule flags as outliers
outliers.values <- boxplot(ozone.data$pressure_height)$out
outliers.values
summary(ozone.data$pressure_height)
boxplot(ozone.data$pressure_height, main = "Pressure Height", boxwex = 0.5)
# the same distributions broken down by month
boxplot(pressure_height ~ Month, data = ozone.data, main = "Pressure Height per Month")
boxplot(ozone_reading ~ Month, data = ozone.data, main = "Ozone reading per Month")
boxplot(ozone_reading ~ Month, data = ozone.data, main = "Ozone reading per Month")$out
#Caso 1: Cambio de outliers por el promerio y/o la mediana
# (Case 1: replace outliers with the mean and/or the median)
fix_outliers <- function(x, removeNA = TRUE) {
  # Outlier treatment by replacement: values below the 5th percentile are
  # overwritten with the mean, values above the 95th percentile with the
  # median.  The median is computed AFTER the low values were replaced,
  # mirroring the original two-step procedure.
  cutoffs <- quantile(x, c(0.05, 0.95), na.rm = removeNA)
  too_low <- x < cutoffs[1]
  x[too_low] <- mean(x, na.rm = removeNA)
  x[x > cutoffs[2]] <- median(x, na.rm = removeNA)
  x
}
# Caso 1 result: compare the raw distribution with the replaced-outlier
# version, side by side
sin.outliers <- fix_outliers(ozone.data$pressure_height)
par(mfrow = c(1,2))
# FIX: the left panel shows the raw data (WITH outliers); its title said
# "Presión sin Outliers" -- corrected to "con", matching the Caso 2 plots
boxplot(ozone.data$pressure_height, main = "Presión con Outliers")
boxplot(sin.outliers, main = "Presión sin Outliers")
#Caso 2: Cambio de outliers enmascarando sus valores (capping)
replace_outliers <- function(x, removeNA = TRUE){
  # Cap ("winsorize") outliers instead of removing them: anything more than
  # 1.5 * IQR below the first quartile is clamped to the 5th percentile,
  # anything more than 1.5 * IQR above the third quartile is clamped to the
  # 95th percentile.
  quartiles <- quantile(x, probs = c(0.25, 0.75), na.rm = removeNA)
  caps      <- quantile(x, probs = c(0.05, 0.95), na.rm = removeNA)
  whisker   <- 1.5 * (quartiles[2] - quartiles[1])
  too_low   <- x < quartiles[1] - whisker
  too_high  <- x > quartiles[2] + whisker
  x[too_low]  <- caps[1]
  x[too_high] <- caps[2]
  x
}
# Caso 2 result: raw distribution vs the capped one, side by side
par(mfrow = c(1,2))
boxplot(ozone.data$pressure_height, main = "Presión con Outliers")
boxplot(replace_outliers(ozone.data$pressure_height), main = "Presión sin Outliers")
|
525fcda9e9bad05d4152a0320052bc71cf8a97e5 | 9fec5550f60deb556f99eb69b1075ccd475534e2 | /plot4.R | eba9d3e8ab547905fb979bec22f8d9d59108c0a0 | [] | no_license | stvnwlsn/ExData_Plotting1 | c4f6561674be2e97639a117baf87eb6f182dbc77 | ac4d02ee1864ce260ba6756d5ec096cfcd8cf7e5 | refs/heads/master | 2021-01-22T15:51:08.748394 | 2015-04-10T08:51:44 | 2015-04-10T08:51:44 | 33,601,406 | 0 | 0 | null | 2015-04-08T10:58:15 | 2015-04-08T10:58:15 | null | UTF-8 | R | false | false | 1,178 | r | plot4.R | dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
stringsAsFactors = TRUE, na.strings = c("?"))
# Keep only the two analysis days, 1-2 Feb 2007 (Date is stored as d/m/Y text)
sub_dataset <- subset(dataset, Date %in% c("1/2/2007", "2/2/2007"))
# Combine Date + Time into one POSIXct timestamp for the x axes
sub_dataset$DateTime <- as.POSIXct(paste(sub_dataset$Date, sub_dataset$Time), format = "%d/%m/%Y %T")
# 2x2 panel of power-consumption time series, written to plot4.png
png(filename = 'plot4.png', width = 480 , height = 480, units = 'px')
par(mfrow = c(2, 2))
with(sub_dataset, {
  # top-left: global active power
  plot(DateTime, Global_active_power, type="l",
       xlab = "", ylab = "Global Active Power")
  # top-right: voltage
  plot(DateTime, Voltage, type="l", xlab="datetime", ylab="Voltage")
  # bottom-left: the three sub-metering series overlaid (black/red/blue)
  plot(DateTime, Sub_metering_1,
       xlab='', ylab='Energy sub metering', type="l")
  with(sub_dataset, points(DateTime, Sub_metering_2, type="l", col="red"))
  with(sub_dataset, points(DateTime, Sub_metering_3, type="l", col="blue"))
  legend("topright", lty= c('solid', 'solid', 'solid') ,
         col = c("black", "red", "blue"),
         legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty="n")
  # bottom-right: global reactive power
  plot(DateTime, Global_reactive_power,
       type="l", xlab="datetime", ylab="Global_reactive_power")
})
dev.off()
59363ea5c3aa0b3d23bae55116cbe27ccedae094 | e5a1b357db65237236a6f841b7cc019d8ed1bbdc | /scripts/QC_STARAlignment.R | 8092466b4c4c0c628f418dadba88183b3e5fe61e | [] | no_license | KatalinaBobowik/Epi_Study | 9baae5e24fdbab538a997de230a4f365efae2e1c | 62f5f05b1b4bdaf627b365056ed5b4313da700e0 | refs/heads/master | 2023-02-26T20:43:16.347623 | 2021-02-13T23:41:04 | 2021-02-13T23:41:04 | 263,541,155 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,138 | r | QC_STARAlignment.R | # Script for creating barplot of number of reads at all different processing stages
# Code developed by Katalina Bobowik, 10.12.2018
# Reads the MultiQC STAR alignment summary table and draws a stacked
# percentage barplot of read categories per sample.

# load library and colour palette
library(viridis)
library(Rcmdr)
library(reshape2)
library(ggplot2)

# set working directory
inputdir = "/Users/katalinabobowik/Documents/UniMelb_PhD/Analysis/UniMelb_Sumba/Output/Epi_Study/QC/STAR/"
outputdir = "/Users/katalinabobowik/Documents/UniMelb_PhD/Analysis/UniMelb_Sumba/Output/Epi_Study/QC/"

# set ggplot colour theme to white
theme_set(theme_bw())

# read in summary file and tidy up
a = read.table(paste0(inputdir, "mqc_star_alignment_plot_1.txt"), header = TRUE, sep = "\t")
# remove data from first pass of star
a = a[-grep("STARpass1", a$Sample), ]

# BUG FIX: the plot below referenced an undefined object `data` with the
# long-format columns `variable`/`value`; the melt() step producing it was
# missing (reshape2 was loaded but never used).  melt() with
# id.vars = "Sample" creates exactly those columns.
data <- melt(a, id.vars = "Sample")

pdf(paste0(outputdir,"QCofReads_STAR.pdf"), width=12)
# wrap in print() so the plot is actually written when the script is source()d
print(
  ggplot(data, aes(fill=forcats::fct_rev(variable), y=value, x=Sample)) +
    geom_bar(position="fill", stat="identity") +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1), legend.title = element_blank()) +
    scale_fill_manual(values=c("#7F0000", "#B1084C", "#F7A35C", "#7CB5EC", "#437BB1")) +
    ylab("Percentage of Reads")
)
dev.off()
a7ee6ede27d2d0a7b0e57c6b7b6c3d25905ed26e | 16e2a0e0f1676ae9eee284127f7a824e043efde6 | /code/Untitled.R | 37a2c2936916e95cbfcf88f46d118a0c657a730b | [] | no_license | kaizadp/destabilization_som | dc4a21d286004412ff45c06cc97c273f204331da | 9dc55906839fb459afa191a7cd53ec36619ee388 | refs/heads/master | 2023-08-06T01:52:08.576183 | 2021-04-27T00:36:34 | 2021-04-27T00:36:34 | 273,317,987 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,822 | r | Untitled.R |
# -------------------------------------------------------------------------
resp =
respiration %>%
mutate(R13C = ((D13C_VPDB_CO2/1000) + 1) * R13C_VPDB,
F13C = R13C/(1+R13C),
R13C = round(R13C, 4),
F13C = round(F13C, 4),
C13_umol = F13C * umol_CO2C,
C12_umol = umol_CO2C - C13_umol,
C13_ug = C13_umol*13,
C12_ug = C12_umol*12,
C_ug = C13_ug+C12_ug,
C_ug_g = C_ug/60)
##
## partial pressure ppm = umol CO2/mol air
## mol of air = 0.0177
resp_licor_temp2 =
resp_licor_temp %>%
mutate(umol_CO2 = pCO2_ppm *0.0177)
loadd(combined_data_processed)
combined_data_processed %>%
filter(type == "control") %>%
ggplot(aes(x = treatment, y = d13C_VPDB))+
geom_point()+
facet_grid(fraction~., scales = "free_y")
# -------------------------------------------------------------------------
combined_data_processed %>%
mutate(fraction = factor(fraction, levels = c("respiration", "weoc", "soil"))) %>%
filter(!type %in% c("control")) %>%
ggplot(aes(x = treatment, y = d13C_VPDB, color = type))+
#geom_hline(data = control_summary, aes(yintercept = d13C_VPDB), linetype = "dashed", color = "grey30")+
geom_point(size = 3, show.legend = FALSE) +
#scale_color_manual(values = pnw_palette("Sailboat", 3))+
scale_color_manual(values = c(NA, "black"))+
labs(title = "δ13C enrichment in each fraction")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
facet_grid(fraction~., scales = "free_y")+
theme_kp()+
NULL
combined_data_processed %>%
mutate(fraction = factor(fraction, levels = c("respiration", "weoc", "soil"))) %>%
filter(!type %in% c("control")) %>%
filter(treatment == "3-drying") %>%
ggplot(aes(x = treatment, y = d13C_VPDB, color = type))+
geom_hline(data = control_summary, aes(yintercept = d13C_VPDB), linetype = "dashed", color = "grey30")+
geom_point(size = 3, show.legend = FALSE) +
#scale_color_manual(values = c(NA, "#e89c81"))+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
labs(title = "sorbed-C",
caption = "dashed line = avg of control samples",
x = "",
y = "δ13C (VPDB)")+
facet_grid(fraction~., scales = "free_y")+
theme_kp()+
theme(panel.grid.minor = element_blank(),
strip.text.y = element_blank())+
NULL
test = combined_data_processed %>%
mutate(type2 = "TRT") %>%
rbind(control) %>%
filter(fraction == "respiration" & type == "sorbed-C" & treatment == "3-drying")
test %>%
ggplot(aes(x = type2, y = d13C_VPDB))+
geom_point()
aov(d13C_VPDB ~ type2, data = test) %>% broom::tidy()
test %>%
group_by(type2) %>% dplyr::summarise(d13C = mean(d13C_VPDB))
# -------------------------------------------------------------------------
## calculate MDC ----
# https://www.epa.gov/sites/production/files/2015-10/documents/tech_memo_3_oct15.pdf
# http://www.ttable.org/student-t-value-calculator.html
combined_data_processed %>%
mutate(C13_ug_g = C13_mg_g*1000) %>%
dplyr::select(-C_mg_g, -C13_mg_g) %>%
filter(type == "control" & fraction == "soil" & treatment == "1-time-zero") %>%
summarise(sd = sd(C13_ug_g))
combined_data_processed %>%
mutate(C13_ug_g = C13_mg_g*1000) %>%
dplyr::select(-C_mg_g, -C13_mg_g) %>%
filter(type == "sorbed-C" & fraction == "soil" & treatment == "1-time-zero") %>%
summarise(sd = sd(C13_ug_g))
(1.8595+0.8889) * sqrt(((6.54*6.54) + (17.8*17.8))/5) #1t
(0.0647+0.2619) * sqrt(((6.54*6.54) + (17.8*17.8))/5) #2t
## minimum detectable change in soil 13C is 2.76 ug/g (by the 2-tailed test)
## calculate MDC part 2 ----
## here, we assume that total 13C in soil will not change by treatment. so we calculate a total SD across all four treatments
tail = 2
a = 0.05
power = 0.80
pre = combined_data_processed %>% filter(type == "control" & fraction == "soil") %>% mutate(C13_ug_g = C13_mg_g*1000)%>% pull(C13_ug_g)
post = combined_data_processed %>% filter(type == "sorbed-C" & fraction == "soil") %>% mutate(C13_ug_g = C13_mg_g*1000)%>% pull(C13_ug_g)
calculate_mdc = function(tail, alpha, power, pre, post){
  # Minimum detectable change between two independent samples (EPA tech
  # memo formula):
  #   MDC = (t_{power} + t_{1 - alpha/tail}) * sqrt(s_pre^2/n_pre + s_post^2/n_post)
  # with both t quantiles on n_pre + n_post - 2 degrees of freedom.
  # `tail` is 1 or 2 (one- vs two-tailed alpha).
  n1 <- length(pre)
  n2 <- length(post)
  dof <- n1 + n2 - 2
  s1_sq <- sd(pre)^2
  s2_sq <- sd(post)^2
  # qt(1 - (2*(1-power)/2), dof) in the original simplifies to qt(power, dof)
  t_power <- qt(power, dof)
  t_alpha <- qt(1 - (alpha/tail), dof)
  (t_power + t_alpha) * (sqrt((s1_sq/n1) + (s2_sq/n2)))
}
loadd(combined_data_processed)
calculate_mdc(tail = 2,
alpha = 0.05,
power = 0.80,
pre = combined_data_processed %>% filter(type == "control" & fraction == "soil"& treatment == "2-wetting") %>% mutate(C13_ug_g = C13_mg_g*1000)%>% pull(C13_ug_g),
post = combined_data_processed %>% filter(type == "solution-C" & fraction == "soil"& treatment == "2-wetting") %>% mutate(C13_ug_g = C13_mg_g*1000)%>% pull(C13_ug_g))
## comparing solution-C 1-time-zero vs. 2-wetting, MDC = 22.89 for a = 0.05 and power = 0.80
#
# checking solution C T0 stats --------------------------------------------
loadd(combined_data_processed)
solution_tzero =
combined_data_processed %>%
filter(treatment == "1-time-zero" & type != "sorbed-C")
solution_tzero %>%
ggplot(aes(x = type, y = d13C_VPDB))+
geom_point()+
facet_grid(fraction~., scales = "free")+
NULL
aov(d13C_VPDB ~ type, data = solution_tzero %>% filter(fraction == "respiration")) %>% summary()
aov(d13C_VPDB ~ type, data = solution_tzero %>% filter(fraction == "soil")) %>% summary()
combined_data_processed %>%
filter(treatment == "1-time-zero" & type == "control") %>%
group_by(fraction) %>%
dplyr::summarise(n = n(),
C = mean(C_mg_g)*10,
d13C = mean(d13C_VPDB),
R = mean(R13C))
## convert R to at% ----
R = C13/C12
at = (C13(C12+C13)) * 100
at = (R13C/(1+R13C)) * 100
R13C = 0.0109
# -------------------------------------------------------------------------
combined_data_processed_summary %>%
filter(type != "solution-C" & !(fraction == "respiration" & treatment == "3-drying")) %>%
ggplot(aes(x = treatment, y = C13_mg_g*1000))+
geom_bar(aes(fill = fraction, color = fraction), stat = "identity", position = position_dodge(width = 0.7),
width = 0.5, alpha = 0.7, size = 0.7)+
#geom_text(data = label %>% filter(type != "solution-C"), aes(y = 2.95, label = C13_mg_g*1000))+
#annotate("text", label = "total 13C in soil (μg/g):", x = 0.7, y = 3.10, hjust = 0)+
labs(x = "", y = "13C (μg/g)")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
combined_data_processed %>%
filter(type != "solution-C" & !(fraction == "respiration" & treatment == "3-drying")) %>%
ggplot(aes(x = treatment, y = C13_mg_g*1000))+
# geom_bar(aes(fill = fraction, color = fraction), stat = "identity", position = position_dodge(width = 0.7),
# width = 0.5, alpha = 0.7, size = 0.7)+
geom_point(aes(color = fraction))+
#geom_text(data = label %>% filter(type != "solution-C"), aes(y = 2.95, label = C13_mg_g*1000))+
#annotate("text", label = "total 13C in soil (μg/g):", x = 0.7, y = 3.10, hjust = 0)+
labs(x = "", y = "13C (μg/g)")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
combined_data_processed %>%
filter(type != "solution-C" & !(treatment == "3-drying")) %>%
group_by(type, fraction, treatment) %>%
dplyr::summarise(C13 = mean(C13_mg_g*1000)) %>%
pivot_wider(names_from = "type", values_from = "C13") %>%
mutate(diff_ug_g = `sorbed-C` - control) %>%
ggplot(aes(x = treatment, y = diff_ug_g))+
geom_point(aes(color = fraction))+
labs(x = "", y = "13C difference (μg/g)")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
#scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
#facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
combined_data_processed %>%
filter(type != "sorbed-C" & (treatment %in% c("1-time-zero", "2-wetting"))) %>%
group_by(type, fraction, treatment) %>%
dplyr::summarise(C13 = mean(C13_mg_g*1000)) %>%
pivot_wider(names_from = "type", values_from = "C13") %>%
mutate(diff_ug_g = `solution-C` - control) %>%
ggplot(aes(x = treatment, y = diff_ug_g))+
geom_point(aes(color = fraction))+
labs(x = "", y = "13C difference (μg/g)")+
scale_x_discrete(labels = c("T0", "+C"))+
#scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
#facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
combined_data_processed %>%
#filter(type != "sorbed-C" & (treatment %in% c("1-time-zero", "2-wetting"))) %>%
group_by(type, fraction, treatment) %>%
dplyr::summarise(C13 = mean(C13_mg_g*1000)) %>%
pivot_wider(names_from = "type", values_from = "C13") %>%
mutate(diff_sol_ug_g = `solution-C` - control,
diff_sor_ug_g = `sorbed-C` - control) %>%
dplyr::select(fraction, treatment, starts_with("diff")) %>%
pivot_longer(-c(fraction, treatment), values_to = "diff_ug_g", names_to = "type") %>%
ggplot(aes(x = treatment, y = diff_ug_g))+
geom_point(aes(color = fraction))+
labs(x = "", y = "13C difference (μg/g)")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
#scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
combined_data_processed %>%
filter(!(type == "sorbed-C" & treatment == "3-drying" & fraction == "respiration")) %>%
group_by(type, fraction, treatment) %>%
ggplot(aes(x = treatment, y = C13_mg_g*1000))+
geom_point(aes(color = fraction), position = position_dodge(width = 0.3))+
labs(x = "", y = "13C difference (μg/g)")+
scale_x_discrete(labels = c("T0", "W", "D", "DW"))+
scale_y_log10()+
#scale_fill_manual(values = pnw_palette("Sunset", 3))+
scale_fill_manual(values = soilpalettes::soil_palette("redox2", 3))+
scale_color_manual(values = soilpalettes::soil_palette("redox2", 3))+
facet_wrap(~type)+
theme_kp()+
#theme(axis.text.x = element_text(angle = 45))+
NULL
solutionc = combined_data_processed %>% filter(type == "solution-C")
summary(aov((C13_mg_g) ~ treatment, data = solutionc %>% filter(fraction == "weoc")))
goethite = tribble(
~g_perc, ~d13C,
1.5, -27.63,
1.5, -27.72,
1.5, -29.30,
7.5, -30.46,
7.5, -27.41,
7.5, -26.88,
13, -26.03,
13, -24.42,
13, -28.01,
20, -26.62,
50, -27.43,
0, -27.00,
0, -27.32,
100, -23.68,
100, -23.80,
100, -24.20
)
goethite %>%
ggplot(aes(x = g_perc, y = d13C))+
geom_point()
# alternate drying/rewetting plot -----------------------------------------
drying_rewetting_data =
combined_data_processed %>%
mutate(trt = case_when(type == "control" & treatment == "1-time-zero" ~ "baseline",
type == "sorbed-C" & (treatment == "1-time-zero" | treatment == "2-wetting") ~ "wetting",
type == "sorbed-C" & (treatment == "3-drying" | treatment == "4-drying-rewetting") ~ "drying")) %>%
filter(!is.na(trt)) %>%
mutate(level = case_when(trt == "baseline" ~ "baseline",
treatment == "1-time-zero" | treatment == "3-drying" ~ "TO",
treatment == "2-wetting" | treatment == "4-drying-rewetting" ~ "wet",
))
drying_rewetting_data %>%
ggplot(aes(x = trt, y = d13C_VPDB, color = level))+
geom_point(position = position_dodge(width = 0.4))+
facet_grid(fraction ~., scales = "free_y")+
theme_kp()
drying_rewetting_data %>%
ggplot(aes(x = trt, y = C_mg_g, color = level))+
geom_point(position = position_dodge(width = 0.4))+
facet_grid(fraction ~., scales = "free_y")+
theme_kp()
|
553ef8841d2d1697071d9c645f7d31edca828ebf | 590142f535831def89b5b2d0f6ac1d47b8306850 | /man/ChunkDataFiles.Rd | 618e2735af3f053a82b7b9d14d7e1d4ceb15b3f8 | [] | no_license | jfontestad/makeParallel | 2b62704c9e26477bc89d505de313ea07aaebdcca | 6e43f34f51a23692907ec1563d3d47a8e189d7bf | refs/heads/master | 2023-01-13T03:27:16.260825 | 2020-11-17T16:41:04 | 2020-11-17T16:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 340 | rd | ChunkDataFiles.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataSource.R
\name{ChunkDataFiles}
\alias{ChunkDataFiles}
\title{Constructor for ChunkDataFiles}
\usage{
ChunkDataFiles(files, sizes = file.info(files)$size,
readFuncName = inferReadFuncFromFile(files[1]), ...)
}
\description{
Constructor for ChunkDataFiles
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.