blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d430dcdc0d15815a8bf0bf198e37040f9dd4504
|
8933d3a00e9d676cdd9a0e7155f33e76fb52cb44
|
/association_test_wgs_wes/5b-match_wes2wgs.R
|
cffbdf9af6e90ed37e660ea5f0417f5bfd90b076
|
[] |
no_license
|
LeiChen0218/PhD_toolkits
|
19fe47c90ebce1fadee5fb987e3d48ef63a34cf0
|
c4c54f8b9ed16e5ba2eb6b961cc49be8f6211238
|
refs/heads/master
| 2020-12-28T09:52:59.450619
| 2020-02-04T19:20:47
| 2020-02-04T19:20:47
| 238,277,650
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,952
|
r
|
5b-match_wes2wgs.R
|
# Match WES CNV association results to WGS candidate results, check that the
# effect directions agree, and combine the two p-values per trait/region pair
# with Fisher's method (metap::sumlog).
setwd('/Users/leichen/Desktop/Lab/Finmetseq_paper/3-Candidate_analysis/data/wes_match/')
library(ggplot2)
library(metap)

## ---- Helpers ---------------------------------------------------------------

# Join a WES->WGS chain table onto the WGS candidate results and standardise
# the column names. `extra_cols` names any chain columns beyond ID/REGION
# (e.g. "R2" for the per-batch chain files; the meta chain has none).
match_wgs <- function(chain, wgs, extra_cols = character(0)) {
  matched <- merge(chain, wgs, by = "ID")
  colnames(matched) <- c("CNV", "REGION", extra_cols,
                         "TRAIT_RN", "CHR", "POS", "WGS_P", "WGS_BETA",
                         "AC", "AF", "N")
  # WGS trait names carry a '_rn' (rank-normalised) suffix; strip it so the
  # TRAIT column matches the WES results.
  matched$TRAIT <- gsub('_rn', '', matched$TRAIT_RN)
  matched
}

# Fisher's-method combined p-value for two aligned vectors of p-values.
# Vectorised replacement for the previous per-row for-loops, which used the
# 1:n pattern and would have failed on zero-row inputs.
combine_p <- function(p1, p2) {
  vapply(seq_along(p1), function(i) sumlog(c(p1[i], p2[i]))$p, numeric(1))
}

## ---- Read data -------------------------------------------------------------

# WES association results: two sequencing batches plus their meta-analysis
bam1 <- read.table('cands.traits.bam1.txt', header = TRUE)
bam2 <- read.table('cands.traits.bam2.txt', header = TRUE)
meta <- read.table('cands.traits.r01_exon.meta.txt', header = TRUE)

# WES exon -> WGS CNV chain files
chain1 <- read.table('targets/cnv.exon.r_01.bams1.table', header = FALSE)
chain2 <- read.table('targets/cnv.exon.r_01.bams2.table', header = FALSE)
colnames(chain1) <- c("ID", "REGION", "R2")
colnames(chain2) <- c("ID", "REGION", "R2")

# WGS candidate results from the three CNV callers, stacked
gs       <- read.table('../cand_p3/gs.candidate.p3.txt', header = TRUE)
lumpy    <- read.table('../cand_p3/lumpy.candidate.p3.txt', header = TRUE)
cnvnator <- read.table('../cand_p3/cnvnator.candidate.p3.txt', header = TRUE)
wgs <- as.data.frame(rbind(gs, lumpy, cnvnator))

## ---- Match WGS candidates to WES exons and combine evidence ----------------

# WES batch 1
wgs1 <- match_wgs(chain1, wgs, extra_cols = "R2")
combined_bam1 <- merge(bam1, wgs1, by = c("TRAIT", "REGION"))
combined_bam1$ED <- (combined_bam1$BETA * combined_bam1$WGS_BETA) > 0  # effect directions agree?
combined_bam1$CP <- combine_p(combined_bam1$PVALUE, combined_bam1$WGS_P)

# WES batch 2
wgs2 <- match_wgs(chain2, wgs, extra_cols = "R2")
combined_bam2 <- merge(bam2, wgs2, by = c("TRAIT", "REGION"))
combined_bam2$ED <- (combined_bam2$BETA * combined_bam2$WGS_BETA) > 0
combined_bam2$CP <- combine_p(combined_bam2$PVALUE, combined_bam2$WGS_P)

# WES meta-analysis: the chain file here has no R2 column, and the meta
# results carry both random-effect (RE) and fixed-effect (FE) estimates.
chain <- read.table('targets/cnv.exon.r_01.table', header = FALSE)
colnames(chain) <- c("ID", "REGION")
wgs_meta <- match_wgs(chain, wgs)
combined_meta <- merge(meta, wgs_meta, by = c("TRAIT", "REGION"))
combined_meta$ED_re <- (combined_meta$BETA_RE * combined_meta$WGS_BETA) > 0
combined_meta$CP_re <- combine_p(combined_meta$PVALUE_RE, combined_meta$WGS_P)
combined_meta$ED_fe <- (combined_meta$BETA_FE * combined_meta$WGS_BETA) > 0
combined_meta$CP_fe <- combine_p(combined_meta$PVALUE_FE, combined_meta$WGS_P)

## ---- Diagnostics and output ------------------------------------------------

ggplot(combined_bam1, aes(x = -log10(CP))) + geom_histogram(bins = 100) +
  ggtitle('combined p distribution, wes batch1 ')
ggplot(combined_bam2, aes(x = -log10(CP))) + geom_histogram(bins = 100) +
  ggtitle('combined p distribution, wes batch2 ')
ggplot(combined_meta, aes(x = -log10(CP_fe))) + geom_histogram(bins = 100) +
  ggtitle('combined p distribution, meta, fixed effect ')
ggplot(combined_meta, aes(x = -log10(CP_re))) + geom_histogram(bins = 100) +
  ggtitle('combined p distribution, meta, random effect ')

write.table(combined_bam1, 'wes_wgs_matched.bam1.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(combined_bam2, 'wes_wgs_matched.bam2.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(combined_meta, 'wes_wgs_matched.meta.txt', row.names = FALSE, sep = '\t', quote = FALSE)

# Significance cut-offs for the combined p-values; a pair is kept when the
# effect directions agree AND the combined p clears the cut-off.
valid_thresh <- 0.00000189  # study-wide threshold
sub_thresh   <- 0.00001     # suggestive ("sub-threshold") cut-off

valid1 <- combined_bam1[combined_bam1$ED & combined_bam1$CP < valid_thresh, ]
valid2 <- combined_bam2[combined_bam2$ED & combined_bam2$CP < valid_thresh, ]
combined_meta$valid_fe <- combined_meta$ED_fe & combined_meta$CP_fe < valid_thresh
combined_meta$valid_re <- combined_meta$ED_re & combined_meta$CP_re < valid_thresh
valid_meta <- combined_meta[combined_meta$valid_fe, ]
write.table(valid1, 'wes_wgs_matched.valid.bam1.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(valid2, 'wes_wgs_matched.valid.bam2.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(valid_meta, 'wes_wgs_matched.valid.meta.txt', row.names = FALSE, sep = '\t', quote = FALSE)

valid1 <- combined_bam1[combined_bam1$ED & combined_bam1$CP < sub_thresh, ]
valid2 <- combined_bam2[combined_bam2$ED & combined_bam2$CP < sub_thresh, ]
combined_meta$valid_fe <- combined_meta$ED_fe & combined_meta$CP_fe < sub_thresh
combined_meta$valid_re <- combined_meta$ED_re & combined_meta$CP_re < sub_thresh
valid_meta <- combined_meta[combined_meta$valid_fe, ]
write.table(valid1, 'wes_wgs_matched.subthre.bam1.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(valid2, 'wes_wgs_matched.subthre.bam2.txt', row.names = FALSE, sep = '\t', quote = FALSE)
write.table(valid_meta, 'wes_wgs_matched.subthre.meta.txt', row.names = FALSE, sep = '\t', quote = FALSE)
|
b41923d5eb99c0829fdf8d384090e76f4c6a1202
|
6fe5ae4a3f67f560f43e6343839d0a17ffa5181a
|
/R/multi_trial.R
|
7aa3bac35af2dc353dcff510c8dacd0dfcc2ee8d
|
[] |
no_license
|
cran/adaptDiag
|
0c5901e53e0d119d959fe3e0e3b5553bb74d97e1
|
d651b259304bb4b877a2070ae6826a76be2251da
|
refs/heads/master
| 2023-07-10T10:29:09.351707
| 2021-08-17T06:20:14
| 2021-08-17T06:20:14
| 397,309,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,728
|
r
|
multi_trial.R
|
#' @title Simulate and analyse multiple trials
#'
#' @description Multiple trials are simulated and analysed up to the final
#' analysis stage, irrespective of whether it would have been stopped for
#' early success or expected futility. The output of the trials is handled
#' elsewhere.
#'
#' @param sens_true scalar. True assumed sensitivity (must be between 0 and 1).
#' @param spec_true scalar. True assumed specificity (must be between 0 and 1).
#' @param prev_true scalar. True assumed prevalence as measured by the
#' gold-standard reference test (must be between 0 and 1).
#' @param endpoint character. The endpoint(s) that must meet a performance goal
#' criterion. The default is \code{code = "both"}, which means that the
#' endpoint is based simultaneously on sensitivity and specificity.
#' Alternative options are to specify \code{code = "sens"} or \code{code =
#' "spec"} for sensitivity and specificity, respectively. If only a single
#' endpoint is selected (e.g. sensitivity), then the PG and success
#' probability threshold of the other statistic are set to 1, and ignored for
#' later analysis.
#' @param sens_pg scalar. Performance goal (PG) for the sensitivity endpoint,
#' such that the posterior probability that the PG is exceeded is
#' calculated. Must be between 0 and 1.
#' @param spec_pg scalar. Performance goal (PG) for the specificity endpoint,
#' such that the posterior probability that the PG is exceeded is
#' calculated. Must be between 0 and 1.
#' @param prior_sens vector. A vector of length 2 with the prior shape
#' parameters for the sensitivity Beta distribution.
#' @param prior_spec vector. A vector of length 2 with the prior shape
#' parameters for the specificity Beta distribution.
#' @param prior_prev vector. A vector of length 2 with the prior shape
#' parameters for the prevalence Beta distribution.
#' @param succ_sens scalar. Probability threshold for the sensitivity to exceed
#' in order to declare a success. Must be between 0 and 1.
#' @param succ_spec scalar. Probability threshold for the specificity to exceed
#' in order to declare a success. Must be between 0 and 1.
#' @param n_at_looks vector. Sample sizes for each interim look. The final value
#' (or only value if no interim looks are planned) is the maximum allowable
#' sample size for the trial.
#' @param n_mc integer. Number of Monte Carlo draws to use for sampling from the
#' Beta-Binomial distribution.
#' @param n_trials integer. The number of clinical trials to simulate overall,
#' which will be used to evaluate the operating characteristics.
#' @param ncores integer. The number of cores to use for parallel processing. If
#' `ncores` is missing, it defaults to the maximum number of cores available
#' (spare 1).
#'
#' @details
#'
#' This function simulates multiple trials and analyses each stage of the trial
#' (i.e. at each interim analysis sample size look) irrespective of whether a
#' stopping rule was triggered or not. The operating characteristics are handled
#' by a separate function, which accounts for the stopping rules and any other
#' trial constraints. By enumerating each stage of the trial, additional
#' insights can be gained such as: for a trial that stopped early for futility,
#' what is the probability that it would eventually go on to be successful if
#' the trial had not stopped. The details on how each trial are simulated here
#' are described below.
#'
#' \strong{Simulating a single trial}
#'
#' Given true values for the test sensitivity (\code{sens_true}), specificity
#' (\code{spec_true}), and the prevalence (\code{prev_true}) of disease, along
#' with a sample size look strategy (\code{n_at_looks}), it is straightforward
#' to simulate a complete dataset using the binomial distribution. That is, a
#' data frame with true disease status (reference test), and the new diagnostic
#' test result.
#'
#' \strong{Posterior probability of exceeding PG at current look}
#'
#' At a given sample size look, the posterior probability of an endpoint (e.g.
#' sensitivity) exceeding the pre-specified PG (\code{sens_pg}) can be
#' calculated as follows.
#'
#' If we let \eqn{\theta} be the test property of interest (e.g. sensitivity),
#' and if we assume a prior distribution of the form
#'
#' \deqn{\theta ~ Beta(\alpha, \beta),}
#'
#' then with \eqn{X | \theta \sim Bin(n, \theta)}, where \eqn{X} is the number
#' of new test positive cases from the reference positive cases, the posterior
#' distribution of \eqn{\theta} is
#'
#' \deqn{\theta | X=x ~ Beta(\alpha + x, \beta + n - x).}
#'
#' The posterior probability of exceeding the PG is then calculated as
#'
#' \eqn{P[\theta \ge sens_pg | X = x, n]}.
#'
#' A similar calculation can be performed for the specificity, with
#' corresponding PG, \code{spec_pg}.
#'
#' \strong{Posterior predictive probability of eventual success}
#'
#' When at an interim sample size that is less than the maximum
#' (i.e. \code{max(n_at_looks)}), we can calculate the probability that the trial
#' will go on to eventually meet the success criteria.
#'
#' At the \eqn{j}-th look, we have observed \eqn{n_j} tests, with \eqn{n_j^* =
#' n_{max} - n_j} subjects yet to be enrolled for testing. For the \eqn{n_j^*}
#' subjects remaining, we can simulate the number of reference positive results,
#' \eqn{y_j^*}, using the posterior predictive distribution for the prevalence
#' (reference positive tests), which is of the form
#'
#' \deqn{y_j^* | y_j, n_j, n_j^* ~ Beta-Bin(n_j^*, \alpha_0 + y_j, \beta + n_j - y_j),}
#'
#' where \eqn{y_j} is the observed number of reference positive cases.
#' Conditional on the number of subjects with a positive reference test in the
#' remaining sample together with \eqn{n_j^*}, one can simulate the complete 2x2
#' contingency table by using the posterior predictive distributions for
#' sensitivity and specificity, each of which has a Beta-Binomial form.
#' Combining the observed \eqn{n_j} subjects' data with a sample of the
#' \eqn{n_j^*} subjects' data drawn from the predictive distribution, one can
#' then calculate the posterior probability of trial success (exceeding a PG)
#' for a specific endpoint. Repeating this many times and calculating the
#' proportion of probabilities that exceed the probability success threshold
#' yields the probability of eventual trial success at the maximum sample size.
#'
#' As well as calculating the predictive posterior probability of eventual
#' success for sensitivity and specificity, separately, we can also calculate
#' the probability for both endpoints simultaneously.
#'
#' @section Parallelization:
#'
#' To use multiple cores (where available), the argument \code{ncores} can be
#' increased from the default of 1. On UNIX machines (including macOS),
#' parallelization is performed using the \code{\link[parallel]{mclapply}}
#' function with \code{ncores} \eqn{>1}. On Windows machines, parallel
#' processing is implemented via the \code{\link[foreach]{foreach}} function.
#'
#' @return A list containing a data frame with rows for each stage of the trial
#' (i.e. each sample size look), irrespective of whether the trial meets the
#' stopping criteria. Multiple trial simulations are stacked longways and
#' indicated by the `trial` column. The data frame has the following columns:
#'
#' \itemize{
#' \item{\code{stage}:} Trial stage.
#' \item{\code{pp_sens}:} Posterior probability of exceeding the performance
#' goal for sensitivity.
#' \item{\code{pp_spec}:} Posterior probability of exceeding the performance
#' goal for specificity.
#' \item{\code{ppp_succ_sens}:} Posterior predictive probability of eventual
#' success for sensitivity at the maximum sample size.
#' \item{\code{ppp_succ_spec}:} Posterior predictive probability of eventual
#' success for specificity at the maximum sample size.
#' \item{\code{ppp_succ_both}:} Posterior predictive probability of eventual
#' success for *both* sensitivity and specificity at the maximum sample
#' size.
#' \item{\code{tp}:} True positive count.
#' \item{\code{tn}:} True negative count.
#' \item{\code{fp}:} False positive count.
#' \item{\code{fn}:} False negative count.
#' \item{\code{sens_hat}:} Posterior median estimate of the test
#' sensitivity.
#' \item{\code{sens_CrI2.5}:} Lower bound of the 95% credible interval of
#' the test sensitivity.
#' \item{\code{sens_CrI97.5}:} Upper bound of the 95% credible interval of
#' the test sensitivity.
#' \item{\code{spec_hat}:} Posterior median estimate of the test
#' specificity.
#' \item{\code{spec_CrI2.5}:} Lower bound of the 95% credible interval of
#' the test specificity.
#' \item{\code{spec_CrI97.5}:} Upper bound of the 95% credible interval of
#' the test specificity.
#' \item{\code{n}:} The sample size at the given look for the row.
#' \item{\code{trial}:} The trial number, which will range from 1 to
#' `n_trials`.
#' }
#'
#' The list also contains the arguments used and the call.
#'
#' @examples
#'
#' multi_trial(
#' sens_true = 0.9,
#' spec_true = 0.95,
#' prev_true = 0.1,
#' endpoint = "both",
#' sens_pg = 0.8,
#' spec_pg = 0.8,
#' prior_sens = c(0.1, 0.1),
#' prior_spec = c(0.1, 0.1),
#' prior_prev = c(0.1, 0.1),
#' succ_sens = 0.95,
#' succ_spec = 0.95,
#' n_at_looks = c(200, 400, 600, 800, 1000),
#' n_mc = 10000,
#' n_trials = 2,
#' ncores = 1
#' )
#'
#' @importFrom parallel detectCores
#' @importFrom pbmcapply pbmclapply
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach foreach registerDoSEQ '%dopar%'
#'
#' @export
multi_trial <- function(
  sens_true,
  spec_true,
  prev_true,
  endpoint = "both",
  sens_pg = 0.8,
  spec_pg = 0.8,
  prior_sens = c(0.1, 0.1),
  prior_spec = c(0.1, 0.1),
  prior_prev = c(0.1, 0.1),
  succ_sens = 0.95,
  succ_spec = 0.95,
  n_at_looks,
  n_mc = 10000,
  n_trials = 1000,
  ncores
) {

  Call <- match.call()

  # Missing 'ncores' defaults to the maximum available (spare 1).
  if (missing(ncores)) {
    ncores <- max(1, parallel::detectCores() - 1)
  }

  # Cannot run with fewer than 1 core. Bug fix: previously this warned that
  # it was "setting ncores = 1" but never actually did so.
  if (ncores < 1) {
    warning("Must use at least 1 core... setting ncores = 1")
    ncores <- 1
  }

  # Endpoint selection: for a single-endpoint design, the PG and success
  # threshold of the other statistic are forced to 1 so they can never
  # trigger. Note: the previous `missing(...)` checks made the documented
  # defaults unusable (missing() is TRUE even when a default exists), so
  # only explicit NULLs are rejected now. Scalar conditions use '||'.
  if (endpoint == "both") {
    # Both endpoints need a PG and a success probability threshold.
    if (is.null(sens_pg) || is.null(spec_pg)) {
      stop("Missing performance goal argument")
    }
    if (is.null(succ_sens) || is.null(succ_spec)) {
      stop("Missing probability threshold argument")
    }
  } else if (endpoint == "sens") {
    # Sensitivity-only endpoint
    if (is.null(sens_pg) || is.na(sens_pg)) {
      stop("Missing performance goal argument")
    }
    if (!is.null(spec_pg)) {
      warning("spec_pg is being ignored")
    }
    spec_pg <- 1   # can never exceed this
    succ_spec <- 1 # can never exceed this
  } else if (endpoint == "spec") {
    # Specificity-only endpoint
    if (is.null(spec_pg) || is.na(spec_pg)) {
      stop("Missing performance goal argument")
    }
    if (!is.null(sens_pg)) {
      warning("sens_pg is being ignored")
    }
    sens_pg <- 1   # can never exceed this
    succ_sens <- 1 # can never exceed this
  } else {
    stop("endpoint should be either 'both', 'sens', or 'spec'")
  }

  # True values have no defaults, so they must be supplied.
  if (missing(sens_true) || missing(spec_true) || missing(prev_true)) {
    stop("True values must be provided for sensitivity, specificity, and prevalence")
  }

  # Prior distribution parameters must not be NULL.
  if (is.null(prior_sens) || is.null(prior_spec) || is.null(prior_prev)) {
    stop("Prior distribution parameters must be provided for sensitivity, specificity, and prevalence")
  }

  # Wrapper so the mapping back-ends can iterate over trial indices; the
  # index itself is unused by single_trial.
  single_trial_wrapper <- function(x) {
    single_trial(
      sens_true = sens_true,
      spec_true = spec_true,
      prev_true = prev_true,
      sens_pg = sens_pg,
      spec_pg = spec_pg,
      prior_sens = prior_sens,
      prior_spec = prior_spec,
      prior_prev = prior_prev,
      succ_sens = succ_sens,
      succ_spec = succ_spec,
      n_at_looks = n_at_looks,
      n_mc = n_mc)
  }

  if (.Platform$OS.type == "windows") {
    # Windows systems: no fork -- use foreach/doParallel when ncores > 1.
    if (ncores == 1L) {
      sims <- lapply(X = seq_len(n_trials),
                     FUN = single_trial_wrapper)
      # Bug fix: this serial branch previously left `sims` as a list, so the
      # `sims$trial <- ...` assignment below silently added a list element
      # instead of a data-frame column.
      sims <- do.call("rbind", sims)
    } else {
      doParallel::registerDoParallel(cores = ncores)
      sims <- foreach(x = seq_len(n_trials), .packages = 'adaptDiag',
                      .combine = rbind) %dopar% {
        # Bug fix: pass the loop variable through (it was dropped before).
        single_trial_wrapper(x)
      }
      registerDoSEQ()
    }
  } else {
    # *nix systems: fork-based parallelism with a progress bar.
    sims <- pbmclapply(X = seq_len(n_trials),
                       FUN = single_trial_wrapper,
                       mc.cores = ncores)
    sims <- do.call("rbind", sims)
  }

  # Label each stacked row with its trial number (one row per look per trial).
  sims$trial <- rep(seq_len(n_trials), each = length(n_at_looks))

  # Record the effective arguments for downstream operating-characteristic
  # calculations.
  args <- list("sens_true" = sens_true,
               "spec_true" = spec_true,
               "prev_true" = prev_true,
               "endpoint" = endpoint,
               "sens_pg" = sens_pg,
               "spec_pg" = spec_pg,
               "prior_sens" = prior_sens,
               "prior_spec" = prior_spec,
               "prior_prev" = prior_prev,
               "succ_sens" = succ_sens,
               "succ_spec" = succ_spec,
               "n_at_looks" = n_at_looks,
               "n_mc" = n_mc,
               "n_trials" = n_trials)

  out <- list(sims = sims,
              call = Call,
              args = args)

  invisible(out)
}
|
023fb5eec65de1fcb209c9543b3e5a7af9413b6f
|
604209f18e54add484640e37a8d12636e7451540
|
/man/get_single_Quandl.Rd
|
39cfe81eac8e895dd656ce52a44a2252196f43bb
|
[] |
no_license
|
msperlin/GetQuandlData
|
5212b6eb9f984ccca3653c4fd83870a38cadc78e
|
563edf8ace68111868bd0d4043d80f9bd32eead0
|
refs/heads/master
| 2023-02-23T13:00:19.433950
| 2023-02-15T12:26:25
| 2023-02-15T12:26:25
| 212,104,930
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,790
|
rd
|
get_single_Quandl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_guandl_series.R
\name{get_single_Quandl}
\alias{get_single_Quandl}
\title{Fetches a single time series from Quandl}
\usage{
get_single_Quandl(
id_in,
name_in,
api_key,
first_date,
last_date,
do_cache = TRUE,
order = "asc",
collapse = "none",
transform = "none"
)
}
\arguments{
\item{id_in}{Character vector of ids to grab data. When using a named vector, the name is used to
register the time series. Example: id_in <- c('US GDP' = 'FRED/GDP')}
\item{name_in}{Name of series to fetch}
\item{api_key}{YOUR api key (get your own at <https://www.quandl.com/sign-up-modal?defaultModal=showSignUp>)}
\item{first_date}{First date of all requested series as YYYY-MM-DD (default = Sys.date() - 365)}
\item{last_date}{Last date of all requested series as YYYY-MM-DD (default = Sys.date() - 365)}
\item{do_cache}{Do cache? TRUE (default) or FALSE. Sets the use of package memoise to cache results from the api}
\item{order}{How to order the time series data: 'asc' (ascending dates, default) or 'desc' (descending)}
\item{collapse}{Frequency of time series: 'none' (default), 'daily', 'weekly', 'monthly', 'quarterly', 'annual'}
\item{transform}{Quandl transformation: 'none', 'diff', 'rdiff', 'rdiff_from', 'cumul', 'normalize'.
Details at <https://docs.quandl.com/docs/parameters-2>}
}
\value{
A single dataframe
}
\description{
Fetches a single time series from Quandl
}
\examples{
api_key <- 'YOUR_API_KEY_HERE'
id_in <- c('Inflation argentina' = 'RATEINF/INFLATION_ARG')
\dontrun{
df <- get_single_Quandl(id_in = id_in, name_in = '',
api_key = api_key,
first_date = '2010-01-01',
last_date = Sys.Date())
}
}
|
dcd2fa72590af9e00024f9c5793ec414d8c35fe6
|
6141ec79d6d942783a2ee5eca2ed957b2b852b11
|
/Scripts/GeometricMorphometrics.R
|
bea06ebc3983910746f50382e0bfa479d2b313c2
|
[] |
no_license
|
Moreau-Lab/MorphologyAndPCAs
|
adc56a53ea26cda2151379d6b81141a56fc4218c
|
94593c7f1075061ab50c0690be959c636575d078
|
refs/heads/main
| 2023-03-24T19:46:20.387234
| 2021-03-22T17:32:54
| 2021-03-22T17:32:54
| 350,418,189
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,637
|
r
|
GeometricMorphometrics.R
|
# Script for geometric morphometrics of ant head shapes.
# This script requires tps formatted landmark data for input. See the data folder for an example of what this looks like.
# Load the packages we will use:
library(geomorph)
library(tidyverse)
# NOTE(review): several functions used below are not provided by geomorph or
# the core tidyverse attach -- tidy()/augment() come from broom;
# theme_half_open(), background_grid(), theme_minimal_grid() and
# theme_minimal_hgrid() from cowplot; ggarrange(), annotate_figure() and
# text_grob() from ggpubr. They are presumably attached elsewhere in the
# session; confirm, or add the corresponding library() calls.
# All of the steps of the analysis are wrapped up in a single function, which reads in the raw landmark data, superimposes it for standardization, and performs a PCA on the standardized coordinates.
# Arguments:
#   dataFile    - path to a tps-formatted landmark file
#   pointColor  - colour used for the PCA scatter points
#   figureTitle - title placed above the combined figure
geometricMorphometricPCA <- function(dataFile, pointColor, figureTitle) {
# Read the landmark file; specimen IDs are taken from the "ID" field.
data <- geomorph::readland.tps(file = dataFile, specID = "ID")
# Superimpose the raw coordinate data (gpagen = generalised Procrustes analysis):
superimposition <- geomorph::gpagen(data, Proj = TRUE, ProcD = TRUE, curves = NULL, surfaces = NULL)
# Extract out the coords values from the object returned by gpagen:
coordinates <- geomorph.data.frame(superimposition)
# Convert it from an array to a matrix with one row per specimen
# (dim 3 of the coords array indexes specimens):
coordinates2 <- matrix(coordinates$coords, nrow=dim(coordinates$coords)[3], byrow=TRUE)
# Set the rownames of that matrix (specimen IDs from the array's 3rd dimnames):
rownames(coordinates2) <- dimnames(coordinates$coords)[[3]]
# Convert the matrix to a dataframe:
coordinates3 <- data.frame(coordinates2)
# Run the PCA on the superimposed coordinates:
regularPCA <- prcomp(coordinates3)
# Plot the PCA (PC1 vs PC2 scores):
PCAplot <- regularPCA %>%
broom::augment(coordinates3) %>% # add original dataset back in
ggplot(aes(x = .fittedPC1, y = .fittedPC2)) +
geom_point(size = 5, color = pointColor) +
theme_half_open(12) + background_grid()
plot(PCAplot)
# NOTE(review): the result of this pipe is neither assigned nor printed
# inside the function, so it is discarded -- looks like leftover
# exploratory code; confirm before removing.
regularPCA %>%
tidy(matrix = "rotation")
# define arrow style for plotting
arrow_style <- arrow(
angle = 20, ends = "first", type = "closed", length = grid::unit(8, "pt")
)
# plot rotation matrix (variable loadings on PC1/PC2, one arrow per variable)
rotationMatrix <- regularPCA %>%
tidy(matrix = "rotation") %>%
pivot_wider(names_from = "PC", names_prefix = "PC", values_from = "value") %>%
ggplot(aes(PC1, PC2)) +
geom_segment(xend = 0, yend = 0, arrow = arrow_style) +
geom_text(
aes(label = column),
hjust = 1, nudge_x = -0.02,
color = "#904C2F", size = 3
) +
xlim(-1.25, .5) + ylim(-.5, 1) +
theme_minimal_grid(12)
plot(rotationMatrix)
# How much variance is explained by each pc:
varianceValues <- regularPCA %>%
tidy(matrix = "eigenvalues")
variancePlot <- regularPCA %>%
tidy(matrix = "eigenvalues") %>%
ggplot(aes(x = PC, y = percent)) +
geom_col(fill = "#56B4E9", alpha = 0.8) +
scale_x_continuous(breaks = 1:9) +
scale_y_continuous(
labels = scales::percent_format(),
expand = expansion(mult = c(0, 0.01)), limits = c(0, 1)
) +
theme_minimal_hgrid(12)
plot(variancePlot)
# Re-label the PCA axes with the % variance explained by PC1/PC2:
xLabel <- paste("PC1, explains ", as.character(varianceValues$percent[1] * 100), "% of variance", sep = "")
yLabel <- paste("PC2, explains ", as.character(varianceValues$percent[2] * 100), "% of variance", sep = "")
PCAplotFinal <- PCAplot + labs(x = xLabel, y = yLabel)
plot(PCAplotFinal)
# Combine the three panels into one figure with a shared title:
allPlots <- ggarrange(PCAplotFinal, ggarrange(rotationMatrix, variancePlot, ncol = 1, nrow = 2), ncol = 2, nrow = 1, widths = c(2, 1))
allPlots <- annotate_figure(allPlots, top = text_grob(figureTitle, size = 14))
# NOTE(review): the function's value is whatever this final plot() call
# returns, not the figure object itself -- confirm callers do not rely on it.
plot(allPlots)
}
# To run this function, supply the data filename; the color you want your points to be; and a text string for the figure title.
AntHeadPCA <- geometricMorphometricPCA(dataFile = "./Data/ExampleLandmarks.txt", pointColor = "#F4B266", figureTitle = "Ant Head Shape")
plot(AntHeadPCA)
# NOTE(review): ggsave() with no plot argument saves the last ggplot created,
# which is not necessarily the combined ggarrange figure -- confirm this
# saves the intended plot.
ggsave(filename = "AntHeadPCA.png", device = "png", path = "./Plots/PCAs/", width = 16, height = 9, bg = "transparent")
|
06031e1128d3e711588b04336dac25903a11b33d
|
005bb9edaf643be9c8548d803483628c80cc0225
|
/second_fall_experiment/scripts/clay_R_scripts/analysis/model_psi_leaf/crap/predict_leafwp_noaddedvars.R
|
c4e395e8ae02b7a3876b93f08d03c337f7570493
|
[] |
no_license
|
sean-gl/2020_greenhouse
|
16b35b6b035a1926dc8858c7d0b2eba6b8dbe864
|
691c3923c75eea1bd57b8d218b343e8fdc10c33c
|
refs/heads/master
| 2021-05-22T00:22:46.456072
| 2020-05-25T17:28:54
| 2020-05-25T17:28:54
| 252,879,077
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,150
|
r
|
predict_leafwp_noaddedvars.R
|
### Model fitting, Greenhouse experiment 2019
### Goal: Use measured parameters to predict water potential, build a model to fill in
### missing data (so we can treat leaf water potential as a continuous variable)
# NOTE(review): rm(list=ls()) wipes the caller's workspace and require()
# fails silently when a package is absent -- both are discouraged in scripts;
# kept as-is here (comments only).
rm(list=ls())
require(ggplot2)
require(plyr)
require(lubridate)
require(readODS)
require(tidyr)
require(dplyr)
### IMPORTANT: SET SYSTEM TIMEZONE TO GMT, THIS IS REQUIRED FOR CODE TO WORK.
Sys.setenv(TZ='GMT')
Sys.getenv('TZ') # make sure it got set
### SECTION 1: Read data sets and do some processing -----------------
# 1. leaf temperature (15-min aggregated, flagged thermistor data)
lt <- readRDS('/home/sean/github/2020_greenhouse/second_fall_experiment/data/leaf_thermistor_data/leaf_thermistor_data_15min_agg_flagged.rds')
# remove position column, not useful
lt$position <- NULL
colnames(lt)[colnames(lt)=='canopy_position'] <- 'position'
# change position categories to match wind data ('lower'->'bottom', 'upper'->'top')
lt$position[lt$position=='lower'] <- 'bottom'
lt$position[lt$position=='upper'] <- 'top'
# filter data by flag (keep flag <= 2 and rows without a temperature flag)
lt_filter <- subset(lt, flag <= 2 & temperature_flag == 'none')
nrow(lt_filter)/nrow(lt) # proportion of rows retained after filtering
# Aggregate mean leaf temperature by 15-min stamp, block, treatment, position
lt_block <- ddply(lt_filter, .(by15, block, treatment, position), function(x){
setNames(mean(x$mean_leaftemp_C, na.rm = T), 'mean_leaftemp_C')
})
# 2. PAR
lq <- read.csv('/home/sean/github/2020_greenhouse/second_fall_experiment/data/line_PAR_sensors/line_PAR_15.csv')
lq$by15 <- as.POSIXct(lq$by15, tz = 'GMT')
# 3. RH, air temp, soil temp
rh <- read.csv('/home/sean/github/2020_greenhouse/second_fall_experiment/data/RH_temp_PAR_logger_data/rh_15.csv')
rh$by15 <- as.POSIXct(rh$by15, tz='GMT')
rh$par2_s <- NULL # REMOVE THIS VARIABLE, DATA ARE BAD
# remove soil temp columns, these are imported below from a separate file
rh <- rh %>% select(-contains('soil_t'))
soil_temp <- read.csv('/home/sean/github/2020_greenhouse/second_fall_experiment/data/RH_temp_PAR_logger_data/soil_temp_15.csv')
soil_temp$by15 <- as.POSIXct(soil_temp$by15, tz='GMT')
# merge leaf temp and "RH" (includes air temp, rh, and light data)
lat <- merge(lt_block, rh)
lat$date <- lubridate::date(lat$by15)
# convert to wide (one leaf-temperature column per canopy position)
lat_wide <- tidyr::spread(lat, 'position', 'mean_leaftemp_C')
names(lat_wide)[names(lat_wide) %in% c('bottom','middle','top')] <- c('leaftemp_bottom','leaftemp_middle','leaftemp_top')
# Since position changes, sometimes data isn't available at a given position.
# Let's add a couple variables to handle these cases.
# Highest canopy position with available (non-NA) leaf-temperature data
lat_wide$leaftemp_highest_avail <- apply(lat_wide, 1, function(x) {
ind <- which(!is.na(x[c('leaftemp_bottom','leaftemp_middle','leaftemp_top')]))
as.numeric(x[c('leaftemp_bottom','leaftemp_middle','leaftemp_top')][max(ind)])
})
# Mean of all position's data
lat_wide$leaftemp_mean <- rowMeans(lat_wide[,c('leaftemp_bottom','leaftemp_middle','leaftemp_top')], na.rm = T)
# 4. Wind sensors
wind <- read.csv('/home/sean/github/2020_greenhouse/second_fall_experiment/data/wind_sensor_data/wind_15.csv')
wind$by15 <- as.POSIXct(wind$by15, tz='GMT')
# convert to wide format (one wind-speed column per position)
windWide <- tidyr::spread(wind, 'position', 'wind_speed_m_s')
head(windWide)
colnames(windWide) <- c('by15','treatment','windspeed_bottom','windspeed_middle','windspeed_top')
# 5. Pressure bomb data (leaf water potential, the modelling target)
pb <- read.csv('/home/sean/github/2020_greenhouse/second_fall_experiment/data/pressure_bomb/pressure_bomb_15.csv')
pb$by15 <- as.POSIXct(pb$by15, tz='GMT')
# omit bad observation & missing observation
pb <- pb[pb$data_ok=='yes' & !is.na(pb$psi_MPa), ]
## Make some edits to data this day... (shift the timestamp of the
## moderate_drought measurements to a common 15-min slot)
pb$by15[date(pb$by15)=='2019-11-15' & pb$treatment=='moderate_drought'] <- '2019-11-15 13:45:00 GMT'
# pb$by15[date(pb$by15)=='2019-11-15' & pb$treatment=='well_watered'] <- '2019-11-15 13:45:00 GMT'
# get means by day and treatment/block; block = first letter of plant_id
pb$block <- toupper(substr(pb$plant_id,1,1))
pb$date <- lubridate::date(pb$by15)
pbMeans <- ddply(pb, .(by15, block, treatment), function(x) {
setNames(mean(x$psi_MPa), 'mean_psi_MPa')
})
### SECTION 3. Merge datasets and add more variables ---------------------
comb <- merge(lq, soil_temp, by=c('by15')); nrow(comb)
comb <- merge(comb, windWide, by=c('by15', 'treatment'), all.x = T); nrow(comb)
comb <- merge(comb, lat_wide, by=c('by15', 'treatment')); nrow(comb)
### Merge in actual pressure bomb data
# NOTE(review): the pressure-bomb means are merged into comb_xonly, but the
# models in Section 4 fit against `comb`, which has no mean_psi_MPa column --
# confirm whether this merge was intended to assign back to `comb`.
comb_xonly <- merge(comb, pbMeans, all.x = T)
# rename column to "mean" to match code below
# names(comb)[names(comb)%in%'psi_MPa'] <- 'mean_psi_MPa'
# check for any duplicated columns in merges above (.x/.y suffixes)
which(grepl('\\.x', names(comb)) | grepl('\\.y', names(comb)))
# add "minutes" (of day) column
comb$minutes <- 60*hour(comb$by15) + minute(comb$by15)
# add irrigation amount (ml) by treatment (well_watered increased on 2019-11-05)
comb$date <- date(comb$by15)
comb$irrig <- NA
comb$irrig[comb$date < "2019-11-05" & comb$treatment == 'well_watered'] <- 750
comb$irrig[comb$date >= "2019-11-05" & comb$treatment == 'well_watered'] <- 1000
comb$irrig[comb$treatment == 'moderate_drought'] <- 375
comb$irrig[comb$treatment %in% c('full_drought','virgin_drought')] <- 150
table(comb$irrig)
# calculate VPD_leaf based on leaf temperature
cor(comb$sht1_high_rh, comb$am2320_high_rh, use = 'complete.obs')
cor(comb$sht2_low_rh, comb$sht1_high_rh, use = 'complete.obs')
comb$rh_high_mean <- rowMeans(comb[ , c('sht1_high_rh','am2320_high_rh')], na.rm = T)
comb$VPD_leaf <- (1 - (comb$rh_high_mean / 100)) * 0.61121 * exp((17.502 * comb$leaftemp_highest_avail) / (240.97 + comb$leaftemp_highest_avail))
summary(comb$VPD_leaf)
### Add days since treatment started (three treatment periods)
summary(comb$date)
comb$daysPostTrt <- NA
ind <- comb$date < '2019-11-05'
comb$daysPostTrt[ind] <- comb$date[ind] - as.Date('2019-10-25')
ind <- comb$date > '2019-11-04' & comb$date < '2019-11-28'
comb$daysPostTrt[ind] <- comb$date[ind] - as.Date('2019-11-05')
ind <- comb$date > '2019-11-27'
comb$daysPostTrt[ind] <- comb$date[ind] - as.Date('2019-11-28')
summary(comb$daysPostTrt)
### LEDs on (y/n)?
# comb$LED_on <- 'y'
# comb$LED_on[comb$by15 %in% c(as.POSIXct('2019-12-10 18:00:00', tz='GMT'),
# as.POSIXct('2019-12-11 06:15:00', tz='GMT'))] <- 'n'
### SECTION 4. Model fitting ---------------------
# NOTE(review): mean_psi_MPa was merged into comb_xonly above, not comb --
# confirm which data set these models were meant to use.
### CURRENTLY THE BEST R2
m <- lm(mean_psi_MPa ~ minutes + treatment + block + daysPostTrt +
          windspeed_middle + bmp_box_temp + soil_temp_C, data = comb); summary(m)
# AS GOOD, could add windspeed_middle in, if possible.
# could use PAR length instead of minutes.
# Bug fix: this previously printed summary(m) instead of summary(m2).
m2 <- lm(mean_psi_MPa ~ minutes + irrig + block +
           bmp_box_temp + leaftemp_mean, data = comb); summary(m2)
mean(m2$residuals^2)    # mean squared error of m2
mean(abs(m2$residuals)) # mean absolute error of m2
### TRUNCATED REGRESSION (doesn't seem to work well...)
# library() errors loudly if the package is missing, unlike require().
library(truncreg)
m.trunc <- truncreg(mean_psi_MPa ~ minutes + irrig + block +
                      bmp_box_temp + leaftemp_mean, data = comb,
                    point = 0, direction = "left")
summary(m.trunc)
library(caret)
library(randomForest)
library(glmnet)
### 4.1 Lasso Regression
# Keep soil_tempe, windspeed and VPD
# Drop identifier/time columns and per-height leaf temperatures before
# building the design matrix.
df2 <- subset(comb, select = -c(by15, date, leaftemp_bottom, leaftemp_middle))
# altd_bottom, altd_middle))
# Omit those variables so we have more complete cases
df2 <- subset(df2, select = -c(leaftemp_top, soil_temp_C, windspeed_bottom, windspeed_middle, windspeed_top))
# there can't be any missing values
df2 <- subset(df2, complete.cases(df2)); nrow(df2)
# create model matrix for predictor variables
# (drops the intercept column; factors are expanded to dummies)
x <- model.matrix(mean_psi_MPa ~ ., df2)[,-1]
# create vector for response variable
y <- df2$mean_psi_MPa
# set.seed(51)
# train.prop <- 0.5
# train <- sample(1:nrow(df2), nrow(df2) * train.prop); length(train)
# test <- -train
# lasso.mod <- glmnet(x[train, ], y[train], alpha = 1, standardize = T, nlambda = 100)
# plot(lasso.mod, label = T)
# print(lasso.mod)
# plot(lasso.mod, xvar = 'dev')
### REPEAT THE CROSS-VALIDATION N TIMES, TO SEE WHICH VARIABLES ARE CONSISTENTLY IMPORTNAT
# list to store variables
# Each repetition: 5-fold CV to pick lambda.1se, refit on the full data,
# and record which coefficients are non-zero at that lambda.
nreps <- 10
nzcList <- list()
for(i in 1:nreps) {
  # CV using full dataset
  lasso.cv <- cv.glmnet(x, y, family='gaussian', alpha=1, nfolds=5, standardize=T)
  # plot(lasso.cv)
  lasso.cv$lambda.1se
  # Now that we know lambda, fit on *full* data set
  full_fit_lasso <- glmnet(x, y, alpha = 1, lambda = lasso.cv$lambda.1se)
  # summary(full_fit_lasso)
  lasso_coeffs <- predict(full_fit_lasso,
                          type = "coefficients", # return betas; not predictions
                          s = lasso.cv$lambda.1se)
  nzCoef <- lasso_coeffs@Dimnames[[1]][which(lasso_coeffs != 0)]
  nzCoef <- nzCoef[nzCoef != '(Intercept)']
  nzcList[[i]] <- nzCoef
}
# Tally how often each variable was selected across the nreps repetitions.
z=unlist(nzcList)
b=sort(unique(unlist(nzcList)))
sapply(b, function(a) length(z[z == a]))
# THESE 7 VARIABLES SEEM MOST IMPORTANT:
# blockM, bMP_box_temp, irrig, leaftemp_mean, minutes, sht2_low_rh, "treatmentwell_watered" (somewhat)
# leaftemp_top and windspeed_top important only if using these variables (but results in smaller n)
# Or using "derived variables", par_length also important
# to a much lesser degree, "daysPostTrt" is also important
# CV using full dataset
# One final CV run to pick the lambda actually used below.
lasso.cv <- cv.glmnet(x, y, family='gaussian', alpha=1, nfolds=5, standardize=T); plot(lasso.cv)
lasso.cv$lambda.1se
# Now that we know lambda, fit on *full* data set
full_fit_lasso <- glmnet(x, y, alpha = 1, lambda = lasso.cv$lambda.1se)
# summary(full_fit_lasso)
lasso_coeffs <- predict(full_fit_lasso,
                        type = "coefficients", # return betas; not predictions
                        s = lasso.cv$lambda.1se)
nzCoef <- lasso_coeffs@Dimnames[[1]][which(lasso_coeffs != 0)]
nzCoef <- nzCoef[nzCoef != '(Intercept)']
nzCoef
# predictions on full data set
lasso_pred_full <- predict(full_fit_lasso, s = lasso.cv$lambda.1se, newx = x)
mean((lasso_pred_full - y)^2)
mean(abs(lasso_pred_full - y))
plot(lasso_pred_full, y); abline(0, 1, col='red')
# Use all 7 variables (full model)
# OLS refit on the lasso-selected columns of x.
fullmod <- lm(y ~ x[ , nzCoef])
summary(fullmod)
mean(fullmod$residuals^2); mean(abs(fullmod$residuals))
# try omitting variables w/low p-values
mod1 <- lm(y ~ x[,"irrig"])
summary(mod1)
mean(mod1$residuals^2); mean(abs(mod1$residuals))
# Almost as good as full model but with only 3 variables.
# NOTE(review): mod2 is reassigned four times below; any later code that
# uses mod2 (e.g. the prediction section) gets whichever fit ran last.
mod2 <- lm(y ~ x[,c('bmp_box_temp','minutes','irrig')])
# using original data
mod2 <- lm(mean_psi_MPa ~ bmp_box_temp + minutes + irrig, df2)
summary(mod2)
mean(mod2$residuals^2); mean(abs(mod2$residuals))
mod2 <- lm(y ~ x[,c('bmp_box_temp','par_length','irrig','leaftemp_top')])
mod2 <- lm(y ~ x[,c('bmp_box_temp','par_length','irrig','blockM')])
summary(mod2)
mean(mod2$residuals^2); mean(abs(mod2$residuals))
### BEST SUBSETS REGRESSION
require(leaps)
colnames(x)
which(colnames(x) == 'irrig')
# Drop the first five columns (dummy/ID columns) before subset search.
x2 <- x[ , -c(1:5)]
regfit.full <- regsubsets(x2, y, nvmax = 10)
rs <- summary(regfit.full)
rs$adjr2
rs
### RANDOM FOREST BOOSTING
# Data prep for gradient boosting: keep rows with windspeed data, drop
# identifier/time columns, and make block a factor.
# there can't be any missing values
# df <- comb[!is.na(comb$leaftemp_top), ]
# df2 <- comb[, !names(comb) %in% c('by15','date',
#             'cumsum_altd_bottom','cumsum_altd_middle','cumsum_altd_top')]
df2 <- comb[!is.na(comb$windspeed_bottom),]
df2 <- df2[, !names(df2) %in% c('by15','date')]
df2$block <- as.factor(df2$block)
# omit non-predictor vars
names(df2)
# replace NaN with NA (required for boosting)
# NOTE(review): is.nan() errors on character columns -- this presumably
# assumes all remaining columns are numeric or factor; verify.
for(i in 1:ncol(df2)) {
  ind <- is.nan(df2[,i])
  df2[ind, i] <- NA
}
# df[,c(grep('wind', names(df), value = T), 'leaftemp_bottom','leaftemp_middle','leaftemp_top')] <- NULL
# df2 <- df[complete.cases(df),]
# subset into train and test dfs
# train_n <- round(0.7 * nrow(df2))
# test_n <- nrow(df2) - train_n
# train_ind <- sample(1:nrow(df2), train_n, replace = F)
# train_data <- df2[train_ind,]
# test_data <- df2[-train_ind,]
### Boosting
library(gbm)  # library() (not require()) so a missing package fails loudly
# Gradient-boosted regression of leaf water potential on all remaining columns.
m.boost <- gbm(mean_psi_MPa ~ .,
               data = df2,
               distribution = 'gaussian',
               n.trees = 50,
               interaction.depth = 1,
               shrinkage = 0.1,
               bag.fraction = 0.5,
               cv.folds = 5)
# CV and training error as a function of boosting iteration.
plot(1:length(m.boost$cv.error), m.boost$cv.error)
plot(1:length(m.boost$train.error), m.boost$train.error)
# m.boost
# summary(m.boost)
# yhat <- predict(m.boost, newdata = test_data, n.trees = 5000)
# y <- test_data$mean_psi_MPa
# BUG FIX: the model above is fit with only 50 trees, but predictions were
# requested with n.trees = 5000 (gbm warns and truncates).  Use the number
# of trees actually fit so the code stays correct if n.trees changes.
yhat <- predict(m.boost, newdata = df2, n.trees = m.boost$n.trees)
y <- df2$mean_psi_MPa
diffs <- y-yhat
# NOTE: these are IN-SAMPLE errors (yhat is computed on the fitting data),
# not test errors as the original comments claimed.
mean(diffs^2) # in-sample MSE
mean(abs(diffs)) # in-sample MAD
summary(diffs)
# preds <- data.frame(predicted_psi = yhat, actual_psi=test_data$mean_psi_MPa)
preds <- data.frame(predicted_psi = yhat, actual_psi=df2$mean_psi_MPa)
plot(predicted_psi ~ actual_psi, data=preds)
abline(0,1, col='red')
### SECTION 5. Make predictions based on models -------------
# recombine data, this time without pressure bomb data
# (sensor streams only, so predictions cover every 15-min timestamp)
# summary(date(lq$by15))
# summary(date(soil_temp$by15))
# summary(date(windWide$by15))
# summary(date(lat_wide$by15))
# nrow(lq); nrow(soil_temp)
comb_xonly <- merge(lq, soil_temp, by=c('by15')); nrow(comb_xonly)
comb_xonly <- merge(comb_xonly, windWide, by=c('by15', 'treatment'), all.x = T); nrow(comb_xonly)
comb_xonly <- merge(comb_xonly, lat_wide, by=c('by15', 'treatment')); nrow(comb_xonly)
# add "minutes" (of day) column
comb_xonly$minutes <- 60*hour(comb_xonly$by15) + minute(comb_xonly$by15)
# add irrigation amount (ml)
# Well-watered pots got 750 ml before 2019-11-05 and 1000 ml after;
# moderate drought 375 ml; full/virgin drought 150 ml.
comb_xonly$date <- date(comb_xonly$by15)
comb_xonly$irrig <- NA
comb_xonly$irrig[comb_xonly$date < "2019-11-05" & comb_xonly$treatment == 'well_watered'] <- 750
comb_xonly$irrig[comb_xonly$date >= "2019-11-05" & comb_xonly$treatment == 'well_watered'] <- 1000
comb_xonly$irrig[comb_xonly$treatment == 'moderate_drought'] <- 375
comb_xonly$irrig[comb_xonly$treatment %in% c('full_drought','virgin_drought')] <- 150
table(comb_xonly$irrig)
# calculate VPD_leaf based on leaf temperature
# Buck-type saturation vapour pressure formula using leaftemp_top and the
# mean of the two high-mounted RH sensors.
comb_xonly$rh_high_mean <- rowMeans(comb_xonly[ , c('sht1_high_rh','am2320_high_rh')])
comb_xonly$VPD_leaf <- (1 - (comb_xonly$rh_high_mean / 100)) * 0.61121 * exp((17.502 * comb_xonly$leaftemp_top) / (240.97 + comb_xonly$leaftemp_top))
summary(comb_xonly$VPD_leaf)
### Add days since treatment started
# Three treatment rounds started 2019-10-25, 2019-11-05 and 2019-11-28.
summary(comb_xonly$date)
comb_xonly$daysPostTrt <- NA
ind <- comb_xonly$date < '2019-11-05'
comb_xonly$daysPostTrt[ind] <- comb_xonly$date[ind] - as.Date('2019-10-25')
ind <- comb_xonly$date > '2019-11-04' & comb_xonly$date < '2019-11-28'
comb_xonly$daysPostTrt[ind] <- comb_xonly$date[ind] - as.Date('2019-11-05')
ind <- comb_xonly$date > '2019-11-27'
comb_xonly$daysPostTrt[ind] <- comb_xonly$date[ind] - as.Date('2019-11-28')
summary(comb_xonly$daysPostTrt)
### Merge in actual pressure bomb data
comb_xonly <- merge(comb_xonly, pbMeans, all.x = T)
### Make predictions based on some models
# NOTE(review): m2 and mod2 are each reassigned several times upstream, so
# the fits used here depend on which lines were run last -- confirm intended.
# comb_xonly$yhat_m1 <- predict(m.trunc, newdata = comb_xonly)
# LINEAR MODEL (MANUAL SELECTION)
comb_xonly$yhat_m1 <- predict(m2, newdata = comb_xonly)
# LASSO PREDICTIONS
comb_xonly$yhat_lasso <- predict(mod2, newdata = comb_xonly)
# RANDOM FOREST BOOSTING
comb_xonly$yhat_rfboost <- predict(m.boost, newdata = comb_xonly, n.trees = 50)
summary(comb_xonly$yhat_m1)
# Sanity checks on distributions of observed and predicted psi.
plot(density(comb$mean_psi_MPa))
qqnorm(comb$mean_psi_MPa); qqline(comb$mean_psi_MPa)
plot(mod2)
plot(density(comb_xonly$yhat_m1))
plot(density(comb_xonly$yhat_m1[comb_xonly$treatment=='full_drought']))
plot(density(comb_xonly$yhat_m1[comb_xonly$treatment=='moderate_drought']))
plot(density(comb_xonly$yhat_m1[comb_xonly$treatment=='well_watered'])) # most the negative values are for well-watered...
plot(density(comb_xonly$yhat_m1[comb_xonly$treatment=='virgin_drought']))
### Plot the predicted psi_leaf
head(comb_xonly)
# plot 2nd treatments
sub <- subset(comb_xonly, date(by15) >= '2019-11-11' & date(by15) <= '2019-11-12')
# plot 3rd treatments
sub <- subset(comb_xonly, date(by15) >= '2019-12-01' & date(by15) <= '2019-12-12')
sub <- subset(comb_xonly, date(by15) == '2019-11-20')
# Lines = model predictions; points = observed pressure-bomb means.
ggplot(sub, aes(x=by15, y=yhat_m1, color=treatment)) + geom_line() +
  # geom_line(aes(x=by15, y=leaftemp_mean/10)) +
  geom_point(aes(x=by15, y=mean_psi_MPa), size=3)
ggplot(sub) +
  geom_line(aes(x=by15, y=yhat_rfboost, color=treatment)) +
  # geom_line(aes(x=by15, y=leaftemp_mean/10, color=treatment)) +
  geom_point(aes(x=by15, y=mean_psi_MPa, color=treatment), size=3)
|
133743d59b722201400677380a16bdda7fb4dbb7
|
b0c09959df30b73d953fa98b8bb6c10810fa080d
|
/man/clr_set_alpha.Rd
|
b1229e87558047d7eee27a867fea5c7beec09d2c
|
[] |
no_license
|
k-hench/fftidy
|
f325ed1aaefb9d0af395ef21acef387849f6a1f1
|
a8c2cd364f1597de8612188bbe73cccd7d539d37
|
refs/heads/master
| 2023-03-15T11:02:11.998683
| 2021-03-05T16:37:54
| 2021-03-05T16:37:54
| 300,317,485
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
clr_set_alpha.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample_colors.R
\docType{data}
\name{clr_set_alpha}
\alias{clr_set_alpha}
\title{The transparent project color scheme}
\format{
An object of class \code{colors} of length 8.
}
\usage{
clr_set_alpha
}
\description{
The transparent project color scheme
}
\examples{
#> Source Code:
clr_set_alpha
}
\seealso{
[fftidy::clr_set_base] a basic version of the color scheme.
[fftidy::clr_set_samples] a less saturated version of the color scheme.
[fftidy::clr_set_light] a lighter version of the color scheme.
}
\keyword{datasets}
|
c09ae35888889779fb84c0da44255bbce1aa1ba8
|
d66dfd6d796d5cec519bdac2a37bbac2d7e8e1a8
|
/Prepare_rasters.R
|
de63a93dd4c24f2332dd79753d5cd71e7bfbd2d2
|
[] |
no_license
|
derek-corcoran-barrios/RyeNorskov
|
0e9c1ed08bd1ec67531756ff7d110fc199f7c69c
|
ee84fd3f7c5b76a607be9e4ad2cc4e7ac40c3823
|
refs/heads/master
| 2023-08-17T05:55:10.860940
| 2021-09-29T06:55:26
| 2021-09-29T06:55:26
| 408,747,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,152
|
r
|
Prepare_rasters.R
|
## Prepare raster predictor layers for the Rye Nørskov study area:
## read each LiDAR-derived VRT mosaic, crop/mask it to the study polygon,
## stack the layers, force them into memory, and cache them as an RDS file.
## NOTE(review): the pipe `%>%` is used but neither magrittr nor dplyr is
## attached here -- presumably loaded elsewhere in the session; verify.
## Load packages
library(raster)
library(sf)
## Read in shapefiles to crop and mask
RyeNoskov <- read_sf("ShapeFiles/RyeNoerskov.shp") %>% st_transform(crs = "+proj=utm +zone=32 +ellps=GRS80 +units=m +no_defs")
# Vegetation density, cropped and masked to the study polygon
VegDens <- list.files(path = "O:/Nat_Ecoinformatics-tmp/au634851/dk_lidar_backup_2021-06-28/vegetation_density", pattern = "vrt", full.names = T) %>%
  raster() %>%
  crop(RyeNoskov) %>%
  mask(RyeNoskov)
# Canopy height
canopy_height <- list.files(path = "O:/Nat_Ecoinformatics-tmp/au634851/dk_lidar_backup_2021-06-28/canopy_height", pattern = "vrt", full.names = T) %>%
  raster() %>%
  crop(RyeNoskov) %>%
  mask(RyeNoskov)
# Mean openness
openness_mean <- list.files(path = "O:/Nat_Ecoinformatics-tmp/au634851/dk_lidar_backup_2021-06-28/openness_mean", pattern = "vrt", full.names = T) %>%
  raster() %>%
  crop(RyeNoskov) %>%
  mask(RyeNoskov)
# Topographic wetness index
TWI <- list.files(path = "O:/Nat_Ecoinformatics-tmp/au634851/dk_lidar_backup_2021-06-28/twi", pattern = "vrt", full.names = T) %>%
  raster() %>%
  crop(RyeNoskov) %>%
  mask(RyeNoskov)
Vars <- stack(VegDens, canopy_height, openness_mean, TWI)
# readAll() pulls the cropped layers into memory so the saved RDS is
# self-contained (no file references back to the O: drive).
Vars <- readAll(Vars)
saveRDS(Vars, "Variables.rds")
|
6d54e5a9913928f92d5f3637748f11d066e4ef3b
|
c053cc97c204c6af25664cf337d6dd94d984c591
|
/tests/testthat/test-validation.R
|
0b37314892716108d101a9446060c2544eabe6d1
|
[
"MIT"
] |
permissive
|
tidymodels/yardstick
|
1b2454ae37da76b6c5c2b36682d573c7044767a7
|
e5c36f206fb737fc54b1a6161c09bc0d63b79beb
|
refs/heads/main
| 2023-08-19T03:29:20.953918
| 2023-08-08T21:32:57
| 2023-08-08T21:32:57
| 108,898,402
| 294
| 55
|
NOASSERTION
| 2023-08-08T21:32:59
| 2017-10-30T19:26:54
|
R
|
UTF-8
|
R
| false
| false
| 8,524
|
r
|
test-validation.R
|
# Tests for the internal truth/estimate validation helpers.
# Pattern: expect_no_error() covers valid inputs (including length-1 and
# empty vectors); expect_snapshot(error = TRUE) pins the error message for
# each kind of malformed input.
# NOTE(review): expect_snapshot() records both the code and the condition
# message, so reformatting these calls requires regenerating snapshots.
test_that("validate_numeric_truth_numeric_estimate errors as expected", {
  expect_no_error(
    validate_numeric_truth_numeric_estimate(1:10, 1:10)
  )
  expect_no_error(
    validate_numeric_truth_numeric_estimate(1, 1)
  )
  expect_no_error(
    validate_numeric_truth_numeric_estimate(1L, 1L)
  )
  expect_no_error(
    validate_numeric_truth_numeric_estimate(numeric(), numeric())
  )
  expect_snapshot(
    error = TRUE,
    validate_numeric_truth_numeric_estimate("1", 1)
  )
  expect_snapshot(
    error = TRUE,
    validate_numeric_truth_numeric_estimate(1, "1")
  )
  expect_snapshot(
    error = TRUE,
    validate_numeric_truth_numeric_estimate(matrix(1), 1)
  )
  expect_snapshot(
    error = TRUE,
    validate_numeric_truth_numeric_estimate(1, matrix(1))
  )
  expect_snapshot(
    error = TRUE,
    validate_numeric_truth_numeric_estimate(1:4, 1:5)
  )
})
# Factor truth/estimate: both must be factors with identical level sets
# and identical lengths.
test_that("validate_factor_truth_factor_estimate errors as expected", {
  expect_no_error(
    validate_factor_truth_factor_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      factor(c("a", "a", "a"), levels = c("a", "b"))
    )
  )
  expect_no_error(
    validate_factor_truth_factor_estimate(
      factor(c("a"), levels = c("a")),
      factor(c("a"), levels = c("a"))
    )
  )
  expect_no_error(
    validate_factor_truth_factor_estimate(
      factor(character(), levels = character()),
      factor(character(), levels = character())
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate("1", 1)
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate(
      c("a", "b", "a"),
      factor(c("a", "a", "a"), levels = c("a", "b"))
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate(
      factor(c("a", "a", "a"), levels = c("a", "b")),
      c("a", "b", "a")
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate(
      factor(c("a", "a", "a"), levels = c("a", "b")),
      1:3
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      factor(c("a", "a", "a"), levels = c("a", "b", "c"))
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_factor_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      factor(c("a", "a", "a", "a"), levels = c("a", "b"))
    )
  )
})
# Binary estimator: estimate is a plain numeric vector of probabilities,
# and truth must have exactly two levels.
test_that("validate_factor_truth_matrix_estimate errors as expected for binary", {
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      1:3,
      estimator = "binary"
    )
  )
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(c("a"), levels = c("a", "b")),
      1,
      estimator = "binary"
    )
  )
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(character(), levels = c("a", "b")),
      numeric(),
      estimator = "binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      c("a", "b", "a"),
      1:3,
      estimator = "binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      c("a", "b", "a"),
      estimator = "binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(character(), levels = c("a", "b")),
      matrix(1:6, ncol = 2),
      estimator = "binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b", "c")),
      1:3,
      estimator = "binary"
    )
  )
})
# Non-binary estimator: estimate is a numeric matrix with one column per
# truth level.
test_that("validate_factor_truth_matrix_estimate errors as expected for non-binary", {
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      matrix(1:6, ncol = 2),
      estimator = "non binary"
    )
  )
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b", "c", "d")),
      matrix(1:12, ncol = 4),
      estimator = "non binary"
    )
  )
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(c("a"), levels = c("a", "b")),
      matrix(1:2, ncol = 2),
      estimator = "non binary"
    )
  )
  expect_no_error(
    validate_factor_truth_matrix_estimate(
      factor(character(), levels = c("a", "b")),
      matrix(numeric(), ncol = 2),
      estimator = "non binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      c("a", "b", "a"),
      matrix(1:6, ncol = 2),
      estimator = "non binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      1:3,
      estimator = "non binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      matrix(as.character(1:6), ncol = 2),
      estimator = "non binary"
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_factor_truth_matrix_estimate(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      matrix(1:15, ncol = 5),
      estimator = "non binary"
    )
  )
})
# BUG FIX: the description previously said "validate_numeric_truth_numeric_estimate"
# (copy-paste from the first test), but this test exercises
# validate_binary_estimator.  Renamed so failures point at the right helper.
# NOTE: the snapshot entry keyed by the old description must be regenerated.
test_that("validate_binary_estimator errors as expected", {
  # A non-binary estimator accepts any number of levels.
  expect_no_error(
    validate_binary_estimator(
      factor(c("a", "b", "a"), levels = c("a", "b", "c")),
      estimator = "not binary"
    )
  )
  # A binary estimator requires exactly two levels.
  expect_no_error(
    validate_binary_estimator(
      factor(c("a", "b", "a"), levels = c("a", "b")),
      estimator = "binary"
    )
  )
  # Three levels with a binary estimator must error.
  expect_snapshot(
    error = TRUE,
    validate_binary_estimator(
      factor(c("a", "b", "a"), levels = c("a", "b", "c")),
      estimator = "binary"
    )
  )
})
# Survival truth with a numeric estimate (predicted event times): the
# truth must be a Surv object with the same number of rows as the estimate.
test_that("validate_surv_truth_numeric_estimate errors as expected", {
  lung_surv <- data_lung_surv()
  expect_no_error(
    validate_surv_truth_numeric_estimate(
      lung_surv$surv_obj,
      lung_surv$.pred_time
    )
  )
  expect_no_error(
    validate_surv_truth_numeric_estimate(
      survival::Surv(1, 0),
      lung_surv$.pred_time[1]
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_numeric_estimate("1", 1)
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_numeric_estimate(
      lung_surv$surv_obj,
      as.character(lung_surv$.pred_time)
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_numeric_estimate(
      lung_surv$surv_obj[1:5, ],
      lung_surv$.pred_time
    )
  )
})
# Survival truth with a list estimate: one prediction data frame per
# observation; malformed list columns (wrong element type, wrong column
# names, missing columns) must error.
test_that("validate_surv_truth_list_estimate errors as expected", {
  lung_surv <- data_lung_surv()
  # list: elements are not data frames
  lung_surv$list <- lapply(seq_len(nrow(lung_surv)), identity)
  # list2: data frames with the wrong column names
  lung_surv$list2 <- lapply(
    seq_len(nrow(lung_surv)),
    function(x) data.frame(wrong = 1, names = 2)
  )
  # list3: a valid subset of prediction columns
  lung_surv$list3 <- lapply(
    lung_surv$.pred,
    function(x) x[c(1, 2, 5)]
  )
  # list4: an invalid subset of prediction columns
  lung_surv$list4 <- lapply(
    lung_surv$.pred,
    function(x) x[c(1, 2, 3)]
  )
  expect_no_error(
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      lung_surv$.pred
    )
  )
  expect_no_error(
    validate_surv_truth_list_estimate(
      survival::Surv(1, 0),
      lung_surv$.pred[1]
    )
  )
  expect_no_error(
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      lung_surv$list3
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate("1", 1)
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      lung_surv$list
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      lung_surv$list2
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      lung_surv$list4
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj,
      as.character(lung_surv$.pred_time)
    )
  )
  expect_snapshot(
    error = TRUE,
    validate_surv_truth_list_estimate(
      lung_surv$surv_obj[1:5, ],
      lung_surv$.pred_time
    )
  )
})
# Case weights must be NULL or have length equal to the data size.
test_that("validate_case_weights errors as expected", {
  expect_no_error(
    validate_case_weights(NULL, 10)
  )
  expect_no_error(
    validate_case_weights(1:10, 10)
  )
  expect_snapshot(
    error = TRUE,
    validate_case_weights(1:10, 11)
  )
})
|
c25a9498d064f32f79e4d5ce04df723c5d4df8b2
|
d227e4308a1b139690c7dc89c5cf55ae82e7a44e
|
/Shiny app/app.R
|
a9e950e33b3035c23d5f984535e5514ee01806f4
|
[] |
no_license
|
PHP-2560/pre-class-work-2018-rsbuckland
|
25b4efbe0aeadcb675eefecac6b7ead2e84ed266
|
b6e81747598ebf8d600b734acc5686706ee8ebcb
|
refs/heads/master
| 2020-03-29T00:58:23.275128
| 2018-12-05T03:06:43
| 2018-12-05T03:06:43
| 149,365,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 411
|
r
|
app.R
|
library(shiny)

# UI: one slider for the z statistic and a verbatim text output for the
# corresponding tail probability.
ui <- fluidPage(
  titlePanel("Z to P"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("zInput", "Z", 0, 3.4, 0, step = 0.1)
    ),
    mainPanel(
      verbatimTextOutput("results")
    )
  )
)

# Server: convert the selected z score to a one-sided tail probability
# via the standard normal CDF.
server <- function(input, output) {
  p_value <- reactive({
    pnorm(-abs(input$zInput))
  })
  output$results <- renderPrint({
    p_value()
  })
}

shinyApp(ui = ui, server = server)
|
32e306ca41438261a10bb9e050f41b92d97cd8d8
|
d1625e2223c81a6c510ccf8bb847c67ed85f8e2f
|
/tests/testthat/test-anb-families.R
|
cefff522c8875eca0ed9cab97065330c45f28cb5
|
[] |
no_license
|
bmihaljevic/bnclassify
|
ea548c832272c54d9e98705bfb2c4b054f047cf3
|
0cb091f49ffa840983fb5cba8946e0ffb194297a
|
refs/heads/master
| 2022-12-08T21:37:53.690791
| 2022-11-20T10:00:18
| 2022-11-20T10:00:18
| 37,710,867
| 20
| 12
| null | 2020-08-13T19:39:24
| 2015-06-19T08:30:56
|
R
|
UTF-8
|
R
| false
| false
| 4,045
|
r
|
test-anb-families.R
|
# Tests for augmented naive Bayes family construction and DAG utilities:
# graphNEL2families(), check_anb_families(), is_anb_family(),
# unique_families(), order_acyclic() and get_ancestors().
context("Aug nb families")
test_that("graph 2 families nominal", {
  g <- test_dag()
  f <- graphNEL2families(dag = g, class = 'A')
  expect_equal(names(f), c('B', 'A'))
})
test_that("graph 2 families class not in dag ", {
  g <- test_dag()
  expect_error(graphNEL2families(dag = g, class = 'C'), 'last not found')
})
test_that("graph 2 families class length > 1 ", {
  g <- test_dag()
  expect_error(graphNEL2families(dag = g, class = LETTERS[1:2]),
               'string')
})
test_that("graph 2 families Undirected graph" , {
  # NOTE(review): `e` is never used below; the edges come from
  # graph_from_to_to_edges() instead.
  e <- list(A = 'B', B = 'A')
  edges <- graph_from_to_to_edges(c('A', 'B'), c('B', 'A'))
  g <- graph_internal(nodes = LETTERS[1:2], edges, weights = NULL, edgemode = "directed")
  if (!skip_testing()) expect_error(graphNEL2families(dag = g, class = LETTERS[1]), 'is_dag_graph')
  g <- graph_internal(nodes = LETTERS[1:2], edges, weights = NULL, edgemode = "undirected")
  if (!skip_testing()) expect_error(graphNEL2families(dag = g, class = LETTERS[1]), 'is_dag_graph')
})
test_that("check families", {
  # Nominal
  tvars <- setNames(nm = letters[1:6])
  tfams <- lapply(tvars[-6], function(x) c(x, 'f'))
  tfams <- append(tfams, list(f = 'f'))
  check_anb_families(tfams, 'f')
  # Class not in all families
  tvars <- setNames(nm = letters[1:6])
  tfams <- lapply(tvars[-6], function(x) c(x, 'f'))
  tfams <- append(tfams, list(f = 'f'))
  tfams$b <- 'b'
  if (!skip_assert()) expect_error(check_anb_families(tfams, 'f'), 'fams_ok')
  # Family not in vars order
  tvars <- setNames(nm = letters[1:6])
  tfams <- lapply(tvars[-6], function(x) c(x, 'f'))
  tfams <- append(tfams, list(f='f'))
  tfams <- tfams[6:1]
  if (!skip_assert()) expect_error(check_anb_families(tfams, 'f'), 'last')
})
test_that("is is family nominal", {
  f <- letters[1:6]
  expect_true(is_anb_family(f, 'a', 'f'))
})
test_that("is is family wrong var", {
  f <- letters[1:6]
  expect_true(!is_anb_family(f, 'b', 'f'))
})
test_that("is is family wrong class", {
  f <- letters[1:6]
  expect_true(!is_anb_family(f, 'a', 'e'))
})
test_that("is is family missing values", {
  f <- c(letters[1:6], NA, 'g')
  expect_true(!is_anb_family(f, 'a', 'g'))
})
test_that("Unique families some in common", {
  a <- families(nbcar())
  b <- families(nbcarp(car[, 4:7]))
  fams <- unique_families(list(a, b))
  expect_equal(length(fams), 7)
  # NOTE(review): expect_equivalent() is deprecated in testthat 3e --
  # consider expect_equal(..., ignore_attr = TRUE) when migrating.
  expect_equivalent(fams, a)
})
test_that("Unique families none in common", {
  cr <- families(nbcar())
  vt <- families(nbvote())
  fams <- unique_families(list(cr, vt))
  expect_equal(length(fams), 7 + 17)
})
# test_that("Unique families single dag", {
#
# })
test_that("Tag families nominal", {
  cr <- families(nbcar())
  fms <- make_families_ids(cr)
  expect_equal(length(fms), 7)
  expect_equal(fms[['persons']], "personsclass")
})
test_that("Acyclic order nominal", {
  n <- nbcar()
  o <- order_acyclic(families(n))
  expect_equal(o, c('class', colnames(car)[1:6]))
})
test_that("Acyclic order a cycle", {
  n <- nbcar()
  n <- add_feature_parents('safety', 'lug_boot', n)
  n <- add_feature_parents('lug_boot', 'doors', n)
  f <- families(n)
  # Introduce a cycle manually; order_acyclic() should return NULL.
  f[['safety']] <- c('safety', 'doors', 'class')
  o <- order_acyclic(f)
  expect_null(o)
})
test_that("Acyclic order 0 node is a DAG", {
  o <- order_acyclic(list())
  # expect_equal(o, get_family_node(character()))
  # Not sure what should happen here...
  expect_equal(o, character())
})
test_that("Find ancestors not in graph nominal", {
  a <- tan_cl('class', car)
  b <- get_ancestors('doors', families(a))
  expect_true(is_perm(b, c('lug_boot', 'safety', 'buying', 'class')))
  b <- get_ancestors('safety', families(a))
  expect_true(is_perm(b, c('buying', 'class')))
  b <- get_ancestors('class', families(a))
  expect_equal(b, character())
})
test_that("Find ancestors", {
  a <- nbcarclass()
  b <- get_ancestors('class', families(a))
  expect_equal(b, character())
})
test_that("Find ancestors not in graph", {
  a <- nbcarclass()
  expect_error(get_ancestors('p', families(a)), "families")
})
|
5c5476003edd9e1e2e5809f27d995512377b9d11
|
615f1caa6c4fbabfb82589bc06ba4e6d5b1d72d2
|
/R/GenerateSubmission.R
|
b394d4acec83e85d992578ebf9436ab8b8a00e4b
|
[] |
no_license
|
tohweizhong/Standard
|
b60fdfa167aa960aa5390f8d2db78a1c0c952d0b
|
661641f66fe8960aa045a959e40c118c40135632
|
refs/heads/master
| 2020-04-15T06:44:27.164254
| 2016-08-23T06:35:12
| 2016-08-23T06:35:12
| 41,418,868
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
GenerateSubmission.R
|
# Generate a submission file for competitions.
#
# predictions:    vector of predicted values, one per row of the sample file.
# filename:       output file name WITHOUT extension; the file is written to
#                 the "submissions/" directory (which must already exist).
# samplefilename: path to the sample-submission CSV whose RESIGNED column is
#                 overwritten with `predictions`.
#
# Returns (invisibly) the path of the file written.
GenerateSubmission <- function(predictions, filename, samplefilename){
  subm <- read.csv(samplefilename)
  # Guard against silently recycling a wrong-length prediction vector.
  stopifnot(length(predictions) == nrow(subm))
  subm$RESIGNED <- predictions
  outfile <- file.path("submissions", paste0(filename, ".csv"))
  write.csv(subm, file = outfile, row.names = FALSE)
  invisible(outfile)
}
|
6df578a21d355b46a679cef59122338a77013414
|
5f98f63fab3cf4480196482b63ae7b023cb22e15
|
/parkruns/ui.R
|
eed9d6f849b30e40585784879d93bd3ce04d9319
|
[] |
no_license
|
padpadpadpad/Shiny
|
29c160de0a8fa89440602079104d962d1b555d8f
|
a0573165ce1348b2af775e0009fde94ea27f4dc4
|
refs/heads/master
| 2021-01-10T06:04:13.486670
| 2017-09-24T14:36:59
| 2017-09-24T14:36:59
| 50,671,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
ui.R
|
# shiny app with leaflet to add graphs to the side
# ui
library(leaflet)
library(shinydashboard)
header <- dashboardHeader(
title = "Parkruns UK"
)
body <- dashboardBody(
fluidRow(
column(width = 8,
box(width = NULL, title = 'Introduction',
collapsible = TRUE,
solidHeader = TRUE,
status = 'success',
p("Since coming back to running after a long-term injury, I took up running a weekly parkrun. My weekly parkrun is Trelissick in South Cornwall and it is rather hilly. However, there appears to be nowhere on the internet where you can compare the different parkrun profiles to get a handle on how hilly YOUR parkrun is! This is my attempt at doing that. All the data was collected using the Strava API using the package rStrava. Hope you enjoy it. Currently I have only managed to get the parkruns from A-M.")),
box(width = NULL, solidHeader = TRUE,
leafletOutput("map", height = 610)
)),
column(width = 4,
box(width = NULL, title = 'Selected parkrun elevation profile:',
plotOutput("elev_plot", height = 240),
status = 'success'
),
box(width = NULL, title = 'How hilly is the selected parkrun?',
status = 'success',
plotOutput("elev_dist", height = 240))
)
)
)
dashboardPage(
skin = 'green',
header,
dashboardSidebar(disable = TRUE),
body
)
|
af7822f2b1360ef6e6382c414c0f19d33844ee45
|
7c5caeca7735d7909c29ee3ed6074ad008320cf0
|
/man/glomApply.Rd
|
082a6090f7226e536ea2334e77132f8e3a04e432
|
[] |
no_license
|
ncss-tech/aqp
|
8063e800ed55458cfa7e74bc7e2ef60ac3b1e6f5
|
c80591ee6fe6f4f08b9ea1a5cd011fc6d02b5c4a
|
refs/heads/master
| 2023-09-02T07:45:34.769566
| 2023-08-31T00:14:22
| 2023-08-31T00:27:14
| 54,595,349
| 47
| 12
| null | 2023-08-17T15:33:59
| 2016-03-23T21:48:50
|
R
|
UTF-8
|
R
| false
| true
| 3,696
|
rd
|
glomApply.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glomApply.R
\name{glomApply}
\alias{glomApply}
\alias{glomApply,SoilProfileCollection-method}
\title{Subset an SPC by applying glom to each profile}
\usage{
glomApply(
object,
.fun = NULL,
truncate = FALSE,
invert = FALSE,
modality = "all",
...,
chunk.size = 100
)
}
\arguments{
\item{object}{A SoilProfileCollection}
\item{.fun}{A function that returns vector with top and bottom depth (\code{z1} and \code{z2} arguments to \code{glom}) for a single profile \code{p} (as passed by \code{profileApply})}
\item{truncate}{Truncate horizon top and bottom depths to \code{[z1, z2]}}
\item{invert}{Truncate horizon top and bottom depths to \code{[z1, z2]} and then invert result?}
\item{modality}{Aggregation method for glom result. Default \code{"all"}: return all horizons; \code{"thickest"}: return (shallowest) thickest horizon}
\item{...}{A set of comma-delimited R expressions that resolve to a transformation to be applied to a single profile e.g \code{glomApply(hzdept = max(hzdept) - hzdept)} like \code{aqp::mutate}}
\item{chunk.size}{Chunk size parameter for \code{profileApply}}
}
\value{
A SoilProfileCollection.
}
\description{
\code{glomApply()} is a function used for subsetting SoilProfileCollection objects by depth. It is a wrapper around \code{glom} which is intended to subset single-profile SPCs based on depth intervals/intersection.
\code{glomApply} works by accepting a function \code{.fun} as argument. This function is used on each profile to process a multi-profile SPC for input to \code{glom} (via \code{profileApply}). For each profile, \code{.fun} returns a 2-length numeric vector of top and bottom boundaries \code{glom} arguments: \code{z1}, \code{z2}.
\code{glomApply} provides the option to generate profile-specific glom depths for a large SPC and handles iteration and rebuilding of a subset SPC object. Optional arguments include: \code{truncate} to cut the boundaries to specified \code{[z1, z2]}; \code{invert} to the portion outside \code{[z1, z2]}, \code{modality} to either \code{"all"} horizons or \code{"thickest"} horizon in the \code{glom} interval. \code{...} are various expressions you can run on the individual profiles using NSE, similar to \code{mutate}.
}
\examples{
# keep examples from using more than 2 cores
data.table::setDTthreads(Sys.getenv("OMP_THREAD_LIMIT", unset = 2))
data(sp3)
depths(sp3) <- id ~ top + bottom
# init horizon designation column in metadata, used by estimateSoilDepth
hzdesgnname(sp3) <- 'name'
# constant depths, whole horizon returns by default
plot(glomApply(sp3, function(p) c(25,100)))
# constant depths, truncated
#(see aqp::trunc for helper function)
plot(glomApply(sp3, function(p) c(25,30), truncate = TRUE))
# constant depths, inverted
plot(glomApply(sp3, function(p) c(25,100), invert = TRUE))
# constant depths, inverted + truncated (same as above)
plot(glomApply(sp3, function(p) c(25,30), invert = TRUE, truncate=TRUE))
# random boundaries in each profile
plot(glomApply(sp3, function(p) round(sort(runif(2, 0, max(sp3))))))
# random boundaries in each profile (truncated)
plot(glomApply(sp3, function(p) round(sort(runif(2, 0, max(sp3)))), truncate = TRUE))
# calculate some boundaries as site level attributes
sp3$glom_top <- profileApply(sp3, getMineralSoilSurfaceDepth)
sp3$glom_bottom <- profileApply(sp3, estimateSoilDepth)
# use site level attributes for glom intervals for each profile
plot(glomApply(sp3, function(p) return(c(p$glom_top, p$glom_bottom))))
}
\seealso{
\code{\link{glom}} \code{\link{trunc}}
\code{\link{glom}} \code{\link{glomApply}}
}
\author{
Andrew G. Brown.
}
|
e7893959abe111334e1d70018dded61d3aadfeaf
|
8808c17cd8fbf1e484a7da06694622815503f013
|
/tests-local/test-local-Gapfill.R
|
84f3c61408db3fa7d56796df028f91e43b6c1421
|
[] |
no_license
|
florafauna/gapfill
|
49002b35a1f498cbeb22f6f78725ab235b32135e
|
c4a49143605573943a79ebe32a12eae8fc2c5635
|
refs/heads/master
| 2023-02-28T20:31:14.805035
| 2021-02-11T17:45:06
| 2021-02-11T17:45:06
| 337,828,891
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,056
|
r
|
test-local-Gapfill.R
|
# Local (non-CRAN) tests for Gapfill(): results are compared against the
# precomputed reference array `ref` loaded from maskstudy_out.rda.
# NOTE(review): context() is deprecated in testthat 3e.
#require(testthat);
library("gapfill", lib.loc = "../lib")
load("maskstudy.rda")
load("maskstudy_out.rda")
# Small 4-d slice of the masked study array used by most tests below.
data <- data_array_masked20[1:15,1:15,1:2,1:6]
context("test-local-Gapfill")
test_that("Gapfill-base",{
  expect_equal(Gapfill(data = data)$fill, ref)
})
test_that("Gapfill-iMax",{
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 14L,
                 nPredict = 1,
                 clipRange = c(0,1),
                 dopar = FALSE)
  expect_equal(out$fill, ref)
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 0L,
                 nPredict = 1,
                 clipRange = c(0, 1),
                 dopar = FALSE)
  expect_equal(out$fill, ref)
  # With a maximal initialSize and iMax = 0 nothing can be filled, so the
  # output equals the input.
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 0L,
                 nPredict = 1,
                 clipRange = c(0, 1),
                 dopar = FALSE,
                 initialSize = c(0L, 0L, 1L, 6L))
  expect_equal(out$fill, data)
})
test_that("Gapfill-nPredict",{
  # NOTE(review): this call indexes data_array_masked20 directly; it is the
  # same slice as `data` above.
  out <- Gapfill(data = data_array_masked20[1:15,1:15,1:2,1:6],
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 2,
                 clipRange = c(0, 1),
                 dopar = FALSE)
  expect_equal(out$fill[,,,,1], ref)
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 3,
                 clipRange = c(0, 1),
                 dopar = FALSE)
  expect_equal(out$fill[,,,,1], ref)
})
test_that("Gapfill-subset",{
  # Fill only every other cell; filled values must match the reference.
  subset <- array(rep(c(TRUE, FALSE), length(data) / c(2, 2)),
                  dim(data))
  out <- Gapfill(data = data, subset = subset)
  expect_equal(out$fill[subset&is.na(data)], ref[subset&is.na(data)])
})
test_that("Gapfill-clipRange",{
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 1,
                 clipRange = c(.5, .55),
                 dopar = FALSE)
  # Manually clip the reference to the same range for comparison.
  alt <- ref
  alt[alt < .5] <- .5
  alt[alt > .55] <- .55
  expect_equal(out$fill, alt)
})
# Re-run the scenarios above with a parallel backend.
# NOTE(review): the skip message says "doPrallel" (typo for doParallel) --
# it is a runtime string, left unchanged here.
# NOTE(review): the backend registered by registerDoParallel(4) is never
# stopped at the end of the test.
test_that("Gapfill dopar",{
  if(!require(doParallel))
    skip("package \"doPrallel\" is not installed.")
  registerDoParallel(4)
  expect_equal(Gapfill(data = data, dopar = TRUE)$fill, ref)
  ## iMax
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 14L,
                 nPredict = 1,
                 clipRange = c(0,1),
                 dopar = TRUE)
  expect_equal(out$fill, ref)
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 0L,
                 nPredict = 1,
                 clipRange = c(0, 1),
                 dopar = TRUE)
  expect_equal(out$fill, ref)
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = 0L,
                 nPredict = 1,
                 clipRange = c(0, 1),
                 dopar = TRUE,
                 initialSize = c(0L, 0L, 1L, 6L))
  expect_equal(out$fill, data)
  #nPredict
  out <- Gapfill(data = data_array_masked20[1:15,1:15,1:2,1:6],
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 2,
                 clipRange = c(0, 1),
                 dopar = TRUE)
  expect_equal(out$fill[,,,,1], ref)
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 3,
                 clipRange = c(0, 1),
                 dopar = TRUE)
  expect_equal(out$fill[,,,,1], ref)
  ## subset
  subset <- array(rep(c(TRUE, FALSE), length(data) / c(2, 2)),
                  dim(data))
  out <- Gapfill(data = data, subset = subset)
  expect_equal(out$fill[subset&is.na(data)], ref[subset&is.na(data)])
  ## clipRange
  out <- Gapfill(data = data,
                 fnSubset = Subset,
                 fnPredict = Predict,
                 subset = "missings",
                 iMax = Inf,
                 nPredict = 1,
                 clipRange = c(.5, .55),
                 dopar = TRUE)
  alt <- ref
  alt[alt < .5] <- .5
  alt[alt > .55] <- .55
  expect_equal(out$fill, alt)
})
## arg verbose is not tested
|
42109d4004cc604b43b3a07788343b3106974eb5
|
829787776c441d00eb220e907a973a9b066d213b
|
/R/yaml.R
|
09ea9b98db838335dd3244d534c9fb1d09a82bef
|
[] |
no_license
|
MomX/Momit
|
76ab0b1959af5ae11d996e89853175a8e9cedea2
|
ead244d7400cae166ece36682185783efd5a5422
|
refs/heads/master
| 2021-04-28T02:36:02.192219
| 2020-05-08T19:08:17
| 2020-05-08T19:08:17
| 122,117,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 746
|
r
|
yaml.R
|
# utils ----
#' Print method for "yaml" objects
#'
#' Writes the raw YAML text to the console with `cat()` and, following
#' the S3 print-method convention, returns its argument invisibly so the
#' object keeps flowing through pipes.
#'
#' @param x A character string carrying class "yaml".
#' @param ... Ignored; kept for S3 method compatibility.
#' @export
print.yaml <- function(x, ...){
  cat(x)
  # Fix: the original returned cat()'s NULL; print methods should
  # return their input invisibly.
  invisible(x)
}
# yaml ----------------------------------------------------
#' yaml wrappers
#'
#' Thin wrappers around `pkg::yaml` base functions for serializing a
#' single coordinate set to YAML text and back.
#'
#' @param x any object
#'
#' @examples
#' (chivas$coo[[1]] %>% export_yaml() -> x)
#' x %>% import_yaml()
#' @export
export_yaml <- function(x){
  # Coerce to a coo_single, serialize to YAML text, then prepend the
  # "yaml" class so print.yaml is dispatched.
  txt <- yaml::as.yaml(Momocs2::coo_single(x))
  class(txt) <- c("yaml", class(txt))
  txt
}
#' @rdname export_yaml
#' @export
import_yaml <- function(x){
  # Parse the YAML text, then rebuild a coo_single tibble from it.
  parsed <- yaml::yaml.load(x)
  Momocs2::coo_single(parsed)
  # NOTE(review): the original carried commented-out attempts to strip
  # row names here ("no idea why rownames in as_tibble doesnt work");
  # row-name handling may still need attention upstream.
}
|
6c0178341890ee4dfe6ee6d06ed09efe8886e9db
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/rpf/man/rpf.rparam.Rd
|
b4f3bec16b64bf73ad8dcb81b7c65f4c23471b51
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 929
|
rd
|
rpf.rparam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{methods}
\name{rpf.rparam}
\alias{rpf.rparam}
\alias{rpf.rparam,rpf.1dim.drm-method}
\alias{rpf.rparam,rpf.mdim.drm-method}
\alias{rpf.rparam,rpf.1dim.graded-method}
\alias{rpf.rparam,rpf.mdim.graded-method}
\alias{rpf.rparam,rpf.mdim.nrm-method}
\alias{rpf.rparam,rpf.mdim.mcm-method}
\alias{rpf.rparam,rpf.1dim.lmp-method}
\alias{rpf.rparam,rpf.1dim.grmp-method}
\alias{rpf.rparam,rpf.1dim.gpcmp-method}
\title{Generates item parameters}
\usage{
rpf.rparam(m, version = 2L)
}
\arguments{
\item{m}{an item model}
\item{version}{the version of random parameters}
}
\value{
item parameters
}
\description{
This function generates random item parameters. The version
argument is available if you are writing a test that depends on
reproducable random parameters (using \code{set.seed}).
}
\examples{
i1 <- rpf.drm()
rpf.rparam(i1)
}
|
30636adff2c4e9be0c53aaf7ceb0c062c1ffb532
|
13102ffdeb61b0e0be9bd981de725cc836bdd1a8
|
/man/downlit-package.Rd
|
40dc65e1f6f4382d930d231de1a6ad2dbaf47009
|
[
"MIT"
] |
permissive
|
jjallaire/downlit
|
e057db8b34fcf6b542ebea7470abd75f93a3db20
|
001acfcc71e22e90e2be0ca0291b54fadc5cfc1f
|
refs/heads/master
| 2022-12-16T05:22:55.445421
| 2020-09-18T18:27:46
| 2020-09-18T18:28:03
| 296,702,927
| 1
| 0
|
NOASSERTION
| 2020-09-18T18:41:15
| 2020-09-18T18:41:14
| null |
UTF-8
|
R
| false
| true
| 1,821
|
rd
|
downlit-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/downlit-package.R
\docType{package}
\name{downlit-package}
\alias{downlit}
\alias{downlit-package}
\title{downlit: Syntax Highlighting and Automatic Linking}
\description{
Syntax highlighting of R code, specifically designed
for the needs of 'RMarkdown' packages like 'pkgdown', 'hugodown', and
'bookdown'. It includes linking of function calls to their documentation
on the web, and automatic translation of ANSI escapes in output to the
equivalent HTML.
}
\section{Options}{
downlit provides a number of options to control the details of the linking.
They are particularly important if you want to generate "local" links.
\itemize{
\item \code{downlit.package}: name of the current package. Determines when
\code{topic_index} and \code{article_index} are used.
\item \code{downlit.topic_index} and \code{downlit.article_index}: named character
vector that maps from topic/article name to path.
\item \code{downlit.rdname}: name of current Rd file being documented (if any);
used to avoid self-links.
\item \code{downlit.attached}: character vector of currently attached R packages.
\item \code{downlit.local_packages}: named character vector providing relative
paths (value) to packages (name) that can be reached with relative links
from the target HTML document.
\item \code{downlit.topic_path} and \code{downlit.article_path}: paths to reference
topics and articles/vignettes relative to the "current" file.
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/r-lib/downlit}
\item Report bugs at \url{https://github.com/r-lib/downlit/issues}
}
}
\author{
\strong{Maintainer}: Hadley Wickham \email{hadley@rstudio.com}
Other contributors:
\itemize{
\item RStudio [copyright holder]
}
}
\keyword{internal}
|
29d283489209cd4237acf83a440844acaadc30bb
|
bb246f2febe8066635a5e3927d4941dd47b45ffe
|
/Monte Carlo Simulation.R
|
79e573f3f6e554ecaccdd816ee8a73158842b74a
|
[] |
no_license
|
fuatsezer/Simulation
|
455f125c3cfa1cbed23f37eb8ee3a11b855d2a26
|
cc65f8bf2cd816fa502c4c116a520c5c28db1276
|
refs/heads/master
| 2022-12-22T15:06:01.985053
| 2020-10-01T09:41:50
| 2020-10-01T09:41:50
| 291,794,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
Monte Carlo Simulation.R
|
# Roll d dice; estimate P(total = k) by Monte Carlo simulation.
#
# d:     number of dice rolled per experiment
# k:     target total
# nreps: number of simulated experiments
#
# Returns the proportion of experiments whose dice total equalled k.
# Relies on roll() (defined below) for each individual die.
probtotk <- function(d, k, nreps) {
  count <- 0
  # do the experiment nreps times -- like doing nreps notebook lines
  # (seq_len() is safe when nreps == 0, unlike 1:nreps)
  for (rep in seq_len(nreps)) {
    total <- 0  # renamed from `sum`, which shadowed base::sum()
    # roll d dice and accumulate their total
    for (j in seq_len(d)) total <- total + roll()
    if (total == k) count <- count + 1
  }
  count / nreps
}
# Simulate one roll of a fair six-sided die: a single draw, uniformly
# distributed over 1, 2, 3, 4, 5, 6.
roll <- function() {
  sample(1:6, 1)
}
# example
probtotk(3,8,1000)
# Bus Ridership: estimate the probability that the bus is empty after
# the last stop. At each stop, every passenger aboard alights
# independently with probability 0.2; then 0, 1 or 2 new passengers
# board with probabilities 0.5, 0.4 and 0.1.
nreps <- 10000
nstops <- 10
count <- 0
for (i in seq_len(nreps)) {
  passengers <- 0
  for (j in seq_len(nstops)) {
    # each passenger aboard at the start of the stop decides to alight
    aboard_now <- passengers
    if (aboard_now > 0) {
      for (k in seq_len(aboard_now)) {
        if (runif(1) < 0.2) {
          passengers <- passengers - 1
        }
      }
    }
    # new arrivals board
    boarding <- sample(0:2, 1, prob = c(0.5, 0.4, 0.1))
    passengers <- passengers + boarding
  }
  if (passengers == 0) count <- count + 1
}
print(count / nreps)
|
bf1b78026472afebf94f2e29bdce8496a29cb3dc
|
7c4ff4c059c519e6c73f19d8023961a02a07899d
|
/data_management/ui.R
|
0ecbf194e773c19692a5c626e24f28153cd14379
|
[
"MIT"
] |
permissive
|
bastianilso/data_managementRShiny
|
64bdfaf01385f671ea5116791e2f80628558be50
|
1839664ba5ec01e22791f34cecbd87974201f8a2
|
refs/heads/main
| 2023-03-27T14:38:28.916026
| 2021-03-30T07:31:38
| 2021-03-30T07:31:38
| 319,631,457
| 0
| 0
|
MIT
| 2020-12-10T13:12:50
| 2020-12-08T12:26:00
|
R
|
UTF-8
|
R
| false
| false
| 937
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinyjs)
# Define UI for application that draws a histogram
# Layout: a title row, an input row (data-selection summary module plus
# upload / change-data buttons), then a tabbed output area and a footer.
# Server-side behavior lives in the companion server.R.
# NOTE(review): `debug=T` uses the reassignable T alias (prefer TRUE),
# and the trailing commas inside fluidRow()/tabPanel() below look like
# leftover placeholders — verify the app still launches cleanly.
shinyUI(fluidPage(
    useShinyjs(debug=T),
    # Input ----------------
    fluidRow(
      column(4, titlePanel("Data Management")),
    ),
    fluidRow(
      column(2, data_selection_summary_UI("input_info")),
      column(3, actionButton("CsvButton","Manual Upload"),
      actionButton("DbButton", "Change Data"))
    ),
    # Output ----------------
    tabsetPanel(id = "dataTypeChooser", type = "tabs",
                tabPanel(value = "Data", id = "Timeline", strong("Data"),
                )),
# Rest of Page ---------------------------------------------------------------
  tags$footer()
)
))
|
2e24fc47dd25addab19f9c51370da2ebba5c532b
|
7c8b2f9a212192910c6d33e10bc3e92786856f81
|
/plot3.R
|
ac8b66c2e676eccea71d7da27184b8502de8d633
|
[] |
no_license
|
Ankit40400/ExData_Plotting1
|
2929cb59f601eee98c94600b87f2c795a4dde735
|
23a8b311afcc138c6ca1236668a304f914bad673
|
refs/heads/master
| 2022-07-01T17:03:13.206557
| 2020-05-09T12:17:18
| 2020-05-09T12:17:18
| 262,518,085
| 0
| 0
| null | 2020-05-09T07:47:03
| 2020-05-09T07:47:02
| null |
UTF-8
|
R
| false
| false
| 770
|
r
|
plot3.R
|
## Load the full dataset. Values are ";"-separated; this dataset codes
## missing values as "?", so declare them as NA up front, and read the
## measurement columns as character (not factor) so the numeric
## conversion below yields values rather than factor level codes.
data <- read.csv("./week1assi/household_power_consumption.txt", sep = ";",
                 na.strings = "?", stringsAsFactors = FALSE)
## converting date into date format
data$Date <- as.Date(data$Date,"%d/%m/%Y")
## Keep only the two days of interest.
data <- data[data$Date == '2007-02-01' | data$Date == '2007-02-02',]
## Coerce the measurement columns (3..9) to numeric.
for(i in 3:9) data[,i] <- as.numeric(data[,i])
## Combine date and time into a single timestamp for the x-axis.
data$DateTime <- strptime(paste(data$Date, data$Time),format = "%Y-%m-%d %H:%M:%S")
## Plot the three sub-metering series into plot3.png.
png(filename = "plot3.png")
with(data, plot(DateTime,Sub_metering_1, type="l", col= "black" , ylab = "Energy sub metering", xlab = ""))
with(data, lines(DateTime,Sub_metering_2,col = "red"))
with(data, lines(DateTime,Sub_metering_3, col= "blue"))
legend("topright", legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
dc35666b5fa49ff7f94c7ab78f928dc663a1683e
|
e428691e5a081014ac1c42190249b3a0f04b3de7
|
/R/weekRecruit.R
|
1ae2bab46b743bd9029f32e7604803fac09228c8
|
[] |
no_license
|
maillot-jaune/dashboard
|
1b5d6100273cb84cd9272dbbb7180ca0a35fa3a0
|
7460772d4ceed395f5b5f983839f287806fe17b5
|
refs/heads/master
| 2016-09-10T09:05:35.883996
| 2015-05-13T08:56:27
| 2015-05-13T08:56:43
| 18,711,075
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,228
|
r
|
weekRecruit.R
|
# Fetch the last week's recruitment counts from MySQL and draw a styled
# line chart (dark background, cyan line, per-point labels) on the
# current graphics device. Returns nothing useful; called for the plot
# side effect.
# NOTE(review): database credentials are hardcoded below — move them to
# a config file or environment variables.
weekRecruit <- function(){
  # library(RMySQL)
	sql <- paste(scan('/home/dash/script/select_weekRecruit.SQL',
#	sql <- paste(scan('/home/stefan/Desktop/select_weekRecruit.SQL',
	                  what = 'character',
	                  quiet = TRUE),
	             collapse = ' ')
	drv <- dbDriver('MySQL')
	con <- dbConnect(drv,
	                 user = 'root',
	                 host = '192.168.1.254',
	                 dbname = 'dbSt',
	                 password = 'four4u')
	dbGetQuery(con, 'SET NAMES "utf8"')
	res <- dbSendQuery(con, sql)
	# fetch(n = -1): retrieve all rows of the result set
	d <- fetch(res, n = -1)
	#mysqlCloseConnection(con)
	dbDisconnect(con)
	# dark plot theme; column 3 of `d` holds the counts, column 2 the labels
	par(bg = '#333333',
	    mar = c(3, 1, 2, 1)   # bottom, left, top, right
	    )
	plot(d[ ,3],
	     type='l',
	     ann = FALSE,
	     axes = FALSE,
	     lwd = 8,
	     col= '#33cccc'
	     )
	# x-axis labelled with the first two characters of column 2
	axis(1,
	     mgp = c(0, 1.5 , 0),   # label, tick-mark label, tick-mark
	     las = 1,
	     cex.axis = 2,
	     font = 2,
	     col = '#cccccc',
	     lwd = 8,
	     col.axis = '#cccccc',
	     at = axTicks(1),
	     labels = substring(d[ ,2], 1, 2)
	     )
	# emphasize each data point with an open circle
	points(d[ ,3],
	       cex = 3,
	       pch = 21,
	       lwd = 8,
	       col = '#33cccc',
	       bg = '#333333',
	       xpd = TRUE
	       )
	# print the value above each of the 7 points
	text(x = 1:7,
	     y = d[ ,3],
	     cex = 2,
	     font = 2,
	     labels = d[ ,3],
	     xpd= TRUE,
	     col = '#cccccc',
	     pos = 3
	     )
}
|
5128b652cc042ad70c54c00fa0e6b2be05f3aea0
|
6805290f5950dadd7bf0df07730fb4a86adda50b
|
/R/ex101.R
|
fb84302af135aaac5ee331bd6e84cc0ac0ae4283
|
[
"Apache-2.0"
] |
permissive
|
Madonahs/Machine-Learning
|
e3e6da8ef6344a09b660c3bc9938cd796c636bb7
|
99107b6abf085dfd89376e0777dbd1a9545c9793
|
refs/heads/master
| 2021-04-09T13:55:13.315482
| 2020-01-22T01:28:47
| 2020-01-22T01:28:47
| 125,732,475
| 44
| 21
|
Apache-2.0
| 2019-08-30T19:00:49
| 2018-03-18T14:20:12
|
Python
|
UTF-8
|
R
| false
| false
| 380
|
r
|
ex101.R
|
---
title: "NN"
author: "Syombua"
date: "April 2, 2018"
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## NN
Multiply $W_1W_2$
$W_1 =\begin{bmatrix} 2 & 0 & 1\\ 0& 1 & 2\\ 3 & 0 & 1\end{bmatrix}$
$W_2 =\begin{bmatrix} 1 & 0 & 1\\ 2& 2 & 1\\ 0 & 3 & 0\end{bmatrix}$
Answer $=\begin{bmatrix} 2 & 3 & 2\\ 2 & 8 & 1\\ 3 & 3 & 3\end{bmatrix}$
|
c327e081e0677eef69c7f55c9b46c5e78d5378e2
|
f4fd87898d4166e51e754512cc7d150400258d79
|
/man/data_correct_with_rules.Rd
|
623272df5239a46704959e8c595d781b01b943ec
|
[] |
no_license
|
rte-antares-rpackage/antaDraft
|
13a7ea300510fd21058bf349cb9ab32c386616be
|
110bd1305a11da0cef8dde5f37f7aa0952ad3882
|
refs/heads/master
| 2021-09-16T00:11:10.131627
| 2018-06-13T13:24:37
| 2018-06-13T13:24:37
| 94,311,409
| 0
| 0
| null | 2018-06-13T09:06:17
| 2017-06-14T09:03:15
|
R
|
UTF-8
|
R
| false
| true
| 847
|
rd
|
data_correct_with_rules.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_correct.R
\name{data_correct_with_rules}
\alias{data_correct_with_rules}
\title{correct datasets}
\usage{
data_correct_with_rules(data, refresh_validation = TRUE)
}
\arguments{
\item{data}{dataset}
\item{refresh_validation}{indicate to run \code{augment_validation}
after corrections.}
}
\description{
correct data based on condition expressed in a yaml file.
}
\examples{
load_dir <- system.file(package = "antaDraft",
"data_sample/load_sample_2017")
load_data <- anta_load(data_dir = load_dir )
load_data <- augment_validation(data = load_data)
head(load_data)
aggregated_db <- agg_data(load_data)
aggregated_db <- augment_validation(aggregated_db)
aggregated_db <- data_correct_with_rules(aggregated_db)
head(aggregated_db)
}
|
aa2e5ee65aa74bea02aff1e9e692b9f199a3a940
|
a2e90f6de453a9b346099a8533b8bd10a80005f7
|
/functions.R
|
8a8d4b157c677b3683d42d8e906590a5df9d5657
|
[
"MIT"
] |
permissive
|
songxxiao/txtnb
|
5ad940d5e9488743949c9f53f3019b32e96ff946
|
7bc8fcbce4d509b972914c367ea8af5d108de688
|
refs/heads/master
| 2022-11-14T19:30:40.831882
| 2022-10-12T00:32:17
| 2022-10-12T00:32:17
| 226,848,277
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,388
|
r
|
functions.R
|
# function define
translation = readRDS("./data/translation.rds")
## translates text into current language
## Translate a vector of UI strings into the currently selected language
## by looking each key up in the global `translation` list (loaded above)
## under the language chosen via `input$language`.
## text:  character vector of translation keys
## input: shiny `input` object providing `input$language`
tr <- function(text, input) {
  # USE.NAMES = FALSE: return a bare vector, not one named by `text`.
  # (FALSE spelled out — F is a reassignable alias.)
  sapply(text, function(s) translation[[s]][[input$language]], USE.NAMES = FALSE)
}
## predict new string's class using machine learning
## param @model a classification algorithm
## param @string a message string to predict if it is a spam
## return Prediction, spam or ham.
## The new message's terms are aligned to the training DTM's vocabulary:
## terms not seen in training are dropped, unseen training terms get 0.
train = readRDS("./data/train.rds")
test_result = function(model,string){ # get result from a string
  # build a one-document corpus and its document-term matrix,
  # preprocessed the same way as the training data
  ms_corpus = VCorpus(VectorSource(string))
  test_dtm = DocumentTermMatrix(ms_corpus, control = 
               list(tolower = T, 
                    removeNumbers = T, 
                    stopwords = T, 
                    removePunctuation = T, 
                    stemming = T)) 
  test_dtm = as.matrix(test_dtm) 
  # take one row of the training DTM as a vocabulary template, zero it
  smmat = train[1,]
  smmat = as.data.frame(smmat)
  smmat[,1] = 0
  smmat = t(smmat)
  # copy frequencies for terms present in both vocabularies
  sp = colnames(smmat) %in% colnames(test_dtm)
  sp2 = colnames(test_dtm) %in% colnames(smmat)
  smmat[,sp] = test_dtm[,sp2]
  result = predict(model,smmat)
  result = as.character(result)
  return(result)
}
## Build a one-row DTM for a new string, aligned to the training
## vocabulary: columns not present in the training data are deleted,
## and training terms absent from the string get frequency 0.
## param string: a message string to convert to a DTM
## return a one-row data.frame in the training vocabulary, with a
##        placeholder label column Y ('xxx').
convert_dtm = function(string){
  ms_corpus = VCorpus(VectorSource(string))
  test_dtm = DocumentTermMatrix(ms_corpus, control = list(tolower = T, 
                                                          removeNumbers = T, 
                                                          stopwords = T, 
                                                          removePunctuation = T, 
                                                          stemming = T)) 
  test_dtm = as.matrix(test_dtm) 
  smmat = train[1,] # smmat: first row of the training-data DTM, used as a vocabulary template
  smmat = as.data.frame(smmat) # matrix --> data.frame
  smmat[,1] = 0 # zero out the template row
  smmat = t(smmat) # transpose
  sp = colnames(smmat) %in% colnames(test_dtm) # training terms that appear in the new document
  sp2 = colnames(test_dtm) %in% colnames(smmat) # new-document terms that exist in the training vocabulary
  smmat[,sp] = test_dtm[,sp2] # copy those terms' frequencies into the template
  smmat = as.data.frame(smmat)
  smmat$Y = 'xxx' # placeholder class label expected downstream
  return(smmat)
}
|
a6ad876d2c6a5af71c0495e5fa8a36d4f22f4f3e
|
efc5c6096121095cadc37acd42e03fadde89eb06
|
/R/model_comparison/models/test_model_cointegration.R
|
08f681642e5e0b57a3b971d0e21167a7fe4082a7
|
[] |
no_license
|
AlexAfanasev/bookdown_thesis
|
b04396739f2495dd60a5e5abfeccd916acaf2545
|
1cfe343618b5fca6e53a8c786cb6792589edc0c7
|
refs/heads/master
| 2023-06-03T11:00:36.998514
| 2021-06-17T16:44:46
| 2021-06-17T16:44:46
| 331,723,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,007
|
r
|
test_model_cointegration.R
|
# SETUP
source(here::here("R", "pd_pomp.R"))
y <- read.csv(here::here("data", "final_dataset.csv"))
source(here::here("R", "covars.R"))
# QUESTION: WHICH MODEL TO CHOSE???
# MODEL 3: MODEL WITH AR AND LAGS & COVARIATES
# State-space model: latent e_lpd follows a discrete-time AR process
# with an error-correction term (tanh(phi)-1)*(...) built from lagged
# covariates, plus first-difference covariate effects a1..a5 and
# Gaussian innovations with sd exp(sigma_u). phi is stored on the
# atanh scale, the sigmas on the log scale (see the Csnippet).
model_3 <- pomp::pomp(
  data = y[, c(1, 2)], times = "time", t0 = 0,
  # initial state: latent level starts at the parameter e_lpd_0
  rinit = function(e_lpd_0, ...) {
    return(c(e_lpd = e_lpd_0))
  },
  rprocess = pomp::discrete_time(
    pomp::Csnippet(
      "
      e_lpd = (
        a0
        + tanh(phi)*e_lpd
        + (tanh(phi)-1)*(
          -beta_0
          - beta_1*l_cr
          - beta_2*l_mys
          - beta_3*l_fr
          - beta_4*l_ms
          - beta_5*l_gdp
        )
        + a1*(cr-l_cr)
        + a2*(mys-l_mys)
        + a3*(fr-l_fr)
        + a4*(ms-l_ms)
        + a5*(gdp-l_gdp)
        + rnorm(0, exp(sigma_u))
      );
      "
    ),
    delta.t = 1
  ),
  # measurement density defined elsewhere (sourced from pd_pomp.R)
  dmeasure = rw_latent_lpd_dmeasure,
  statenames = c("e_lpd"),
  paramnames = c("sigma_u", "sigma_e", "e_lpd_0", "beta_0", "phi",
                 "beta_1", "beta_2", "beta_3", "beta_4", "beta_5",
                 "a0", "a1", "a2", "a3", "a4", "a5"),
  covar = pomp::covariate_table(covars, times = "time"),
  covarnames = colnames(covars[, -1])
)
# Starting parameter vector. Note the transformed scales: sigma_e and
# sigma_u are log standard deviations, phi is atanh-transformed (the
# process model applies exp() / tanh() respectively).
theta <- c(
  e_lpd_0 = 3.5, sigma_e = log(0.05), sigma_u = log(0.05), phi = atanh(0.95),
  beta_0 = 0.0, beta_1 = 0, beta_2 = 0, beta_3 = 0, beta_4 = 0, beta_5 = 0,
  a0 = 0.175, a1 = 0, a2 = 0, a3 = 0, a4 = 0, a5 = 0
)
# Particle MCMC: 10000 MCMC iterations with 1000 particles per
# likelihood evaluation, using an independent Gaussian random-walk
# proposal with sd 0.01 on every parameter.
res <- pomp::pmcmc(
  model_3, Nmcmc = 10000, Np = 1000,
  proposal = pomp::mvn.diag.rw(
    c(e_lpd_0 = 0.01, sigma_e = 0.01, sigma_u = 0.01, phi = 0.01,
      beta_0 = 0.01, beta_1 = 0.01, beta_2 = 0.01,
      beta_3 = 0.01, beta_4 = 0.01, beta_5 = 0.01,
      a0 = 0.01, a1 = 0.01, a2 = 0.01, a3 = 0.01, a4 = 0.01, a5 = 0.01)
  ),
  params = theta
)
|
f35e0a9e37321ab9fe3a64026b1dc56da21d66d3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BioGeoBEARS/examples/getAICc.Rd.R
|
42c2a18e63dd18ae2ac928dd65f1258c102155c4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
getAICc.Rd.R
|
library(BioGeoBEARS)
### Name: getAICc
### Title: Calculate AICc
### Aliases: getAICc

### ** Examples

# Sample-size-corrected AIC for two nested models (2 vs 3 parameters)
# at a comfortable sample size of 20...
LnL = -34.5
numparams = 2
samplesize = 20
getAICc(LnL, numparams, samplesize)

LnL = -20.9
numparams = 3
samplesize = 20
getAICc(LnL, numparams, samplesize)

# ...and the same comparison at a small sample size of 5, where the
# AICc correction penalizes the extra parameter much more heavily.
LnL = -34.5
numparams = 2
samplesize = 5
getAICc(LnL, numparams, samplesize)

LnL = -20.9
numparams = 3
samplesize = 5
getAICc(LnL, numparams, samplesize)
|
1d459a625409da852dd1ba6844fc5afea13e70aa
|
34ddd88340d93fc8a674411dfc02340609f3495f
|
/plot1.R
|
1bb2bbcf94009bb4c062d8a9b7491e56f4b920f1
|
[] |
no_license
|
khemkaiitr/ExData_Plotting1
|
22e75f2290364d56a1523955e04c7faa93b3f172
|
cddd532ba715b941e7b356d53346ed92c9d0e7a1
|
refs/heads/master
| 2021-01-18T19:51:09.268375
| 2014-09-07T14:25:45
| 2014-09-07T14:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
plot1.R
|
source('getData.R') #This line gets the data in working directory
data <- data.frame(data)
## Parse the Date column once (the original re-parsed the whole column
## twice inside a single subset condition), then keep the two target days.
parsed_dates <- as.Date(data$Date, format = '%d/%m/%Y')
ndata <- subset(data, parsed_dates == '2007-02-01' | parsed_dates == '2007-02-02')
# Create the plot: histogram of global active power, saved as a
# 480x480 PNG on a white background.
png(file = "plot1.png", bg = "white", width = 480, height = 480)
myplot <- hist(as.numeric(as.character(ndata$Global_active_power)),
               col = "red", xlab = "Global Active Power (kilowatts)",
               ylab = "Frequency", main = "Global Active Power")
dev.off()
|
5a0d0655c29e5e9d11df6db65e04541c1282da52
|
d35f7a78d956252e22b0c974acf0dec31dfb7d1a
|
/man/npmodelcheck.Rd
|
ff79f08a21ca3dfc019b559ab9da909f5614efab
|
[] |
no_license
|
cran/NonpModelCheck
|
144bc7a51df3a594064f67b0c908e26fff1c3f22
|
ee5a3c1664043959b1f3c6207984afb2f8eb0bfb
|
refs/heads/master
| 2021-11-30T08:50:39.002133
| 2021-09-08T12:10:05
| 2021-09-08T12:10:05
| 17,681,163
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,243
|
rd
|
npmodelcheck.Rd
|
\name{npmodelcheck}
\alias{npmodelcheck}
\title{Hypothesis Testing for Covariate or Group effect in Nonparametric Regression
}
\description{
Tests the significance of a covariate or a group of covariates in a nonparametric regression based on residuals from a local polynomial fit of the remaining covariates using high dimensional one-way ANOVA.
}
\usage{
npmodelcheck(X, Y, ind_test, p = 7, degree.pol = 0, kernel.type =
"epanech", bandwidth = "CV", gridsize = 30, dim.red = c(1, 10))
}
\arguments{
\item{X}{
matrix with observations, rows corresponding to data points and columns correspond to covariates.
}
\item{Y}{
vector of observed responses.}
\item{ind_test}{
index or vector with indices of covariates to be tested.}
\item{p}{
size of the window W_i. See Details.}
\item{degree.pol}{
degree of the polynomial to be used in the local fit.}
\item{kernel.type}{
kernel type, options are "box", "trun.normal", "gaussian", "epanech",\cr
"biweight", "triweight" and "triangular". "trun.normal" is a gaussian kernel truncated between -3 and 3.
}
\item{bandwidth}{
bandwidth, vector or matrix of bandwidths for the local polynomial fit. If a vector of bandwidths, it must correspond to each covariate of X_{-(ind_test)}, that is, the covariates not being tested. If "CV", leave-one-out cross validation with criterion of minimum MSE is performed to select a unique bandwidth that will be used for all dimensions of X_{-(ind_test)}; if "GCV", Generalized Cross Validation is performed to select a unique bandwidth that will be used for all dimensions of X_{-(ind_test)}; if "CV2" leave-one-out cross validation for each covariate of X_{-(ind_test)}; and if "GCV2", GCV for each covariate of X_{-(ind_test)}. It can be a matrix of bandwidths (not to be confused with bandwidth matrix H), where each row is a vector of the same dimension of the columns of X_{-(ind_test)}, representing a bandwidth that changes with the location of estimation for multidimensional X. See \link{localpoly.reg}.
}
\item{gridsize}{
number of possible bandwidths to be searched in cross-validation. If left as \emph{default} 0, gridsize is taken to be 5+as.integer(100/d^3). If cross-validation is not performed, it is ignored.
}
\item{dim.red}{
vector with first element indicating 1 for Sliced Inverse Regression (SIR) and 2 for Supervised Principal Components (SPC); the second element of the vector should be number of slices (if SIR), or number of principal components (if SPC). If 0, no dimension reduction is performed. See Details.}
}
\details{
To test the significance of a single covariate, say X_j, assume that its observations X_{ij}, i = 1,...n, define the factor levels of a one-way ANOVA. To construct the ANOVA, each of these factor levels is augmented by including residuals from nearby covariate values. Specifically, cell "i" is augmented by the values of the residuals corresponding to observations X_{ij} for "i" in W_i (W_i defines the neighborhood, and has size "p"). These residuals are obtained from a local polynomial fit of the remaining covariates X_{-(j)}. Then, the test for the significance of X_j is the test for no factor effects in the high-dimensional one-way ANOVA. See references for further details.
When testing the significance of a group of covariates, the window W_i is defined using the fist supervised principal component (SPC) of the covariates in that group; and the local polynomial fit uses the remaining covariates X_{-(ind_test)}.
Dimension reduction (SIR or SPC) is applied on the remaining covariates (X_{-(ind_test)}), which are used on the local polynomial fit. This reduction is used to moderate the effect of the curse of dimensionality when fitting nonparametric regression for several covariates. For SPC, the supervision is done in the following way: only covariates with p-values (from univariate "npmodelcheck" test with Y) < 0.3 can be selected to compose the principal components. If no covariate has p-value < 0.3, then the most significant covariate will be the only component. For SIR, the size of the effective dimension reduction space is selected automatically through sequential testing (see references for details).
}
\value{
\item{bandwidth}{bandwidth used for the local polynomial fit}
\item{predicted}{vector with the predicted values with the remaining covariates}
\item{p-value}{p-value of the test}
}
\references{
Zambom, A. Z. and Akritas, M. G. (2014). a) Nonparametric Lack-of-fit Testing and Consistent Variable Selection. Statistica Sinica, v. 24, pp. 1837-1858.
Zambom, A. Z. and Akritas, M. G. (2015). b) Significance Testing and Group Variable Selection. Journal of Multivariate Analysis, v. 133, pp. 51-60.
Li, K. C. (1991). Sliced Inverse Regression for Dimension Reduction. Journal of the American Statistical Association, 86, 316-327.
Bair E., Hastie T., Paul D. and Tibshirani R. (2006). Prediction by supervised principal components. Journal of the American Statistical Association, 101, 119-137.
Zambom, A. Z. and Akritas, M. G. (2017) NonpModelCheck: An R Package for Nonparametric Lack-of-Fit Testing and Variable Selection, Journal of Statistical Software, 77(10), 1-28. \cr
doi:10.18637/jss.v077.i10
}
\author{
Adriano Zanin Zambom <adriano.zambom@gmail.com>
}
\seealso{
\code{\link{localpoly.reg}, \link{npvarselec}}
}
\examples{
X = matrix(1,100,5)
X[,1] = rnorm(100)
X[,2] = rnorm(100)
X[,3] = rnorm(100)
X[,4] = rnorm(100)
X[,5] = rnorm(100)
Y = X[,3]^3 + rnorm(100)
npmodelcheck(X, Y, 2, p = 9, degree.pol = 0, kernel.type = "trun.normal",
bandwidth = c(0.85, 0.09, 2.5, 2.2), dim.red = 0) # can use bandwidth = "CV"
# not run: can also try
#npmodelcheck(X, Y, 3, p = 7, degree.pol = 0, kernel.type = "trun.normal",
#bandwidth = "CV", dim.red = c(2,2))
#npmodelcheck(X, Y, c(1,2), p = 11, degree.pol = 0, kernel.type = "box",
#bandwidth = c(0.5, 0.5, 0.5), dim.red = c(1,10))
#npmodelcheck(X, Y, c(3,4), p = 5, degree.pol = 0, kernel.type = "box",
#bandwidth = c(2.8, 2.8, 2.8), dim.red = c(1,20))
#npmodelcheck(rnorm(100), rnorm(100), 1, p = 5, degree.pol = 1,
#kernel.type = "box", bandwidth = .5, dim.red = c(1,20))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
0e87c596fbba9aa621a8551bbfbc7cb3893096fd
|
b74b22cb304aeabf341f2c5f4e83427d1318befa
|
/src/json.montlyedits.R
|
1fc3b7b7d889665a0f78dbfcb1d7d07646c25289
|
[
"MIT"
] |
permissive
|
OCDX/article-quality
|
c93d7fdaa00d930ed39538644b0f5b688463e27b
|
061c0dea0e6f41a4c4ec9e880fac7dadcae71cda
|
refs/heads/master
| 2020-07-30T16:13:26.317313
| 2018-03-12T16:29:36
| 2018-03-12T16:29:36
| 73,626,777
| 5
| 1
| null | 2017-02-27T00:26:17
| 2016-11-13T16:06:09
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 93
|
r
|
json.montlyedits.R
|
# NOTE(review): the original loaded "rjsonlite", which is not a real
# package. The nullValue argument matches RJSONIO::fromJSON, so that is
# almost certainly the intended library.
library(RJSONIO)
# Parse the monthly-edit counts, mapping JSON nulls to NA.
jsonBoy <- fromJSON("women-scientists-monthly-edits.json", nullValue=NA)
|
ed349004fb126735421422ebbcc49cae316c839a
|
c220ab52a3f363c3377088e84df41f902d4b5520
|
/imputation2/imputeTraits/rareDiseases/export_script.R
|
033f3594ccdfa14b774bdeb38d4ff3249cbdc3ca
|
[
"MIT"
] |
permissive
|
trvinh/genomes-io-prj
|
0c00e323d1fb1a28226319c9f98580e820a3dd87
|
6359671245738cfcbfbe88ac9e58a566587d0d3d
|
refs/heads/master
| 2021-07-09T00:46:13.233317
| 2020-11-18T16:21:56
| 2020-11-18T16:21:56
| 213,131,379
| 0
| 2
|
MIT
| 2020-09-17T18:44:51
| 2019-10-06T08:13:31
|
R
|
UTF-8
|
R
| false
| false
| 2,443
|
r
|
export_script.R
|
# Analyze a user's genotypes against a curated list of rare-disease SNPs.
# uniqueID:  user identifier (must have data under outputDir)
# moduleDir: module root containing rareDiseases/SNPs_to_analyze.txt
# outputDir: directory with the user's genotype data
# gtool:     genotype-extraction tool passed through to get_genotypes()
# Returns a list: a human-readable message, the vector of flagged
# conditions, and a findings table.
export_function <- function (uniqueID, moduleDir, outputDir, gtool) {
  if (!file.exists(outputDir)) {
    stop(paste("Did not find a output data with this id", uniqueID))
  }
  table_file <- paste0(moduleDir, "/rareDiseases/SNPs_to_analyze.txt")
  request <- table <- read.table(table_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE, comment.char = "", quote = "")
  # get data
  # deduplicate by SNP id before querying genotypes
  request <- request[!duplicated(request[, "SNP"]), ]
  rownames(request) <- request[, "SNP"]
  genotypes <- get_genotypes(uniqueID = uniqueID, request = request, gtool = gtool, destinationDir = outputDir)
  # remove the iXXXX
  # (internal 23andMe ids have no public rs number)
  table <- table[grep("^i", table[, "SNP"], invert = TRUE), ]
  table <- table[order(table[, "disease_name"]), ]
  # more intelligible comment
  table[grep("^original", table[, "comment"]), "comment"] <- "rs-id from original 23andme"
  # add genotypes in (many will be missing unfortunately)
  table[, "Your genotype"] <- genotypes[table[, "SNP"], ]
  # generate advice
  # genotype strings look like "A/B": position 1 and 3 are the alleles
  table[, "First_allele"] <- substr(table[, "Your genotype"], 1, 1)
  table[, "Second_allele"] <- substr(table[, "Your genotype"], 3, 3)
  table[, "First_carrier"] <- table[, "First_allele"] == table[, "risk_allele"]
  table[, "Second_carrier"] <- table[, "Second_allele"] == table[, "risk_allele"]
  # conditions where at least one allele matches the risk allele
  diseases_of_interest <- unique(table[table[, "Second_carrier"] | table[, "First_carrier"], "disease_name"])
  diseases_of_interest <- diseases_of_interest[!is.na(diseases_of_interest)]
  # phrase the summary message by number of flagged conditions
  if (length(diseases_of_interest) == 0) {
    m <- "There's no particular inherited conditions that you should pay attention to, according to this analysis"
  } else if (length(diseases_of_interest) == 1) {
    m <- paste("According to this analysis, you should pay particular attention to the inherited condition:", diseases_of_interest)
  } else {
    m <- paste("According to this analysis, you should pay particular attention to these", length(diseases_of_interest), "inherited conditions:", paste(diseases_of_interest, collapse = ", "))
  }
  # trim and relabel columns for presentation
  table <- table[, c("SNP", "Your genotype", "risk_allele", "non_risk_allele", "disease_name")]
  colnames(table) <- c("SNP", "Your genotype", "Risk-allele", "Non-Risk-allele", "Inherited Condition")
  output <- list(
    message = m,
    diseases_of_interest = diseases_of_interest,
    all_findings = table)
  return(output)
}
|
ed52971dcaf59ba7b2366b13a55b387f9a7bd034
|
b8dbee4b91b48121bff4329ce2f37c89d8836290
|
/analysis/simulations/simulateTruncatedTranscripts.R
|
d749fbabb3b7fbec2ac05446cc1082d3ca56b882
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/macrophage-tuQTLs
|
18cc359c9052bd0eab45bd27f1c333566fb181d8
|
3ca0b9159f3e5d7d1e0a07cdeadbeb492e361dcb
|
refs/heads/master
| 2021-03-27T19:29:12.456109
| 2019-02-19T13:05:26
| 2019-02-19T13:05:26
| 93,025,290
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,842
|
r
|
simulateTruncatedTranscripts.R
|
# Load tidyverse/Bioconductor dependencies; txrevise and seqUtils are loaded
# from local source checkouts via devtools::load_all().
library("dplyr")
library("BSgenome")
library("devtools")
library("data.table")
library("GenomicRanges")
library("GenomicFeatures")
load_all("../txrevise/")
load_all("../seqUtils/")
library("BSgenome.Hsapiens.NCBI.GRCh38")
# Import transcript annotations (Ensembl 87 / GRCh38): exon and CDS features
# grouped by transcript id.
txdb = loadDb("../../annotations/GRCh38/genes/Ensembl_87/TranscriptDb_GRCh38_87.db")
exons = exonsBy(txdb, by = "tx", use.names=TRUE)
cdss = cdsBy(txdb, by = "tx", use.names=TRUE)
# Import QTL lead p-values and the matching genotype data
salmonella_qtls = readRDS("results/trQTLs/salmonella_trQTL_min_pvalues.rds")
vcf_file = readRDS("results/genotypes/salmonella/imputed.86_samples.sorted.filtered.named.rds")
# Import pairs of transcripts associated with each trQTL
QTL_pairs = readRDS("results/simulations/trQTL_pair_diffs.rds")
# Import transcript metadata; cds_start_NF / cds_end_NF flag transcripts whose
# CDS start / end is "not finished", i.e. the annotation is truncated.
transcript_data = tbl_df(readRDS("../../annotations/GRCh38/genes/Ensembl_87/Homo_sapiens.GRCh38.87.compiled_tx_metadata.rds"))
transcript_meta = dplyr::select(transcript_data, ensembl_transcript_id, cds_start_NF, cds_end_NF)
truncated_transcripts = dplyr::filter(transcript_meta, cds_start_NF == 1 | cds_end_NF == 1)
# Identify trQTL pairs with truncated transcripts; the anti_join keeps a pair
# only once when both of its members are flagged as truncated.
first_truncated = dplyr::semi_join(QTL_pairs, truncated_transcripts, by = c("tx1_id" = "ensembl_transcript_id"))
second_truncated = dplyr::semi_join(QTL_pairs, truncated_transcripts, by = c("tx2_id" = "ensembl_transcript_id"))
second_nonoverlap = dplyr::anti_join(second_truncated, first_truncated, by = "tx1_id")
# Make transcript pairs (full vs truncated transcript) and label which end of
# the CDS carries the not-finished flag.
truncated_pairs = dplyr::bind_rows(dplyr::transmute(first_truncated, full_tx = tx2_id, truncated_tx = tx1_id),
                 dplyr::transmute(second_nonoverlap, full_tx = tx1_id, truncated_tx = tx2_id)) %>%
  dplyr::left_join(truncated_transcripts, by = c("truncated_tx" = "ensembl_transcript_id")) %>%
  dplyr::mutate(truncation = case_when(
    cds_start_NF == 1 & cds_end_NF == 0 ~ "start",
    cds_start_NF == 0 & cds_end_NF == 1 ~ "end",
    cds_start_NF == 1 & cds_end_NF == 1 ~ "both"
  ))
# Calculate sequence differences in basepairs between two transcripts.
#   tx1, tx2 - transcript ids to compare
#   exons    - list of exon GRanges per transcript id
# Returns the base-pair difference summary produced by
# calculateBasepairDifference().
# Fix over the original: the result was assigned to a throwaway local named
# `diff` (shadowing base::diff) and therefore returned invisibly; the pipeline
# result is now returned directly and visibly.
findAllDiffs <- function(tx1, tx2, exons){
  print(paste(tx1, tx2))  # progress logging, one line per pair
  # NOTE(review): "indentifyAddedRemovedRegions" is the actual (misspelled)
  # function name exported by txrevise — do not "correct" it.
  txrevise::indentifyAddedRemovedRegions(tx1, tx2, exons) %>%
    calculateBasepairDifference()
}
# Find all differences between the two transcripts of every pair
tx1_list = as.list(truncated_pairs$full_tx)
tx2_list = as.list(truncated_pairs$truncated_tx)
all_differences = purrr::map2(tx1_list, tx2_list, ~findAllDiffs(.x, .y, exons)) %>% purrr::map_df(identity)
# Merge results back onto the pair annotation and cache to disk
merged_diffs = dplyr::left_join(truncated_pairs, all_differences, by = c("full_tx" = "tx1_id")) %>% tbl_df()
unique_tx_ids = unique(c(merged_diffs$full_tx, merged_diffs$truncated_tx))
saveRDS(merged_diffs, "results/simulations/transcript_diffs.rds")
# Extract metadata for all transcripts that appear in at least one pair
tx_meta = dplyr::filter(transcript_data, ensembl_transcript_id %in% unique_tx_ids) %>%
  txrevise::filterTranscriptMetadata()
saveRDS(tx_meta, "results/simulations/transcript_meta.rds")
tx_meta = readRDS("results/simulations/transcript_meta.rds")
tx_exons = exons[tx_meta$ensembl_transcript_id]
# Not every transcript has a CDS, so subset to ids actually present in cdss
tx_cdss = cdss[intersect(tx_meta$ensembl_transcript_id, names(cdss))]
#Extend transcripts and construct events
# For one gene: extend its (truncated) transcripts via txrevise and construct
# alternative transcription events from the extended exons.
#   gene_id - Ensembl gene id to process
#   tx_meta - transcript metadata (one row per transcript)
#   exons   - GRangesList of exons per transcript
#   cdss    - GRangesList of CDS per transcript
# Returns a list with the extended gene data (`extended_tx`) and the
# constructed alternative events (`alt_events`).
extendTruncatedTx <- function(gene_id, tx_meta, exons, cdss){
  print(gene_id)  # progress logging
  #Extract gene data
  gene_data = txrevise::extractGeneData(gene_id, tx_meta, exons, cdss)
  #Extend transcripts
  gene_extended_tx = txrevise::extendTranscriptsPerGene(gene_data$metadata, gene_data$exons, gene_data$cdss)
  gene_data_ext = txrevise::replaceExtendedTranscripts(gene_data, gene_extended_tx)
  #Construct alt events
  alt_events = txrevise::constructAlternativeEvents(gene_data_ext$exons, gene_id)
  #Return results
  return(list(extended_tx = gene_data_ext, alt_events = alt_events))
}
# Apply the extension to all genes and cache the result
gene_ids = unique(tx_meta$ensembl_gene_id)
gene_ids_list = seqUtils::idVectorToList(gene_ids)
alt_events = purrr::map(gene_ids_list, ~extendTruncatedTx(., tx_meta, tx_exons, tx_cdss))
saveRDS(alt_events, "results/simulations/extended_tx_and_events.rds")
alt_events = readRDS("results/simulations/extended_tx_and_events.rds")
# Recalculate differences after transcripts have been extended
extended_transcripts = purrr::map(alt_events, ~as.list(.$extended_tx$exons)) %>% purrr::flatten()
# Find all differences between the two transcripts of every pair
tx1_list = as.list(truncated_pairs$full_tx)
tx2_list = as.list(truncated_pairs$truncated_tx)
all_differences = purrr::map2(tx1_list, tx2_list, ~findAllDiffs(.x, .y, extended_transcripts)) %>% purrr::map_df(identity)
merged_diffs = dplyr::left_join(truncated_pairs, all_differences, by = c("full_tx" = "tx1_id")) %>% tbl_df() %>%
  dplyr::select(-tx2_id)
# Mark truncation events that have actually been extended.  The truncation
# label is first reset to NA and then re-derived: an end still counts as
# truncated only when the corresponding not-finished flag is set AND the
# extension added no sequence on that side (upstream/downstream diff == 0).
all_diffs = dplyr::mutate(merged_diffs, truncation = NA) %>%
  dplyr::mutate(truncation = ifelse(cds_start_NF == 1 & upstream == 0, "start", truncation)) %>%
  dplyr::mutate(truncation = ifelse(cds_end_NF == 1 & downstream == 0, "end", truncation)) %>%
  dplyr::mutate(truncation = ifelse((cds_start_NF == 1 & upstream == 0) & (cds_end_NF == 1 & downstream == 0), "both", truncation))
saveRDS(all_diffs, "results/simulations/extended_transcript_diffs.rds")
# Extract extended transcripts, restricted to the transcripts of interest
new_exons = purrr::map(alt_events, ~as.list(.$extended_tx$exons)) %>% purrr::flatten()
new_exons = new_exons[names(tx_exons)]
# Sort exon coordinates in transcription order: ascending on the "+" strand,
# descending on the "-" strand.  The strand of the first range is taken as
# the strand of the whole transcript.
sortGrangesByStrand <- function(granges){
  on_minus_strand <- as.character(strand(granges))[1] == "-"
  sort(granges, decreasing = on_minus_strand)
}
# Sort exons of both annotation versions into transcription order
old_exons_sorted = purrr::map(as.list(tx_exons), sortGrangesByStrand)
new_exons_sorted = purrr::map(new_exons, sortGrangesByStrand)
#Extract sequences
old_sequences = BSgenome::getSeq(BSgenome.Hsapiens.NCBI.GRCh38, GRangesList(old_exons_sorted))
new_sequences = BSgenome::getSeq(BSgenome.Hsapiens.NCBI.GRCh38, GRangesList(new_exons_sorted))
# Concatenate per-exon sequences into one sequence per transcript
old_fastas = DNAStringSet(lapply(old_sequences, unlist))[tx_meta$ensembl_transcript_id]
new_fastas = DNAStringSet(lapply(new_sequences, unlist))[tx_meta$ensembl_transcript_id]
#Write transcripts to disk
writeXStringSet(old_fastas, 'results/simulations/original_transcripts.fa')
writeXStringSet(new_fastas, 'results/simulations/extended_transcripts.fa')
#Calculate effect sizes for tuQTLs
tx_meta = readRDS("results/simulations/transcript_meta.rds")
# Lead variant per gene of interest, and its genotypes across the 86 samples
lead_variants = dplyr::filter(salmonella_qtls$Ensembl_87$naive, group_id %in% tx_meta$ensembl_gene_id) %>%
  dplyr::select(group_id, snp_id)
genotype_matrix = vcf_file$genotypes[lead_variants$snp_id,]
genotype_df = as_tibble(genotype_matrix) %>%
  dplyr::mutate(ensembl_gene_id = lead_variants$group_id) %>%
  dplyr::select(ensembl_gene_id, everything())
# Add effect size multiplier per transcript: within a gene the two transcripts
# get opposite directions (+1/-1), and each gene is randomly assigned to be
# differentially used (is_de) with probability ~0.5 (seeded for reproducibility).
set.seed(1)
effect_direction = dplyr::select(tx_meta, ensembl_gene_id, ensembl_transcript_id) %>%
  dplyr::group_by(ensembl_gene_id) %>%
  dplyr::mutate(effect_multiplier = c(1,-1)) %>%
  dplyr::mutate(is_de = round(runif(1,0,1))) %>%
  dplyr::ungroup() %>%
  dplyr::mutate(effect = effect_multiplier*is_de) %>%
  dplyr::select(ensembl_gene_id, ensembl_transcript_id, effect)
# Make effect size (log2 fold-change) matrix: genotype dosage scaled by the
# per-transcript effect, then exponentiated; missing genotypes get FC = 1.
fc_matrix = dplyr::left_join(effect_direction, genotype_df, by = "ensembl_gene_id") %>%
  dplyr::select(-ensembl_gene_id, -ensembl_transcript_id, -effect) %>%
  as.matrix()
row.names(fc_matrix) = effect_direction$ensembl_transcript_id
fc_matrix = effect_direction$effect*fc_matrix
fold_changes = 2^fc_matrix
fold_changes[is.na(fold_changes)] = 1
#Simulate reads from the original transcripts
# ~20x coverage ----> reads per transcript = transcriptlength/readlength * 20
# here all transcripts will have ~equal FPKM
# NOTE(review): simulate_experiment() comes from the polyester package, which
# is never attached in this script — presumably loaded interactively; verify
# before running standalone.
fasta = readDNAStringSet("results/simulations/original_transcripts.fa")
readspertx = round(50 * width(fasta) / 100)
simulate_experiment('results/simulations/original_transcripts.fa', reads_per_transcript=readspertx,
                    num_reps=rep(1,86), fold_changes=fold_changes,
                    outdir='results/simulations/original_transcripts', gzip=TRUE, strand_specific = TRUE)
#Simulate reads from the extended transcripts
fasta = readDNAStringSet("results/simulations/extended_transcripts.fa")
readspertx = round(50 * width(fasta) / 100)
simulate_experiment('results/simulations/extended_transcripts.fa', reads_per_transcript=readspertx,
                    num_reps=rep(1,86), fold_changes=fold_changes,
                    outdir='results/simulations/extended_transcripts', gzip=TRUE, strand_specific = TRUE)
#Construct alternative events
# For one gene: construct alternative transcription events directly from the
# annotated exons, with no transcript-extension step.
#   gene_id - Ensembl gene id to process
#   tx_meta - transcript metadata (one row per transcript)
#   exons   - GRangesList of exons per transcript
#   cdss    - GRangesList of CDS per transcript (unused here, kept for a
#             signature parallel to extendTruncatedTx)
# Returns the alternative events list for the gene.
constructEvents <- function(gene_id, tx_meta, exons, cdss){
  print(gene_id)  # progress logging
  #Extract gene data
  gene_data = txrevise::extractGeneData(gene_id, tx_meta, exons, cdss)
  #Construct alt events
  alt_events = txrevise::constructAlternativeEvents(gene_data$exons, gene_id)
  #Return results
  return(alt_events)
}
# Apply event construction to all genes
gene_ids = unique(tx_meta$ensembl_gene_id)
gene_ids_list = seqUtils::idVectorToList(gene_ids)
alt_events = purrr::map(gene_ids_list, ~constructEvents(., tx_meta, tx_exons, tx_cdss))
# Flatten the per-gene results into a single named event list
alt_events = purrr::flatten(alt_events) %>% flattenAlternativeEvents()
# NOTE(review): "qunatification" typo is preserved on purpose — downstream
# scripts may read this exact path.
saveRDS(alt_events, "results/simulations/qunatification_alt_events.rds")
# Construct event metadata from the event names
event_metadata = txrevise::constructEventMetadata(names(alt_events))
# Make GFF3 annotations, split by event position class
annotations = txrevise::transcriptsToAnnotations(alt_events, event_metadata)
rtracklayer::export.gff3(annotations[annotations$gene_id %like% "upstream"], "results/simulations/txrevise_upstream.gff3")
rtracklayer::export.gff3(annotations[annotations$gene_id %like% "contained"], "results/simulations/txrevise_contained.gff3")
rtracklayer::export.gff3(annotations[annotations$gene_id %like% "downstream"], "results/simulations/txrevise_downstream.gff3")
|
9e37f641cf0445212c0afe158a328a9eb93ea77d
|
d9e22fee62c67886701ffeaca3b433c8a2d81150
|
/score_test_frailty.R
|
124f21097a64e1023ce30e918b355543ad7392d7
|
[] |
no_license
|
ignareyesa/JMstateModel
|
6baeca555e65b61b4564ef8205d09122eb27101b
|
d11bdc4523f21217cb8ed6def08811171405c62f
|
refs/heads/main
| 2023-03-29T05:35:00.365344
| 2021-03-24T09:39:56
| 2021-03-24T09:39:56
| 351,022,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,203
|
r
|
score_test_frailty.R
|
# Score test for a shared frailty term in a joint multi-state model.
#
# Tests H0: sigma2_v = 0 (no frailty variance) against H1: sigma2_v > 0 for a
# joint model fitted with method "spline-PH-GH".  The score and its per-subject
# contributions are evaluated at the H0 fit; the variance of the score is
# obtained from a numeric forward-difference gradient of the H1 log-likelihood
# (deriva_forward_reduced, defined elsewhere in this project).  The p-value is
# one-sided (chi-square(1) mixture under H0, hence the final division by 2).
#
# NOTE(review): relies on JM internals (JM:::gauher, JM:::dmvnorm) and on the
# structure of the fitted `object` (x, y, control, coefficients components).
score_test_frailty <-
  function(object)
  {
    if (object$method != "spline-PH-GH")
      stop("Joint multi-state model is only implemented with 'method = spline-PH-GH'")
    if(options()$digits < 7)
      stop("You have to improve the precision of your machine (with options(digits = k)) or reduce the precision in 'deriva_forward()'")
    transform.value <- object$transform.value
    #### Longitudinal sub-part ####
    method <- object$method
    parameterization <- object$parameterization
    logT <- object$y$logT
    # Gauss-Kronrod node index: GKk quadrature nodes per event time
    id.GK <- rep(seq_along(logT), each = object$control$GKk)
    eta.yx <- as.vector(object$x$X %*% object$coefficients$betas)
    # Gauss-Hermite quadrature grid over the ncz random effects
    GH <- JM:::gauher(object$control$GHk)
    ncz <- ncol(object$x$Z)
    b <- as.matrix(expand.grid(rep(list(GH$x), ncz)))
    k <- nrow(b)
    wGH <- as.matrix(expand.grid(rep(list(GH$w), ncz)))
    wGH <- 2^(ncz/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
    if (object$control$typeGH == "simple") {
      b <- sqrt(2) * t(object$control$inv.chol.VC %*% t(b))
      wGH <- wGH * object$control$det.inv.chol.VC
    }
    else {
      # pseudo-adaptive GH: per-subject rescaling is applied further below
      b <- sqrt(2) * b
      VCdets <- object$control$det.inv.chol.VCs
    }
    dimnames(b) <- NULL
    Ztb <- object$x$Z %*% t(b)
    if (parameterization %in% c("value", "both")) {
      Ztime.b <- object$x$Ztime %*% t(b)
      Zsb <- object$x$Zs %*% t(b)
    }
    if (parameterization %in% c("slope", "both")) {
      if (length(object$derivForm$indRandom) > 1 || object$derivForm$indRandom) {
        Ztime.b.deriv <- object$x$Ztime.deriv %*% t(b[, object$derivForm$indRandom, drop = FALSE])
        Zsb.deriv <- object$x$Zs.deriv %*% t(b[, object$derivForm$indRandom, drop = FALSE])
      }
      else {
        Ztime.b.deriv <- matrix(0, nrow(object$x$Ztime.deriv), k)
        Zsb.deriv <- matrix(0, nrow(object$x$Zs.deriv), k)
      }
    }
    if (object$control$typeGH != "simple") {
      # Pseudo-adaptive GH: re-centre/re-scale the nodes around each subject's
      # empirical Bayes estimates
      lis.b <- vector("list", object$n)
      for (i in 1:object$n) {
        lis.b[[i]] <- t(object$control$inv.chol.VCs[[i]] %*% t(b)) +
          rep(object$control$ranef[i, ], each = k)
        Ztb[object$id == i, ] <- object$x$Z[object$id == i, , drop = FALSE] %*%
          t(lis.b[[i]])
      }
      lis.b2 <- lapply(lis.b, function(b) if (ncz == 1)
        b * b
      else t(apply(b, 1, function(x) x %o% x)))
      for (i in seq_along(logT)) {
        if (parameterization %in% c("value", "both")) {
          bb <- t(lis.b[[object$x$idT[i]]])
          Ztime.b[i, ] <- object$x$Ztime[i, , drop = FALSE] %*% bb
          Zsb[id.GK == i, ] <- object$x$Zs[id.GK == i, ] %*% bb
        }
        if (parameterization %in% c("slope", "both") &&
            (length(object$derivForm$indRandom) > 1 || object$derivForm$indRandom)) {
          bb <- t(lis.b[[object$x$idT[i]]][, object$derivForm$indRandom, drop = FALSE])
          Ztime.b.deriv[i, ] <- object$x$Ztime.deriv[i, , drop = FALSE] %*% bb
          Zsb.deriv[id.GK == i, ] <- object$x$Zs.deriv[id.GK == i, ] %*% bb
        }
      }
    }
    # Longitudinal log-density per subject and quadrature node
    mu.y <- eta.yx + Ztb
    logNorm <- dnorm(object$y$y, mu.y, object$coefficients$sigma, TRUE)
    log.p.yb <- rowsum(logNorm, object$id, reorder = FALSE)
    dimnames(log.p.yb) <- NULL
    #### Survival sub-part ####
    eta.tw1 <- if (!is.null(object$x$W))
      as.vector(object$x$W %*% object$coefficients$gammas)
    else 0
    eta.tw2 <- as.vector(object$x$W2 %*% object$coefficients$gammas.bs)
    if (parameterization %in% c("value", "both")) {
      # Linear predictors at the event times (Y) and at the GK nodes (Ys),
      # optionally passed through a user-supplied transform.value()
      Y <- as.vector(object$x$Xtime %*% object$coefficients$betas) + Ztime.b
      Ys <- as.vector(object$x$Xs %*% object$coefficients$betas) + Zsb
      eta.t <- {
        if (is.null(transform.value))
          eta.tw2 + eta.tw1 + c(object$x$WintF.vl %*% object$coefficients$alpha) * Y
        else eta.tw2 + eta.tw1 + c(object$x$WintF.vl %*% object$coefficients$alpha) * transform.value(Y)
      }
      eta.s <- {
        if (is.null(transform.value))
          c(object$x$Ws.intF.vl %*% object$coefficients$alpha) * Ys
        else c(object$x$Ws.intF.vl %*% object$coefficients$alpha) * transform.value(Ys)
      }
    }
    if (parameterization %in% c("slope", "both")) {
      Y.deriv <- as.vector(object$x$Xtime.deriv %*% object$coefficients$betas[object$derivForm$indFixed]) +
        Ztime.b.deriv
      Ys.deriv <- as.vector(object$x$Xs.deriv %*% object$coefficients$betas[object$derivForm$indFixed]) +
        Zsb.deriv
      eta.t <- if (parameterization == "both")
        eta.t + c(object$x$WintF.sl %*% object$coefficients$Dalpha) * Y.deriv
      else eta.tw2 + eta.tw1 + c(object$x$WintF.sl %*% object$coefficients$Dalpha) * Y.deriv
      eta.s <- if (parameterization == "both")
        eta.s + c(object$x$Ws.intF.sl %*% object$coefficients$Dalpha) * Ys.deriv
      else c(object$x$Ws.intF.sl %*% object$coefficients$Dalpha) * Ys.deriv
    }
    eta.ws <- as.vector(object$x$W2s %*% object$coefficients$gammas.bs)
    #### Cumulative intensities ####
    # Log-hazard at the event times and log-survival via GK integration of the
    # hazard (wk = GK weights, P = half interval lengths)
    log.hazard <- eta.t
    log.survival <- -exp(eta.tw1) * object$x$P * rowsum(object$x$wk * exp(eta.ws + eta.s), id.GK, reorder = FALSE)
    dimnames(log.survival) <- NULL
    log.p.tb <- rowsum(object$y$d * log.hazard + log.survival, object$x$idT, reorder = FALSE)
    #### Random effects ####
    log.p.b <- if (object$control$typeGH == "simple") {
      rep(JM:::dmvnorm(b, rep(0, ncz), object$coefficients$D, TRUE), each = object$n)
    }
    else {
      matrix(JM:::dmvnorm(do.call(rbind, lis.b), rep(0, ncz),
                          object$coefficients$D, TRUE), object$n, k, byrow = TRUE)
    }
    # Joint contribution per subject/node, integrated over the random effects
    p.ytb <- exp(log.p.yb + log.p.tb + log.p.b)
    if (object$control$typeGH != "simple")
      p.ytb <- p.ytb * VCdets
    p.yt <- c(p.ytb %*% wGH)
    # Likelihood function under H1 (frailty variance sigma2_v as first
    # element of `par`); used only to obtain the numeric gradient needed for
    # the variance of the score.
    func_ll_H1 <- function(par){
      # Unpack the parameter vector in the same order it is built below
      sigma2_v <- par[1]
      betas <- par[(1 + 1) : (1 + length(object$coefficients$betas))]
      # NOTE(review): the upper index uses `sigma` found by lexical scoping in
      # the enclosing function (assigned further below, before this closure is
      # first called) — intentional but fragile; confirm before refactoring.
      sigma <- par[(1 + length(c(sigma2_v, betas))) : length(c(sigma2_v, betas, sigma))]
      gammas <- if (!is.null(object$x$W))
        par[(1 + length(c(sigma2_v, betas, sigma))) : length(c(sigma2_v, betas, sigma, object$coefficients$gammas))]
      else NULL
      alpha <- if (parameterization %in% c("value", "both"))
        par[(1 + length(c(sigma2_v, betas, sigma, gammas))) :
              length(c(sigma2_v, betas, sigma, gammas, object$coefficients$alpha))]
      else NULL
      Dalpha <- if (parameterization %in% c("slope", "both"))
        par[(1 + length(c(sigma2_v, betas, sigma, gammas, alpha))) :
              length(c(sigma2_v, betas, sigma, gammas, alpha, object$coefficients$Dalpha))]
      else NULL
      gammas.bs <- par[(1 + length(c(sigma2_v, betas, sigma, gammas, alpha, Dalpha))) :
                         length(c(sigma2_v, betas, sigma, gammas, alpha, Dalpha, object$coefficients$gammas.bs))]
      # NOTE(review): `npar` is also taken from the enclosing scope.
      B <- par[(1 + length(c(sigma2_v, betas, sigma, gammas, alpha, Dalpha, gammas.bs))) :
                 npar]
      # Extension of corpcor::rebuild.cov to a vecteur of correlation parameters as argument
      # (rebuilds a covariance matrix from correlations r and variances v)
      rebuild.cov.vect <- function (r, v) {
        if (any(v < 0))
          stop("Negative variance encountered!")
        sd <- sqrt(v)
        r.mat <- matrix(1 , ncz, ncz)
        r.mat[upper.tri(r.mat)] <- r
        r.mat <- t(r.mat)
        r.mat[upper.tri(r.mat)] <- r
        m <- sweep(sweep(r.mat, 1, sd, "*"), 2, sd, "*")
        return(m)
      }
      # B was packed as (sd's, upper-triangle correlations); rebuild D
      B <- rebuild.cov.vect(B[(ncz+1):(ncz*(ncz+1)/2)], B[seq_len(ncz)]^2)
      # Extra Gauss-Hermite grid to integrate out the (log-normal) frailty u
      GHk_score <- 50
      GH_score <- JM:::gauher(GHk_score)
      u <- GH_score$x
      wGH_u <- GH_score$w
      wGH_u <- 1/sqrt(pi) * wGH_u
      u <- sqrt(2) * u
      ## Survival sub-part ##
      eta.tw1 <- if (!is.null(object$x$W))
        as.vector(object$x$W %*% gammas)
      else 0
      eta.tw2 <- as.vector(object$x$W2 %*% gammas.bs)
      if (parameterization %in% c("value", "both")) {
        Y <- as.vector(object$x$Xtime %*% betas) +
          Ztime.b
        Ys <- as.vector(object$x$Xs %*% betas) + Zsb
        eta.t <- {
          if (is.null(transform.value))
            eta.tw2 + eta.tw1 + c(object$x$WintF.vl %*% alpha) * Y
          else eta.tw2 + eta.tw1 + c(object$x$WintF.vl %*% alpha) * transform.value(Y)
        }
        eta.s <- {
          if (is.null(transform.value))
            c(object$x$Ws.intF.vl %*% alpha) * Ys
          else c(object$x$Ws.intF.vl %*% alpha) * transform.value(Ys)
        }
      }
      if (parameterization %in% c("slope", "both")) {
        Y.deriv <- as.vector(object$x$Xtime.deriv %*% betas[object$derivForm$indFixed]) +
          Ztime.b.deriv
        Ys.deriv <- as.vector(object$x$Xs.deriv %*% betas[object$derivForm$indFixed]) +
          Zsb.deriv
        eta.t <- if (parameterization == "both")
          eta.t + c(object$x$WintF.sl %*% Dalpha) * Y.deriv
        else eta.tw2 + eta.tw1 + c(object$x$WintF.sl %*% Dalpha) * Y.deriv
        eta.s <- if (parameterization == "both")
          eta.s + c(object$x$Ws.intF.sl %*% Dalpha) * Ys.deriv
        else c(object$x$Ws.intF.sl %*% Dalpha) * Ys.deriv
      }
      eta.ws <- as.vector(object$x$W2s %*% gammas.bs)
      # Hazard/survival with the frailty term sqrt(sigma2_v)*u added on the
      # log scale, expanded to GHk_score frailty nodes per event time
      log.hazard.u <- apply(eta.t, 2, function(x) rep(x, each = GHk_score)) + rep(sqrt(sigma2_v) * u, length(logT))
      log.survival.u <- -exp(rep(eta.tw1, each = GHk_score)) *
        rep(exp(sqrt(sigma2_v) * u), length(logT)) *
        rep(object$x$P, each = GHk_score) *
        apply(rowsum(object$x$wk * exp(eta.ws + eta.s), id.GK, reorder = FALSE), 2, function(x) rep(x, each = GHk_score))
      dimnames(log.survival.u) <- NULL
      # Index mapping each expanded row back to its subject/frailty-node block
      id.GHu <- c(apply(matrix(c(GHk_score * (object$x$idT - 1) + 1,
                                 GHk_score * (object$x$idT - 1) + 1 + GHk_score - 1), ncol = 2), 1,
                        function(x) seq(from = x[1], to = x[2])))
      log.p.tbu <- rowsum(rep(object$y$d, each = GHk_score) * log.hazard.u + log.survival.u,
                          id.GHu, reorder = FALSE)
      p.tbu <- exp(log.p.tbu)
      # Integrate the frailty out with the wGH_u weights, per subject
      p.tb <- matrix( , nrow = object$n, ncol = object$control$GHk^ncz)
      for (i in seq_len(object$n)){
        p.tb[i, ] <- c(t(p.tbu[(GHk_score * (i - 1) + 1) : (GHk_score*i), ]) %*% wGH_u)
      }
      log.p.tb <- log(p.tb)
      ## Random effects sub-part ##
      log.p.b <- if (object$control$typeGH == "simple") {
        rep(JM:::dmvnorm(b, rep(0, ncz), B, TRUE), each = object$n)
      }
      else {
        matrix(JM:::dmvnorm(do.call(rbind, lis.b), rep(0, ncz),
                            B, TRUE), object$n, k, byrow = TRUE)
      }
      ## Longitudinal sub-part ##
      eta.yx <- as.vector(object$x$X %*% betas)
      mu.y <- eta.yx + Ztb
      logNorm <- dnorm(object$y$y, mu.y, sigma, TRUE)
      log.p.yb <- rowsum(logNorm, object$id, reorder = FALSE)
      dimnames(log.p.yb) <- NULL
      p.ytb <- exp(log.p.yb + log.p.tb + log.p.b)
      if (object$control$typeGH != "simple")
        p.ytb <- p.ytb * VCdets
      p.yt <- c(p.ytb %*% wGH)
      return(sum(log(p.yt)))
    }
    # Parameter values under H0 (sigma2_v = 0), at the fitted estimates; the
    # covariance D is packed as (sd's, upper-triangle correlations)
    sigma2_v <- 0
    betas <- object$coefficients$betas
    sigma <- object$coefficients$sigma
    gammas <- object$coefficients$gammas
    gammas.bs <- object$coefficients$gammas.bs
    alpha <- object$coefficients$alpha
    Dalpha <- object$coefficients$Dalpha
    B <- c(sqrt(diag(object$coefficients$D)),
           cov2cor(object$coefficients$D)[upper.tri(cov2cor(object$coefficients$D))])
    par <- c(sigma2_v, betas, sigma, gammas, alpha, Dalpha, gammas.bs, B)
    npar <- length(par)
    # Numeric gradient of the H1 log-likelihood at the H0 parameter values
    deriv_H1 <- deriva_forward_reduced(par, func_ll_H1)
    #### Test Statistic ####
    # Per-subject score contributions for sigma2_v, integrated over the
    # random effects (d + log S are the per-subject martingale-type residuals)
    U_i <- 1/(2*p.yt) *
      c( (p.ytb *
            (rowsum(object$y$d + log.survival, group = object$x$idT, reorder = FALSE)^2 +
               rowsum(log.survival, group = object$x$idT, reorder = FALSE))) %*% wGH)
    U <- sum(U_i)
    # Effective variance of the score, correcting for estimation of the
    # remaining parameters via the model's vcov
    var_U <- c(deriv_H1$v[1] - deriv_H1$v[2:length(par)] %*% vcov(object) %*% deriv_H1$v[2:length(par)])
    # One-sided p-value: 0.5 * chi-square(1) tail of the (truncated) statistic
    pval <- pchisq(pmax(0,U)^2 / var_U, df = 1, lower.tail = F)/2
    list(U_i = U_i,
         U = U,
         var_U = var_U,
         stat = pmax(0,U)^2 / var_U,
         pval = pval,
         conv = object$convergence)
  }
|
f4673f9061a69b196bc2fbf8a744d8003e8e33b3
|
1ce6dbd45ea6d051008b0d1bfaef500aa696cd7e
|
/R/class_runtime.R
|
e4a435ecdc43868713ca6b6c45c53b09e2665d5a
|
[
"MIT"
] |
permissive
|
billdenney/targets
|
b6515ffd2cbd9c95545385ff6253a2b611c7300e
|
d881af68925f33283dc4945d9cbc76cd2d5209a9
|
refs/heads/main
| 2023-08-14T16:59:03.341340
| 2021-09-24T12:44:07
| 2021-09-24T12:44:07
| 406,497,687
| 0
| 0
|
NOASSERTION
| 2021-09-14T19:32:35
| 2021-09-14T19:32:35
| null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
class_runtime.R
|
# Convenience constructor for the runtime-state object; currently a plain
# pass-through to runtime_new() with the same defaults.
runtime_init <- function(target = NULL, frames = NULL, interactive = NULL) {
  runtime_new(target = target, frames = frames, interactive = interactive)
}
# Instantiate the R6 runtime-state object (see runtime_class below).
runtime_new <- function(target = NULL, frames = NULL, interactive = NULL) {
  runtime_class$new(target = target, frames = frames, interactive = interactive)
}
# Mutable holder of package runtime state: the target currently in scope, the
# frames object, and an interactivity flag.  `class = FALSE` means instances
# carry no S3 class attribute; the generator is non-portable and non-cloneable.
# Each field has exists_*/get_*/set_*/unset_* accessors; NULL means "unset".
runtime_class <- R6::R6Class(
  classname = "tar_runtime",
  class = FALSE,
  portable = FALSE,
  cloneable = FALSE,
  public = list(
    # Current target object (validated as class "tar_target"), or NULL.
    target = NULL,
    # Frames object (validated by frames_validate()), or NULL.
    frames = NULL,
    # Scalar logical flag, or NULL.
    interactive = NULL,
    initialize = function(
      target = NULL,
      frames = NULL,
      interactive = NULL
    ) {
      self$target <- target
      self$frames <- frames
      self$interactive <- interactive
    },
    # exists_*: TRUE when the corresponding field is set (non-NULL).
    exists_target = function() {
      !is.null(self$target)
    },
    exists_frames = function() {
      !is.null(self$frames)
    },
    exists_interactive = function() {
      !is.null(self$interactive)
    },
    # get_*: return the field (possibly NULL).
    get_target = function() {
      self$target
    },
    get_frames = function() {
      self$frames
    },
    get_interactive = function() {
      self$interactive
    },
    # set_*: overwrite the field.
    set_target = function(target) {
      self$target <- target
    },
    set_frames = function(frames) {
      self$frames <- frames
    },
    set_interactive = function(interactive) {
      self$interactive <- interactive
    },
    # unset_*: clear the field back to NULL.
    unset_target = function() {
      self$target <- NULL
    },
    unset_frames = function() {
      self$frames <- NULL
    },
    unset_interactive = function() {
      self$interactive <- NULL
    },
    # Assert invariants on whichever fields are set; tar_assert_* and
    # frames_validate() are package-internal helpers.
    validate = function() {
      if (!is.null(self$target)) {
        tar_assert_inherits(self$target, "tar_target")
        target_validate(self$target)
      }
      if (!is.null(self$frames)) {
        frames_validate(self$frames)
      }
      if (!is.null(self$interactive)) {
        tar_assert_scalar(self$interactive)
        tar_assert_lgl(self$interactive)
      }
    }
  )
)
# Package-level instance created when this file is sourced; starts empty.
tar_runtime <- runtime_init()
|
e8fa86443427e8ca4a918d163536809eac48cfcd
|
48d2c8117c4604e32bef0752f16447641bd82718
|
/electability/R/TweakAndSummarize.R
|
f74fbe52b043ae64a80c8600ca7fa21ebf1fb897
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
tmcintee/electability-2020-pub
|
b3334cf5ada9c74a43f5cdc9bbb5742cfef290d1
|
5dd97241c7551633890020b4a5ce92eff78dc468
|
refs/heads/master
| 2020-12-13T09:14:24.949548
| 2020-01-16T17:23:56
| 2020-01-16T17:23:56
| 234,372,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
TweakAndSummarize.R
|
# Apply TweakModel() to a model sheet, then collapse the result to a one-row
# summary: popular-vote shares and electoral-vote totals, labelled with the
# candidate and model names.
TweakAndSummarize <- function(model_sheet,
                              region_pairs,
                              candidate_name,
                              model_name,
                              region_weight = 0.25,
                              national_weight = 0.25,
                              update_weight = 100,
                              update_size = 0.05)
{
  tweaked <- TweakModel(model_sheet, region_pairs, region_weight,
                        national_weight, update_weight, update_size)
  summarise(tweaked,
            Candidate = candidate_name,
            Model = model_name,
            Votes.For = sum(Democratic)/sum(Total),
            Votes.Against = sum(Republican)/sum(Total),
            Electoral.Votes.For = sum(Democratic.EV),
            Electoral.Votes.Against = sum(Republican.EV))
}
# Collapse an election sheet to a one-row summary of vote shares and
# electoral-vote totals.
#
# Fix over the original: `candidate_name` and `model_name` were free
# variables silently resolved from the function's enclosing (typically
# global) environment.  They are now explicit optional arguments; when left
# NULL the old lookup is reproduced (search starts at the defining
# environment), so existing one-argument call sites keep working.
SummarizeElecsheet <- function(elecsheet, candidate_name = NULL, model_name = NULL)
{
  if (is.null(candidate_name)) {
    candidate_name <- get("candidate_name", envir = parent.env(environment()))
  }
  if (is.null(model_name)) {
    model_name <- get("model_name", envir = parent.env(environment()))
  }
  elecsheet <- elecsheet %>% summarise(Candidate = candidate_name,
                                       Model = model_name,
                                       Votes.For = sum(Democratic)/sum(Total),
                                       Votes.Against = sum(Republican)/sum(Total),
                                       Electoral.Votes.For = sum(Democratic.EV),
                                       Electoral.Votes.Against = sum(Republican.EV))
  return(elecsheet)
}
|
ae61f68be62662cce0b00d0e06577fa0567c610b
|
120de1ae49850f8212efc39ab9fa266f175dc4c6
|
/man/nameTo.Rd
|
b4fc72719e3b93d0054965bc9273f77110187361
|
[] |
no_license
|
vsrimurthy/EPFR
|
168aed47aa2c48c98be82e3d8c833d89e1d11e04
|
544471a8d0cf75c7d65a195b9f6e95d6b1d6800f
|
refs/heads/master
| 2023-08-02T14:50:25.754990
| 2023-07-29T13:56:39
| 2023-07-29T13:56:39
| 118,918,801
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
nameTo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{nameTo}
\alias{nameTo}
\title{nameTo}
\usage{
nameTo(x, y)
}
\arguments{
\item{x}{= a logical vector/matrix/dataframe without NA's}
\item{y}{= a logical value, isomekic vector or isomekic isoplatic matrix/df without NA's}
}
\description{
pct name turnover between <x> and <y> if <x> is a vector or their rows otherwise
}
\keyword{nameTo}
|
ab71675a36bf081b3f63dd2e4daa6ea49ab865a6
|
dc172ad3471526c167d1d41a97c3ce8d0aa93395
|
/man/ce.Rd
|
5f37de4f3d7ceffaa3a095881a35c99a796a4414
|
[
"MIT"
] |
permissive
|
mcsiple/mmrefpoints
|
a1140f78d7e8e3819709bde70bbe77ef3e4a30bf
|
eec714388077c6905bc1c13f0c95ec5f4a5e974b
|
refs/heads/master
| 2023-04-14T07:05:05.067566
| 2022-06-14T05:02:17
| 2022-06-14T05:02:17
| 344,858,328
| 3
| 5
|
NOASSERTION
| 2022-03-10T23:37:35
| 2021-03-05T15:50:20
|
HTML
|
UTF-8
|
R
| false
| true
| 1,731
|
rd
|
ce.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/07_ce.R
\name{ce}
\alias{ce}
\title{Calculate normalized sustainable yield}
\usage{
ce(
S0 = NA,
S1plus = NA,
AgeMat = NA,
nages = NA,
z = NA,
E = NA,
A = NA,
P0 = NA,
N0 = NA
)
}
\arguments{
\item{S0}{Calf/pup survival, a numeric value between 0 and 1}
\item{S1plus}{1+ survival rate for animals age 1 year and older, a numeric value between 0 and 1}
\item{AgeMat}{Age at maturity (= age at first parturition - 1). Must be less than \code{nages}}
\item{nages}{"maximum" age, treated as the plus group age. The plus group age can be set equal to the age at maturity +2 years without losing accuracy.}
\item{z}{degree of compensation}
\item{E}{bycatch mortality rate (applies to 1+ numbers)}
\item{A}{the Pella-Tomlinson resilience parameter ((fmax - f0)/f0)}
\item{P0}{unfished number-per-recruit - 1+ adults}
\item{N0}{unfished numbers-per-recruit - mature adults}
}
\value{
a single value of normalized yield for exploitation rate E
}
\description{
This function calculates the normalized sustainable yield, which is used to find MNPL (the population size at which productivity is maximized).
}
\examples{
# Set parameters
S0.w = 0.5; S1plus.w = 0.944; nages.w = 25; AgeMat.w = 18
# Get number of individuals per recruit in terms of mature individuals (N0.w)
NPROut <- npr(S0 = S0.w, S1plus = S1plus.w, nages = nages.w, AgeMat = AgeMat.w, E = 0)
N0 <- NPROut$npr # mature numbers per recruit
# Get number of individuals per recruit in terms of individuals aged 1+ (P0.w)
P0 <- NPROut$P1r # 1+ nums per recruit
ce(S0 = S0.w, S1plus = S1plus.w,
nages = nages.w,
AgeMat = AgeMat.w,
E=0.01, z=2.39,A=2, N0 = N0, P0 = P0)
}
|
03673a77c09775f5d0298e96d63a762a2ccb3246
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/reshape/examples/round-any-u2.rd.R
|
43de56fe41b5ec9ed15495245dcdd45ed12f310f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
round-any-u2.rd.R
|
library(reshape)
### Name: round_any
### Title: Round any
### Aliases: round_any
### Keywords: internal
### ** Examples
# round_any(x, accuracy, f) rounds x to the nearest multiple of `accuracy`
# using rounding function f (round by default; floor/ceiling shown below).
round_any(135, 10)
round_any(135, 100)
round_any(135, 25)
round_any(135, 10, floor)
round_any(135, 100, floor)
round_any(135, 25, floor)
round_any(135, 10, ceiling)
round_any(135, 100, ceiling)
round_any(135, 25, ceiling)
|
a85560d1f27f9c3d385629474f423da3a2ec8c65
|
d5e4d8cc13151bf546727528ccf6849e2b43dc80
|
/Assignment 2/R for assignment 2.R
|
ccf66c341a73e4b8c10805a59f8d7fe307d4ad2f
|
[] |
no_license
|
jason2133/categorical_data_analysis
|
d369aeaee5b72cbfeb7da036ce7e413790d9c308
|
2e8bcf90aff80634fcd9ed30674c13f205058396
|
refs/heads/master
| 2022-01-19T11:43:36.241734
| 2022-01-15T19:02:47
| 2022-01-15T19:02:47
| 175,217,150
| 1
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 3,377
|
r
|
R for assignment 2.R
|
# Assignment 2 — categorical data analysis exercises.
# NOTE(review): install.packages() calls should normally not live in a script
# that is re-run; kept as-is from the original.
install.packages("PropCIs")
install.packages("epitools")
install.packages("vcd")
install.packages("vcdExtra")
install.packages("DescTools")
# Print - Table 2.3 (2x2 counts)
table23 <- matrix(c(189, 104, 10845, 10933), ncol = 2)
table23
chisq.test(table23) #Pearson's chi-square test
library(epitools)
# NOTE(review): the next line is duplicated verbatim in the original.
riskratio(matrix(c(189, 104, 10845, 10933), ncol=2), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Relative Risk(RR)
riskratio(matrix(c(189, 104, 10845, 10933), ncol=2), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Relative Risk(RR)
# Num 1 - a - Odds Ratio (OR)
# OR and CI of OR
oddsratio(matrix(c(21, 8, 2, 9), ncol=2), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Odds Ratio(OR)
# Book - Table 2.5 (2x3 counts)
table25 <- matrix(c(762, 484, 327, 239, 468, 477), ncol = 3)
table25
chisq.test(table25)
library(epitools)
library(PropCIs)
library(DescTools)
GTest(table25) #Likelihood-ratio(LR) test
# Assignment problem 1 (Num 1)
num1 <- matrix(c(21, 8, 2, 9), ncol = 2)
num1
# Num 1 - a - proportions(D)
# D and CI of D
prop.test(num1, conf.level=0.95, correct=FALSE) #Wald test and CI for diff props(D)
# Num 1 - a - Relative Risk (RR)
# RR and CI of RR
library(epitools)
riskratio(c(9, 8, 2, 21), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Relative Risk(RR)
riskratio(c(8, 9, 21, 2), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Relative Risk(RR)
oddsratio(c(21, 2, 8, 9), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for OR
library(PropCIs)
diffscoreci(21, 23, 8, 17, conf.level=0.95) #Score CI for D
riskscoreci(21, 23, 8, 17, conf.level=0.95) #Score CI for RR
orscoreci(21, 23, 8, 17, conf.level=0.95) #Score CI for OR
# Num 1 - a - Odds Ratio (OR)
# OR and CI of OR
oddsratio(matrix(c(21, 8, 2, 9), ncol=2), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for Odds Ratio(OR)
chisq.test(num1) #Pearson's chi-square test
# Fixed: the original line was `lr.test(# What is here?)` — an unfinished
# placeholder whose closing `)` sat inside the comment, leaving an unbalanced
# call that made the rest of the file unparseable.  It is commented out; the
# likelihood-ratio test is already provided by DescTools::GTest() above.
# lr.test()
# Num 1 again
# Other analysis of the problem-1 table (Surgery vs Radiation Therapy)
num1again <- matrix(c(21, 8, 2, 9), ncol=2)
num1again <- data.frame(num1again, row.names=c("Surgery", "Radiation Therapy"))
colnames(num1again) <- c("Controlled", "Not Controlled")
num1again
chisq.test(num1again) #Pearson's chi-square test
# Fixed: the original referenced `GenderGap2` here, which is only defined in
# the "Num2 Again" section below; this section analyses `num1again`.
stdres <- chisq.test(num1again)$stdres #standardized residuals
stdres
library(DescTools)
GTest(num1again) #Likelihood-ratio(LR) test
###Analysis of tea data
fisher.test(num1) #Fisher's exact test
fisher.test(num1, alternative="greater") #Fisher's exact test (one-sided)
# Num 2
num2 <- matrix(c(9, 44, 13, 10, 11, 52, 23, 22, 9, 41, 12, 27), ncol = 3)
num2
chisq.test(num2)
library(DescTools)
GTest(num2, correct="none") #Likelihood-ratio(LR) test
# Num2 Again
#Other political gender gap data
numm2 <- matrix(c(762, 484, 327, 239, 468, 477), ncol=3)
# Fixed: the original called data.frame(GenderGap2, ...) before GenderGap2
# existed; the matrix built on the previous line (numm2) was clearly intended.
GenderGap2 <- data.frame(numm2, row.names=c("female", "male"))
colnames(GenderGap2) <- c("Dem", "Rep", "Ind")
GenderGap2
chisq.test(GenderGap2) #Pearson's chi-square test
stdres <- chisq.test(GenderGap2)$stdres #standardized residuals
stdres
library(DescTools)
GTest(GenderGap2) #Likelihood-ratio(LR) test
# > oddsratio(c(21, 2, 8, 9), method="wald", conf=0.95, correct=FALSE) #Wald test and CI for OR
oddsratio(c(9, 11, 9, 44, 52, 41, 13, 23, 12, 10, 22, 27), method='wald', conf=0.95, correct=FALSE)
|
f338880a4d5a7321be714d4dffa9d2539ff61281
|
66e04f24259a07363ad8da7cd47872f75abbaea0
|
/Data Visualization with ggplot2 (Part 1)/Chapter 2-Data/2.R
|
d944bd22b52bd673691dfddacbe8f662c19d1cc2
|
[
"MIT"
] |
permissive
|
artileda/Datacamp-Data-Scientist-with-R-2019
|
19d64729a691880228f5a18994ad7b58d3e7b40e
|
a8b3f8f64cc5756add7ec5cae0e332101cb00bd9
|
refs/heads/master
| 2022-02-24T04:18:28.860980
| 2019-08-28T04:35:32
| 2019-08-28T04:35:32
| 325,043,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,020
|
r
|
2.R
|
# Datacamp exercise: base-graphics scatterplot of mpg vs wt, with one
# overall linear model (dashed line) plus one model per cylinder group,
# and a legend keyed to the group colours.

# Fit the overall linear model of mpg on weight.
carModel <- lm(mpg ~ wt, data = mtcars)

# Basic plot: colour points by number of cylinders (as a factor).
mtcars$cyl <- as.factor(mtcars$cyl)
plot(mtcars$wt, mtcars$mpg, col = mtcars$cyl)

# Overlay the overall model as a dashed line (lty = 2). Must be run in the
# same device as the plot() call above.
abline(carModel, lty = 2)

# Redraw the base scatterplot before adding the per-group lines.
plot(mtcars$wt, mtcars$mpg, col = mtcars$cyl)

# BUG FIX: the original iterated over mtcars$cyl itself, i.e. once per ROW
# (32 iterations), refitting and redrawing the same three lines repeatedly
# and printing a list of NULLs. Iterating over the factor LEVELS fits each
# group's model exactly once; the level's integer index is used as the
# colour so the lines match the legend (col = 1:3) below. invisible()
# suppresses the NULL printing the original comments complained about.
cyl_levels <- levels(mtcars$cyl)
invisible(lapply(seq_along(cyl_levels), function(i) {
  abline(lm(mpg ~ wt, mtcars, subset = (cyl == cyl_levels[i])), col = i)
}))

# Draw the legend for the per-cylinder colours.
legend(x = 5, y = 33, legend = levels(mtcars$cyl),
       col = 1:3, pch = 1, bty = "n")
|
026e7dd13c234f8f46e13810286f02e47da4480d
|
7b1c077c809ffdca5b4c199d55dcfab24a8dd59e
|
/hydroanalyzer/ui.R
|
4b3671c3591f8a6c147e91a8227e075659f376bf
|
[
"MIT"
] |
permissive
|
khaors/binder-hydroanalyzer
|
8b7cab50b2c635b321f76d0f34dff054cfeac5c4
|
273bfd9bd1b959f6b496b99ab06e9394933f3851
|
refs/heads/main
| 2023-03-11T00:25:26.721146
| 2021-03-02T23:56:55
| 2021-03-02T23:56:55
| 343,922,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,201
|
r
|
ui.R
|
#
# HydroAnalyzer-GUI user-interface definition (Shiny).
#
# Layout: classic pageWithSidebar -- a header, an 'About' sidebar carrying
# the package description and references, and a main tabset with one tab
# per analysis step: data import, exploratory data analysis, consistency,
# missing-data filling, watershed, spatial analysis, water budget, base
# flow separation, frequency analysis and regionalization.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(DT)
# Top-level UI object returned to Shiny (replaces the stock template
# comment about "drawing a histogram", which did not describe this app).
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("HydroAnalyzer-GUI: Shiny Interface (v0.1)"),
  #### Panel 'About' (right hand side)
  ##############################################################################
  sidebarPanel(
    imageOutput("uptc.logo", inline=TRUE),
    #
    p(HTML("<h5>This is HydroAnalyzer-GUI, the Shiny interface for analysis and
           evaluation of hydrological data in <strong>R</strong>.</h5>
           This application can be used for the Exploratory Data Analysis of
           hydrological variables (precipitation, discharge, temperature), consistency
           analysis, watershed analysis (watershed delineation, river network extraction),
           spatial analysis of hydrological variables (spatial correlation), water budget
           calculation (using direct, abdc model and long term approach), frequency
           analysis (return period estimation using maximum and minimum values), and
           hydrologic regionalization (using the regression and L-moments approaches).")),
    p(HTML('This package was developed by Oscar Garcia-Cabrejo, School of Geological
           Engineering, Universidad Pedagogica y Tecnologica de Colombia, Sogamoso,
           Boyaca, Colombia. Its source code is freely available on
           <a href="http://www.github.com/khaors/hydroanalizer">github</a>.')),
    br(),
    h3('References:'),
    p(HTML('<li> <span style="font-variant: small-caps;">V. T. Chow, Maidment, D. &
           Mays, L.</span> (1988).<I>Applied Hydrology</I>.
           McGraw-Hill Publishing Company; International Ed edition .</li>
           <li> <span style="font-variant: small-caps;">Maidment, D.</span>(1993).
           <I>Handbook of Hydrology</I>. McGraw-Hill Education. </li>
           <li> <span style="font-variant: small-caps;">Davie, T.</span> (2002).
           <I> Fundamentals of Hydrology</I> Routledge Fundamentals of Physical Geography,
           Routledge.</li>'))
  ),
  # Main area: one tab per analysis step of the workflow.
  mainPanel(
    tabsetPanel(
      #########################################################################
      # Panel 'Import Data'
      #########################################################################
      tabPanel("Import Data",
               icon = icon("file"),
               h3("First step: import data"),
               p(HTML("To run the application, import your data set using the
                      import button below. Your data must be supplied in the form of a text/csv file.
                      If the importation is done properly, a preview of the data is displayed below.
                      When this is done, go to the next step: Exploratory Data Analysis.")),
               #
               br(),
               checkboxInput('header', ' Header?', TRUE),
               checkboxInput('rownames', ' Row names?', FALSE),
               selectInput('sep', 'Separator:',
                           c("Comma","Semicolon","Tab","Space"), 'Comma'),
               selectInput('quote', 'Quote:',
                           c("None","Double Quote","Single Quote"),
                           'Double Quote'),
               selectInput('dec', 'Decimal mark', c("Period", "Comma"),
                           'Period'),
               numericInput('nrow.preview','Number of rows in the preview:',20),
               numericInput('ncol.preview', 'Number of columns in the preview:',
                            10),
               fileInput('file1', 'Choose CSV/TXT File'),
               helpText("Note: The preview only shows a given number of observations, but
                        the analysis will consider the whole dataset."),
               tableOutput("view")
      ),
      #########################################################################
      # Panel 'Exploratory Data Analysis'
      #########################################################################
      tabPanel("Exploratory Data Analysis",
               icon = icon("bar-chart-o"),
               h3("Second Step: Start to look at our data"),
               br(),
               p(HTML("In this step, a set of tools is used to gain insight into the data,
                      uncover the underlying structure, define important variables, detect
                      outliers and anomalies, test underlying assumptions, develop
                      parsimonious models.")),
               br(),
               h4("Variable"),
               # Variable selector is rendered server-side from the imported data.
               uiOutput("EDAvarnames"),
               br(),
               h4("Histogram"),
               textInput(inputId = "EDAnbins", label = "Number Bins", value = "30"),
               radioButtons(inputId = "EDAloghist", label ='Scale',
                            choices = c("Arithmetic", "Log"), selected = "Arithmetic"),
               br(),
               h4("Autocorrelation Function"),
               textInput(inputId = "EDAmaxlag", label = "Maximum Lag",
                         value = '24'),
               br(),
               h4("Periodogram"),
               textInput(inputId = 'EDAfilter', label = 'Filter', value = "3,5"),
               textInput(inputId = 'EDAtaper', label = 'Taper', value = '0.1'),
               radioButtons(inputId = 'EDAlogspec', label = 'Scale',
                            choices = c("Arithmetic", "Log"), selected = "Arithmetic"),
               plotOutput("EDA.plot")
      ),
      #########################################################################
      # Panel 'Consistency'
      #########################################################################
      tabPanel("Consistency Analysis",
               icon = icon("newspaper-o"),
               h3("Consistency Analysis"),
               br(),
               h5("The tests included in this tab are used to determine if a
                  time series is homogeneous or not, or if two given time series
                  are consisten to each other. This type of analysis is helpful
                  in determining if corrections to the hydrological measurements
                  are requiered in the time series."),
               br(),
               selectInput(inputId = "consisttype", label = "Type",
                           choices = c(None= "None", Homogeneous="Homogeneous",
                                       Consistency = "Consistency"),
                           selected = "None"),
               br(),
               conditionalPanel(condition = 'input.consisttype == "Homogeneous"',
                                selectInput(inputId = "homogeneousmethod",
                                            label = "Method",
                                            choices = c(None = "None",
                                                        VonNeumannTest = "VonNeumannTest",
                                                        CumulativeResiduals = "CumulativeResiduals"),
                                            selected = "None")
               ),
               br(),
               conditionalPanel(condition = 'input.consisttype == "Consistency"',
                                selectInput(inputId = "consistmethod", label = "Method",
                                            choices = c(None="None", DoubleMass="DoubleMass", Bois="Bois"),
                                            selected = "None")),
               # Method-specific controls rendered by the server.
               uiOutput('consist1'),
               uiOutput('consist2'),
               uiOutput('consist3'),
               br(),
               plotOutput("consistency"),
               br(),
               tableOutput("homogeneity")
      ),
      #########################################################################
      # Panel 'Filling Missing Data'
      #########################################################################
      tabPanel("Filling Missing Data",
               icon = icon("paint-brush"),
               h3("Filling Missing Observations"),
               br(),
               h5("Sometimes the hydrologic time series are not complete due to
                  different reasons (equipment failure, extreme events, human
                  disturbances, mishandling of data records, accidental losses,
                  etc)"),
               br(),
               selectInput(inputId = "fillingtype", label = "Method",
                           choices = c(StationAverage="StationAverage",
                                       MonthAverage="MonthAverage",
                                       NormalRatio="NormalRatio",
                                       IDW = "IDW", Regression = "Regression")),
               br(),
               # Method-specific controls rendered by the server.
               uiOutput("filling1"),
               uiOutput("filling2"),
               uiOutput("filling3")
      ),
      #########################################################################
      # Panel 'Watershed Analysis' (placeholder -- no controls yet)
      #########################################################################
      tabPanel("Watershed Analysis",
               icon = icon("wrench")
      ),
      #########################################################################
      # Panel 'Spatial Analyisis'
      #########################################################################
      tabPanel("Spatial Analysis",
               icon = icon("map-o"),
               h3("Estimating the spatial distribution of hydrological variables"),
               br(),
               h5("In most cases the hydrological variables are sampled at specific
                  locations in space and using this information it is required to
                  know the values at unsampled locations. In this case geostatistics
                  comes to the rescue offering us a set of tools to solve this
                  challenging problem"),
               br(),
               sidebarLayout(
                 sidebarPanel(
                   h4("Input Variables"),
                   br(),
                   fileInput('watershed.limit.fl', 'Choose a SHP File'),
                   fileInput('DEM.fl', 'Choose a DEM File'),
                   fileInput('rainfall.fl', 'Choose a CSV file')
                 ),
                 mainPanel(
                   tabsetPanel(
                     tabPanel("Spatial Correlation",
                              br()
                     ),
                     tabPanel("Hydrological Maps",
                              br(),
                              plotOutput(outputId = "hydrologic.maps")
                     )
                   )
                 )
               )
      ),
      #########################################################################
      # Panel 'Water Budget'
      #########################################################################
      tabPanel("Water Budget",
               icon = icon("money"),
               h3("Water Budget: How much water there is in a region during a period of
                  time"),
               br(),
               p(HTML("Using information of precipitation, temperature and discharge,
                      the different component of the hydrologic cycle are
                      determined.")),
               br(),
               selectInput(inputId = "budgetmethod", label = "Water Budget Method",
                           choices = c(None = "None",
                                       Direct="Direct",
                                       LongTerm = "LongTerm",
                                       ABCD="ABCD"), selected = "None"),
               br(),
               conditionalPanel(
                 condition = "input.budgetmethod == 'Direct'",
                 br()),
               # Method-specific controls rendered by the server.
               uiOutput('budget1'),
               uiOutput('budget2'),
               uiOutput('budget3'),
               uiOutput('budget4'),
               uiOutput('budget5'),
               uiOutput('budget6'),
               uiOutput('budget7'),
               uiOutput('budget8'),
               plotOutput('water.budget'),
               br(),
               h4('Water Budget Results'),
               br(),
               #uiOutput('view.budget')
               dataTableOutput("view.budget")
      ),
      #########################################################################
      # Panel 'Base Flow Analysis'
      #########################################################################
      tabPanel("Base Flow Analysis",
               icon = icon("bath"),
               br(),
               h4("Discharge"),
               uiOutput("BFvarnames"),
               selectInput('time.base', "Time Base:", c('day','month','year')),
               selectInput("method", "Method:", c('None','Graphical','Nathan', 'Chapman', 'Eckhardt')),
               # Each filter method exposes its own smoothing parameter(s).
               conditionalPanel(
                 condition = "input.method == 'Nathan'",
                 textInput("nathan.alpha", label = h5("alpha"), value = "0.925")),
               conditionalPanel(
                 condition = "input.method == 'Chapman'",
                 textInput("chapman.alpha", label = h5("alpha"), value = "0.925")),
               conditionalPanel(
                 condition = "input.method == 'Eckhardt'",
                 textInput("eckhardt.alpha", label = h5("alpha"), value = "0.925"),
                 textInput("eckhardt.bfi", label = h5('BFImax'), value = "0.8")),
               #sliderInput("plot.range","Time range= ", min = -1, max = 0, value = c(-.6,-.5)),
               plotOutput("baseflow")
      ),
      #########################################################################
      # Panel 'Frequency Analysis'
      #########################################################################
      tabPanel("Frequency Analysis",
               icon = icon("repeat"),
               h3("Frequency Analysis: Build a frequency model of your data"),
               br(),
               selectInput(inputId="freqselect", label = "Step",
                           choices = c(None = "None", SelectModel = "SelectModel",
                                       ParameterEstimation = "ParameterEstimation",
                                       ModelValidation = "ModelValidation",
                                       UncertaintyAnalysis = "UncertaintyAnalysis")),
               br(),
               # Step-specific controls rendered by the server.
               uiOutput("freq1"),
               uiOutput("freq2"),
               uiOutput("freq3"),
               plotOutput("frequency"),
               uiOutput("parameter.estimates")
      ),
      #########################################################################
      # Panel 'Regionalization' (placeholder -- no controls yet)
      #########################################################################
      tabPanel("Regionalization",
               icon = icon("globe")
      )
    )
  )))
|
e11333c962e0f71aef85997acec4df7b330ea778
|
7c90bf87ad7974499a5028f268ce735d7fa1e45e
|
/man/epsilon_dispersion.Rd
|
7da67ba6ab106d2eb946c4f32a5e9984aefcdf7c
|
[] |
no_license
|
ciaranmoore/planar
|
ee3b4abc56645d1cd36292cb4a0c2abd88c534a0
|
d597c58baa3bfdcfeee42c375b5de9e31209a0a0
|
refs/heads/master
| 2021-01-22T16:13:29.626965
| 2015-04-16T05:14:14
| 2015-04-16T05:14:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 540
|
rd
|
epsilon_dispersion.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{epsilon_dispersion}
\alias{epsilon_dispersion}
\title{epsilon_dispersion}
\usage{
epsilon_dispersion(epsilon, wavelength = seq(400, 1000),
envir = parent.frame())
}
\arguments{
\item{epsilon}{list of real or complex values}
\item{wavelength}{numeric vector}
\item{envir}{environment to look for functions}
}
\value{
list
}
\description{
epsilon_dispersion
}
\details{
Apply a function to a range of wavelengths and return the corresponding dielectric function values.
}
\author{
baptiste Auguie
}
|
cf2a41b2cbe7d8c19f95bf0a9b2154e3b54effb3
|
764560247c3988559ce7bdf8470ab07ac87b3e0e
|
/man/firstDeriv.Rd
|
a7ca4be33725f2e313c3ecb80ca71be5d0b8863e
|
[] |
no_license
|
cran/drsmooth
|
e563c658f8a915ccf478b4f5b2fddcf366e842f2
|
6bcbd16552a201200fa3c424c296f01b1018d0b4
|
refs/heads/master
| 2020-12-24T15:49:13.918627
| 2015-09-25T00:01:50
| 2015-09-25T00:01:50
| 17,695,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
rd
|
firstDeriv.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/firstDeriv.R
\name{firstDeriv}
\alias{firstDeriv}
\title{First Derivative Function(s)}
\usage{
firstDeriv(mod, n)
}
\arguments{
\item{mod}{The gam model.}
\item{n}{Prediction increments.}
}
\description{
First Derivative Function(s)
}
\keyword{internal}
|
c882d216d1a7d6b0427be75d7cc8003bf527102a
|
b372a5a898a4c9c73566ee38e04d997dc4e0e711
|
/R/utilities.R
|
a826d1eab5096b9719afde4776e239aeddb74106
|
[] |
no_license
|
dimbage/epidemia
|
ccb2b13c25b0dcb8a4857590cf6dc6d2494af3b2
|
0b89f58f39a25dc1b795fea467c54d3e362de3b1
|
refs/heads/main
| 2023-07-13T08:51:39.922234
| 2020-06-05T17:05:42
| 2020-06-05T17:05:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,600
|
r
|
utilities.R
|
# Syntactic sugar for the left-hand side of the model formula: lets users
# write `R(group, date) ~ ...`. The body is intentionally empty -- the call
# is never evaluated; checkFormula()/checkData() only inspect the variable
# names it wraps.
R <- function(group, date) {}
# Validate the model formula passed to the package entry point.
#
# The left-hand side must be the sugar call R(group, date) (see `R` above),
# i.e. contain exactly two variables. The formula is returned unchanged on
# success; otherwise an error is thrown.
checkFormula <- function(formula) {
  if (!inherits(formula, "formula"))
    stop("'formula' must have class formula.", call. = FALSE)
  vars <- all.vars(update(formula, ".~0"))
  if (length(vars) != 2)
    # BUG FIX: the message previously said 'Rt(code,date)', which does not
    # match the R(group, date) sugar this package actually defines.
    stop("Left hand side of 'formula' must have form 'R(group, date)'.",
         call. = FALSE)
  return(formula)
}
# Performs a series of checks on the 'data' argument of genStanData
#
# Validates that 'data' is a data frame containing every variable of
# 'formula', renames the two response variables to 'group'/'date', coerces
# them to factor/Date, rejects missing values, sorts by group then date,
# and requires each group's dates to be consecutive (daily resolution).
#
# @param formula See [genStanData]
# @param data See [genStanData]
checkData <- function(formula, data) {
  if(!is.data.frame(data))
    stop("'data' must be a data frame", call. = FALSE)
  vars <- all.vars(formula)
  not_in_df <- !(vars %in% colnames(data))
  if (any(not_in_df))
    stop(paste(c("Could not find column(s) ", vars[not_in_df], " in 'data'"), collapse=" "), call.=FALSE)
  # remove redundant columns
  data <- data[,vars]
  # change name of response vars: the two LHS variables become 'group'/'date'
  vars <- all.vars(update(formula, ".~0"))
  df <- data[,vars]
  data[,c("group", "date")] <- df
  # check if columns are coercible
  data <- tryCatch(
    {
      data$group <- as.factor(data$group)
      data$date <- as.Date(data$date)
      data
    },
    error = function(cond) {
      message(paste0(vars[1], " and ", vars[2], " are not coercible to Factor and Date Respectively."))
      message("Original message:")
      message(cond)
      # NOTE(review): returning NULL here means the complete.cases() call
      # below fails with an unrelated error; a stop() would be cleaner.
      return(NULL)
    }
  )
  # check for missing data
  v <- !complete.cases(data)
  if(any(v))
    stop(paste(c("Missing data found on rows", which(v), " of 'data'"), collapse=" "))
  # sort by group, then by date
  data <- data[with(data, order(group, date)),]
  # check for consecutive dates: within each group, successive dates must
  # differ by exactly one day
  f <- function(x) return(all(diff(x$date) == 1))
  v <- !unlist(Map(f, split(data, data$group)))
  if(any(v))
    stop(paste(c("Dates corresponding to groups ", names(v[v]), " are not consecutive"), collapse=" "), call.=FALSE)
  return(data)
}
# Validate the 'obs' argument: a named list where each element describes one
# observation type and must carry components 'obs' (data frame), 'rates' and
# 'pvec'. Each component is checked/normalised in turn; elements left with
# no usable rows are dropped with a warning.
checkObs <- function(data, obs) {
  lst <- obs
  if(!is.list(lst))
    stop(" Argument 'obs' must be a list.", call.=FALSE)
  for (i in seq_along(lst)) {
    nme <- names(lst)[[i]]
    elem <- lst[[i]]
    # Unpack the element's named components into locals ('obs', 'rates',
    # 'pvec'); note this shadows the function argument 'obs'.
    for (name in names(elem))
      assign(name, elem[[name]])
    # check required components exist
    # NOTE(review): exists() also searches enclosing environments, so a
    # stray variable named 'obs'/'rates'/'pvec' outside this function could
    # mask a genuinely missing component -- confirm intended.
    req_cols <- c("obs", "rates", "pvec")
    for (col in req_cols)
      if (!exists(col))
        stop(paste0("Could not find obs$", nme, "$", col), call. = FALSE)
    obs <- checkObsDF(data,
                      obs,
                      paste0("obs$", nme, "$obs"))
    rates <- checkRates(levels(data$group),
                        rates,
                        paste0("obs$", nme, "$rates"))
    pvec <- checkSV(pvec,
                    paste0("obs$", nme, "$pvec"))
    if (nrow(obs))
      # nlist() is a package-internal named-list constructor defined
      # elsewhere in this package.
      lst[[i]] <- nlist(obs, rates, pvec)
    else {
      warning(paste0("No relevant data found in obs$", nme, ". Removing..."), call. = FALSE)
      # NOTE(review): assigning NULL shrinks the list mid-loop while the
      # iteration range was fixed up front; later indices shift -- confirm.
      lst[[i]] <- NULL
    }
  }
  return(lst)
}
# Series of checks on dataframe df
#
# These include
# * formatting (column names, coercion to factor/Date/numeric)
# * throwing errors if duplicated (group, date) observations exist
# * removing incomplete cases
# * warning if unmodelled groups exist
# * warning if dates must be trimmed to the modelled window
# @param data The result of [checkData]
# @param df The dataframe to consider (e.g. obs$deaths$obs)
# @param name Name of dataframe to output in warnings/errors
checkObsDF <- function(data, df, name) {
  # BUG FIX: the name passed to checkDF was hard-coded as "obs$deaths", so
  # error messages were wrong for every other observation type.
  df <- checkDF(df, name, 3)
  # format correctly
  names(df) <- c("group", "date", "obs")
  # check if columns are coercible
  df <- tryCatch(
    {
      df$group <- as.factor(df$group)
      df$date <- as.Date(df$date)
      df$obs <- as.numeric(df$obs)
      df
    },
    error = function(cond) {
      message(paste0("Columns of '", name,"' are not coercible to required classes [factor, Date, numeric]"))
      message("Original message:")
      message(cond)
      return(NULL)
    }
  )
  groups <- levels(as.factor(data$group))
  # throw error if duplicated
  if(any(duplicated(df[,1:2])))
    stop(paste0("Observations for a given group and date must be unique. Please check '", name, "'.", call. = FALSE))
  # remove incomplete cases
  v <- !complete.cases(df)
  if(any(v)) {
    df <- df[!v,]
    warning(paste(c("Have removed missing data on rows", which(v), " of", name), collapse=" "), call.=FALSE)
  }
  # warn if there are unmodelled groups
  v <- setdiff(levels(df$group), groups)
  if(length(v))
    warning(paste(c("Levels ", v, " in", name, "were not found in 'data'. Removing."), collapse = " "), call.=FALSE)
  # warn if we have to trim the data.
  for (group in groups) {
    if(group %in% df$group) {
      dates_data <- data[data$group == group, "date"]
      start_date <- min(dates_data)
      stop_date <- max(dates_data)
      range <- paste0(start_date," : ", stop_date)
      dates_df <- df[df$group == group, "date"]
      # BUG FIX: the original wrote `max(dates_df > stop_date)` -- the max
      # of a logical vector. It was accidentally truthy in the right cases,
      # but the intended comparison is on the maximum date itself.
      if(min(dates_df) < start_date || max(dates_df) > stop_date)
        warning(paste0("Group: ", group, ", found dates in ", name, " outside of ", range, ". Trimming..."), call.=FALSE)
    }
  }
  # trim the data: keep only (group, date) pairs present in the modelled data
  data$group <- as.factor(data$group)
  df <- dplyr::left_join(data[,c("group", "date")], df, by = c("group", "date"))
  df <- df[complete.cases(df),]
  # warning if some groups do not have data
  v <- setdiff(groups, df$group)
  if(length(v))
    warning(paste(c("No data for group(s) ", v, " found in", name), collapse=" "), call. = FALSE)
  return(df)
}
# Generic checking of a dataframe
#
# Verifies df is a data frame with at least nc columns and no NAs in the
# first nc columns, then returns those columns as a plain data frame.
#
# @param df The Data.Frame to be checked
# @param name The name of the dataframe (for error message printing)
# @param nc The minimum number of columns expected.
checkDF <- function(df, name, nc) {
  if (!is.data.frame(df))
    stop(paste0(name, " must be a dataframe."))
  # BUG FIX: the column-count check now runs BEFORE indexing df[, 1:nc];
  # previously a too-narrow frame died with "undefined columns selected"
  # instead of the intended "Not enough columns" message.
  if (ncol(df) < nc)
    stop(paste0("Not enough columns in ", name))
  if (anyNA(df[, seq_len(nc)]))
    stop(paste0("'NA's exists in ", name))
  as.data.frame(df[, seq_len(nc)])
}
# Check the data$pops argument of genStanData
#
# Validates and normalises the population table: one row per modelled
# group, coerced to (factor, integer), restricted to modelled groups,
# uniqueness- and sign-checked, then sorted by group.
#
# @param pops See [genStanData]
# @param levels Factor levels of the modelled groups (from the response)
checkPops <- function(pops, levels) {
  pops <- checkDF(pops, "pops", 2)
  names(pops) <- c("group", "pop")
  # check if columns are coercible
  pops <- tryCatch(
    {
      pops$group <- as.factor(pops$group)
      pops$pop <- as.integer(pops$pop)
      pops
    },
    error = function(cond) {
      # NOTE(review): message() has no 'call.' argument -- the FALSE below
      # gets pasted into the emitted text; confirm and drop it.
      message("Columns of 'pops' are not coercible to required classes [factor, integer]", call. = FALSE)
      message("Original message:")
      message(cond)
      return(NULL)
    }
  )
  # removing rows not represented in response groups
  pops <- pops[pops$group %in% levels,]
  # requiring all levels have an associated population
  if (!all(levels %in% pops$group))
    stop(paste0("Levels in 'formula' response missing in 'pops'"))
  if(any(duplicated(pops$group)))
    stop("Populations for a given group must be unique. Please check 'pops'.", call. = FALSE)
  # NOTE(review): "Plase" typo in the runtime message below (left untouched
  # here; fixing it changes user-visible output).
  if(any(pops$pop < 0))
    stop("Populations must take nonnegative. Plase check 'pops'", call. = FALSE)
  # sort by group
  pops <- pops[order(pops$group),]
  return(pops)
}
# Check that a 'rate' is provided correctly for each observation
#
# 'rates' must be a list with $means (a 2-column group/mean data frame with
# means in [0,1]) and an optional scalar $scale (default 0.1). Returns a
# named list with the validated means (sorted by group) and the scale.
#
# @param levels Unique levels found in the 'data' argument of [epim]
# @param rates An element of each element of 'obs' see [epim]
# @param name The name to print in case of an error
checkRates <- function(levels, rates, name) {
  # NOTE(review): call.=FALSE is inside paste0() here, so it is appended to
  # the message text ("...must be a list.FALSE") instead of being passed to
  # stop() -- confirm and move it outside paste0().
  if (!is.list(rates))
    stop(paste0(name," must be a list.", call.=FALSE))
  if(is.null(rates$means))
    stop(paste0(name,"$means not found. "))
  means <- rates$means
  # scale defaults to 0.1 when absent; otherwise must be a length-1 numeric
  if(is.null(rates$scale))
    scale = 0.1
  else if(!is.numeric(rates$scale) || length(rates$scale) != 1)
    stop(paste0(name, "$scale must be a numeric of length 1."))
  else
    scale = rates$scale
  means <- checkDF(means, paste0(name, "$means"), 2)
  names(means) <- c("group", "mean")
  # check if columns are coercible
  means <- tryCatch(
    {
      means$group <- as.factor(means$group)
      means$mean <- as.numeric(means$mean)
      means
    },
    error = function(cond) {
      message(paste0("Columns of ", name, "$means are not coercible to required classes [factor, numeric]"))
      message("Original message:")
      message(cond)
      return(NULL)
    }
  )
  # removing rows not represented in response groups
  means <- means[means$group %in% levels,]
  # requiring all levels have an associated mean rate
  if (!all(levels %in% means$group))
    stop(paste0("Levels in 'formula' response missing in ", name, "$means"))
  if(any(duplicated(means$group)))
    stop(paste0("Values for a given group must be unique. Please check ", name, "$means"), call. = FALSE)
  if(any((means$mean > 1) + (means$mean < 0)))
    stop(paste0("Mean values must be in [0,1]. Plase check ", name, "$means"), call. = FALSE)
  # sort by group
  means <- means[order(means$group),]
  # nlist() is a package-internal named-list constructor defined elsewhere.
  return(nlist(means, scale))
}
# Simple check of a simplex vector
#
# Coerces vec to numeric, rejects negative or all-(near-)zero vectors, and
# rescales so the entries sum to one (warning when rescaling was needed).
#
# @param vec A numeric vector (or something coercible to one)
# @param name The name of the vector (for error message printing)
checkSV <- function(vec, name) {
  # BUG FIX: the original stored the coercion result in `out` and then kept
  # validating the *uncoerced* `vec`; the coerced value was discarded.
  vec <- tryCatch(as.numeric(vec),
                  error = function(cond) {
                    message(paste0(name, " could not be coerced to a numeric vector."))
                    message("Original message:")
                    message(cond)
                    NULL
                  })
  if (is.null(vec))
    stop(paste0(name, " must be coercible to a numeric vector."), call. = FALSE)
  if (any(vec < 0))
    stop(paste0("Negative values found in ", name), call. = FALSE)
  if (all(vec < 1e-14))
    stop(paste0("No positive values found in ", name), call. = FALSE)
  if (abs(sum(vec) - 1) > 1e-14)
    warning(paste0(name, " did not sum to 1. Have rescaled to form a probability vector."), call. = FALSE)
  return(vec / sum(vec))
}
# Validate the covariate data frame; when none was supplied, warn and fall
# back to `if_missing`.
checkCovariates <- function(data, if_missing = NULL) {
  no_data <- missing(data) || is.null(data)
  if (no_data) {
    warnCovariatesMissing()
    return(if_missing)
  }
  if (!is.data.frame(data)) {
    stop("'data' must be a data frame.", call. = FALSE)
  }
  # Strip subclasses such as 'tbl_df', 'tbl' or 'data.table' down to a
  # plain data.frame, then collapse single-column matrix columns.
  plain <- as.data.frame(data)
  dropRedundantDims(plain)
}
# Collapse single-column matrix columns of a data frame into plain vectors,
# leaving every other column untouched.
dropRedundantDims <- function(data) {
  is_thin_matrix <- vapply(
    data,
    function(col) is.matrix(col) && NCOL(col) == 1,
    logical(1)
  )
  data[, is_thin_matrix] <- lapply(data[, is_thin_matrix, drop = FALSE], drop)
  return(data)
}
# Emit the standard warning for a missing 'covariates' element of 'data'.
# BUG FIX: the first two fragments were concatenated without a trailing
# space, producing "...not recommendedand may not...".
# NOTE(review): the text names 'rstanarm'; this looks copied from rstanarm
# and probably should name this package instead -- confirm before changing.
warnCovariatesMissing <- function() {
  warning(
    "Omitting the 'covariates' element of 'data' is not recommended ",
    "and may not be allowed in future versions of rstanarm. ",
    "Some post-estimation functions (in particular 'update', 'loo', 'kfold') ",
    "are not guaranteed to work properly unless 'data' is specified as a data frame.",
    call. = FALSE
  )
}
|
9f4606232426d70bffac4092bc706c70d1ffcc36
|
b28f2fa998ce3e1004239aee1f6390dbffcb9ddb
|
/R/members.R
|
1d3b6fba2678d69eed02e56e85b646b11aeed90c
|
[
"MIT"
] |
permissive
|
linearregression/etseed
|
cf1a219568bd665cd76e64d77fdaa0218db66a76
|
d3c844ae09934efd493fbba2f35a87c3133a495a
|
refs/heads/master
| 2020-12-11T05:45:08.806594
| 2016-07-18T05:40:13
| 2016-07-18T05:40:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,003
|
r
|
members.R
|
#' Manage etcd members
#'
#' @export
#' @name members
#' @param id (character) A member id. Note: for \code{member_add} the value
#'   passed here is the new member's peer URL (see examples), not an id.
#' @param newid (character) The new peer URL to assign to the member.
#' @param ... Further args passed on to \code{\link[httr]{GET}}
#' @return Logical or R list
#' @examples \dontrun{
#' Sys.setenv(ETSEED_USER = "root")
#' Sys.setenv(ETSEED_PWD = "pickbetterpwd")
#'
#' # list members
#' member_list()
#'
#' # add a member
#' member_add("http://10.0.0.10:2380")
#'
#' # change a member
#' mms <- member_list()
#' member_change(mms$members[[1]]$id, "http://10.0.0.10:8380", config=verbose())
#'
#' # delete a member
#' mms <- member_list()
#' member_delete(mms$members[[1]]$id)
#' }
member_list <- function(...) {
  # GET <base>/members and return the parsed JSON body as a plain R list.
  # NOTE(review): etcd_GET()/etcdbase() are helpers defined elsewhere in
  # this package.
  res <- etcd_GET(paste0(etcdbase(), "members"), NULL, ...)
  jsonlite::fromJSON(res, FALSE)
}
#' @export
#' @rdname members
member_add <- function(id, ...) {
  # POST the new member's peer URL to /members; despite the argument name,
  # `id` here is the peer URL (see roxygen examples above).
  res <- member_POST(paste0(etcdbase(), "members"),
                     body = list(peerURLs = list(id)),
                     make_auth(Sys.getenv("ETSEED_USER"), Sys.getenv("ETSEED_PWD")), ...)
  jsonlite::fromJSON(res, FALSE)
}
#' @export
#' @rdname members
member_change <- function(id, newid, ...) {
  # PUT the replacement peer URL for member `id` and return the parsed
  # JSON response as a plain list.
  res <- member_PUT(paste0(etcdbase(), "members/", id),
                    body = list(peerURLs = list(newid)),
                    make_auth(Sys.getenv("ETSEED_USER"), Sys.getenv("ETSEED_PWD")), ...)
  jsonlite::fromJSON(res, FALSE)
}
#' @export
#' @rdname members
member_delete <- function(id, ...) {
  # DELETE the member; etcd returns an empty body on success, so TRUE
  # indicates the deletion succeeded.
  res <- member_DELETE(paste0(etcdbase(), "members/", id),
                       make_auth(Sys.getenv("ETSEED_USER"), Sys.getenv("ETSEED_PWD")), ...)
  identical(res, "")
}
# Internal: POST with JSON body encoding; errors on a non-2xx HTTP status
# and returns the response body as text.
member_POST <- function(url, ...) {
  res <- POST(url, encode = "json", ...)
  stop_for_status(res)
  content(res, "text")
}
# Internal: PUT with JSON body encoding; errors on a non-2xx HTTP status
# and returns the response body as text.
member_PUT <- function(url, ...) {
  res <- PUT(url, encode = "json", ...)
  stop_for_status(res)
  content(res, "text")
}
# Internal: DELETE with JSON encoding; errors on a non-2xx HTTP status and
# returns the (normally empty) response body as text.
member_DELETE <- function(url, ...) {
  res <- DELETE(url, encode = "json", ...)
  stop_for_status(res)
  content(res, "text")
}
|
ca378ed980d5928389ef46f54b2c6d65b6710030
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/geographic_distribution/plot_v1.R
|
a5d44cfdafb813ec8c743d9c9b2c52157cc2cf3a
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061
| 2021-02-28T05:33:08
| 2021-02-28T05:33:08
| 32,551,526
| 7
| 1
| null | 2020-08-17T22:37:43
| 2015-03-19T23:25:01
|
OpenEdge ABL
|
UTF-8
|
R
| false
| false
| 884
|
r
|
plot_v1.R
|
# Choropleth maps of MyHeart Counts v1 users by US state:
#   p1 = absolute user counts, p2 = log10 share of state population.
# BUG FIX: removed `rm(list=ls())` -- wiping the caller's workspace is a
# side effect a script should never have.
library(ggplot2)
library(fiftystater)
data("fifty_states")

data <- read.table("v1.us.broadshare.tally", header = TRUE, sep = '\t')

# Absolute number of users per state.
p1 <- ggplot(data, aes(map_id = State)) +
  geom_map(aes(fill = Users), map = fifty_states) +
  expand_limits(x = fifty_states$long, y = fifty_states$lat) +
  coord_map() +
  scale_x_continuous(breaks = NULL) +
  scale_y_continuous(breaks = NULL) +
  labs(x = "", y = "") +
  theme(legend.position = "bottom",
        panel.background = element_blank()) +
  fifty_states_inset_boxes()

# Users as a (log10) fraction of state population.
p2 <- ggplot(data, aes(map_id = State)) +
  geom_map(aes(fill = log10(Users / StatePop)), map = fifty_states) +
  expand_limits(x = fifty_states$long, y = fifty_states$lat) +
  coord_map() +
  scale_x_continuous(breaks = NULL) +
  scale_y_continuous(breaks = NULL) +
  labs(x = "", y = "") +
  theme(legend.position = "bottom",
        panel.background = element_blank()) +
  fifty_states_inset_boxes()

# BUG FIX: ggplot objects are only rendered when printed; run under Rscript
# the original script produced no output at all.
print(p1)
print(p2)
|
c9dc9d6a35c0bf7e24c06ac9dec9baf6c399d17e
|
589479cdb8d92cea1734e9787e93e853d9d4cdd7
|
/grouping_script.r
|
9e757a97059e6c127feeba10a68be51626aa76e5
|
[] |
no_license
|
sfatali/Trend-Mining-Exercise
|
9ee0b4ac238c458d47445f6a53a9b53a8f17b211
|
d62c20dfd67943c97313440a1b12a4e0ee74ad9a
|
refs/heads/master
| 2021-07-21T02:51:20.318085
| 2017-10-31T17:08:53
| 2017-10-31T17:08:53
| 108,731,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,987
|
r
|
grouping_script.r
|
#' Normalize tech-vocabulary variants in free text for trend mining.
#'
#' Collapses spelling and phrase variants of software-engineering terms
#' (plural forms, hyphenations, Twitter "...pic"/"...https" artifacts,
#' multi-word phrases) into single canonical tokens so term-frequency
#' counts group related mentions together. Rules are applied in order
#' with gsub(), so sequence matters: longer patterns must run before
#' their prefixes, and "<token>ss" cleanup lines undo double-pluralizing.
#' All patterns are lower-case and case-sensitive, so input is expected
#' to be lower-cased beforehand.
#'
#' @param text Character vector of (lower-case) text.
#' @return The character vector with all substitutions applied.
grouping <- function(text) {
  # After working on Scopus:
  text <- gsub("open source", "opensource", text)
  text <- gsub("internet of things", "iot", text)
  text <- gsub("internet things", "iot", text)
  text <- gsub("internet things", "iot", text)
  text <- gsub("service oriented architecture", "soa", text)
  text <- gsub("service oriented", "soa", text)
  text <- gsub("service-oriented architecture", "soa", text)
  text <- gsub("service-oriented", "soa", text)
  text <- gsub("web services", "webservice", text)
  text <- gsub("web service", "webservice", text)
  text <- gsub("business process modelling", "businessprocessmodelling", text)
  text <- gsub("workflows", "workflow", text)
  text <- gsub("collaborative platform", "collaborativeplatform", text)
  text <- gsub("case study", "casestudy", text)
  text <- gsub("study methodology", "studymethodology", text)
  text <- gsub("cloud computing", "cloud", text)
  text <- gsub("containerization", "containers", text)
  text <- gsub("containerized", "containers", text)
  text <- gsub("containerize", "containers", text)
  text <- gsub("containerizing", "containers", text)
  text <- gsub("container", "containers", text)
  text <- gsub("containerss", "containers", text)
  text <- gsub("large scale", "largescale", text)
  text <- gsub("multi-cloud", "cloud", text)
  text <- gsub("big data", "bigdata", text)
  text <- gsub("clouds", "cloud", text)
  text <- gsub("machine learning", "machinelearning", text)
  text <- gsub(" ml ", " machinelearning ", text)
  text <- gsub("flexible solution", "flexibility", text)
  text <- gsub("migrating", "migration", text)
  text <- gsub("migrated", "migration", text)
  text <- gsub("migrate", "migration", text)
  text <- gsub("scalable", "scalability", text)
  text <- gsub("reliable", "reliability", text)
  text <- gsub("flexible", "flexibility", text)
  text <- gsub("fast", "speed", text)
  text <- gsub("quick", "speed", text)
  text <- gsub("quickly", "speed", text)
  text <- gsub("speedly", "speed", text)
  text <- gsub("speedy", "speed", text)
  text <- gsub("speeding", "speed", text)
  text <- gsub("faster", "speed", text)
  text <- gsub("rapid", "speed", text)
  text <- gsub("operating systems", "operatingsystem", text)
  text <- gsub("operating system", "operatingsystem", text)
  text <- gsub("smart buildings", "smartbuildings", text)
  text <- gsub("smart building", "smartbuildings", text)
  text <- gsub("large-scale", "largescale", text)
  text <- gsub("large scale", "largescale", text)
  text <- gsub("orchestrate", "orchestration", text)
  text <- gsub("orchestrating", "orchestration", text)
  text <- gsub("virtualizing", "virtualization", text)
  text <- gsub("virtual", "virtualization", text)
  text <- gsub("virtualizationization", "virtualization", text)
  text <- gsub("optimizing", "optimization", text)
  text <- gsub("optimized", "optimization", text)
  text <- gsub("optimize", "optimization", text)
  text <- gsub("optimal", "optimization", text)
  text <- gsub("digitalized", "digitalization", text)
  text <- gsub("digitalize", "digitalization", text)
  text <- gsub("digital", "digitalization", text)
  text <- gsub("digitalizationization", "digitalization", text)
  text <- gsub("communicating", "communication", text)
  text <- gsub("communicated", "communication", text)
  text <- gsub("communicate", "communication", text)
  text <- gsub("communications", "communication", text)
  text <- gsub("decomposing", "decomposition", text)
  text <- gsub("decomposed", "decomposition", text)
  text <- gsub("decompose", "decomposition", text)
  text <- gsub("deploying", "deployment", text)
  text <- gsub("deployed", "deployment", text)
  text <- gsub("deployments", "deployment", text)
  text <- gsub("deploy", "deployment", text)
  text <- gsub("deploymentment", "deployment", text)
  text <- gsub("agility", "agile", text)
  text <- gsub("infratructures", "infratructure", text)
  text <- gsub("architectural", "architecture", text)
  text <- gsub("high level", "highlevel", text)
  text <- gsub("low level", "lowlevel", text)
  text <- gsub("configured", "configuration", text)
  text <- gsub("configuring", "configuration", text)
  text <- gsub("configure", "configuration", text)
  text <- gsub("e-commerce", "ecommerce", text)
  text <- gsub("evolving", "evolution", text)
  text <- gsub("evolves", "evolution", text)
  text <- gsub("evolve", "evolution", text)
  text <- gsub("natural language processing", "nlp", text)
  text <- gsub("language processing", "nlp", text)
  text <- gsub("apis", "api", text)
  text <- gsub("neural networks", "neuralnetworks", text)
  text <- gsub("neural network", "neuralnetworks", text)
  text <- gsub("collaborative", "collaboration", text)
  text <- gsub("continuously", "continuous", text)
  text <- gsub("component", "components", text)
  text <- gsub("componentss", "components", text)
  text <- gsub("resource", "resources", text)
  text <- gsub("resourcess", "resources", text)
  text <- gsub("platforms", "platform", text)
  text <- gsub("technologies", "technology", text)
  text <- gsub("challenge", "challenges", text)
  text <- gsub("challengess", "challenges", text)
  text <- gsub("contexts", "contex", text)
  text <- gsub("complex", "complexity", text)
  text <- gsub("complexityity", "complexity", text)
  # After working on STO:
  # BUG FIX: "rest full" must run before "rest ful"; in the original order
  # the shorter pattern matched first, turning "rest full" into "restl".
  text <- gsub("rest full", "rest", text)
  text <- gsub("rest ful", "rest", text)
  text <- gsub("request", "requests", text)
  text <- gsub("requestss", "requests", text)
  text <- gsub("response", "responses", text)
  # BUG FIX: the original line was the no-op gsub("responses", "responses");
  # it must collapse the double-s that the previous rule creates, matching
  # the "requestss" / "containerss" cleanup convention used elsewhere.
  text <- gsub("responsess", "responses", text)
  text <- gsub("authenticated", "authentication", text)
  text <- gsub("authenticating", "authentication", text)
  text <- gsub("authenticate", "authentication", text)
  text <- gsub("authorizing", "authorization", text)
  text <- gsub("authorized", "authorization", text)
  text <- gsub("authorize", "authorization", text)
  text <- gsub("database", "databases", text)
  # BUG FIX: missing cleanup line -- without it "databases" input became
  # "databasess" permanently after the previous rule.
  text <- gsub("databasess", "databases", text)
  text <- gsub(" db ", " databases ", text)
  text <- gsub("gateways", "gateway", text)
  text <- gsub("config ", "configuration ", text)
  text <- gsub("clusters", "cluster", text)
  text <- gsub("implement ", "implementation ", text)
  text <- gsub("instance ", "instances ", text)
  text <- gsub("event ", "events ", text)
  text <- gsub("tokens", "token", text)
  text <- gsub("problems", "problem", text)
  text <- gsub("issues", "issue", text)
  text <- gsub("patterns", "pattern", text)
  text <- gsub("connect ", "connection ", text)
  text <- gsub("connects ", "connection ", text)
  text <- gsub("connections", "connection", text)
  text <- gsub("clients", "client", text)
  text <- gsub("object oriented", "oop", text)
  text <- gsub("object-oriented", "oop", text)
  text <- gsub("back end", "backend", text)
  text <- gsub("back-end", "backend", text)
  text <- gsub("servers", "server", text)
  text <- gsub("message ", "messages", text)
  text <- gsub("spring boot", "springboot", text)
  text <- gsub("tests", "testing", text)
  text <- gsub("test", "testing", text)
  text <- gsub("testinging", "testing", text)
  text <- gsub("endpoints", "endpoint", text)
  text <- gsub("node js", "nodejs", text)
  text <- gsub("queueing", "queue", text)
  text <- gsub("queued", "queue", text)
  # After working on Twitter
  text <- gsub("internetofthings", "iot", text)
  text <- gsub("monolithics", "monolithic", text)
  text <- gsub("monoliths", "monolithic", text)
  text <- gsub("monolith", "monolithic", text)
  text <- gsub("monolithicic", "monolithic", text)
  text <- gsub("artificial intelligence", "ai", text)
  text <- gsub("artificialintelligence", "ai", text)
  text <- gsub("continuous delivery", "continuousdelivery", text)
  text <- gsub("continuous integration", "continuousintegration", text)
  text <- gsub(" ci ", " continuousintegration ", text)
  text <- gsub(" cd ", " continuousdelivery ", text)
  text <- gsub("angular js", "angularjs", text)
  text <- gsub("react js", "reactjs", text)
  text <- gsub("cloudcomputing", "cloud", text)
  text <- gsub("iotpic", "iot", text)
  text <- gsub("java ee", "javaee", text)
  text <- gsub("silicon valley", "siliconvalley", text)
  text <- gsub("dot net", "dotnet", text)
  text <- gsub(" net ", "dotnet", text)
  text <- gsub("software defined networkng", "sdn", text)
  text <- gsub("religionsaas", "saas", text)
  text <- gsub("internetofthings", "iot", text)
  text <- gsub("restapis", "rest api", text)
  # NOTE(review): the next rule is a no-op (pattern == replacement); left as-is.
  text <- gsub("rest api", "rest api", text)
  text <- gsub("restful", "rest", text)
  text <- gsub("servicefabric", "fabric", text)
  text <- gsub("jobs", "job", text)
  text <- gsub("containerspic", "containers", text)
  text <- gsub("digitaltransformation", "digitalization", text)
  text <- gsub("awshttps", "aws", text)
  text <- gsub("apispic", "api", text)
  text <- gsub("tutorialpic", "tutorial", text)
  text <- gsub("apachekafka", "kafka", text)
  text <- gsub("azurepic", "azure", text)
  text <- gsub("dddesign", "ddd", text)
  text <- gsub("mongo db", "mongodb", text)
  text <- gsub("rabbit", "rabbitmq", text)
  text <- gsub("webcomponents", "components", text)
  text <- gsub("websockets", "sockets", text)
  text <- gsub("socket", "sockets", text)
  text <- gsub("socketss", "sockets", text)
  text <- gsub("virtual reality", "virtualreality", text)
  text <- gsub(" vr ", " virtualreality ", text)
  text <- gsub("dotnetcore", "dotnet", text)
  text <- gsub("servicefabrichttps", "fabric https", text)
  text <- gsub("netcore", "dotnet", text)
  text <- gsub(" vm ", " virtualization ", text)
  text <- gsub("virtualisation", "virtualization", text)
  text <- gsub("cloudnativelondon", "cloudnative", text)
  text <- gsub("kuberneteshttp", "kubernetes http", text)
  text <- gsub("kuberneteshttps", "kubernetes https", text)
  text <- gsub("mongodbe", "mongodb", text)
  text <- gsub("apidev", "api", text)
  text <- gsub("testdriven", "tdd", text)
  text <- gsub("testdrivendevelopment", "tdd", text)
  text <- gsub("test driven development", "tdd", text)
  text <- gsub("testdrivendevelopment", "tdd", text)
  text <- gsub("tddpic", "tdd", text)
  text <- gsub("apisecurity", "security", text)
  text <- gsub("androidappdev", "android", text)
  text <- gsub("androiddev", "android", text)
  text <- gsub("androidapp", "android", text)
  text <- gsub("reliableservices", "reliability", text)
  text <- gsub("paaspic", "paas", text)
  text <- gsub("iaaspic", "paas", text)
  text <- gsub("springpic", "spring", text)
  text <- gsub("javaeepic", "javaee", text)
  text <- gsub("javapic", "java", text)
  text <- gsub("oss", "opensource", text)
  text <- gsub("javaone", "opensource", text)
  text <- gsub("openlibertyio", "openliberty", text)
  text <- gsub("openlibertyibm", "openliberty", text)
  text <- gsub("ibmopenliberty", "openliberty", text)
  text <- gsub("open liberty", "openliberty", text)
  text <- gsub("openlibertypic", "openliberty", text)
  text <- gsub("startups", "startup", text)
  text <- gsub("springframework", "spring", text)
  text <- gsub("dockerpic", "docker", text)
  text <- gsub("devopshttps", "devops", text)
  text <- gsub("devopshttp", "devops", text)
  text <- gsub("webdev", "devops", text)
  text <- gsub("mobility", "mobile", text)
  text <- gsub("asynchronous", "async", text)
  text <- gsub("asynch", "async", text)
  text <- gsub(" dl ", " deeplearning ", text)
  text <- gsub("restapispic", "rest api", text)
  text <- gsub("restapipic", "rest api", text)
  text <- gsub("restpic", "rest", text)
  # NOTE(review): the next three replacements ("agilepic"/"aipic" -> "rest",
  # "testingpic" -> "iot") look like copy-paste slips but are preserved as-is
  # since the intended canonical tokens cannot be confirmed from this file.
  text <- gsub("agilepic", "rest", text)
  text <- gsub("aipic", "rest", text)
  text <- gsub("iotpic", "iot", text)
  text <- gsub("testingpic", "iot", text)
  text <- gsub("load balancer", "loadbalancing", text)
  text <- gsub("load balance", "loadbalancing", text)
  text <- gsub("load balancing", "loadbalancing", text)
  text <- gsub("mlpic", "machinelearning", text)
  text <- gsub("cloudpic", "cloud", text)
  text <- gsub("aspnetcore", "aspnet", text)
  text <- gsub("dotnetpic", "aspnet", text)
  text <- gsub("distributedhttps", "distributed https", text)
  text <- gsub("infosecpic", "security", text)
  text <- gsub("infosec", "security", text)
  # NOTE(review): no-op rule (pattern == replacement); left as-is.
  text <- gsub("jenkinsworld", "jenkinsworld", text)
  text <- gsub("jenkinspic", "jenkins", text)
  text <- gsub("apipic", "api", text)
  text <- gsub("pythonpic", "python", text)
  text <- gsub("rubypic", "api", text)
  text <- gsub("retailpic", "retail", text)
  text <- gsub("frameworkpic", "framework", text)
  text <- gsub("bigdatapic", "bigdata", text)
  text <- gsub("integrationpic", "integration", text)
  text <- gsub("devopspic", "devops", text)
  text <- gsub("awspic", "aws", text)
  text <- gsub("osspic", "opensource", text)
  text <- gsub("autoscaling", "scalability", text)
  text <- gsub("scaling", "scalability", text)
  text <- gsub("datapic", "data", text)
  text <- gsub("ssdpic", "ssd", text)
  text <- gsub("linuxhttps", "linux https", text)
  text <- gsub("linuxpic", "linux", text)
  text <- gsub("dataanalytics", "data analytics", text)
  text <- gsub("cloudanalytics", "cloud analytics", text)
  text <- gsub("javascriptpic", "javascript", text)
  text <- gsub("rtpic", "rt", text)
  text <- gsub("real time", "rt", text)
  text <- gsub("real time", "rt", text)
  text <- gsub("digitalizationtransformation", "digitalization", text)
  text <- gsub("soacloud", "soa cloud", text)
  text <- gsub(" tool ", " tools ", text)
  text <- gsub(" messages ", " messaging ", text)
  text <- gsub(" message ", " messaging ", text)
  text <- gsub("message-oriented", "messaging", text)
  text <- gsub("event-oriented", "events", text)
  text <- gsub("dockercontainers", "docker containers", text)
  text <- gsub("amazonwebservices", "aws", text)
  text <- gsub("amazon webservices", "aws", text)
  text <- gsub("amazon web services", "aws", text)
  text <- gsub("mobileapp ", "mobile ", text)
  text <- gsub("mobilepic", "mobile", text)
  text <- gsub("dockercon ", "docker ", text)
  text
}
|
24fd484af1c92098bb904c9a4f12f7344bd732ad
|
9d9cfce0073c28cf3b13050e4a6d3da9354fcf51
|
/man/minmaxtemp.Rd
|
dad9196d55f8bac3f1a49804afd9a2fd71e49a6d
|
[] |
no_license
|
derekhnguyen/climatePackage_esm262
|
cbf77413d32b0c8acde6513fe294418028184f91
|
5e2f4aeee68d89a2f1f330ac71cde44c717a416b
|
refs/heads/master
| 2021-04-16T07:38:06.851749
| 2020-03-26T05:59:33
| 2020-03-26T05:59:33
| 249,338,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
rd
|
minmaxtemp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/minmaxtemp.R
\name{minmaxtemp}
\alias{minmaxtemp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Minimum and Maximum Temperature
}
\description{
This function returns min and max temp of a sample df
}
\usage{
minmaxtemp(x)
}
\arguments{
  \item{x}{The input data frame of temperature observations (confirm the
    exact expected columns against the function source).}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
min and max temps
}
\author{
Derek Nguyen and Jonathan Hart
}
\examples{
minmaxtemp(df)
}
|
688a641c19d97918623fb2d4763d8464f92f903a
|
ef9acfc3a8166965b7d436e00a162f7a4b723707
|
/mineria texto.R
|
4b92dd7c1f29108c9c6624e44efdafce98ca863b
|
[
"MIT"
] |
permissive
|
oddmayo/DS-Training-DAFP
|
c1b71fd0d4679b4a2dc8c270cbccf9d9f782ed01
|
3980e546b86e5c52797691efce55be6032cefe87
|
refs/heads/master
| 2022-04-28T17:10:37.342719
| 2019-07-17T23:26:37
| 2019-07-17T23:26:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,661
|
r
|
mineria texto.R
|
# Text mining of CONPES 3920 (Colombia's Big Data policy document):
# read the PDF, build a term-document matrix, and plot word frequencies.

# Package for reading non-scanned (text-layer) PDFs
library(pdftools)
# Read the Big Data CONPES document (upload "3920.pdf" first via the
# RStudio Files pane, bottom right: Files > Upload)
texto_crudo <- pdf_text("3920.pdf")
# Keep only alphanumeric characters and whitespace
texto_crudo <- gsub(pattern = "[^[:alnum:][:space:]]", " ", texto_crudo)
# Package with text-mining utilities
library(tm)
# Turn the raw text into a corpus (one document per PDF page)
corpus <- VCorpus(VectorSource(texto_crudo))
# Remove Spanish stopwords
corpus <- tm_map(corpus, removeWords, stopwords(kind = "sp"))
# Build the term-document matrix (documents are pages here)
texto.tdm <- TermDocumentMatrix(corpus,control = list(removePunctuation = TRUE,
                                             removeNumbers = TRUE,
                                             tolower = TRUE,
                                             trimws = TRUE))
# List of all terms in the matrix
ft <- findFreqTerms(texto.tdm)
# Frequency table (terms x pages)
ft.tdm <- as.matrix(texto.tdm[ft,])
# Sum counts of each term across pages, sorted decreasing
palabras_frecuentes <- as.data.frame(sort(apply(ft.tdm, 1, sum), decreasing = TRUE))
# Rename columns so the table reads better
palabras_frecuentes <- data.frame(palabra = rownames(palabras_frecuentes) , conteo = palabras_frecuentes$`sort(apply(ft.tdm, 1, sum), decreasing = TRUE)`)
# Top 50 words
primeras <- palabras_frecuentes[1:50,]
# Meta-package bundling many data-manipulation packages
library(tidyverse)
# Point-and-click ggplot builder
library(esquisse)
# Launch the interactive plot builder (side effect: opens a gadget)
esquisser(primeras)
# Ordered bar chart of the most frequent words
ggplot(primeras) +
  aes(x = reorder(palabra,conteo) , weight = conteo) +
  geom_bar(fill = "#0d0887") +
  coord_flip() +
  theme_minimal()
# Word-cloud package
library(wordcloud2)
# Top 100 words
primeras <- palabras_frecuentes[1:100,]
# Default word cloud
wordcloud2(data = primeras)
# Custom color palette (blues)
custom_colors <- c("#005073", "#107dac", "#189ad3", "#1ebbd7", "#71c7ec")
# Word cloud with more styling
wordcloud2(primeras, size=0.7,
           color=rep_len( custom_colors, nrow(primeras)),backgroundColor = "white",shape = 'circle')
#---------------------------------#
# Bigram network construction
#---------------------------------#
# Package for tokenizing text into bigrams
library(tidytext)
# Basic text preprocessing, now in pure base R.
# The original loaded magrittr via require() solely to use the pipe --
# require() is the wrong tool for mandatory dependencies (it returns FALSE
# instead of erroring), and no extra package is needed here at all.
#
# Steps, in order (unchanged from the original): NAs -> "", transliterate
# to ASCII, blank out non-printable characters, lower-case, replace every
# character that is not a lower-case letter, '^' or whitespace (the '^'
# inside the class is literal -- quirk preserved for identical behavior),
# collapse whitespace runs, and trim the ends.
#
# @param x Character vector (may contain NA).
# @return Cleaned, lower-case character vector of the same length.
preproctext <- function(x){
  x[which(is.na(x))] <- ""
  y <- iconv(x, from = "utf-8", to = "ASCII//TRANSLIT")
  y <- gsub("[^[:print:]]", " ", y)
  y <- tolower(y)
  y <- gsub("[^[:lower:]^[:space:]]", " ", y)
  y <- gsub("[[:space:]]{1,}", " ", y)
  trimws(y)
}
# Re-preprocess the raw text (we preprocess again because a character
# vector is needed here, not the corpus object used above)
texto_limpio <- preproctext(texto_crudo)
# Remove Spanish stopwords
texto_limpio <- removeWords(texto_limpio, stopwords("sp"))
# Wrap in a tibble (almost the same as a data.frame, but required by the
# bigram tokenizer below)
texto_limpio <- tibble(texto = texto_limpio)
# All bigrams in the document
bigramas <- texto_limpio %>% unnest_tokens(bigram, texto, token = "ngrams", n = 2)
# Bigram frequencies (printed to the console, not stored)
bigramas %>% count(bigram, sort = TRUE)
# Split each bigram into its two words, one column each
bigramas_separados <- bigramas %>% separate(bigram, c("word1", "word2"), sep = " ")
# Add a column with each bigram's frequency
bigramas_conteo <- bigramas_separados %>% count(word1, word2, sort = TRUE)
# Package for building the graph object
library(igraph)
# Graph of the 50 most frequent bigrams, keeping only those seen > 2 times
bigram_graph <- bigramas_conteo[1:50,] %>% filter(n > 2) %>%
  graph_from_data_frame()
# Package for plotting the network
library(ggraph)
# Basic network plot
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
  geom_edge_link() +
  geom_node_point() +
  geom_node_text(aes(label = name), vjust = 1, hjust = 1)
# More elaborate plot (this is the graphical representation of a Markov chain)
set.seed(11234)
a <- grid::arrow(type = 'closed', length = unit(.15, "inches"))
x11()
ggraph(bigram_graph, layout = "fr") +
  geom_edge_link(aes(edge_alpha = n), show.legend = F,
                 arrow = a,linemitre = 8, end_cap = circle(.07, 'inches')) +
  geom_node_point(color = "firebrick3", size = 5) +
  geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
  ggtitle('Red de bigramas más utilizados en CONPES 3920') +
  theme_void() +
  theme(plot.title=element_text(hjust=0.5))
|
8a7b32e2e4c127bfa206be84b9b0d07add648df2
|
e207c63b517bef7fbc1496f26286e5c5ec811db5
|
/churn.r
|
e901449a32aba63486f05e534907c9d4cf222bdb
|
[] |
no_license
|
R-Avalos/test_dash
|
5866ed792856f7d871bd664959bea74eda647807
|
4c1d90ba88c607ff528160e73d5970ec129ee4f5
|
refs/heads/master
| 2021-08-23T06:11:40.946400
| 2017-12-03T20:32:17
| 2017-12-03T20:32:17
| 110,893,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,030
|
r
|
churn.r
|
# Churn Data
# Hard-coded demo data for the churn dashboard: monthly churn rates,
# lost revenue, cohort breakdowns, top churned accounts, and a
# churned-vs-retained attribute comparison. These globals are consumed
# by the (currently commented-out) plotly code below -- presumably also
# by the dashboard app; do not rename.
library(lubridate)
library(dplyr)
library(plotly)
# One entry per month of 2017; churn figures are percentages.
date <- c("2017-1-1", "2017-2-1", "2017-3-1", "2017-4-1", "2017-5-1", "2017-6-1", "2017-7-1", "2017-8-1", "2017-9-1", "2017-10-1", "2017-11-1", "2017-12-1")
total_churn <- c(3, 3, 3.3, 3.2, 3.1, 2.8, 2.9, 3, 2.7, 2.3, 2.8, 2.5)
involuntary_churn <- c(1.3, 1, .3, .25, .75, .8, .9, 1.3, .7, .5, .4, .6)
monthly_churn <- data.frame(date, total_churn, involuntary_churn)
# Parse the date strings into Date objects
monthly_churn$date <- ymd(monthly_churn$date)
### Lost Revenue Data
monthly_rev_lost <- c(3000, 15000, 8000, 7000, 9000, 6000, 5000, 7500, 6450, 5550, 4000, 3850)
lost_rev <- data.frame(date, monthly_rev_lost)
lost_rev$date <- ymd(lost_rev$date)
### Cohort Data
business_type <- c("Enterprise", "Education", "Med Business", "Small Business", "Individual")
business_churn <- c(3, 1.25, 2, 4.25, 5.5)
business_revenue <- c(250, 200, 120, 42, 10)
cohort_biz <- data.frame(business_type, business_churn, business_revenue)
# Revenue share (%) per cohort
cohort_biz$per_rev <- round(cohort_biz$business_revenue/sum(cohort_biz$business_revenue)*100,2)
# Order factor levels by revenue share so bars plot smallest-to-largest
cohort_biz$business_type <- factor(cohort_biz$business_type, levels = unique(cohort_biz$business_type)[order(cohort_biz$per_rev, decreasing = FALSE)])
colnames(cohort_biz) <- c("Cohort", "Churn %", "Rev mil", "Rev %")
# Same treatment for the geographic cohorts
location <- c("Americas", "Europe", "China", "Pacific", "India")
location_churn <- c(2.00, 5.25, 4.00, 3.15, 2.5)
location_rev <- c(240, 140, 100, 80, 62)
cohort_local <- data.frame(location, location_churn, location_rev)
cohort_local$per_rev <- round(cohort_local$location_rev/sum(cohort_local$location_rev)*100,2)
cohort_local$location <- factor(cohort_local$location, levels = unique(cohort_local$location)[order(cohort_local$per_rev, decreasing = FALSE)])
colnames(cohort_local) <- c("Cohort", "Churn %", "Rev mil", "Rev %")
### Top 5 Churned
churn_name <- c("Kitten Armada Co.", "Llama Farm", "Shady ICO", "British Tacos", "Drone Coffee Delivery")
churn_rev <- c(900, 500, 400, 150, 90)
churn_users <- c(600, 40, 100, 4, 25)
# churn_age_months <- c(36, 30, 2, 1, 8)
top5 <- data.frame(churn_name, churn_rev, churn_users)
colnames(top5) <- c("Company", "Revenue", "Users")
#### How are churned customers different from retained?
# diff_avg_churned: percentage-point difference of churned accounts from
# the median retained account, per attribute.
cohort_attributes <- c("ARPU", "Users", "Age", "MAU", "DAU", "Sticky", "<5min Meet", "Faults", "Ticket")
diff_avg_churned <- c(1.25, -5, -10, -5, -25, -35, 5, 16, 5)
diff_df <- data.frame(cohort_attributes, diff_avg_churned)
# Order factor levels by the difference so the bar chart is sorted
diff_df$cohort_attributes <- factor(diff_df$cohort_attributes, levels = unique(diff_df$cohort_attributes)[order(diff_df$diff_avg_churned, decreasing = FALSE)])
# ### Barplot Diff ####
# plot_ly(diff_df) %>%
# add_trace(x = ~cohort_attributes, y = ~diff_avg_churned,
# name = "churned",
# type = "bar",
# marker = list(color = "rgba(255, 0, 0, 0.5)")) %>%
# layout(paper_bgcolor = "transparent",
# plot_bgcolor = "transparent",
# showlegend = FALSE,
# xaxis = list(title = "",
# title = "",
# tickmode = "array",
# type = "marker",
# tickfont = list(family = "serif", size = 14),
# ticks = "outside",
# zeroline = FALSE),
# yaxis = list(title ="",
# ticksuffix = "%",
# tickfont = list(family = "serif", size = 14)
# ),
# annotations = list(
# list(xref = "x", yref = "y",
# x = 2.5,
# y = max(diff_df$diff_avg_churned),
# text = "<b>Churned Difference</b><br><span style='color:black;'>Churned difference from Median Retained Accounts</span>",
# showarrow = FALSE,
# align = "left")
# )
# )
#
#
#
#
# ### Barplot biz type ####
# plot_ly(cohort_biz) %>%
# add_trace(x = ~`Rev %`, y = ~Cohort,
# name = "Revenue",
# type = "bar",
# orientation = "h",
# marker = list(color = "rgba(58, 71, 80, 0.25)")) %>%
# add_trace(x = ~`Churn %`, y = ~Cohort,
# name = "Churn",
# type = "bar",
# orientation = "h",
# marker = list(color = "rgba(255, 0, 0, 0.5)")) %>%
# add_trace(x = ~`Churn %`, y = ~Cohort,
# type = "scatter",
# mode = "text",
# text = paste0(cohort_biz$`Churn %`, "%"),
# textposition = "right",
# textfont = list(color = "rgba(255, 0, 0, 1)",
# family = "sans serif",
# size = 14)
# ) %>%
# add_trace(x = ~`Rev %`, y = ~Cohort,
# type = "scatter",
# mode = "text",
# text = ~`Rev %`,
# textposition = "right",
# textfont = list(color = "rgba(58, 71, 80, 1)",
# family = "sans serif",
# size = 14)
# ) %>%
# layout(barmode = 'overlay',
# paper_bgcolor = "transparent",
# plot_bgcolor = "transparent",
# showlegend = FALSE,
# xaxis = list(title = "",
# title = "",
# tickmode = "array",
# ticksuffix = "%",
# type = "marker",
# tickfont = list(family = "serif", size = 14),
# ticks = "outside",
# zeroline = FALSE),
# yaxis = list(title ="",
# tickfont = list(family = "serif", size = 14)
# ),
# annotations = list(
# list(xref = "x", yref = "y",
# x = max(cohort_local$`Rev %`),
# y = 1,
# text = "<b>Cohort Breakdown</b><br><span style='color:red;'>30 Day Churn %</span><br><span style='color:black;'>Total Revenue %</span>",
# showarrow = FALSE,
# align = "right")
# )
# )
# #####
#
# ### Lost Revenue Plot ####
# plot_ly(lost_rev, x = ~date) %>%
# add_trace(y = ~monthly_rev_lost,
# type = "scatter",
# mode = "lines",
# line = list(color = "red"),
# hoverinfo = 'text',
# text = ~paste0("<span style='color:grey'>Revenue Lost to Churn </span><b>$",
# prettyNum(monthly_rev_lost,big.mark = ","),
# "</b></br>",
# "</br>",
# "<span style='color:grey'>Date </span>",
# date
# )
# ) %>%
# layout(title = "",
# paper_bgcolor = "transparent",
# plot_bgcolor = "transparent",
# margin = list(r = 20),
# hoverlabel = list(font = list(color = "black"),
# bgcolor = "white",
# bordercolor = "white"),
# showlegend = FALSE,
# xaxis = list(showgrid = FALSE,
# title = "",
# tickmode = "array",
# type = "marker",
# autorange = TRUE,
# tickfont = list(family = "serif", size = 10),
# ticks = "outside"
# ),
# yaxis = list(showgrid = FALSE,
# range = c(0, max(lost_rev$monthly_rev_lost)+200),
# title = "",
# tickmode = "array",
# tickpreffix = "$",
# type = "marker",
# tickfont = list(family = "serif", size = 10),
# ticks = "outside",
# zeroline = FALSE
# ),
# annotations = list(
# list(xref = "x", yref = "y",
# x = min(lost_rev$date) + 30,
# y = max(lost_rev$monthly_rev_lost) + 100,
# text = "<b>Revenue Lost to Churn</b>",
# showarrow = FALSE,
# align = "left")
# )
# )
#
#
#
# ### Churn Plot #####
# plot_ly(monthly_churn, x = ~date) %>%
# add_trace(y = ~total_churn,
# type = "scatter",
# mode = "lines",
# line = list(color = "red"),
# hoverinfo = 'text',
# text = ~paste0("<span style='color:grey'>Total Churn Rate </span><b>",
# total_churn,
# "</b>%</br>",
# "</br>",
# "<span style='color:grey'>Date </span>",
# date
# )
# ) %>%
# add_trace(y = ~involuntary_churn, name = "Involuntary Churn",
# type = "scatter",
# mode = "lines",
# line = list(color = "blue"),
# text = ~paste0("<span style='color:grey'>Involuntary Churn </span><b>",
# involuntary_churn,
# "</b>%</br>"
# )
# ) %>%
# layout(title = "",
# paper_bgcolor = "transparent",
# plot_bgcolor = "transparent",
# margin = list(r = 20),
# hoverlabel = list(font = list(color = "black"),
# bgcolor = "white",
# bordercolor = "white"),
# showlegend = FALSE,
# xaxis = list(showgrid = FALSE,
# title = "",
# tickmode = "array",
# type = "marker",
# autorange = TRUE,
# tickfont = list(family = "serif", size = 10),
# ticks = "outside"
# ),
# yaxis = list(showgrid = FALSE,
# range = c(0, max(monthly_churn$total_churn)+2),
# title = "",
# tickmode = "array",
# ticksuffix = "%",
# type = "marker",
# tickfont = list(family = "serif", size = 10),
# ticks = "outside",
# zeroline = FALSE
# ),
# annotations = list(
# list(xref = "x", yref = "y",
# x = min(monthly_churn$date)+30,
# y = max(monthly_churn$total)+2,
# text = "Customer Churn last 12 Months<br><span style='color:red;'>Total Churn</span><br><span style='color:blue;'>Involuntary Churn</span>",
# showarrow = FALSE,
# align = "left")
# )
# )
#
|
8fe78e12a847fbecc7be7b778c131851fd6d219a
|
fc12495e1457a8154c1ad0e8b439c914c1afca57
|
/man/exer_4_13.Rd
|
9d0276682557e72879e09087ba5167ecbcf8e40f
|
[] |
no_license
|
bayesball/tsub
|
535c0e85a637c01224a6938fa6699f979e2d54da
|
563eded690c2ea219edf213b8611141e8d7a54f7
|
refs/heads/master
| 2021-01-11T03:19:15.491826
| 2016-10-17T02:09:35
| 2016-10-17T02:09:35
| 71,090,868
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 501
|
rd
|
exer_4_13.Rd
|
\name{exer_4_13}
\alias{exer_4_13}
\docType{data}
\title{Season batting stats for two players}
\description{
Batting statistics of Jose Altuve and Nelson Cruz for the 2014 baseball season.
}
\usage{
exer_4_13
}
\format{
A data frame.
\describe{
\item{Player}{Player}
\item{AB}{At-bats}
\item{H}{Hits}
\item{X2B}{Doubles}
\item{X3B}{Triples}
\item{HR}{Home runs}
\item{BB}{Walks}
\item{HBP}{Hit by pitch}
\item{SF}{Sacrifice flies}
}
}
\source{Lahman database}
\keyword{datasets}
|
0b7ca5b05c6ebb5b0ac826f756987811d0a1ada3
|
b16a5d56c2281543636ddc2b3cd15a61a94de7b0
|
/plot/linear_fit.r
|
1d7225b8d50e4bd058a2cdf14bdbbc838339b839
|
[
"Apache-2.0"
] |
permissive
|
mschubert/ebits
|
b18bccde6198cb938c04be3704e9fdcff8e5be7d
|
e65b3941b44174e7267ee142387ffacafca11e53
|
refs/heads/master
| 2023-07-23T09:09:47.175229
| 2023-07-07T09:36:15
| 2023-07-07T09:36:15
| 18,678,011
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,513
|
r
|
linear_fit.r
|
.b = import('../base')
.st = import('../stats')
#.spf = import('../stats/process_formula')
#' Plots a given data.frame as a linear fit with optional subsets
#'
#' @param df data.frame holding at least the columns specified by x, y, and label
#' @param x Column to be plotted on the horizontal axis
#' @param y Column to be plotted on the vertical axis
#' @param label Column of label to be used for each sample; indicates subsets
#' @param drop Whether to drop unused factor levels in `label`
#' @param pt.size Size of the points indicating the samples
#' @param fit.size Width of the line(s) used for indicating the fit
linear_fit = function(formula, subsets=NULL, data=parent.frame(),
                      drop=TRUE, pt.size=4, fit.size=5) {
    #TODO: formula length=3, [[1]]="~"
    #TODO: if subsets is single char, take column in data
    # Evaluate both sides of `formula` in `data`; as.matrix so we can
    # inspect the number of columns below.
    x = as.matrix(base::eval(formula[[3]], envir=data))
    y = as.matrix(base::eval(formula[[2]], envir=data))
    # allow either subsets or multiple columns in one matrix
    if (((ncol(x) > 1) + (ncol(y) > 1) + (!is.null(subsets))) > 1)
        stop("can only take multiple cols in one matrix or subsets")
    if (ncol(x) > 1) {
        # multiple x columns: one subset per column, y recycled to match
        subsets = c(sapply(colnames(x), function(i) rep(i, nrow(x))))
        y = rep(y, ncol(x))
        x = c(x)
    } else if (ncol(y) > 1) {
        # multiple y columns: one subset per column, x recycled to match
        subsets = c(sapply(colnames(y), function(i) rep(i, nrow(y))))
        x = rep(x, ncol(y))
        y = c(y)
    } else if (is.null(subsets))
        # BUG FIX: the original plain `else` branch unconditionally
        # overwrote a user-supplied `subsets` with all-1s, making the
        # argument dead in the single-column case; only apply the
        # single-subset default when the caller did not provide one.
        subsets = rep(1, nrow(x))
    # BUG FIX: the stats module is bound to `.st` at the top of this file;
    # the original called the undefined name `st`.
    # Keep only fits whose slope is significant at the 5% level.
    result = .st$lm(y ~ x, subsets=subsets) %>%
        filter(term == "x" & p.value < 0.05)
    df = data.frame(x=x, y=y, subsets=subsets) %>%
        filter(subsets %in% result$subset)
#    print(result$p.value)
#    if (length(unique(df[[label]])) > 1)
#        df = df[df[[label]] %in% result$subset[result$p.value<0.05],]
#    rsq = round(result$main*1000) / 10
#    if (drop)
#        df$tissue = sapply(as.character(df$tissue), function(t) paste(t, "-", rsq[t], "%"))
#    if (!drop && !is.na(only) && length(only)==1)
#        tit = paste(only, "-", rsq[only], "%")
#    else
#        tit = paste("Correlation between", pathway, "activity and", drug, "response")
    # Scatter of samples with one linear fit per subset (no confidence band).
    ggplot(df, aes(x=x, y=y, colour=subsets)) +
        geom_smooth(aes(fill=subsets), size=fit.size, method=stats::lm, se=F, na.rm=T, alpha=0.1) +
        geom_point(aes(fill=subsets), pch=21, size=pt.size, colour="black", alpha=1, na.rm=T) +
        scale_fill_discrete(drop=drop) +
        scale_colour_discrete(drop=drop)
}
|
d275463afba3c2cbe5622b4bb7dd1e7474748206
|
1e09283f2340edc0d7937b42ee4de960d7d0525e
|
/man/dateForm.Rd
|
0431b59322d8753b6e040e12b23eda2edf5cdfca
|
[
"Apache-2.0"
] |
permissive
|
JDOsborne1/megametadata
|
a8b2139fdcc5b52788483fcf0cc1fda4b60aabff
|
0b5c0e97b499a0f1495a48a05593acbb0fad167f
|
refs/heads/master
| 2020-05-07T13:16:26.670303
| 2020-03-29T16:35:03
| 2020-03-29T16:35:03
| 180,542,223
| 0
| 0
|
NOASSERTION
| 2020-03-29T16:35:05
| 2019-04-10T08:55:17
|
R
|
UTF-8
|
R
| false
| true
| 320
|
rd
|
dateForm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meta_extract.R
\name{dateForm}
\alias{dateForm}
\title{Provisional date format checking function}
\usage{
dateForm(vect)
}
\arguments{
\item{vect}{the vector in question}
}
\value{
The result of checking \code{vect}'s date format (this roxygen2 stub was
left empty; confirm the return semantics against the function source).
}
\description{
Provisional date format checking function
}
|
d76ef15efdb13eacc7ebf84daba8f4f94ee31e8d
|
671ad9a341b120e24eb1b58313298c004e5a85f5
|
/ImportanceSampling/ImportanceSampling.R
|
c726e7f0d4de72d483a2fad6dce6483781270c06
|
[] |
no_license
|
akirahg/CompuStat
|
2eae31159f87a5c7b52ede7e1b720ad25e2bacae
|
aa1e5ada685ef477bbf9f43f7e45cb2c9de5cb52
|
refs/heads/master
| 2020-12-24T15:40:38.643110
| 2015-10-07T05:31:00
| 2015-10-07T05:31:00
| 42,971,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,349
|
r
|
ImportanceSampling.R
|
# Importance sampling helper script and functions
# Integrand for the toy problem: the Exp(m) density m * e^(-m*x),
# evaluated elementwise at x (rate defaults to 1).
integrando <- function(x, m = 1) {
  density_at_x <- exp(-m * x) * m
  density_at_x
}
# Compare three Monte Carlo estimators of the integral of FUN over [0, 2]
# (with the default FUN, the true value is 1 - exp(-2*m)):
#   1. plain Monte Carlo with Uniform(0, 2) draws,
#   2. importance sampling with an exponential truncated to [0, 2],
#      sampled by inversion,
#   3. importance sampling with a Beta(1, 2*m) proposal.
# Returns a one-row data.frame with each estimate, its (1 - alpha)
# normal-approximation confidence interval, and the analytic value.
# NOTE(review): arguments `a` and `b` are accepted but never used -- the
# integration limits are hard-coded to 0 and 2 throughout.
MethodComparison <- function(nsim, m, a=0, b=2, FUN=integrando, alpha=.05){
  # Two-sided normal quantile for the CI half-width
  quant = qnorm(alpha/2, lower.tail = FALSE)
  # Estimate with Uniform(0, 2) draws: 2 * mean(FUN(U))
  U <- runif(nsim,0,2)
  Eval.Unif <- 2*sapply(U,FUN)
  Estim.Unif <-mean(Eval.Unif)
  S2.Unif <- var(Eval.Unif)
  lu.Unif <- Estim.Unif + sqrt(S2.Unif/nsim)*quant
  li.Unif <- Estim.Unif - sqrt(S2.Unif/nsim)*quant
  # Estimate with the truncated exponential (inverse-CDF sampling on [0, 2])
  U2 <- runif(nsim,0,1)
  ExpTruncada <- ((-1/m)*(log(1-(U2*(1-exp(-2*m))))))
  # NOTE(review): the importance weight divides by (1 - exp(-m*x)) rather
  # than by the truncated-exponential density m*exp(-m*x)/(1-exp(-2*m));
  # verify the derivation before relying on this estimator.
  Eval.ExpTruncada <- FUN(ExpTruncada,m)*((1-exp(-2*m))/(1-exp(-m*ExpTruncada)))
  Estim.ExpTruncada <-mean(Eval.ExpTruncada)
  S2.ExpTruncada <- var(Eval.ExpTruncada)
  lu.ExpTruncada <- Estim.ExpTruncada + sqrt(S2.ExpTruncada/nsim)*quant
  li.ExpTruncada <- Estim.ExpTruncada - sqrt(S2.ExpTruncada/nsim)*quant
  # Estimate with a Beta(1, 2*m) proposal: weight = integrand / proposal density
  Beta <- rbeta(nsim,shape1=1, shape2=2*m)
  Eval.Beta <- FUN(Beta,m)/dbeta(Beta,shape1=1,shape2=2*m)
  Estim.Beta <-mean(Eval.Beta)
  S2.Beta <- var(Eval.Beta)
  lu.Beta <- Estim.Beta + sqrt(S2.Beta/nsim)*quant
  li.Beta <- Estim.Beta - sqrt(S2.Beta/nsim)*quant
  # Analytic value of the integral for the default integrand
  real.value <- 1-exp((-2)*m)
  results <- data.frame(Nsim=nsim,LI.Unif=li.Unif,Estim.Unif=Estim.Unif,LU.Unif=lu.Unif,
                        LI.Exp=li.ExpTruncada,Estim.Exp=Estim.ExpTruncada,LU.Exp=lu.ExpTruncada,
                        LI.Beta=li.Beta,Estim.Beta=Estim.Beta,LU.Beta=lu.Beta,Real.Value=real.value)
  return(results)
}
# Run MethodComparison() for simulation sizes 1000, 2000, ..., 10000 and
# stack the ten one-row results into a single data frame.
#
# Replaces ten copy-pasted calls and a dead `full.results <- data.frame()`
# initialization; the sizes are iterated in increasing order, which keeps
# the same RNG call sequence as the original repeated-rbind version.
#
# @param m rate parameter forwarded to MethodComparison().
# @return data frame with one row per simulation size (column Nsim).
FullDataGenerator <- function(m){
sizes <- seq(1000L, 10000L, by = 1000L)
do.call(rbind, lapply(sizes, function(n) MethodComparison(n, m)))
}
# Absolute estimation error of each importance-sampling method, relative to
# the closed-form value, for every simulation size produced by
# FullDataGenerator().
ErrorGenerator <- function(m){
sims <- FullDataGenerator(m)
truth <- sims$Real.Value
data.frame(Nsim = sims$Nsim,
Error.Unif = abs(sims$Estim.Unif - truth),
Error.Exp = abs(sims$Estim.Exp - truth),
Error.Beta = abs(sims$Estim.Beta - truth))
}
|
1758fa09f33260e840f3764f414fa58a15e8cb4d
|
fcc13976b8952fedec00b0c5d4520edc6d5103b9
|
/R/multiVCtrl.R
|
3a86642804e378dc7fad337b8c75d94074b9a50d
|
[] |
no_license
|
anngvu/DIVE
|
851173b4515ab4fd8c26e171158aa17f079785db
|
e80d254fc4be2c4a3c12f4a1b4507beff3fe3663
|
refs/heads/master
| 2023-07-26T00:30:07.924714
| 2021-09-08T15:04:34
| 2021-09-08T15:04:34
| 173,828,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,358
|
r
|
multiVCtrl.R
|
#' Shiny module UI for controlling multi-dataset views
#'
#' Builds the control panel used to source and select high-dimensional
#' datasets for \code{\link{xVUI}} views. Three sourcing routes can be
#' exposed independently:
#' \enumerate{
#'   \item Selecting from available pre-processed datasets.
#'   \item User-uploaded data.
#'   \item A beta (least-supported) method of retrieving datasets from GEO.
#' }
#' Any route can be hidden from the UI by setting its flag to \code{FALSE}.
#'
#' @param id Character ID for specifying namespace, see \code{shiny::\link[shiny]{NS}}.
#' @param menu Logical flag, whether to allow a menu for loading stored datasets.
#' @param upload Logical flag, whether to allow data upload.
#' @param GEO Logical flag, whether to allow pulling data from GEO (beta).
#' @param maxItems Integer representing the max number of tracks that can be selected (displayed).
#' @export
multiVCtrlUI <- function(id, menu = TRUE, upload = TRUE, GEO = TRUE, maxItems = 3L) {
ns <- NS(id)
# Each optional control is NULL when its flag is FALSE; shiny tag builders
# silently drop NULL children, so the panel only shows enabled routes.
menu_ui <- if (menu) {
div(class = "ui-inline",
selectizeInput(ns("dataset"), HTML("<strong>Available datasets</strong>"),
choices = NULL, selected = NULL, multiple = TRUE, width = "500px",
options = list(placeholder = paste("select to view (max of", maxItems, "concurrent tracks)"),
maxItems = maxItems)))
}
upload_ui <- if (upload) div(class = "ui-inline", br(), actionButton(ns("upload"), "Upload my data"))
geo_ui <- if (GEO) div(class = "ui-inline", br(), actionButton(ns("getGEO"), HTML("Source from GEO <sup>beta</sup>")))
tags$div(class = "multiVCtrlUI-panel", id = ns("multiVCtrlUI"),
menu_ui, upload_ui, geo_ui)
}
#' Shiny module server for controlling multi-dataset views
#'
#' Implement control hub logic that provides data and parameters for \code{\link{xVServer}},
#' \code{\link{geneVServer}} and \code{\link{selectVServer}}
#'
#' The server logic handles sourcing of large expression datasets with three different methods:
#' \enumerate{
#' \item Selecting from available pre-processed datasets.
#' \item User-uploaded data.
#' \item A beta (least-supported) method of retrieving datasets from GEO.
#' }
#' The data in \code{cdata} is supposed to be a phenotype or clinical
#' feature that one usually tries to correlate with expression data and can be numeric or categorical.
#' The module handles upload of phenotype/clinical data,
#' using a mutable version of \code{cdata} that appends user uploaded data.
#'
#' @param id Character ID for specifying namespace, see \code{shiny::\link[shiny]{NS}}.
#' @param hdlist A list of matrices representing high dimensional datasets; the names are used for \code{choices}.
#' @param choices Selection choices are by default created from automatic parsing of `hdlist`. However, a manual list can be given,
#' which should be appropriate for passing to \code{shiny::\link[shiny]{selectizeInput}}.
#' @param cdata A \code{data.table} of characteristics data, commonly phenotype or clinical data.
#' @param key Name of column that contains IDs in \code{cdata} matching sample IDs in \code{hdlist} datasets. Defaults to "ID".
#' Note that column should already be of class character.
#' @param preselect Optional, pre-selected phenotype or clinical variables from \code{cdata}.
#' If is \code{NULL} (not recommended for most cases), the user can dynamically render as many datasets views as they can source.
#' @inheritParams dataUploadServer
#' @return A reactive values list containing the data matrix
#' for the parameter \preformatted{hdata} of the \code{\link{multiVServer}} module,
#' as well as parameters for \code{\link{geneVServer}} and \code{\link{selectVServer}}.
#' @import shiny
#' @export
multiVCtrlServer <- function(id,
hdlist,
choices = DIVE::hdlistchoicesMake(hdlist),
cdata,
key = "ID",
preselect = NULL,
checkFun = NULL,
informd = system.file("info/ht_upload.Rmd", package = "DIVE")) {
moduleServer(id, function(input, output, session) {
# cdata key should be character for later merging with hdata
cdata[[key]] <- as.character(cdata[[key]])
# `view` is the reactive return value; `inview` is plain (non-reactive)
# closure state tracking which dataset names are currently displayed.
view <- reactiveValues(cdata = cdata, hdlist = hdlist, hdata = NULL, vselect = preselect)
inview <- c()
updateSelectizeInput(session, "dataset", choices = choices, selected = NULL)
# Parse a URL request for a specific dataset
observe({
query <- parseQueryString(session$clientData$url_search)
if(!is.null(query[["dataset"]])) updateSelectizeInput(session, "dataset", selected = query[["dataset"]])
})
# Handle dataset selection or de-selection ------------------------------------------------------------------#
# view$hdata is set to a named list: list elements hold a matrix for a
# dataset being added, or NULL for a dataset being removed. Names are
# "i" + index of the dataset within view$hdlist.
observe({
if(!length(input$dataset)) { # everything has been cleared from the global dataset selection
dataset <- stats::setNames(object = list(NULL), # set return to NULL
nm = paste0("i", which(names(view$hdlist) %in% inview)))
inview <<- c()
} else {
# Diff the selection against what is in view to find the one dataset
# that was added or removed since the last run.
dsname <- setdiff(input$dataset, inview)
if(length(dsname)) { # if more in selection than in view, view needs to add new dataset
dataset <- stats::setNames(object = view$hdlist[dsname],
paste0("i", which(names(view$hdlist) %in% dsname)))
} else { # a dataset needs to be removed from view
dsname <- setdiff(inview, input$dataset)
dataset <- stats::setNames(object = list(NULL),
paste0("i", which(names(view$hdlist) %in% dsname)))
}
inview <<- isolate(input$dataset)
}
view$hdata <- dataset
})
# -- packaging dataset/adding to selection -----------------------------------------------------------------#
# Transposes the incoming matrix, appends it to view$hdlist, registers it
# under `selectgroup` in the (closure-mutated) `choices`, and selects it.
addDataToSelection <- function(dataset, label, selectgroup) {
dataset <- t(dataset)
dataset <- stats::setNames(list(dataset), label)
view$hdlist <- c(view$hdlist, dataset)
choices[[selectgroup]] <<- c(choices[[selectgroup]], list(label))
updateSelectizeInput(session, "dataset", choices = choices, selected = c(input$dataset, label))
}
# -- handling user-uploaded data --------------------------------------------------------------------------#
udata <- dataUploadServer("upload")
observeEvent(input$upload, {
showModal(modalDialog(title = "Upload my data",
dataUploadUI(session$ns("upload"), label = NULL),
includeMarkdown(informd),
footer = modalButton("Cancel")
))
})
# Uploaded files are dispatched on the presence of the `key` column:
# with it, the file is phenotype/clinical data merged into view$cdata;
# without it, it is treated as a high-throughput matrix.
observeEvent(udata(), {
dataset <- udata() # check whether uploaded expression data or phenodata
if(key %in% names(dataset)) {
# phenodata -> check and modify column names if necessary
dataset <- merge(cdata, dataset, by = key, all = T)
view$cdata <- dataset
removeModal()
} else {
# high-throughput processing
filename <- attr(dataset, "filename")
# If filename is same as something in the selection, upload will replace that object (!)
filename <- paste0("userdata_", filename)
dataset <- as.matrix(dataset, rownames = 1)
addDataToSelection(dataset, label = filename, selectgroup = "Uploaded")
removeModal()
}
})
# -- handling GEO data -----------------------------------------------------------------------------------#
GEOdata <- getGEOServer("GEO")
observeEvent(input$getGEO, {
showModal(modalDialog(title = "Get data from GEO",
getGEOInput(session$ns("GEO")),
footer = modalButton("Cancel")
))
})
# When GEO data is pulled successfully, GEOdata$return changes from NULL to TRUE
observeEvent(GEOdata$return, {
addDataToSelection(GEOdata$eset, label = GEOdata$accession, selectgroup = "GEO")
if(!is.null(GEOdata$pData)) {
pData <- GEOdata$pData
# add key column to pData for merge even though the samples can be
# unrelated and there might not be anything to merge upon
for(col in names(pData)) pData[[col]] <- factor(pData[[col]])
pData[[key]] <- rownames(pData)
data <- merge(cdata, pData, by = key, all = T)
view$cdata <- data
# Pre-select the first GEO phenotype variable for downstream views.
view$vselect <- names(pData)[1]
} else {
view$vselect <- NULL
}
updateSelectizeInput(session, "dataset", choices = choices, selected = GEOdata$accession)
}, ignoreInit = TRUE)
return(view)
})
}
# TO-DO
# Check fun, returns notification message
# NOTE(review): stub -- takes no arguments and performs no checks yet.
# The first string literal is evaluated and discarded; only the last
# (multi-line) string is returned, since an R function returns its final
# expression.
xpMatrixCheck <- function() {
# check that IDs are same as main IDs
"Detected that at least some are not nPOD samples."
"Detected that expression values are not annotated to gene Entrez IDs.
Please use the Custom Selection when filtering with your data.
Refer to upload guide for more details"
}
|
28362ac7cbe8a01c176fb0e1795282d321e11922
|
edb23019571b2e8a8ad92e61eae7e9967d1308ce
|
/02 Data Wrangling/df_alert_caffeine.R
|
23a9dea435f7c01c04ee0a781e2953f53742168a
|
[] |
no_license
|
andreanc223/DV_FinalProject
|
9f35ca246bfb716b2a9aca56d225a9a9ab03f01a
|
534f27268631f6d6a25733baa9ca0b51f43419c0
|
refs/heads/master
| 2016-09-06T01:41:41.609881
| 2015-05-13T19:07:32
| 2015-05-13T19:07:32
| 34,625,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,188
|
r
|
df_alert_caffeine.R
|
# Join sleep-alertness survey records with participant background records
# (both pulled as JSON from a remote Oracle-backed REST endpoint) and plot
# average caffeine intake against self-reported alertness level.
#
# NOTE(review): database credentials are hard-coded in the request URLs --
# consider moving them out of source control (e.g. environment variables).
library(RCurl)
library(dplyr)
library(tidyr)
library(ggplot2)
library(jsonlite)
# NOTE(review): require(jsonlite) is redundant -- the package was just
# attached with library() above.
require(jsonlite)
# Fetch both tables from the REST endpoint and flatten the JSON payloads.
df_sleepalert <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from tsleepalert"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_thc359', PASS='orcl_thc359', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE)))
df_background <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from tbackground"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_thc359', PASS='orcl_thc359', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE)))
# Join the two tables on participant ID.
dfd <- inner_join(df_sleepalert, df_background, by="ID_NUMBER")
#Average Caffeine Level vs Alertness
dfe <- dfd %>% select(ID_NUMBER, ALERTNESS, CAFFEINE_AMOUNT) %>% group_by(ALERTNESS) %>% summarise(avg=mean(CAFFEINE_AMOUNT))
g <- ggplot(dfe, aes(x=ALERTNESS, y=avg)) + geom_point()
g + theme(legend.position="none") + labs(x="Alertness Level", y="Average Caffeine Level", title="Average Caffeine Level vs Alertness")
|
3506b3bbd52966c8dbc1bdb2c2f45f4523f70034
|
48b54e972f82a4d37778d0d01320f0198ca45742
|
/server.R
|
8d9dc325963f3778f1bf0f5683febccce3a0472a
|
[] |
no_license
|
DrRoad/AMShiny
|
e2cf59376770c227cbbd61ac3e5ecb41535f3410
|
12ddd667f37bb6238b947d5167d2dfb718a89779
|
refs/heads/master
| 2020-04-25T13:42:20.146602
| 2018-10-20T19:27:43
| 2018-10-20T19:27:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,659
|
r
|
server.R
|
source('./func/am_helper.R')
source('./func/shiny_helper.R')
# Shared palette for the allocation pie charts (six asset classes).
my_colors = brewer.pal(6, "Blues")
# Server for an asset-allocation app: renders static pages and theory plots,
# keeps six portfolio-weight sliders mutually consistent (summing to 1),
# backtests the chosen allocation, and compares it with benchmark and
# efficient-frontier portfolios.
shinyServer(function(input, output, session){
###############
## Static html pages
###############
output$disclaimer = renderUI(includeHTML("./html/disclaimer.html"))
output$abt = renderUI(includeHTML("./html/about.html"))
output$measures= renderUI(withMathJax(includeHTML("./html/measures.html")))
###############
## Theory Page
###############
# Render graphs for Theory part (ggplot comes from global.R)
output$graph1 =renderPlotly(g1)
output$graph2 =renderPlotly(g2)
output$graph3 =renderPlotly(g3)
output$graph4 =renderPlotly(g4)
###############
## Allocation Page
###############
#Weights (make sure that sliders are mutually dependent and weights add up to 1)
# Initialize portfolio weights
port_weight = reactiveValues(weight=rep(1/6, 6)) # naive diversification
# If any of the sliders change, then recalculate other weight weights to satisfy sum to 1 constraint
# Each observer suspends the whole observer list while it rebalances the
# weights (suspendMany/resumeMany from shiny_helper.R), so that the slider
# updates it triggers do not re-enter these observers recursively.
observers = list(
observeEvent(input$p1,
{
suspendMany(observers) #This function comes from shinyhelper.R
port_weight$weight = updateweight(port_weight$weight, input$p1, 1)
resumeMany(observers) #This function comes from shinyhelper.R
}
),
observeEvent(input$p2,
{
suspendMany(observers)
port_weight$weight = updateweight(port_weight$weight, input$p2, 2)
resumeMany(observers)
}
),
observeEvent(input$p3,
{
suspendMany(observers)
port_weight$weight = updateweight(port_weight$weight, input$p3, 3)
resumeMany(observers)
}
),
observeEvent(input$p4,
{
suspendMany(observers)
port_weight$weight = updateweight(port_weight$weight, input$p4, 4)
resumeMany(observers)
}
),
observeEvent(input$p5,
{
suspendMany(observers)
port_weight$weight = updateweight(port_weight$weight, input$p5, 5)
resumeMany(observers)
}
),
observeEvent(input$p6,
{
suspendMany(observers)
port_weight$weight = updateweight(port_weight$weight, input$p6, 6)
resumeMany(observers)
}
)
)
# If the weights change, update the sliders
output$p1ui = renderUI({
wghtsliderInput("p1", port_weight$weight[1], label = "S&P 500") #This function comes from shinyhelper.R
})
output$p2ui = renderUI({
wghtsliderInput("p2", port_weight$weight[2], label = "Europe Stocks")
})
output$p3ui = renderUI({
wghtsliderInput("p3", port_weight$weight[3], label = "Emerging Market Stocks")
})
output$p4ui = renderUI({
wghtsliderInput("p4", port_weight$weight[4], label = "US. Treasury")
})
output$p5ui = renderUI({
wghtsliderInput("p5", port_weight$weight[5], label = "US. Corporate Bonds")
})
output$p6ui = renderUI({
wghtsliderInput("p6", port_weight$weight[6], label = "Real Estate")
})
#Date slider
#If min date and max date are the same - reset the slider
observeEvent(input$date_range,{
if(input$date_range[1] == input$date_range[2]){
updateSliderTextInput(session,"date_range",selected = c(date_choices[1],date_choices[length(date_choices)]))
}
})
#Allocation pie chart
output$graph5 = renderPlotly({
alloc = data.frame(wght = port_weight$weight, asset = c("SP500","EuropeStocks","EMStocks","Treasury","CorpBond","RealEstate"))
g5 = plot_ly(alloc, labels = ~asset, values = ~wght, type = 'pie',
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#000'),
hoverinfo = 'text',
text = ~paste(round(wght,4)*100, ' %'),
marker = list(colors = my_colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = FALSE, width=250, height=250) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 0, l = 0, t = 0))
g5
})
#############################################
# Perform backtesting
# Functions are in shiny_helper.R
#############################################
# Backtest data
bt_data = reactive({bt_port(df, as.Date(input$date_range[1]), as.Date(input$date_range[2]), port_weight$weight, input$rebalance)})
# Optimal portfolio data
# Computes two efficient-frontier allocations matching the backtested
# portfolio: one with the same annualized return, one with the same risk.
opt_weights = reactive({
#Calculate target risk and return
bt_df = bt_data()
target_ret = mean(bt_df$Portfolio) * 250
target_risk = sd(bt_df$Portfolio) * sqrt(250)
#Extract dataframe for dates
from = as.Date(input$date_range[1])
to = as.Date(input$date_range[2])
df_tmp = df %>% rownames_to_column("date") %>%
filter(as.Date(date)>=from & as.Date(date) <= to) %>% column_to_rownames("date")
# Calculate inputs for optimization
returns = xts(df_tmp, order.by = as.Date(row.names(df_tmp)))
mean_ret = apply(df_tmp, 2, mean) * 250
cov_matrix = cov(df_tmp) * 250
#Find optimal weights
#opt_w_ret = findEfficientFrontier.Return(returns, target_ret)
opt_w_ret = findEfficientFrontier.ReturnALT(mean_ret, cov_matrix, target_ret)
opt_w_risk = findEfficientFrontier.Risk(mean_ret, cov_matrix, target_risk)
#Return a dataframe
opt_df = data.frame(OptRet = opt_w_ret, OptRisk = opt_w_risk)
return (opt_df)
})
#Plot backtest compound return
output$graph6 = renderPlotly({
input$go
isolate({ ### To let weights settle
bt_df = bt_data()
#Calculate compound return
bt_df = bt_df %>%
gather(key="Asset", value="Return", -date) %>%
group_by(Asset) %>%
arrange(date) %>%
mutate(cumRet = cumprod(1+Return) - 1) %>%
select(date, Asset, cumRet) %>%
spread(key=Asset, value=cumRet)
#Plot
plot_ly(bt_df, x = ~date, y = ~Portfolio, type = "scatter", mode = "line", name = "Portfolio",
line = list(color = "Steelblue3", width = 2), width = 700, height = 400) %>%
add_trace(y= ~SP500, name = "SP500",
line = list(color = "black", width = 2)) %>%
add_trace(y= ~R60T10C30, name = "S&P500:60%, CorpBonds:30%, Treasury:10%",
line = list(color = "gray", width = 2)) %>%
layout(xaxis = list(title = "", showgrid = FALSE, zeroline = TRUE, showticklabels = TRUE),
yaxis = list(title = "", showgrid = TRUE, zeroline = TRUE, showticklabels = TRUE, tickformat = "%"),
legend = list(orientation = "h", x = 0.1, y=1.2),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 20, l = 20, t = 30))
})
})
#Create backtest preformance stats
output$bt_table1 = renderTable(digits =2, {
input$go
isolate({
#Select data
ret_df = bt_data()
ret_df = ret_df %>% rename(Mixed = R60T10C30) %>%
select(date, Portfolio, SP500, Mixed)
rf_range = rf%>% filter(as.Date(date) >= as.Date(input$date_range[1]) & as.Date(date) <= as.Date(input$date_range[2]))
#Calculate performance measures
perf_df = data.frame(Measure = c("Return (annualized), %","Risk (annualized), %","Sharpe","Sortino","Beta","Treynor"))
perf_df$Portfolio = unlist(calcPortMeasures(ret_df$Portfolio, ret_df$SP500, rf_range$rf))
perf_df$SP500 = unlist(calcPortMeasures(ret_df$SP500, ret_df$SP500, rf_range$rf))
perf_df$Mixed = unlist(calcPortMeasures(ret_df$Mixed, ret_df$SP500, rf_range$rf))
perf_df[1:2, c("Portfolio","SP500","Mixed")] = round(perf_df[1:2, c("Portfolio","SP500","Mixed")] * 100, 2)
return (perf_df)
})
})
###########
## Plots for comparison
############
#Current allocation
output$graph7 = renderPlotly({
alloc = data.frame(wght = port_weight$weight, asset = c("SP500","EuropeStocks","EMStocks","Treasury","CorpBond","RealEstate"))
g7 = plot_ly(alloc, labels = ~asset, values = ~wght, type = 'pie',
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#000'),
hoverinfo = 'text',
text = ~paste(round(wght,4)*100, ' %'),
marker = list(colors = my_colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = FALSE, width=250, height=250) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 0, l = 0, t = 0))
g7
})
#Same return
output$graph8 = renderPlotly({
opt_w = opt_weights()
alloc = data.frame(wght = opt_w$OptRet, asset = c("SP500","EuropeStocks","EMStocks","Treasury","CorpBond","RealEstate"))
g8 = plot_ly(alloc, labels = ~asset, values = ~wght, type = 'pie',
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#000'),
hoverinfo = 'text',
text = ~paste(round(wght,4)*100, ' %'),
marker = list(colors = my_colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = FALSE, width=250, height=250) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 0, l = 0, t = 0))
g8
})
#Same Risk
output$graph9 = renderPlotly({
opt_w = opt_weights()
alloc = data.frame(wght = opt_w$OptRisk, asset = c("SP500","EuropeStocks","EMStocks","Treasury","CorpBond","RealEstate"))
g9 = plot_ly(alloc, labels = ~asset, values = ~wght, type = 'pie',
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#000'),
hoverinfo = 'text',
text = ~paste(round(wght,4)*100, ' %'),
marker = list(colors = my_colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = FALSE, width=250, height=250) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 0, l = 0, t = 0))
g9
})
###########
## Comparison with the optimal portfolio
#####
opt_data = reactive({
#Get backtesting data
port_ret = bt_data()
#Get optimal weights
opt_w = opt_weights()
#Extract dataframe for dates
from = as.Date(input$date_range[1])
to = as.Date(input$date_range[2])
opt_port(df, from, to, opt_w, port_ret) # Comes from shiny_helper.R
})
########
## Graphs for optimal portfollios
########
#Plot backtest compound return
output$graph10 = renderPlotly({
input$go
isolate({ ### To let weights settle
bt_df = opt_data()
#Calculate compound return
bt_df = bt_df %>%
gather(key="Asset", value="Return", -date) %>%
group_by(Asset) %>%
arrange(date) %>%
mutate(cumRet = cumprod(1+Return) - 1) %>%
select(date, Asset, cumRet) %>%
spread(key=Asset, value=cumRet)
#Plot
plot_ly(bt_df, x = ~date, y = ~Portfolio, type = "scatter", mode = "line", name = "Portfolio",
line = list(color = "Steelblue3", width = 2), width = 700, height = 400) %>%
add_trace(y= ~OptRet, name = "Similar Return",
line = list(color = "black", width = 2)) %>%
add_trace(y= ~OptRisk, name = "Similar Risk",
line = list(color = "gray", width = 2)) %>%
layout(xaxis = list(title = "", showgrid = FALSE, zeroline = TRUE, showticklabels = TRUE),
yaxis = list(title = "", showgrid = TRUE, zeroline = TRUE, showticklabels = TRUE, tickformat = "%"),
legend = list(orientation = "h", x = 0.1, y=1.2),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin = list(b = 20, l = 20, t = 30))
})
})
## Opt Portfolio comparison table
output$bt_table2 = renderTable(digits=2, {
input$go
isolate({
#Select data
ret_df = opt_data()
ret_df = ret_df %>% rename(Same.Return=OptRet, Same.Risk = OptRisk)
rf_range = rf%>% filter(as.Date(date) >= as.Date(input$date_range[1]) & as.Date(date) <= as.Date(input$date_range[2]))
#Calculate performance measures
perf_df = data.frame(Measure = c("Return (annualized), %","Risk (annualized), %","Sharpe","Sortino","Beta","Treynor"))
perf_df$Portfolio = unlist(calcPortMeasures(ret_df$Portfolio, ret_df$SP500, rf_range$rf))
perf_df$Same.Return = unlist(calcPortMeasures(ret_df$Same.Return, ret_df$SP500, rf_range$rf))
perf_df$Same.Risk = unlist(calcPortMeasures(ret_df$Same.Risk, ret_df$SP500, rf_range$rf))
perf_df = perf_df %>% select(Measure, Portfolio, Same.Return, Same.Risk) %>% rename(Similar.Return = Same.Return,
Similar.Risk = Same.Risk)
perf_df[1:2, c("Portfolio","Similar.Return","Similar.Risk")] = round(perf_df[1:2, c("Portfolio","Similar.Return","Similar.Risk")] * 100, 2)
return (perf_df)
})
})
})
|
0b2a94ec88846f5a10cf88ab53c7a655aa10b1dd
|
438fe09be61cd0be44fa0c0e79073e5d25da4c36
|
/man/getStopWordsRatio.Rd
|
124504759ac52cc8aad576a53435c77eed18e7ec
|
[] |
no_license
|
JimSow/textutils
|
ab545ea0f432fdf5a6bc96a1bd32cfcf335041d0
|
75e438a0c5e748b3a6d0d69d83bcbde1d9456fe6
|
refs/heads/master
| 2021-01-19T22:47:18.827452
| 2016-03-22T23:00:25
| 2016-03-22T23:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 409
|
rd
|
getStopWordsRatio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_stats.R
\name{getStopWordsRatio}
\alias{getStopWordsRatio}
\title{Get the ratio of stop words}
\usage{
getStopWordsRatio(string, stpwords)
}
\arguments{
\item{string}{input string}
\item{stpwords}{character vector of stop words to match against}
}
\value{
integer value
}
\description{
Get the ratio of stop words
}
|
a61c6d5e9ed03d381c6de2041cdfe36f6f979fe9
|
b0f9f9e40ea341b5408664d390700a4062e253be
|
/man/rquery_prepare.Rd
|
26aeb410233545acadbe46761cbfd67952fa13ae
|
[] |
no_license
|
cran/vtreat
|
38fdc9aa43139fbe11e292e26254101af4c2d1a4
|
589514685ca9c4bd92308f8c56a338b8ba510c55
|
refs/heads/master
| 2023-09-01T16:48:11.190952
| 2023-08-19T20:00:02
| 2023-08-19T21:30:41
| 48,091,035
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,682
|
rd
|
rquery_prepare.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rquery_treatment.R
\name{rquery_prepare}
\alias{rquery_prepare}
\alias{materialize_treated}
\title{Materialize a treated data frame remotely.}
\usage{
rquery_prepare(
db,
rqplan,
data_source,
result_table_name,
...,
extracols = NULL,
temporary = FALSE,
overwrite = TRUE,
attempt_nan_inf_mapping = FALSE,
col_sample = NULL,
return_ops = FALSE
)
materialize_treated(
db,
rqplan,
data_source,
result_table_name,
...,
extracols = NULL,
temporary = FALSE,
overwrite = TRUE,
attempt_nan_inf_mapping = FALSE,
col_sample = NULL,
return_ops = FALSE
)
}
\arguments{
\item{db}{a db handle.}
\item{rqplan}{an query plan produced by as_rquery_plan().}
\item{data_source}{relop, data source (usually a relop_table_source).}
\item{result_table_name}{character, table name to land result in}
\item{...}{force later arguments to bind by name.}
\item{extracols}{extra columns to copy.}
\item{temporary}{logical, if TRUE try to make result temporary.}
\item{overwrite}{logical, if TRUE try to overwrite result.}
\item{attempt_nan_inf_mapping}{logical, if TRUE attempt to map NaN and Infinity to NA/NULL (good on PostgreSQL, not on Spark).}
\item{col_sample}{sample of data to determine column types.}
\item{return_ops}{logical, if TRUE return operator tree instead of materializing.}
}
\value{
description of treated table.
}
\description{
Materialize a treated data frame remotely.
}
\section{Functions}{
\itemize{
\item \code{materialize_treated()}: old name for rquery_prepare function
}}
\seealso{
\code{\link{as_rquery_plan}}, \code{\link{rqdatatable_prepare}}
}
|
696a46a6d74554bceec32df3b97f669e9cd12783
|
f5435fd1b9f39bec9b199a573aaf7a5a2de2889f
|
/man/VarOut-class.Rd
|
fe4e6eefc564f6d55ba178e340dbe78b38237402
|
[] |
no_license
|
brycefrank/spsys
|
7977680a1482e294e8316e8c6f3f30124bfa15ab
|
d88d56661dcf1d6b6b77786a816a27ed1638e099
|
refs/heads/master
| 2022-12-27T11:41:34.098804
| 2020-07-31T18:02:44
| 2020-07-31T18:02:44
| 257,992,951
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 240
|
rd
|
VarOut-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarOut.R
\docType{class}
\name{VarOut-class}
\alias{VarOut-class}
\title{Base class for all variance outputs}
\description{
Base class for all variance outputs
}
|
fd251e2cac6b81dfe1719daaf7f646fd0388a205
|
de65cc24a284ee7843afee5e7676090b70b0fc3f
|
/task_9_dataframes.r
|
c638e5a0ac8b2617e40eca086fe909c2939b4a9d
|
[] |
no_license
|
Nivas138/LearningRStudio
|
101f2c92ebb7255dcdcce2558fb9e197f8ad3ce1
|
9ba1cefdec7f829cbffbd8f7f93c0f72508e7735
|
refs/heads/master
| 2020-04-18T01:41:34.290516
| 2019-01-24T17:12:04
| 2019-01-24T17:12:04
| 167,129,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 719
|
r
|
task_9_dataframes.r
|
# Build a small data frame from three parallel vectors, then explore the
# built-in mtcars dataset with the main data-frame subsetting idioms.
n=c(2,3,5)
s=c("aa","bb","cc")
b=c(TRUE,FALSE,TRUE)
df=data.frame(n,s,b)
df
class(df)
library(help=datasets)
?mtcars #help/description of the mtcars dataset
fix(mtcars) #opens the dataset in an editor window (interactive)
nrow(mtcars) #number of rows
ncol(mtcars) #number of columns
head(mtcars) #top 6 rows by default
head(mtcars,15) #top 15 rows
mtcars[1,2] #value at first row, second column
mtcars[[9]] #9th column as a plain vector
mtcars[9] #9th column as a one-column data frame (keeps names)
mtcars$am #extract one column by name
mtcars["Mazda RX4","cyl"] #by row and column name; note: case-sensitive
mtcars[c("mpg","hp")] #both columns as a data frame
mtcars[2:5,] #rows 2 through 5, all columns
mtcars[30,] #row 30, all columns
mtcars[c(3,24),] #rows 3 and 24 with all columns
L=mtcars$am==0 #logical mask: cars with automatic transmission (am == 0)
L
mtcars[L,] #rows matching the mask
mtcars[L,]$mpg #mpg values of the matching rows
|
7a93b0dafe8e1774e987c2c2671510a61d35686e
|
0bdef2bc55eaa8003ba99f9fd5b26eaa675fd2cd
|
/scripts/extract_megan_annotations.R
|
592e9d6b1aedb1483118bf3dd1c92f862e753f54
|
[] |
no_license
|
tetukas/ArsM-evolutionary-placements
|
465e4abace8e6cdf4f3828b36852ada6e567540e
|
749f47814c4cf37766dbfee872c8bd1044548079
|
refs/heads/master
| 2022-04-15T12:30:57.144225
| 2017-09-08T12:08:27
| 2017-09-08T12:08:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,729
|
r
|
extract_megan_annotations.R
|
#!/usr/bin/env Rscript
# This script helps to extract, for each read, the full taxonomic path of MEGAN LCA annotations.
# The input file must be created by copying-pasting the Inspector window, after uncollapsing each taxonomic node.
# This script uses a function that retrieves the full taxonomic path from NCBI for any taxonomic node.
# Sometimes a conflict occurs, and one has to select the taxonomic path manually.
# See https://github.com/alex-bagnoud/ArsM-evolutionary-placements/ for more details about this script.
# Set variables (input/output paths and the prefix that marks read lines)
megan_file <- "8-7samples_otus/14-otu_annotations_megan.txt"
label <- "Otu"
otu_table <- "8-7samples_otus/11-prot_otu_table.txt"
blast_file <- "8-7samples_otus/13-2-reid_otus_diamond.txt"
output <- "8-7samples_otus/15-prot_otu_table_tax.txt"
# Import the MEGAN file as two lists of the same length,
# one that has the sequence header, and another one that has the annotations.
# Lines without the `label` prefix are taxon headers; each subsequent read
# line inherits the most recently seen header (held in `tax`).
con <- file(megan_file, open = "r")
tax_list <- character()
read_list <- character()
while (length(oneLine <- readLines(con, n = 1, warn = FALSE)) > 0) {
if ( !grepl(label, oneLine) ) {
tax <- oneLine
}
else {
tax_list <- c(tax_list, tax)
read_list <- c(read_list, oneLine)
}
}
close(con)
# Remove the trailing " [...]" annotation from each element.
library("stringr")
tax_list <- str_split_fixed(tax_list, " \\[", 2)[,1]
read_list <- str_split_fixed(read_list, " \\[", 2)[,1]
# Merge these 2 vectors into a dataframe (one row per read/OTU).
read_tax_df <- data.frame("otu" = read_list, "tax" = tax_list)
# Retrieve full taxonomic path from NCBI
library("myTAI")
# Retrieve the full NCBI taxonomic path of a taxon name as a named vector
# covering the seven canonical ranks; ranks absent from the NCBI record
# stay NA.
#
# Replaces seven copy-pasted extract-and-check blocks with a single loop;
# only the first match per rank is kept, matching the original single-slot
# assignment semantics.
#
# @param org_name character, taxon name resolvable by myTAI::taxonomy().
# @return named vector with elements superkingdom, phylum, class, order,
#   family, genus and species (NA where the rank is missing).
fullTaxRank <- function(org_name){
tax <- taxonomy(org_name,db = "ncbi")
# Canonical ranks, initialised to NA so missing ranks remain NA.
ranks <- c("superkingdom", "phylum", "class", "order", "family", "genus", "species")
out <- setNames(as.list(rep(NA, length(ranks))), ranks)
for (r in ranks) {
hit <- tax[tax$rank == r, 1]
if (length(hit) > 0) {
out[[r]] <- hit[1]
}
}
return(unlist(out))
}
# Look up the taxonomic path for every row of read_tax_df, computing each
# unique taxon only once.
#
# BUG FIX: the original loop cached only the *most recently computed* path
# (full_tax_path). When an already-seen taxon reappeared after a different
# taxon (e.g. order A, B, A), the repeat silently received B's path instead
# of A's. Keying the cache by taxon name fixes this; it also avoids growing
# seven vectors with c() inside the loop.
taxa <- as.character(read_tax_df[,2])
if (length(taxa) == 0) {
  # Degenerate case: no annotations at all
  l.1 <- l.2 <- l.3 <- l.4 <- l.5 <- l.6 <- l.7 <- character()
} else {
  unique_taxa <- unique(taxa)
  tax_paths <- lapply(unique_taxa, fullTaxRank)  # one NCBI lookup per unique taxon
  names(tax_paths) <- unique_taxa
  # One row per input row, columns = the seven major ranks
  path_mat <- do.call(rbind, tax_paths[taxa])
  l.1 <- unname(path_mat[, 1])  # superkingdom
  l.2 <- unname(path_mat[, 2])  # phylum
  l.3 <- unname(path_mat[, 3])  # class
  l.4 <- unname(path_mat[, 4])  # order
  l.5 <- unname(path_mat[, 5])  # family
  l.6 <- unname(path_mat[, 6])  # genus
  l.7 <- unname(path_mat[, 7])  # species (kept for parity; unused below)
}
# Attach the per-rank LCA annotations as columns (species, l.7, is omitted)
read_tax_df$lca_superkingdom <- l.1
read_tax_df$lca_phylum <- l.2
read_tax_df$lca_class <- l.3
read_tax_df$lca_order <- l.4
read_tax_df$lca_family <- l.5
read_tax_df$lca_genus <- l.6
# Import DIAMOND blast table (tabular format; presumably the standard
# outfmt-6 layout: V1 = query id, V3 = pident, V11 = e-value -- TODO confirm)
blast <- read.table(blast_file, header = FALSE)
# Sort dataframe by increasing e-value and decreasing pident
sorted_blast <- blast[order(blast$V11, -blast$V3),]
# Keep the best hit for each OTU (first occurrence after the sort above)
best_hit <- sorted_blast[!duplicated(sorted_blast$V1),]
# Merge annotation and blast dataframes; all = TRUE keeps OTUs present in
# only one of the two tables
merged_df <- merge.data.frame(read_tax_df, best_hit, by.x = "otu", by.y = "V1", all = TRUE)
# Positional selection: 1 = otu, 3-8 = the six lca_* columns,
# 9/10/18 = blast subject id, pident and e-value (renamed below)
merged_df2 <- merged_df[,c(1,3,4,5,6,7,8,9,10,18)]
names(merged_df2)[c(8,9,10)] <- c("diamond_best_hit", "diamond_pident", "diamond_e-value")
# Import the OTU table and merge it with the taxonomic assignment dataframe
otu_tab <- read.table(otu_table, header = FALSE, sep = '\t')
names(otu_tab) <- c("OTU_id", "S1", "S2", "S3", "S4", "S5", "S6", "S7")
otu_tab_tax <- merge(otu_tab, merged_df2, by.x = "OTU_id", by.y = "otu", all = TRUE)
# Save merged dataframe as file
write.table(otu_tab_tax, file = output, quote = FALSE, sep = '\t', row.names = FALSE)
|
b2a8b2334c5006b0ba9e39d2adc73eb1a400e8fb
|
e44f7c7c1cbcd8aa198bd61439807d7c1ff2d706
|
/plot4.R
|
5c9ad364d755f93aa398a54381117d9781baed99
|
[] |
no_license
|
pepcarrera/ExData_Plotting1
|
7ef3bfbd25453173a6ec5256409de643038b28c0
|
1cbd922bc4af1f65d9bdc01204d09456f5e2b14c
|
refs/heads/master
| 2021-01-16T19:38:23.343711
| 2014-07-08T03:09:06
| 2014-07-08T03:09:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
r
|
plot4.R
|
## Load the data set.
## Treat "?" entries as NA; fields are separated by ";" (not "seperator").
elecData <- read.table("household_power_consumption.txt",
                       header = TRUE, sep = ";", na.strings = "?")

## Convert the Date column to Date class (input format is dd/mm/yyyy)
elecData$Date <- as.Date(elecData$Date, format = "%d/%m/%Y")

## Subset just the data we are interested in (2007-02-01 through 2007-02-02)
elecData <- elecData[elecData$Date >= as.Date("2007-02-01") &
                       elecData$Date <= as.Date("2007-02-02"), ]

## Create vector with the Date & Time columns combined
Date_time <- strptime(paste(elecData$Date, elecData$Time), "%Y-%m-%d %H:%M:%S")

## Add the new Date_time vector as a column while dropping the existing
## separate date/time columns
elecData <- cbind(Date_time,
                  subset(elecData, select = Global_active_power:Sub_metering_3))

## Open PNG device; create 'plot4.png' in the working directory.
## FIX: the original wrote `heigh = 480`, which only worked through R's
## partial argument matching; spell the argument out.
png(file = "plot4.png", width = 480, height = 480)

## Set global parameter for a 2x2 grid of plots
par(mfrow = c(2, 2))

## Top-left: Global Active Power over time
plot(elecData$Date_time, elecData$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## Top-right: Voltage over time
plot(elecData$Date_time, elecData$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

## Bottom-left: the three sub-metering series on one panel
plot(elecData$Date_time, elecData$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
## Add Sub_metering_2 as a red line
lines(elecData$Date_time, elecData$Sub_metering_2, col = "red")
## Add Sub_metering_3 as a blue line
lines(elecData$Date_time, elecData$Sub_metering_3, col = "blue")
## Add legend (no box, matching the course reference plot)
legend(x = "topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n")

## Bottom-right: Global reactive power over time
## (original comment wrongly said "Voltage as y")
plot(elecData$Date_time, elecData$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")

## Close PNG device
dev.off()
|
f8ff07bcf7d182d42e726c81cfd894612341f27c
|
ce6c631c021813b99eacddec65155777ca125703
|
/R/rbindQW.R
|
23a38407af0b6034f898ad09f3f8102a121f3b9a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
Zhenglei-BCS/smwrQW
|
fdae2b1cf65854ca2af9cd9917b89790287e3eb6
|
9a5020aa3a5762025fa651517dbd05566a09c280
|
refs/heads/master
| 2023-09-03T04:04:55.153230
| 2020-05-24T15:57:06
| 2020-05-24T15:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
rbindQW.R
|
#' Combine Data by Rows
#'
#' Combines a sequence of data frame arguments by rows. This is a
#'specialized version of rbind that works for data frames that contain columns
#'of class "qw."
#'
#' @param \dots any number of data frames with identical columns. The missing value \code{NA}
#'is permitted as a special case to allow the addition of missing values.
#' @return A data frame with all columns combined in the order specified in \dots.
#' @keywords data
#' @seealso \code{\link{rbind}}
#'
#' @export
rbindQW <- function(...) {
  dots <- list(...)
  dots <- lapply(dots, as.data.frame)
  ## Expand columns of class qw into plain columns so base rbind can
  ## operate on them
  dots <- lapply(dots, function(df) {
    lapply(names(df), function(col) {
      # inherits() is the robust S3 class test; the original compared only
      # class(x)[[1]], which misses qw objects carrying an extra leading class.
      if(inherits(df[[col]], "qw"))
        as.data.frame(df[[col]], expand=TRUE, nm=col)
      else
        df[, col, drop=FALSE]
    } )
  } )
  dots <- lapply(dots, as.data.frame)
  ## Check for a single value appended (only NA): a length-1 argument means
  ## "append one all-NA row shaped like the first argument".
  ## vapply is type-stable, unlike sapply (which returns list() when empty).
  ckdots <- vapply(dots, length, integer(1L))
  if(any(ckdots == 1L)) {
    target <- dots[[1L]][1L,]
    for(i in which(ckdots == 1L))
      dots[[i]] <- as.data.frame(lapply(target, function(x) NA))
  }
  ## pack everything together and convert back to qw
  dots <- do.call(rbind, dots)
  return(convert2qw(dots, scheme="qw"))
}
|
4bb0dbd6ecde9ec48809d914c99ba382312bf4c3
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/Z3tt|TidyTuesday|R__2019_27_Franchise.R
|
0ab27569b3aca4e08f921506e45379275f9118b2
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,449
|
r
|
Z3tt|TidyTuesday|R__2019_27_Franchise.R
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, warning=FALSE)
## ----prep, message=FALSE-------------------------------------------------
## packages
library(tidyverse)
library(patchwork)
library(tvthemes)
## ggplot theme updates
source(here::here("theme", "tidy_grey.R"))
## ----data----------------------------------------------------------------
## TidyTuesday 2019-07-02: media franchise revenues. Collapse near-duplicate
## revenue categories into broader buckets before any analysis.
df_media <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-02/media_franchises.csv") %>%
mutate(
revenue_category = case_when(
revenue_category == "Video Games/Games" ~ "Video Games",
revenue_category %in% c("Home Video/Entertainment", "TV") ~ "Home Entertainment",
revenue_category %in% c("Comic or Manga", "Book sales") ~ "Books & Comics",
revenue_category == "Merchandise, Licensing & Retail" ~ "Merchandise",
TRUE ~ revenue_category
)
)
## ----yearly--------------------------------------------------------------
## Per-franchise/category revenue plus per-year rates. 2018.5 is used as the
## "now" reference, so years_running is the franchise's age in mid-2018.
df_media_per_year <- df_media %>%
group_by(franchise, revenue_category) %>%
summarize(
revenue = sum(revenue),
year_created = min(year_created, na.rm = T),
original_media = unique(original_media)
) %>%
group_by(franchise) %>%
mutate(
years_running = 2018.5 - year_created,
rev_per_year = revenue / years_running,
sum_per_year = sum(revenue) / unique(years_running),
) %>%
ungroup() %>%
# Shorten unwieldy franchise names and bucket the original-media labels
mutate(
franchise = case_when(
franchise == "Wizarding World / Harry Potter" ~ "Harry Potter",
franchise == "Super Sentai / Power Rangers" ~ "Power Rangers",
str_detect(franchise, "Jump") ~ "Shōnen Jump",
TRUE ~ franchise
),
original_media = case_when(
original_media %in% c("Film", "Animated film") ~ "Movie",
original_media %in% c("Television series", "Animated series", "Anime") ~ "Series",
original_media == "Video game" ~ "Game",
original_media == "Cartoon character" ~ "Character",
TRUE ~ original_media
)
) %>%
# Keep only heavyweight franchises (> $0.825B total revenue per year)
filter(sum_per_year > 0.825) %>%
mutate(franchise = fct_reorder(franchise, sum_per_year))
# Fill colours for the original-media buckets (order matches factor levels)
cols_a <- c("#646464", "#700000", "#9D5931", "#D78808", "#005173", "#747940")
# Left panel: horizontal bars of revenue per year, one bar group per
# franchise, annotated with total-per-year labels, the original medium and
# the founding year.
revenue_yearly <- df_media_per_year %>%
ggplot(aes(franchise, rev_per_year)) +
geom_col(aes(fill = original_media), width = 0.65) +
geom_hline(yintercept = 0, color = "grey50", size = 0.2) +
geom_hline(data = tibble(y = 1:4), aes(yintercept = y),
color = "grey50", size = 0.2, linetype = "dotted") +
# "$x.xxB" label at the end of each bar group
geom_text(data = df_media_per_year %>%
group_by(franchise) %>%
summarize(
sum_per_year = unique(sum_per_year),
label = glue::glue("${format(round(unique(sum_per_year), 2), nsmall = 2)}B")
),
aes(franchise, sum_per_year, label = label),
color = "grey90", size = 2.5, family = "Roboto Mono",
nudge_y = 0.08, hjust = 0) +
# Original-medium label at the base of each bar
geom_text(data = df_media_per_year %>%
group_by(franchise) %>%
summarize(label = unique(original_media)),
aes(franchise, 0.05, label = label),
color = "grey90", size = 2.2, family = "Poppins",
fontface = "bold", hjust = 0, vjust = 0.45) +
# Founding year, left of the axis
geom_text(data = df_media_per_year %>%
group_by(franchise) %>%
summarize(label = glue::glue("({unique(year_created)})")),
aes(franchise, -0.18, label = label), color = "grey60",
size = 2.7, family = "Roboto Mono", hjust = 1) +
coord_flip(clip = "off") +
scale_y_continuous(limits = c(-0.5, 4.3), breaks = c(0:4, 4.3),
labels = c(glue::glue("${0:4}B"), " per year"),
expand = c(0.01, 0.01), position = "right") +
scale_fill_manual(values = cols_a, guide = F) +
theme(axis.text.x = element_text(family = "Roboto Mono", size = 8),
axis.text.y = element_text(size = 8, color = "grey90", face = "bold"),
axis.ticks = element_blank(),
panel.border = element_rect(color = "transparent"),
strip.background = element_rect(color = "transparent"),
strip.text = element_text(size = 11)) +
labs(x = NULL, y = NULL)
## ----relative------------------------------------------------------------
## Relative revenue per category within each franchise (revenue_rel sums to
## 1 per franchise) plus the grand total per category (sum_cat).
df_media_rel <- df_media %>%
group_by(franchise, revenue_category) %>%
summarize(
revenue = sum(revenue),
year_created = min(year_created, na.rm = T),
) %>%
group_by(franchise) %>%
mutate(
sum_revenue = sum(revenue, na.rm = T),
revenue_rel = revenue / sum_revenue
) %>%
group_by(revenue_category) %>%
mutate(sum_cat = sum(revenue)) %>%
ungroup() %>%
# Same franchise-name shortening as in the yearly data frame
mutate(
franchise = case_when(
franchise == "Wizarding World / Harry Potter" ~ "Harry Potter",
franchise == "Super Sentai / Power Rangers" ~ "Power Rangers",
str_detect(franchise, "Jump") ~ "Shōnen Jump",
TRUE ~ franchise
)
) %>%
# Restrict to the franchises kept in the yearly panel
filter(franchise %in% as.vector(df_media_per_year$franchise))
# Legend ordering: categories sorted by grand total, labelled "Name ($xB)"
categories <- df_media_rel %>%
arrange(sum_cat) %>%
mutate(revenue_category = glue::glue("{revenue_category} (${round(sum_cat, 1)}B)")) %>%
pull(revenue_category) %>%
unique() %>%
as.vector()
# Fill colours for the revenue-category legend
cols_b <- c("#D96F63", "#6D3E4E", "#945744", "#7E6A69", "#A22B2B", "#E8B02A")
# Right panel: 100%-stacked bars of revenue share per category, with the
# total revenue printed left of each bar. Franchise order matches the left
# panel so the two panels line up.
revenue_relative <- df_media_rel %>%
mutate(
revenue_category = glue::glue("{revenue_category} (${round(sum_cat, 1)}B)"),
revenue_category = factor(revenue_category, levels = categories),
franchise = factor(franchise, levels = levels(df_media_per_year$franchise)),
label = glue::glue("${round(revenue, 1)}B"),
# suppress labels for slivers (< 7.5% of the stack)
label = ifelse(revenue_rel < 0.075, "", label)
) %>%
ggplot(aes(franchise, revenue_rel, fill = revenue_category, label = label)) +
geom_col(color = "grey20", size = 0.1, width = 0.65, position = "stack") +
geom_hline(data = tibble(1:3), aes(yintercept = c(0.25, 0.5, 0.75)),
color = "grey50", size = 0.2, linetype = "dotted") +
geom_hline(data = tibble(1:2), aes(yintercept = c(0, 1)),
color = "grey50", size = 0.2) +
geom_text(color = "grey90", size = 1.8, family = "Roboto Mono",
fontface = "bold", position = position_stack(vjust = 0.5)) +
# Total revenue label to the left of each stacked bar
geom_text(data = df_media_rel %>%
group_by(franchise) %>%
summarize(sum = unique(sum_revenue)) %>%
mutate(
label = glue::glue("${format(round(sum, 1), nsmall = 1)}B "),
revenue_category = "Music ($16.1B)", ## just any of the existing to avoid new key in legend
),
aes(x = franchise, y = 0, label = label), color = "grey90",
family = "Roboto Mono", size = 3, fontface = "bold",
position = "stack", hjust = 1) +
coord_flip(clip = "off") +
scale_y_continuous(limits = c(-0.5, 1), breaks = c(-0.28, seq(0, 1, by = 0.25)),
expand = c(0, 0), position = "right",
labels = c("Total revenue", "0%", "25%", "50%", "75%", "100%")) +
scale_fill_manual(values = cols_b, name = "Revenue breakdown:") +
guides(fill = guide_legend(reverse = T)) +
theme(axis.text.x = element_text(family = "Roboto Mono", size = 8),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
panel.border = element_rect(color = "transparent"),
legend.title = element_text(size = 9, face = "bold"),
legend.text = element_text(size = 7.5),
legend.key.height = unit(1.25, "lines"),
legend.key.width = unit(0.5, "lines"),
legend.justification = "top") +
labs(x = NULL, y = NULL)
## ----title---------------------------------------------------------------
## left-aligned title (an empty plot used only to carry the title text)
title <- ggplot(data.frame(x = 1:2, y = 1:10)) +
labs(x = NULL, y = NULL,
title = "Gotta Catch 'Em All! Franchise Fans Beg for Merchandise",
subtitle = "Annual and total revenue of media franchise powerhouses and breakdown of revenues by category.\n") +
theme(line = element_blank(),
plot.background = element_rect(fill = "transparent", color = "transparent"),
panel.background = element_rect(fill = "transparent"),
panel.border = element_rect(color = "transparent"),
axis.text = element_blank())
## ----caption-------------------------------------------------------------
## right-aligned caption (same empty-plot trick as the title)
caption <- ggplot(data.frame(x = 1:2, y = 1:10)) +
labs(x = NULL, y = NULL,
caption = "\nVisualization by Cédric Scherer | Data source: Wikipedia") +
theme(line = element_blank(),
plot.background = element_rect(fill = "transparent", color = "transparent"),
panel.background = element_rect(fill = "transparent"),
panel.border = element_rect(color = "transparent"),
axis.text = element_blank())
## ----full-panel, fig.width = 14, fig.height = 5.5------------------------
## Assemble title + the two data panels + caption with patchwork; the
## zero-width slots let title/caption overlay without taking space
title + revenue_yearly + revenue_relative + caption + plot_layout(widths = c(0, 1, 1, 0), nrow = 1)
ggsave(here::here("plots", "2019_27", "2019_27_FranchiseRevenue.pdf"),
width = 14, height = 5.6, device = cairo_pdf)
## ------------------------------------------------------------------------
sessionInfo()
|
8ef3f8219be36d91851d68cd056e524c214b8788
|
9895ab0556ce062451b44520b98300d501e9176a
|
/scripts/Section 4 - Matrices/MatrixOperations.R
|
01b8b234becc56c31a67c0e8a2f37e563c64789c
|
[
"MIT"
] |
permissive
|
LEMSantos/udemy-R_programming_A_to_Z
|
8359c9fab7def1e9743f01c254f7ab5d3d697b12
|
7319f9288489a9a619f1fffb0cf2016875752008
|
refs/heads/main
| 2023-04-04T18:53:47.836423
| 2021-04-18T14:59:29
| 2021-04-18T14:59:29
| 349,807,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
MatrixOperations.R
|
library(here)
# Load the course data script, which defines the Games, FieldGoals and
# MinutesPlayed matrices (players x seasons)
source(here::here('scripts', 'Section 4 - Matrices', 's4-BasketballData.R'))
Games
rownames(Games)
colnames(Games)
# Single-cell lookup: games LeBron James played in the 2012 season
Games['LeBronJames', '2012']
FieldGoals
# Element-wise matrix division: field goals per game, one decimal place
round(FieldGoals / Games, 1)
# Minutes per game, rounded to whole minutes
round(MinutesPlayed / Games)
|
e8d02d347c9c5c131efb9a0010614e54b333fb73
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mlogit/examples/scoretest.Rd.R
|
d67a712c82c6ddab680f914eb6b65430fc7f1476
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 652
|
r
|
scoretest.Rd.R
|
library(mlogit)
### Name: scoretest
### Title: The three tests for mlogit models
### Aliases: scoretest scoretest.mlogit scoretest.default waldtest.mlogit
### waldtest lrtest.mlogit lrtest
### Keywords: htest
### ** Examples
library("mlogit")
data("TravelMode", package = "AER")
# Base multinomial logit model (homoscedastic)
ml <- mlogit(choice ~ wait + travel + vcost, TravelMode,
shape = "long", chid.var = "individual", alt.var = "mode")
# Heteroscedastic variant of the same specification
hl <- mlogit(choice ~ wait + travel + vcost, TravelMode,
shape = "long", chid.var = "individual", alt.var = "mode",
method = "bfgs", heterosc = TRUE)
# The three classical tests of the heteroscedasticity restriction:
lrtest(ml, hl)
waldtest(hl)
scoretest(ml, heterosc = TRUE)
|
da20b72d68d88af038aed6987d6472ba43cb3657
|
55e51b89b134522678d1f58d3006a5e37d1e7462
|
/man/z3_personf.Rd
|
d1b0ff05cb0eb64537eb555b862807eb39a9848b
|
[] |
no_license
|
cran/IRTpp
|
49da8ebb3f62625634aee44e3b0d4568b9d250a0
|
3cdd14c81e00802d2f1bcd022eebcc403c636798
|
refs/heads/master
| 2021-01-15T15:25:21.955389
| 2016-07-05T14:02:36
| 2016-07-05T14:02:36
| 54,411,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,350
|
rd
|
z3_personf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itemfit.R
\name{z3_personf}
\alias{z3_personf}
\title{Z3 Person fit statistic}
\usage{
z3_personf(data, zita, patterns)
}
\arguments{
\item{data}{a data frame or a matrix with the test.}
\item{zita}{a list of estimations of the parameters of the items (discrimination,difficulty, guessing).}
\item{patterns}{matrix of response patterns, the frequency of each pattern and the latent traits.}
}
\description{
Calculates the values of the Z3 statistic for individual respondents.
}
\examples{
#Simulates a test and returns a list:
test <- simulateTest()
#the simulated data:
data <- test$test
#model:
mod <- irtpp(dataset = data,model = "3PL")
#latent trait:
zz <- parameter.matrix(mod$z)
p_mat <- mod$prob_mat
traits <- individual.traits(model="3PL",method = "EAP",dataset = data,itempars = zz,
probability_matrix=p_mat)
#Z3 PERSONFIT-Statistic
z3_personf(data = data,zita = mod$z,patterns = traits)
}
\author{
SICS Research, National University of Colombia \email{ammontenegrod@unal.edu.co}
}
\references{
Fritz Drasgow, Michael V. Levine and Esther A. Williams (1985). Appropriateness measurement with polychotomous item response models and standardized indices.
}
\seealso{
\code{\link{z3_itemf}}, \code{\link{orlando_itemf}}
}
|
c92aa34be8ab10216ebde4ae7734ba568e32ae32
|
46b5ab567c4f63bb764972c52a407ce2db9788d4
|
/man/vertlocations.Rd
|
b78af42d5f1147ed0f8018f07e529721f557d724
|
[] |
no_license
|
jotegui/rvertnet
|
078de18438f050c0ba4a4bec514cf9f9d83f3a08
|
516937fa762173c3b09433d9e3a054b6c206d910
|
refs/heads/master
| 2021-01-17T23:14:01.187716
| 2012-09-21T05:38:13
| 2012-09-21T05:38:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,596
|
rd
|
vertlocations.Rd
|
\name{vertlocations}
\alias{vertlocations}
\title{Retrieve locations and number of occurrence records for an organism from VertNet v2 portals.}
\usage{
vertlocations(key = "r_B68F3", grp = "fish", t = NULL,
l = NULL, c = NULL, d = NULL, q = NULL, p = NULL,
m = NULL, url = NULL)
}
\arguments{
\item{key}{API Key is required to run any query}
\item{grp}{VertNet group to query. Currently available
options: fish, bird and herp. Default fish.}
\item{t}{Taxon scientific and family names. It supports
the 'OR' operator.}
\item{l}{Location country, continent, county, ocean,
island, state, province and locality. It supports the
'OR' operator.}
\item{c}{Catalog Number and/or Institution Code. It
supports the 'OR' operator.}
\item{d}{year or years the occurrence was collected. Date
Ranges must be in yyyy-yyyy format.}
\item{q}{terms of interest that may be in the remarks,
notes, scientific name, collector, preparation type,
location fields or elsewhere in the occurrence. It
supports the 'OR' operator.}
\item{p}{geometric query in well-known text (WKT) format.
Limited to 250 vertices or 10,000 characters. Note that
the Map parameter and the Geometry paramter are mutually
exclusive. If both are submitted, the Map parameter will
be ignored.}
\item{m}{geographic area defined by one of the available
maps. Maps are designated by MapIDs ref AvailableMaps
function}
\item{url}{The VertNet url for the function (should be
left to default).}
}
\value{
Dataframe of search results OR prints "No records found"
if no matches.
}
\description{
Retrieve locations and number of occurrence records for
an organism from VertNet v2 portals.
}
\examples{
\dontrun{
# Taxon
vertlocations(t="notropis")
vertlocations(t="notropis or nezumia")
vertlocations(t="Blenniidae")
# Location
vertlocations(l="country:india")
vertlocations(l="alabama or gulf of mexico")
vertlocations(l="africa", grp="bird")
# Catalog Number/Institution Code
vertlocations(c="TU 1")
vertlocations(c="mnhn or usnm")
vertlocations(c="ku 29288 or tu 66762")
# Date Range
vertlocations(d="2000-2000")
vertlocations(d="1950-1975")
# Other keywords
vertlocations(q="larva")
vertlocations(q="ethanol or EtOH")
# Geometry
vertlocations(p="POLYGON((-93.998292265615 32.615318339629,-92.471192656236 32.606063985828,-92.635987578112 31.235349580893,-90.988038359361 31.19776691287,-90.955079374988 30.395621231989,-93.94336062499 30.386144489302,-93.998292265615 32.615318339629))")
# Map
vertlocations(m=14)
# Wrong name
vertlocations(t="notropisz")
}
}
|
1bc12e396b4aee739d62e8b1d5e79a33e0e53a2e
|
cfb5af31d5105a6b6d81adf6221ecca7e572b8c8
|
/cachematrix.R
|
3feb1b359285af408deeda374ee08c3e3ed01ef0
|
[] |
no_license
|
abie/ProgrammingAssignment2
|
ba120e34ee1b273da673ed77f9a05e9c31bf7bed
|
557344472361e6b775bbbfcb39baa0ce19accc05
|
refs/heads/master
| 2021-01-18T02:07:11.045362
| 2015-07-26T19:52:03
| 2015-07-26T19:52:03
| 38,132,763
| 0
| 0
| null | 2015-06-26T20:43:07
| 2015-06-26T20:43:03
| null |
UTF-8
|
R
| false
| false
| 773
|
r
|
cachematrix.R
|
## Build a special "matrix" object whose inverse can be memoised.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stored matrix changed, so the cache is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }

  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), the cached value is returned instead of recomputing it.
##
## Args:
##   x:   object created by makeCacheMatrix (list of get/set accessors)
##   ...: further arguments forwarded to solve()
## Returns: the matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  im <- x$getinverse()
  if(!is.null(im)) {
    message("Using cached version.")
    return(im)
  }
  data <- x$get()
  # FIX: forward `...` to solve(); the original declared but ignored it
  im <- solve(data, ...)
  x$setinverse(im)
  im
}
|
ed32c47b738c4d1817cc3306d48ab84759a4fc19
|
0be1777c9406537edabcc90a704b3bd6687bb19b
|
/R/4-16.R
|
2cfcb10c292d011e314c3d0e4360c54c0ab8aa06
|
[] |
no_license
|
mitchellchris/stats-notes
|
b53b9a50423352df44269241e286f528d5e15ab4
|
05d00deb57e4627587b5b57bab29b3bf8590ab8e
|
refs/heads/master
| 2020-04-07T11:21:42.069781
| 2014-04-29T14:58:03
| 2014-04-29T14:58:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
4-16.R
|
# 3 ways to bootstrap (sample with replacement):
#   1) for loop
#   2) boot package
#   3) replicate() function
# NOTE(review): this script assumes ggplot2 is attached and that a `Fish`
# data frame with a `Mercury` column exists in the workspace -- neither is
# created here; confirm against the course setup script.

# 7: Population with normal dist mean = 36, sd = 8.
set.seed(13)
rs <- rnorm(200, 36, 8) # 200 samples from normal dist with mean=36 and sd=8
DF <- data.frame(x = rs)
# density plot
p1 <- ggplot(data = DF, aes(x = x)) + geom_density(fill = "pink") + theme_bw()
p1
# quantile-quantile plot
p2 <- ggplot(data = DF, aes(sample = x)) + stat_qq() + theme_bw()
p2
# 1) Bootstrap those 200 values with a for loop (result preallocated)
B <- 10000
theta.hat.star <- numeric(B)
for (i in 1:B) {
  bss <- sample(rs, size = 200, replace = TRUE) # boot strap sample
  theta.hat.star[i] <- mean(bss)
}
mean(theta.hat.star)
mean(rs)
sd(theta.hat.star)
8 / sqrt(200)  # theoretical standard error of the mean, for comparison
CI <- quantile(theta.hat.star, probs = c(.025, .975))
CI
# 3) replicate()
set.seed(12)
B <- 10000
# BUG FIX: the original sampled Fish$mercury (lowercase), which is NULL --
# the column is spelled Mercury, as the length() call on the same line shows.
xbar <- replicate(B, mean(sample(Fish$Mercury, size = length(Fish$Mercury), replace = TRUE)))
SE <- sd(xbar)
CI <- quantile(x = xbar, probs = c(0.025, 0.975))
# 2) boot package
# BUG FIX: same Mercury/mercury case mismatch as above.
mercury <- sort(Fish$Mercury)[-30]  # drop the 30th-smallest observation
require(boot)
# Statistic for boot(): mean of the resampled values (i = bootstrap indices)
FishMean <- function(data, i) {
  d <- data[i]
  M <- mean(d)
  M
}
boot.obj <- boot(data = mercury, statistic = FishMean, R = B)
boot.obj
boot.ci(boot.obj, type = "perc")
# Flight delays: bootstrap the ratio of mean delays, UA / AA
site <- "http://www1.appstate.edu/~arnholta/Data/FlightDelays.csv"
FD <- read.csv(site)
head(FD)
UA <- FD[FD$Carrier == "UA", ]$Delay
AA <- FD[FD$Carrier == "AA", ]$Delay
UA
AA
set.seed(13)
B <- 10000
ths <- numeric(B)
for (i in 1:B) {
  ua <- sample(UA, size = length(UA), replace = TRUE)  # TRUE, not the reassignable T
  aa <- sample(AA, size = length(AA), replace = TRUE)
  ths[i] <- mean(ua) / mean(aa)
}
mean(ths)
sd(ths)
mean(UA) / mean(AA)
CI <- quantile(ths, probs = c(0.025, 0.975))
CI
# Bootstrap estimate of bias, and bias relative to the bootstrap SE
BIAS <- mean(ths) - (mean(UA) / mean(AA))
BIAS
BIAS / sd(ths)
|
6855c0ea044b1fbb56d5836911fbaeba73010c7f
|
b459f6b664c6b0bb693554a7a5e05413da311390
|
/02_nyc_squirrels_by_color.R
|
7a386ab00db20837fefed25689dc43509e5ca19d
|
[] |
no_license
|
denisevitoriano/Tidytuesday
|
c7dc7366e36514f2bf8c8827da6b580e26652eaa
|
e099ef7b2e1640285f75ce2376ebc0e7cfc338ed
|
refs/heads/master
| 2020-09-04T04:47:15.278256
| 2019-11-06T20:33:32
| 2019-11-06T20:33:32
| 219,661,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
02_nyc_squirrels_by_color.R
|
library(tidyverse)
theme_set(theme_light())
# TidyTuesday 2019-10-29: NYC Central Park squirrel census
nyc_squirrels <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-10-29/nyc_squirrels.csv")
# By hectare: sighting counts per hectare (printed, not stored)
nyc_squirrels %>%
count(hectare, sort = FALSE)
# Mean position and sighting count per hectare and primary fur colour
by_hectare <- nyc_squirrels %>%
group_by(hectare, primary_fur_color) %>%
summarize(lon = mean(long), lat = mean(lat), n = n())
# Color graphic by the three primary colors, faceted by colour;
# rows with unknown fur colour are dropped
by_hectare %>%
filter(!is.na(primary_fur_color)) %>%
group_by(hectare, primary_fur_color) %>%
ggplot(aes(lon, lat, size = n, color = primary_fur_color)) +
geom_point() +
scale_color_manual(values = c("black", "#d2691e", "darkgray")) +
labs(title = "Spotted squirrel positions averaged by hectare",
color = "primary color",
size = "# squirrels") +
facet_wrap(~ primary_fur_color)
|
b62e822dd622e373214937460ef72e2c0f8c2975
|
5ec2fa5348d50157287668785946117ba553b4a4
|
/scripts/01_plot_fiscal_note_fiscal_solvency_data.R
|
7aae9f690558b59d46f6c62f9149736fc645da1f
|
[
"CC-BY-4.0"
] |
permissive
|
RobWiederstein/fiscal_notes
|
c615534eee52129ea79c2662ed01667d4eb277dd
|
86f87c60db7fe4f75b340e7b80346b310dae4d50
|
refs/heads/master
| 2020-04-20T10:21:09.071134
| 2019-10-03T11:32:07
| 2019-10-03T11:32:07
| 168,788,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,864
|
r
|
01_plot_fiscal_note_fiscal_solvency_data.R
|
## Rob Wiederstein
## rob@robwiederstein.org
## explore correlation between
## fiscal notes and fiscal soundness
###############################################################################
# Read in CBPP "better cost estimates" data (Tabula extract of Table 1).
# FIX throughout: spell out TRUE/FALSE instead of the reassignable T/F.
file <- "./data/tabula-2015-11-24_cbpp_better_cost_estimates_table1.csv"
df <- read.csv(file = file, header = TRUE, colClasses = "character", strip.white = FALSE)
# Trim whitespace in every column (apply over columns returns a character matrix)
df <- data.frame(apply(df, 2, stringr::str_trim), stringsAsFactors = FALSE)
# Total column: number of best practices adopted per state
# (non-empty cells per row, minus one for the state-name column itself)
df$total <- apply(df, 1, function(x) length(which(x != "")))
df$total <- df$total - 1
df.1 <- dplyr::arrange(df, -total)
colnames(df.1)[1] <- "state"
# Read in Mercatus state financial rankings
file <- "./data/mercatus_state_rankings.csv"
df.2 <- read.csv(file = file, header = TRUE, stringsAsFactors = FALSE)
df.2$state <- stringr::str_trim(df.2$state)
# Merge the data sets on the shared "state" column
df.3 <- merge(df.1, df.2)
df.3 <- dplyr::arrange(df.3, -mercatus.fiscal.idx)
df.3$fiscal.rank <- 1:nrow(df.3)
# Add state abbreviations for plotting
df.state <- data.frame(state = state.name, state.abb = state.abb)
df.4 <- merge(df.3, df.state)
# Linear model: fiscal index explained by fiscal-note practice count
fit.lm.1 <- lm(mercatus.fiscal.idx ~ total, data = df.4)
summary(fit.lm.1) # statistically significant, but 5% explanation value
# Generalized linear model (gaussian default, so same fit as lm above)
fit.lm.2 <- glm(mercatus.fiscal.idx ~ total, data = df.4)
summary(fit.lm.2)
# Plot scatter plot with loess smoother, states labelled by abbreviation
library(ggplot2)
p <- ggplot(df.4, aes(total, mercatus.fiscal.idx))
p <- p + geom_smooth(method = "loess")
p <- p + geom_label(label = df.4$state.abb, size = 1.5)
p <- p + scale_x_continuous(name = "fiscal.note.totals")
p <- p + ggtitle("50 States Comparison")
p <- p + theme(plot.title = element_text(hjust = 0.5))
p
filename <- "./plots/50_state_comparison_fiscal_health_versus_fiscal_note_use.jpg"
ggsave(filename = filename, height = 4, width = 6, unit = "in")
# Plot boxplot by fiscal-note total (treated as a factor)
df.4$total <- as.factor(df.4$total)
p <- ggplot(df.4, aes(total, mercatus.fiscal.idx, group = total, colour = total))
p <- p + geom_boxplot()
p <- p + geom_jitter(width = 0)
#p <- p + geom_point(color = df.4$total)
p <- p + scale_x_discrete(name = "fiscal.note.total")
p <- p + ggtitle("50 States Comparison")
p <- p + theme(plot.title = element_text(hjust = 0.5))
p <- p + theme(legend.position="none")
p
filename <- "./plots/50_state_comparison_boxplot.jpg"
ggsave(filename = filename, height = 4, width = 6, unit = "in")
# Rearrange and write data out to be inserted in a table
df.5 <- dplyr::select(df.4,
                      fiscal.rank,
                      state,
                      state.abb,
                      Prepared.for.all.most.bills:mercatus.fiscal.idx)
# Convert the factor back to its numeric values via its levels
# (plain as.integer(factor) would return the level codes instead)
df.5$total <- as.integer(levels(df.5$total))[df.5$total]
df.5 <- dplyr::arrange(df.5, fiscal.rank)
file <- "./data/mercatus_cbpp_combined_table.csv"
write.csv(df.5, file = file, row.names = FALSE)
|
b316c23d6a2c6f8b49d2839516b3e499c31bc0db
|
1542b8ef5c6387facf4d49f8fd4f6b5ef5d8e9c0
|
/R/xRWkernel.r
|
c10f8a39ecb0ab7fbedf4e9ce063ecc55be53d08
|
[] |
no_license
|
wuwill/XGR
|
7e7486614334b664a05e389cd646678c51d1e557
|
c52f9f1388ba8295257f0412c9eee9b7797c2029
|
refs/heads/master
| 2020-04-12T12:38:04.470630
| 2018-12-19T17:40:30
| 2018-12-19T17:40:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,494
|
r
|
xRWkernel.r
|
#' Function to calculate random walk kernel on the input graph solved analytically
#'
#' \code{xRWkernel} is supposed to calculate a weighted random walk kernel (at a predefined number of steps) for estimating pairwise affinity between nodes.
#'
#' @param g an object of class "igraph" or "graphNEL". It will be a weighted graph if having an edge attribute 'weight'. The edge directions are ignored for directed graphs
#' @param steps an integer specifying the number of steps that random walk performs. By default, it is 4
#' @param chance an integer specifying the chance of remaining at the same vertex. By default, it is 2, the higher the higher chance
#' @param verbose logical to indicate whether the messages will be displayed in the screen. By default, it sets to true for display
#' @return It returns a sparse matrix for pairwise affinity between nodes via short random walks
#' @note The input graph will treat as an unweighted graph if there is no 'weight' edge attribute associated with. The edge direction is not considered for the purpose of defining pairwise affinity; that is, adjacency matrix and its laplacian version are both symmetric.
#' @export
#' @seealso \code{\link{xRWkernel}}
#' @include xRWkernel.r
#' @examples
#' # 1) generate a random graph according to the ER model
#' set.seed(825)
#' g <- erdos.renyi.game(10, 3/10)
#' V(g)$name <- paste0('n',1:vcount(g))
#'
#' \dontrun{
#' # 2) pre-computate affinity matrix between all nodes
#' Amatrix <- xRWkernel(g)
#' # visualise affinity matrix
#' visHeatmapAdv(as.matrix(Amatrix), colormap="wyr", KeyValueName="Affinity")
#' }
xRWkernel <- function(g, steps=4, chance=2, verbose=TRUE)
{
    startT <- Sys.time()
    if(verbose){
        message(paste(c("Start at ",as.character(startT)), collapse=""), appendLF=TRUE)
        message("", appendLF=TRUE)
    }

    ####################################################################################
    ## Coerce the input to an undirected igraph object.
    ## FIX: inherits() replaces the fragile class(x) == "..." comparisons,
    ## which misbehave (and error in recent R versions inside if()) when an
    ## object carries more than one class.
    if(inherits(g, "graphNEL")){
        ig <- igraph.from.graphNEL(g)
    }else{
        ig <- g
    }
    if(!inherits(ig, "igraph")){
        stop("The function must apply to either 'igraph' or 'graphNEL' object.\n")
    }
    if(igraph::is_directed(ig)){
        # Edge directions are ignored: collapse to undirected, keeping the
        # maximum weight among parallel edges
        ig <- igraph::as.undirected(ig, mode="collapse", edge.attr.comb="max")
    }

    if(verbose){
        now <- Sys.time()
        message(sprintf("First, get the adjacency matrix of the input graph (%s) ...", as.character(now)), appendLF=TRUE)
    }
    if ("weight" %in% list.edge.attributes(ig)){
        adjM <- get.adjacency(ig, type="both", attr="weight", edges=FALSE, names=TRUE, sparse=getIgraphOpt("sparsematrices"))
        if(verbose){
            message(sprintf("\tNotes: using weighted graph!"), appendLF=TRUE)
        }
    }else{
        adjM <- get.adjacency(ig, type="both", attr=NULL, edges=FALSE, names=TRUE, sparse=getIgraphOpt("sparsematrices"))
        if(verbose){
            message(sprintf("\tNotes: using unweighted graph!"), appendLF=TRUE)
        }
    }

    if(verbose){
        message(sprintf("Then, laplacian normalisation of the adjacency matrix (%s) ...", as.character(Sys.time())), appendLF=TRUE)
    }
    ## D is the degree matrix of the graph (^-1/2); the normalised adjacency
    ## is the symmetric laplacian normalisation D^(-1/2) A D^(-1/2)
    A <- adjM!=0
    D <- Matrix::Diagonal(x=(Matrix::colSums(A))^(-0.5))
    nadjM <- D %*% adjM %*% D
    #nadjM <- as.matrix(nadjM)

    steps <- as.integer(steps)
    if(verbose){
        message(sprintf("Last, %d-step random walk kernel (%s) ...", steps, as.character(Sys.time())), appendLF=TRUE)
    }
    if(verbose){
        message(sprintf("\tstep 1 (%s) ...", as.character(Sys.time())), appendLF=TRUE)
    }
    ## one-step random walk kernel: (chance-1)*I + normalised adjacency.
    ## FIX: use vcount(ig), not vcount(g) -- vcount() requires an igraph
    ## object, so the original errored whenever g was a graphNEL.
    #I <- Matrix::Matrix(diag(x=chance-1,nrow=vcount(ig)), sparse=TRUE)
    I <- Matrix::Diagonal(x=rep(chance-1,vcount(ig)))
    RW <- I + nadjM
    res <- RW
    ## p-step random walk kernel: repeated matrix product, RW^steps
    if(steps >=2){
        for (i in 2:steps){
            if(verbose){
                message(sprintf("\tstep %d (%s) ...", i, as.character(Sys.time())), appendLF=TRUE)
            }
            res <- res %*% RW
        }
    }
    ####################################################################################
    endT <- Sys.time()
    if(verbose){
        message(paste(c("\nFinish at ",as.character(endT)), collapse=""), appendLF=TRUE)
    }
    runTime <- as.numeric(difftime(strptime(endT, "%Y-%m-%d %H:%M:%S"), strptime(startT, "%Y-%m-%d %H:%M:%S"), units="secs"))
    message(paste(c("Runtime in total is: ",runTime," secs\n"), collapse=""), appendLF=TRUE)

    invisible(res)
}
|
cccb695048d10a288fe74a878bc2f740a68f1bbe
|
401d48b917525346b9b4320607ebb4a7df373d8a
|
/R/tidal_codes.R
|
a2762c5128640aa9c4a2400c89185cb6070f6bea
|
[] |
no_license
|
CamiloCarneiro/eneRgetics
|
0f7822f027320dbf2c04eb046d1a4e3d00062c3e
|
0910832fd62330dbc11022b3206c19c1670d6158
|
refs/heads/master
| 2023-04-27T11:37:20.387666
| 2023-04-14T07:33:55
| 2023-04-14T07:33:55
| 202,355,162
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,586
|
r
|
tidal_codes.R
|
#' Creates tidal codes
#'
#' Creates a numerical code for the tide state in relation to the closest low
#' tide peak.
#'
#' For each observation, the time difference to the nearest low tide is
#' calculated (in hours) and coded into a numerical category where 0
#' represents low tide and 6 high tide. If tide is ebbing, values are negative.
#' If tide is rising values are positive.
#'
#' Note: To ensure tide code is calculated accurately for the observations given,
#' you must ensure that observation dates contain a time zone attribute. You can
#' define this with the field \code{tz} in \code{\link{as.POSIXct}}. For help with
#' accepted time zone codes, see \code{\link{OlsonNames}}.
#'
#' @param observations data to which tide code will be appended; must have a
#' column named "date" with datetime info in POSIXct format.
#' @param tide_table a tide table upon tide codes will be calculated; must
#' have a column named "date" with datetime info in POSIXct format and
#' another column named "type" with values 'High' or 'Low'.
#' @param round_digits defines the accuracy of tide codes. If round_digits = 1
#' (default), observations are coded per hour. If round_digits = 0.5, creates
#' half-hour tide codes.
#' @return The \code{observations} data frame entered with a new column named
#' 'tide_code'.
#' @examples
#' tidalCodes(observations, tides_january)
#' @export
tidal_codes <- function(observations, tide_table, round_digits = 1) {
  # Check that both data frames are provided (|| because the condition is scalar).
  if (missing(observations) || missing(tide_table)) {
    stop("Observations and/or tide_table is missing")
  }
  if (!checkmate::testDataFrame(observations)) {
    stop("Observations data must be a data frame", call. = FALSE)
  }
  if (!checkmate::testDataFrame(tide_table)) {
    stop("Tide table must be a data frame", call. = FALSE)
  }
  # Create the tide code column in the observations data frame.
  observations$tide_code <- NA
  # Identify the datetime (POSIXct) columns in both data frames.
  obs_dt_col <- sapply(observations, lubridate::is.POSIXct)
  tide_dt_col <- sapply(tide_table, lubridate::is.POSIXct)
  if (length(which(obs_dt_col)) == 0) {
    stop("Failed to identify observation dates - no POSIXct data")
  } else {
    obs_dt <- which(obs_dt_col)
  }
  if (length(which(tide_dt_col)) == 0) {
    # BUG FIX: this message previously referred to observation dates, hiding
    # which of the two inputs was actually malformed.
    stop("Failed to identify tide table dates - no POSIXct data")
  } else {
    tide_dt <- which(tide_dt_col)
  }
  # Warn about missing or mismatched time zones, which would shift the codes.
  if (is.null(attr(observations[, obs_dt], "tzone"))) {
    warning("Time zone attribute is missing in Observations, assuming \"GMT\"")
    observations[, obs_dt] <- lubridate::with_tz(observations[, obs_dt], tzone = "GMT")
  }
  if (attr(observations[, obs_dt], "tzone") != attr(tide_table[, tide_dt], "tzone")) {
    warning("Different time zones detected in observations and tide_table")
  }
  # Select only low tides once, outside the loop (loop-invariant).
  low_tides <- tide_table[tide_table$type == "Low", ]
  # For each observation, find the signed time difference (hours) to the
  # nearest low tide: negative = ebbing, positive = rising.
  for (i in seq_len(nrow(observations))) {
    ind <- as.numeric(difftime(observations[i, obs_dt], low_tides[, tide_dt], units = "hours"))
    # Attribute the difference to the closest low tide ([1] breaks ties).
    observations[i, "tide_code"] <- ind[which(abs(ind) == min(abs(ind)))][1]
  }
  # Round to the requested accuracy (1 = hourly codes, 0.5 = half-hour codes).
  r_any <- function(x, accuracy, f = round) { f(x / accuracy) * accuracy }
  observations$tide_code <- r_any(observations$tide_code, accuracy = round_digits)
  return(observations)
}
|
9ba0c56c14b94926cd4b873462ddee9ce9b7055f
|
8c20cb1afd621c732382ffe50a53b1a978010a42
|
/R/plotSimulation.R
|
4506e401f62e2138bc18f793c6f52054fda0b270
|
[] |
no_license
|
BlasBenito/virtualPollen
|
01daa3bec05c5caeefa2f52109df8a7df115c0d0
|
b33c7929ce802f3764fdcee39a911243984698ee
|
refs/heads/master
| 2022-02-28T17:42:46.520588
| 2022-02-11T17:46:27
| 2022-02-11T17:46:27
| 177,762,046
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,125
|
r
|
plotSimulation.R
|
#' Plots results of \code{\link{simulatePopulation}}.
#'
#' @description This function takes as input the output of \code{\link{simulatePopulation}}, and plots the pollen abundance, number of individuals, biomass, driver, and environmental suitability of each simulation outcome.
#'
#'
#' @usage plotSimulation(
#' simulation.output = NULL,
#' species = "all",
#' burnin = FALSE,
#' filename = NULL,
#' time.zoom = NULL,
#' panels = c("Driver A",
#' "Driver B",
#' "Suitability",
#' "Population",
#' "Mortality",
#' "Biomass",
#' "Pollen"
#' ),
#' plot.title = NULL,
#' width = 12,
#' text.size = 20,
#' title.size = 25,
#' line.size = 1
#' )
#'
#' @param simulation.output output of \code{\link{simulatePopulation}}.
#' @param species a number or vector of numbers representing rows in the parameters dataframe, or a string or vector of strings referencing to the "label" column of the parameters dataframe.
#' @param burnin if \code{FALSE}, burn-in period is not considered in the model.
#' @param filename character string, name of output pdf file. If NULL or empty, no pdf is produced. It shouldn't include the extension of the output file.
#' @param time.zoom vector of two numbers indicating the beginnign and end of the time interval to be plotted (i.e. "c(5000, 10000)")
#' @param panels character string or vector of character strings with these possible values: "Driver A", "Driver B","Suitability", "Population", "Mortality", "Biomass", "Pollen".
#' @param plot.title character string to use as plot title.
#' @param width plot width in inches.
#' @param text.size text size of the plot.
#' @param title.size plot title size.
#' @param line.size size of lines in plots.
#'
#' @details The user can decide what virtual taxa to plot (argument \code{species}), and what information to show through the \code{panels} argument. Output is plotted on screen by default, and printed to pdf if the \code{filename} argument is filled.
#'
#' @author Blas M. Benito <blasbenito@gmail.com>
#'
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{compareSimulations}}
#'
#' @examples
#'
#'#getting example data
#'data(simulation)
#'
#'#plot first simulation
#'plotSimulation(simulation.output = simulation[[1]])
#'
#' @export
plotSimulation <- function(
  simulation.output = NULL,
  species = "all",
  burnin = FALSE,
  filename = NULL,
  time.zoom = NULL,
  panels = c("Driver A",
             "Driver B",
             "Suitability",
             "Population",
             "Mortality",
             "Biomass",
             "Pollen"),
  plot.title = NULL,
  width = 12,
  text.size = 20,
  title.size = 25,
  line.size = 1){

  # Check and set panels: any malformed 'panels' argument falls back to all panels.
  if(length(panels) == 1){
    if(panels == "all" | panels == "ALL" | panels == "All" | is.null(panels) | length(panels) == 0 | !is.character(panels)){
      panels <- c("Driver A", "Driver B", "Suitability", "Population", "Mortality", "Biomass", "Pollen")
    }
  } else {
    if(sum(!(panels %in% c("Driver A", "Driver B", "Suitability", "Population", "Mortality", "Biomass", "Pollen"))) >= 1){
      warning(paste("There is something wrong with your 'panels' argument. Available panels are ", c("Driver A", "Driver B","Suitability", "Population", "Mortality", "Biomass", "Pollen"), " . All panels will be plotted instead"))
      panels <- c("Driver A", "Driver B", "Suitability", "Population", "Mortality", "Biomass", "Pollen")
    }
  }

  # Check time.zoom.
  if(!is.null(time.zoom) & length(time.zoom) != 2){stop("Argument time.zoom must be a vector of length two, as in: time.zoom=c(1000, 2000)")}

  # List to store one plot per selected species.
  plots.list <- list()

  # SELECTING SPECIES
  # -----------------
  # Dictionary mapping species names to list indexes.
  if(inherits(simulation.output, "list")){
    names.dictionary <- data.frame(name = names(simulation.output), index = 1:length(simulation.output))
  } else {
    # Fake names.dictionary to be used downstream when input is a data.frame.
    names.dictionary <- data.frame(name = 1, index = 1)
  }

  # BUG FIX: the documentation allows 'species' to be a vector, but the original
  # scalar comparisons (species == "all", species %in% ... inside if) produced
  # errors/warnings for vector input; any() makes each condition length one.
  if(length(species) == 1 && species %in% c("all", "ALL", "All")){
    selected.species <- names.dictionary$index
  } else {
    # Wrong names or indexes.
    if(!any(species %in% names.dictionary$name) && !any(species %in% names.dictionary$index)){
      stop("You have selected species that are not available in the parameters table.")
    }
    # Correct species names or indexes.
    if(any(species %in% names.dictionary$name)){
      selected.species <- names.dictionary[names.dictionary$name %in% species, "index"]
    }
    if(any(species %in% names.dictionary$index)){
      selected.species <- species
    }
  }
  if(inherits(simulation.output, "data.frame")){
    selected.species <- 1
  }

  # ITERATING THROUGH SPECIES
  for(i in selected.species){

    # Getting the data for this species.
    if(inherits(simulation.output, "list")){
      output <- simulation.output[[i]]
    }
    if(inherits(simulation.output, "data.frame")){
      output <- simulation.output
    }

    # To long format (the 'Period' column, when present, is kept out of the melt).
    if("Period" %in% colnames(output)){
      output.long <- tidyr::gather(data=output, Variable, Value, 2:(ncol(output)-1))
      # Removing burn-in period if burnin == FALSE.
      if(burnin == FALSE){output.long <- output.long[output.long$Period == "Simulation",]}
    } else {
      # BUG FIX: gather() was called without the tidyr:: prefix here, failing
      # whenever tidyr was not attached (it is namespaced everywhere else).
      output.long <- tidyr::gather(data=output, Variable, Value, 2:ncol(output))
    }

    # Age limits of the plot.
    if(is.null(time.zoom)){
      age.min <- 1
      age.max <- max(output.long$Time)
    } else {
      age.min <- time.zoom[1]
      age.max <- time.zoom[2]
      # burnin to FALSE to avoid plotting it.
      burnin <- FALSE
    }

    # Preparing groups for facets.
    output.long$Facets <- "Population"
    output.long[output.long$Variable == "Pollen", "Facets"] <- "Pollen"
    output.long[grep("Biomass", output.long$Variable), "Facets"] <- "Biomass"
    output.long[grep("Mortality", output.long$Variable), "Facets"] <- "Mortality"
    output.long[output.long$Variable == "Suitability", "Facets"] <- "Suitability"
    output.long[output.long$Variable == "Driver.A", "Facets"] <- "Driver A"
    # Checking if driver B is empty.
    if(sum(is.na(output$Driver.B))!=nrow(output)){
      output.long[output.long$Variable == "Driver.B", "Facets"] <- "Driver B"
      # Facets order.
      output.long$Facets <- factor(output.long$Facets, levels=c("Driver A", "Driver B","Suitability", "Population", "Mortality", "Biomass", "Pollen"))
    } else {
      output.long$Facets <- factor(output.long$Facets, levels=c("Driver A","Suitability", "Population", "Mortality", "Biomass", "Pollen"))
    }

    # Preparing subgroups for color.
    output.long$Color <- "Adults"
    output.long[grep("immature", output.long$Variable), "Color"] <- "Saplings"
    output.long[grep("total", output.long$Variable), "Color"] <- "Total biomass"
    output.long[output.long$Variable == "Pollen", "Color"] <- "Pollen"
    output.long[output.long$Variable == "Population.viable.seeds", "Color"] <- "Seedlings"
    output.long[output.long$Variable == "Suitability", "Color"] <- "Suitability"
    output.long[output.long$Variable == "Driver.A", "Color"] <- "Driver A"
    # Checking if driver B is empty.
    if(sum(is.na(output$Driver.B))!=nrow(output)){
      output.long[output.long$Variable == "Driver.B", "Color"] <- "Driver B"
      # Color order.
      output.long$Color <- factor(output.long$Color, levels = c("Driver A", "Driver B", "Suitability", "Total biomass", "Adults", "Saplings", "Seedlings", "Pollen"))
      # Palette.
      color.palette <- c("#2F642A", "#57AD4F", "#000000", "#C45055", "#75E46A", "#4572A9", "gray40", "gray40")
      names(color.palette) <- c("Adults", "Saplings", "Total biomass", "Pollen", "Seedlings", "Suitability", "Driver A", "Driver B")
    } else {
      output.long$Color <- factor(output.long$Color, levels = c("Driver A", "Suitability", "Total biomass", "Adults", "Saplings", "Seedlings", "Pollen"))
      # Palette.
      color.palette <- c("#2F642A", "#57AD4F", "#000000", "#C45055", "#75E46A", "#4572A9", "gray40")
      names(color.palette) <- c("Adults", "Saplings", "Total biomass", "Pollen", "Seedlings", "Suitability", "Driver A")
    }

    # Removing unwanted facets/panels.
    output.long <- output.long[output.long$Facets %in% panels, ]

    # Setting up the plot title.
    # BUG FIX: the original assigned back to 'plot.title', so when plot.title
    # was NULL every species after the first reused the first species' title.
    if(is.null(plot.title)){
      current.title <- paste("Taxon: ", names(simulation.output)[i], sep = "")
    } else {
      current.title <- plot.title
    }

    # Plot.
    p1 <- ggplot(data = output.long, aes(x = Time, y = Value, color = Color)) +
      geom_rect(data = output.long, aes(xmin = min(min(Time), 0), xmax = 0, ymin = 0, ymax = Inf), inherit.aes = FALSE, fill = "gray90") +
      geom_line(size = line.size) +
      scale_colour_manual(values = color.palette) +
      facet_wrap(facets = "Facets", scales = "free_y", ncol = 1, drop = TRUE) +
      ggtitle(current.title) +
      xlab("Time (years)") +
      ylab("") +
      geom_vline(xintercept = seq(0, max(output.long$Time), by = 200), color = "gray") +
      scale_x_continuous(breaks = seq(age.min, age.max, by = age.max/10)) +
      theme(text = element_text(size = text.size),
            axis.text = element_text(size = text.size),
            axis.title = element_text(size = text.size),
            plot.title = element_text(size = title.size),
            plot.margin = unit(c(0.5, 1, 0.5, -0.5), "cm"),
            panel.spacing = unit(0, "lines")) +
      labs(color = 'Legend') +
      guides(color = guide_legend(override.aes = list(size = 2))) +
      coord_cartesian(xlim = c(age.min, age.max)) +
      cowplot::theme_cowplot() +
      theme(legend.position = "bottom")

    plots.list[[i]] <- p1

  } # end of iteration through species

  # Plots to screen.
  invisible(lapply(plots.list, print))

  # Plots to pdf.
  if(!is.null(filename) & is.character(filename)){
    # BUG FIX: the pdf device previously hard-coded width = 12, ignoring the
    # 'width' argument (whose default is 12, so default behaviour is unchanged).
    pdf(paste(filename, ".pdf", sep = ""), width = width, height = length(unique(output.long$Facets))*2)
    invisible(lapply(plots.list, print))
    dev.off()
  }

} # end of plotting function
|
ddefb43ab7860b30d1cf489ac0ead1c4cd10898e
|
0a9fa200d07db384931ccd5b517eb26d09d89e8b
|
/man/Approximator.Rd
|
daa5d44a2520f5e391758965db49edf57dda9c33
|
[] |
no_license
|
Alexandra930/PhyInformR
|
6b87d2f96275a3b150d0b5c7ad5c5f2c6ecedd8a
|
c111e7478123ab872842e1d29d7586ac3c74dc5f
|
refs/heads/master
| 2022-03-16T14:15:35.694345
| 2016-11-15T14:59:44
| 2016-11-15T14:59:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,271
|
rd
|
Approximator.Rd
|
\name{Approximator}
\alias{Approximator}
\title{Quantify Quartet Resolution Probabilities Using 2012 Formulation}
\description{
Quantify QIRP, QIHP, or QIPP using the equations from Townsend et al. 2012.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Approximator(t, t0, rateVector, s)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{t}{ Time from tip of tree to focal internode }
\item{t0}{ Focal internode length }
\item{rateVector}{ An object containing a vector of site rates transformed to class "matrix" }
\item{s}{ A number representing the character state space that generated the site rates (e.g., s=2 for binary data)}
}
\references{ Townsend, J. P., Su, Z., and Tekle, Y. I. “Phylogenetic Signal and Noise: Predicting the Power of a Data Set to Resolve Phylogeny” Systematic biology 61, no. 5 (2012): 835–849. }
\author{ A. Dornburg
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
as.matrix(rag1)->rr
Approximator(100,0.5,rr,3)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9131d1e02c9f00e3ad9687c6637235c2e9639365
|
5822a9f99a63bc7f0f50ac89e67b4be7f8b7e35e
|
/modules/c_variables/wood_c_pool/make_wood_table.R
|
17f0f1d3ac0060c0a5e1572aa3b32f2725b8a079
|
[] |
no_license
|
mingkaijiang/EucFACE_modeling_2020_site_parameters
|
845362edcc0867d5becb3730305f08ddc7744b80
|
734c09a9f88b649b2300ac876bd70433839a5816
|
refs/heads/master
| 2021-04-16T01:20:29.998190
| 2020-03-24T04:49:25
| 2020-03-24T04:49:25
| 249,315,857
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,418
|
r
|
make_wood_table.R
|
# Make the live wood C pool
# Build the live wood C pool table: per-ring mean diameter, total biomass,
# basal area, max height, and stem density from EucFACE tree measurements.
make_wood_table <- function(ring_area){
  #### download the data from HIEv
  download_diameter_data()
  #### read in the 2012-16 tree measurement data sets
  f13 <- read.csv(file.path(getToPath(), "FACE_P0025_RA_TREEMEAS_2012-13_RAW-V1.csv"))
  f14 <- read.csv(file.path(getToPath(), "FACE_P0025_RA_TREEMEAS_2013-14_RAW_V1.csv"))
  f15 <- read.csv(file.path(getToPath(), "FACE_P0025_RA_TREEMEAS_2015_RAW_V1.csv"))
  f16 <- read.csv(file.path(getToPath(), "FACE_P0025_RA_TREEMEAS_2016_RAW_V1.csv"))
  # this file is not on HIEv yet!
  f12 <- read.csv("temp_files/EucFACE_dendrometers2011-12_RAW.csv")
  #### Read in the tree classification file and manually flag known dead trees
  classif <- read.csv("download/FACE_AUX_RA_TREE-DESCRIPTIONS_R_20130201.csv",stringsAsFactors = FALSE)
  classif$Active.FALSE.means.dead.[classif$Tree == 608] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 125] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 206] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 210] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 212] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 510] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 518] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 520] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 524] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 527] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 531] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 605] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 615] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 616] <- FALSE # This tree dead
  classif$Active.FALSE.means.dead.[classif$Tree == 617] <- FALSE # This tree dead
  #classif$Active.FALSE.means.dead.[classif$Tree == 101] <- FALSE # This tree dead in 2018
  #classif$Active.FALSE.means.dead.[classif$Tree == 219] <- FALSE # This tree dead in 2018
  #classif$Active.FALSE.means.dead.[classif$Tree == 220] <- FALSE # This tree dead in 2018
  #classif$Active.FALSE.means.dead.[classif$Tree == 621] <- FALSE # This tree dead in 2018
  #### Merge the files on tree id, ring, and CO2 treatment
  all <- merge(classif,f12,by=c("Tree","Ring","CO2.trt"))
  all <- merge(all,f13,by=c("Tree","Ring","CO2.trt"))
  all <- merge(all,f14,by=c("Tree","Ring","CO2.trt"))
  all <- merge(all,f15,by=c("Tree","Ring","CO2.trt"))
  all <- merge(all,f16,by=c("Tree","Ring","CO2.trt"))
  #### remove dead trees (missing status is treated as alive)
  # NOTE(review): assigning the string "TRUE" coerces the logical column to
  # character; the subset below still works via coercion, but confirm intended.
  all$Active.FALSE.means.dead.[is.na(all$Active.FALSE.means.dead.)] <- "TRUE"
  all <- subset(all, Active.FALSE.means.dead.== TRUE)
  #all <- all[complete.cases(all),]
  #### remove "CORR" columns and the dead-status column
  uncorr <- all[,-grep("CORR",names(all))]
  uncorr <- uncorr[,-grep("Coor",names(uncorr))]
  uncorr <- uncorr[,names(uncorr) != "Active.FALSE.means.dead."]
  #### make a long-form version of the dataframe (columns 7:58 are the dated measurements)
  long <- reshape(uncorr,idvar="Tree",varying=list(7:58),direction="long")
  dates <- names(uncorr)[7:58]
  long$Date <- c(rep(Sys.Date(),length(long$time))) #wasn't sure how else to make this column date type
  # convert the "X%d.%m.%Y" column names into real dates per observation
  for (i in (1:length(long$time))) {
    long$Date[i] <- as.Date(dates[long$time[i]],format="X%d.%m.%Y")
  }
  long <- renameCol(long,c("X17.02.2011"),c("diam"))
  long$diam <- as.numeric(long$diam)
  #### add biomass to long-form dataframe via the allometric equation
  long$biom <- allom_agb(long$diam) # in kg DM
  #### The bark removal affects the diameters mid-year.
  #### Hence, just calculate biomass once per year
  #### Specify dates here - may update this to March in future
  # NOTE(review): 'dates' below is defined but only the 2012-12-20 census is
  # actually used further down -- confirm whether the other years are needed.
  dates <- c(as.Date("2012-12-20"),as.Date("2013-12-20"),
             as.Date("2014-12-23"),as.Date("2015-12-14"),
             as.Date("2016-12-21"))
  data <- long[long$Date=="2012-12-20",]
  ### calculate basal area
  # NOTE(review): 'Diameter' and 'Height' are presumably columns carried over
  # from the merged classif data, while the reshaped dendrometer value lives in
  # 'diam' -- confirm which diameter is intended here.
  data$basal_area <- (pi/4) * data$Diameter^2
  ### calculate per-ring summaries
  outDF1 <- summaryBy(Diameter~Ring, FUN=mean, data=data, keep.names=T, na.rm=T)
  outDF2 <- summaryBy(biom+basal_area~Ring, FUN=sum, data=data, keep.names=T, na.rm=T)
  outDF3 <- summaryBy(Height~Ring, FUN=max, data=data, keep.names=T, na.rm=T)
  ### return biomass in unit of kg m-2 (per-ring totals divided by ring area)
  outDF1$Biomass <- outDF2$biom / ring_area
  outDF1$BA <- outDF2$basal_area / ring_area
  outDF1$Height <- outDF3$Height
  ### count number of trees per plot (6 fixed rings at EucFACE)
  for (i in 1:6) {
    tmpDF <- subset(data, Ring==i & Date=="2012-12-20")
    outDF1[outDF1$Ring==i, "Trees"] <- nrow(tmpDF)
  }
  ### unit conversions
  # from no. tree per ring to no. tree per hectare
  outDF1$Trees <- outDF1$Trees / ring_area * 10000
  # from kg DM per m-2 to Mg DM per hectare
  outDF1$Biomass <- outDF1$Biomass * 10000 / 1000
  # label CO2 treatment: rings 1, 4, 5 are elevated CO2
  outDF1$Trt <- "aCO2"
  outDF1$Trt[outDF1$Ring%in%c(1,4,5)] <- "eCO2"
  ### per-ring table is returned; treatment-level summary left commented out
  #outDF <- summaryBy(Diameter+Biomass+BA+Height+Trees~Trt, FUN=c(mean, sd), data=outDF1, keep.names=T, na.rm=T)
  return(outDF1)
}
|
cab0a203f29ee0696c967b758f3e8eed3741dfc1
|
4f6fa43be679bed6016351f81dae8d642ca8b93e
|
/R/compat-dplyr.R
|
735d259fdff272af4f348566e4614e9be6318d51
|
[] |
no_license
|
DavisVaughan/strapgod
|
9c0442b5b79d0bd47c2bfc2acaa01f4272706dff
|
ea2b1ecfc780a44ffa934c9bc7c2032954f4ffaa
|
refs/heads/master
| 2021-08-15T04:54:51.408714
| 2020-01-20T13:37:45
| 2020-01-20T13:37:45
| 149,317,827
| 67
| 7
| null | 2021-08-09T17:52:05
| 2018-09-18T16:18:02
|
R
|
UTF-8
|
R
| false
| false
| 6,049
|
r
|
compat-dplyr.R
|
# ------------------------------------------------------------------------------
# Interesting dplyr functions
# summarise()
# do()
# ungroup()
# group_nest()
# group_map()
# group_modify()
# group_walk()
# group_split()
# group_keys()
# group_indices()
# In theory we could let the default `summarise()` do its thing. But if the
# user did a double `bootstrapify()` call, only one level of it will be removed
# and the post-summarise() object will still be a resampled_df, even though
# all of the bootstrap rows have been materialized.
#' @importFrom dplyr summarise
#' @export
summarise.resampled_df <- function(.data, ...) {
  # NextMethod() runs dplyr's summarise on the materialised data; the result is
  # re-wrapped so group metadata stays consistent (see maybe_new_grouped_df()).
  maybe_new_grouped_df(NextMethod())
}
# For `group_nest()`, the default method works unless `keep = TRUE`. In that
# case, we need to `collect()` so the groups are available to be 'kept'.
#' @importFrom dplyr group_nest
#' @export
group_nest.resampled_df <- function(.tbl, ..., .key = "data", keep = FALSE) {
  # The default method is fine unless keep = TRUE, in which case the group
  # columns must be materialised with collect() so they can be 'kept'.
  if (!keep) {
    return(NextMethod())
  }
  dplyr::group_nest(collect(.tbl), ..., .key = .key, keep = keep)
}
# Same idea as group_nest()
#' @importFrom dplyr group_split
#' @export
group_split.resampled_df <- function(.tbl, ..., keep = TRUE) {
  # Same idea as group_nest(): keep = TRUE needs the group columns, so the
  # virtual rows must be materialised with collect() first.
  if (!keep) {
    return(NextMethod())
  }
  dplyr::group_split(collect(.tbl), ..., keep = keep)
}
# `group_indices()` returns garbage unless we `collect()` first
#' @importFrom dplyr group_indices
#' @export
group_indices.resampled_df <- function(.data, ...) {
  # group_indices() returns garbage on the virtual rows, so collect() first.
  dplyr::group_indices(collect(.data), ...)
}
# ------------------------------------------------------------------------------
# Interesting dplyr functions - Standard evaluation backwards compat
# nocov start
#' @importFrom dplyr summarise_
#' @export
summarise_.resampled_df <- function(.data, ..., .dots = list()) {
  # Standard-evaluation twin of summarise.resampled_df(): re-wrap group metadata.
  maybe_new_grouped_df(NextMethod())
}

#' @importFrom dplyr group_indices_
#' @export
group_indices_.resampled_df <- function(.data, ..., .dots = list()) {
  # Standard-evaluation twin of group_indices.resampled_df(): materialise first.
  # BUG FIX: the original forwarded `.dots = list()`, silently discarding the
  # caller's `.dots` argument instead of passing it through.
  dplyr::group_indices_(collect(.data), ..., .dots = .dots)
}
# nocov end
# ------------------------------------------------------------------------------
# dplyr support
# The single-table verbs and joins below all share one strategy: materialise
# the virtual bootstrap rows with collect(), then delegate to the plain dplyr
# verb on the resulting ordinary data frame.
#' @importFrom dplyr mutate
#' @export
mutate.resampled_df <- function(.data, ...) {
  dplyr::mutate(collect(.data), ...)
}

#' @importFrom dplyr transmute
#' @export
transmute.resampled_df <- function(.data, ...) {
  dplyr::transmute(collect(.data), ...)
}

# Required to export filter, otherwise:
# Warning: declared S3 method 'filter.resampled_df' not found
# because of stats::filter
#' @export
dplyr::filter

#' @importFrom dplyr filter
#' @export
filter.resampled_df <- function(.data, ...) {
  dplyr::filter(collect(.data), ...)
}

#' @importFrom dplyr arrange
#' @export
arrange.resampled_df <- function(.data, ...) {
  dplyr::arrange(collect(.data), ...)
}

#' @importFrom dplyr distinct
#' @export
distinct.resampled_df <- function(.data, ..., .keep_all = FALSE) {
  dplyr::distinct(collect(.data), ..., .keep_all = .keep_all)
}

#' @importFrom dplyr select
#' @export
select.resampled_df <- function(.data, ...) {
  dplyr::select(collect(.data), ...)
}

#' @importFrom dplyr slice
#' @export
slice.resampled_df <- function(.data, ...) {
  dplyr::slice(collect(.data), ...)
}

#' @importFrom dplyr pull
#' @export
pull.resampled_df <- function(.data, var = -1) {
  # var is re-quoted so it is evaluated in the caller's context, as pull() expects
  dplyr::pull(collect(.data), var = !!rlang::enquo(var))
}

#' @importFrom dplyr rename
#' @export
rename.resampled_df <- function(.data, ...) {
  dplyr::rename(collect(.data), ...)
}

#' @importFrom dplyr full_join
#' @export
full_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
  dplyr::full_join(collect(x), collect(y), by = by, copy = copy, suffix = suffix, ...)
}

#' @importFrom dplyr inner_join
#' @export
inner_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
  dplyr::inner_join(collect(x), collect(y), by = by, copy = copy, suffix = suffix, ...)
}

#' @importFrom dplyr left_join
#' @export
left_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
  dplyr::left_join(collect(x), collect(y), by = by, copy = copy, suffix = suffix, ...)
}

#' @importFrom dplyr right_join
#' @export
right_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
  dplyr::right_join(collect(x), collect(y), by = by, copy = copy, suffix = suffix, ...)
}

#' @importFrom dplyr anti_join
#' @export
anti_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, ...) {
  dplyr::anti_join(collect(x), collect(y), by = by, copy = copy, ...)
}

#' @importFrom dplyr semi_join
#' @export
semi_join.resampled_df <- function(x, y, by = NULL, copy = FALSE, ...) {
  dplyr::semi_join(collect(x), collect(y), by = by, copy = copy, ...)
}

#' @importFrom dplyr group_by
#' @export
group_by.resampled_df <- function(.data, ..., add = FALSE, .drop = FALSE) {
  # add = TRUE keeps the existing (virtual) groups, so materialise them;
  # otherwise the resampling groups are dropped before regrouping.
  if (add) {
    .data <- collect(.data)
  }
  else {
    .data <- dplyr::ungroup(.data)
  }
  dplyr::group_by(.data, ..., add = add, .drop = .drop)
}
# ------------------------------------------------------------------------------
# Backwards compat support for deprecated standard eval dplyr
# nocov start
# Only a few of them need it. arrange_.grouped_df()
# directly calls arrange_impl() causing a problem.
#' @importFrom dplyr arrange_
#' @export
arrange_.resampled_df <- function(.data, ..., .dots = list()) {
  # arrange_.grouped_df() calls arrange_impl() directly, which breaks on the
  # virtual rows, so materialise with collect() before delegating.
  dplyr::arrange_(collect(.data), ..., .dots = .dots)
}

#' @importFrom dplyr mutate_
#' @export
mutate_.resampled_df <- function(.data, ..., .dots = list()) {
  # Standard-evaluation twin of mutate.resampled_df().
  dplyr::mutate_(collect(.data), ..., .dots = .dots)
}

#' @importFrom dplyr slice_
#' @export
slice_.resampled_df <- function(.data, ..., .dots = list()) {
  # Standard-evaluation twin of slice.resampled_df().
  dplyr::slice_(collect(.data), ..., .dots = .dots)
}
# nocov end
# ------------------------------------------------------------------------------
# Util
maybe_new_grouped_df <- function(x) {
  # Ungrouped data frames pass through untouched; grouped ones are rebuilt so
  # their group metadata is freshly recomputed.
  if (!dplyr::is_grouped_df(x)) {
    return(x)
  }
  dplyr::new_grouped_df(x = x, groups = dplyr::group_data(x))
}
|
1cde74b36cd7f8b56601929c9294f8badb59ad7f
|
4fed9d47a2af0bd99de61068b7ab54f08b109ebd
|
/Rmetapop/man/LVBweight.Rd
|
64092eba4f3d89af88747324e5a3b657969058bb
|
[] |
no_license
|
dkokamoto/Rmetapop
|
402d5dde93b103df757d54e1852ce20e61c490f1
|
281c1fbf4c233c1504ba0c116ffbbff9836cf351
|
refs/heads/master
| 2022-10-12T14:55:11.954234
| 2018-05-28T18:48:43
| 2018-05-28T18:48:43
| 38,710,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 326
|
rd
|
LVBweight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ancillary.R
\name{LVBweight}
\alias{LVBweight}
\title{Weight at age using the LVB growth equation and length-weight relationship.}
\usage{
LVBweight(age)
}
\description{
Weight at age using the LVB growth equation and length-weight relationship.
}
|
100acaf5fba874cee6ced05c7a4157be1aec6099
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SpatialTools/examples/dist2.Rd.R
|
98f52957bc553c30c2a94b40cdfd8f2f99a11590
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
r
|
dist2.Rd.R
|
library(SpatialTools)
### Name: dist2
### Title: Calculate Euclidean distance matrix between coordinates of two
###   matrices
### Aliases: dist2

### ** Examples

# two sets of random 3-D coordinates: 10 points and 20 points
x1 <- matrix(rnorm(30), ncol = 3)
x2 <- matrix(rnorm(60), ncol = 3)
# pairwise Euclidean distances between the rows of x1 and x2
dist2(x1, x2)
|
d26d3d6ec52a10a842bc40785c2394810393c2f1
|
f3824adc3a9a5865b6d6cf78641fdda1db7598a1
|
/man/priorityqueue.Rd
|
9951f2c6e2402bee78f1f4ba4d88aa4bdffb0b26
|
[
"MIT"
] |
permissive
|
n8epi/gmRa
|
31f3d3f99e74af0eee054fa8b743100354c0a3d1
|
29764e76b65482e7663915d6ee403cc94d5620d3
|
refs/heads/master
| 2021-01-10T06:52:47.200002
| 2016-09-27T19:37:49
| 2016-09-27T19:37:49
| 54,762,691
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 250
|
rd
|
priorityqueue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_structures.R
\name{priorityqueue}
\alias{priorityqueue}
\title{Priority Queue data structure}
\usage{
priorityqueue()
}
\description{
Priority Queue data structure
}
|
a5d7bcf55c9474889e693cb84aa862e78049dd74
|
1ac8617d2cc54b131798b7ffb8eb3ad81059010c
|
/Sentiment Analysis Using R/ui.R
|
9cdc7db7a336c8449d0228254cdaff186b41affd
|
[] |
no_license
|
paonikar/Sentiment-analysis-R-
|
5ffea59251144dde4ee716c38f83f955bf7b134d
|
eab4d5f6ec5c2827bbcd6e3623b05d115ae8699c
|
refs/heads/master
| 2020-12-04T05:01:54.258414
| 2020-01-03T16:27:53
| 2020-01-03T16:27:53
| 231,622,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,611
|
r
|
ui.R
|
library(shiny)

# Shiny UI for the sentiment-analysis app: the sidebar collects a hashtag and
# the number of recent tweets to analyse; the main panel presents results in
# tabs (summary scores, word cloud, histograms, pie chart, raw tweets, top
# users, trending topics by location, and per-user hashtag usage).
shinyUI(pageWithSidebar(
  headerPanel("Sentiment Analysis Using R"),

  # Getting User Inputs
  sidebarPanel(textInput("searchTerm", "Enter data to be searched with '#'", "#"),
               sliderInput("maxTweets","Set the number of recent tweets to be used for analysis:",min=5,max=1000,value=500),
               submitButton(text="Analyse")),

  mainPanel(
    tabsetPanel(
      # numeric sentiment scores rendered by the server (tp, tn, tt, ...)
      tabPanel("Summarised Results",HTML("<div><h3>Sentiment analysis results:</h3></div>"),
               HTML("<div><t>Total positive score:</t></div>"),verbatimTextOutput("tp"),
               HTML("<div><t>Total negative score:</t></div>"),verbatimTextOutput("tn"),
               HTML("<div><t>Overall sentiment score:</t></div>"),verbatimTextOutput("tt"),
               HTML("<div><t>Negative sentiment index:</t></div>"),verbatimTextOutput("nSI"),
               HTML("<div><t>Positive sentiment index:</t></div>"),verbatimTextOutput("pSI"),
               HTML("<div><t>Negative sentiment in percent:</t></div>"),verbatimTextOutput("nSpct"),
               HTML("<div><t>Positive sentiment in percent:</t></div>"),verbatimTextOutput("pSpct")),
      tabPanel("Word-Cloud",HTML("<div><h3>Most used words associated with the selected hashtag</h3></div>"),plotOutput("word")),
      tabPanel("Histograms",HTML("<div><h3> Graphical portrayal of opinion-mining pertinent to this hashtag
                                 </h3></div>"), plotOutput("histPos"), plotOutput("histNeg"), plotOutput("histScore")),
      tabPanel("Pie Chart",HTML("<div><h3>Pie Chart</h3></div>"), plotOutput("piechart")),
      tabPanel("Analysed Tweets",HTML( "<div><h3> Tweets tabulated corresponding to their sentiment scores </h3></div>"), tableOutput("tabledata")),
      tabPanel("Top Users",HTML("<div><h3> Top 20 users who used this hashtag</h3></div>"),plotOutput("tweetersplot"), tableOutput("tweeterstable")),
      # location picker for Twitter trends; values must match trend locations
      tabPanel("Trending Topics",HTML("<div>Top trending topics according to location</div>"),
               selectInput("place","Select a location",
                           c("Worldwide", "Algeria", "Argentina", "Australia", "Austria", "Bahrain", "Belarus", "Belgium",
                             "Brazil", "Canada", "Chile", "Colombia", "Denmark", "Dominican Republic", "Ecuador", "Egypt",
                             "France", "Germany", "Ghana", "Greece", "Guatemala", "India", "Indonesia", "Ireland", "Israel",
                             "Italy", "Japan", "Jordan", "Kenya", "Korea", "Kuwait", "Latvia", "Lebanon", "Malaysia", "Mexico",
                             "Netherlands", "New Zealand", "Nigeria", "Norway", "Oman", "Pakistan", "Panama", "Peru", "Philippines",
                             "Poland", "Portugal", "Puerto Rico", "Qatar", "Russia", "Saudi Arabia", "Singapore", "South Africa",
                             "Spain", "Sweden", "Switzerland", "Thailand", "Turkey", "Ukraine", "United Arab Emirates",
                             "United Kingdom", "United States", "Venezuela", "Vietnam"), selected = "United States", selectize = TRUE),
               submitButton(text="Search"),HTML("<div><h3> Location-based hot-topics, current:</h3></div>"),
               tableOutput("trendtable"),
               HTML("<div> </div>")),
      tabPanel("User specific hashtag-usage",textInput("user", "Analyse Twitter handle:", "@"),submitButton(text="Analyse"),plotOutput("tophashtagsplot"),HTML
               ("<div> <h3>Hashtag frequencies in the tweets of the Twitter User</h3></div>"))
    )#end of tabset panel
  )#end of main panel
))#end of shinyUI
|
2398c7780a2aea98ecd2e7c7c3093880f86634de
|
4c50e336c95095ce3fac4e6333fc3a83db35dbc6
|
/R/CCAMLRGIS.R
|
c7bca2db38018221c1a223bcaeeb68b9355b808a
|
[] |
no_license
|
rsbivand/CCAMLRGIS
|
ee0a55cda86401d7904f86b03ee8c8b27f3c2006
|
8fd07db6efbab3983deeb5ebf260afb80be62782
|
refs/heads/master
| 2020-11-26T09:25:29.588205
| 2020-05-25T10:38:54
| 2020-05-25T10:38:54
| 229,028,616
| 0
| 0
| null | 2019-12-19T10:14:41
| 2019-12-19T10:14:40
| null |
UTF-8
|
R
| false
| false
| 2,012
|
r
|
CCAMLRGIS.R
|
# Declare dataset / non-standard-evaluation symbols used by the package so
# R CMD check does not report them as undefined global variables.
utils::globalVariables(c('CCAMLRp','Coast','Depth_cols','Depth_cuts','Depth_cols2','Depth_cuts2',
'GridData','Labels','LineData','PointData','PolyData','SmallBathy','ID'))
#'
#' Loads and creates spatial data, including layers and tools that are relevant to CCAMLR activities.
#'
#' This package provides two broad categories of functions: load functions and create functions.
#'
#' @section Load functions:
#' Load functions are used to import CCAMLR geo-referenced layers and include:
#' \itemize{
#' \item \link{load_ASDs}
#' \item \link{load_SSRUs}
#' \item \link{load_RBs}
#' \item \link{load_SSMUs}
#' \item \link{load_MAs}
#' \item \link{load_Coastline}
#' \item \link{load_RefAreas}
#' \item \link{load_MPAs}
#' \item \link{load_EEZs}
#' }
#'
#' @section Create functions:
#' Create functions are used to create geo-referenced layers from user-generated data and include:
#' \itemize{
#' \item \link{create_Points}
#' \item \link{create_Lines}
#' \item \link{create_Polys}
#' \item \link{create_PolyGrids}
#' \item \link{create_Stations}
#' }
#'
#' @section Vignette:
#' To learn more about CCAMLRGIS, start with the vignette:
#' \code{browseVignettes(package = "CCAMLRGIS")}
#'
#' @seealso
#' The CCAMLRGIS package relies on several other packages which users may want to familiarize themselves with,
#' namely \href{https://CRAN.R-project.org/package=sp}{sp},
#' \href{https://CRAN.R-project.org/package=raster}{raster},
#' \href{https://CRAN.R-project.org/package=rgeos}{rgeos} and
#' \href{https://CRAN.R-project.org/package=rgdal}{rgdal}.
#'
#'
#' @docType package
#' @import sp
#' @import rgdal
#' @import rgeos
#' @import raster
#' @import geosphere
#' @importFrom dplyr distinct group_by summarise_all
#' @importFrom grDevices colorRampPalette recordPlot replayPlot
#' @importFrom graphics par rect segments
#' @importFrom methods slot
#' @importFrom utils read.csv setTxtProgressBar txtProgressBar edit menu
#' @importFrom magrittr %>%
#' @name CCAMLRGIS
NULL
|
20a78b953f494128060025a4afe66825c6482831
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/palasso/examples/dot-cv.Rd.R
|
64269f1331fe8b89a3a726e54714765666abedc9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
dot-cv.Rd.R
|
# Auto-extracted example code for the palasso:::.cv cross-validation helper.
# The Rd file ships no runnable example, so the example body is just NA.
library(palasso)
### Name: .cv
### Title: Cross-validation
### Aliases: .cv
### ** Examples
NA
|
46dd051fd04530bad14ec5cad314e65a3fece2d1
|
021498dd1ed1eb755575e7dfbc8b8f9fae927831
|
/man/ISOCitation.Rd
|
2fa6f246b3cf0915947ab7a6dd269fa0b52d8df6
|
[] |
no_license
|
65MO/geometa
|
f75fb2903a4f3633a5fcdd4259fd99f903189459
|
c49579eb5b2b994c234d19c3a30c5dad9bb25303
|
refs/heads/master
| 2020-04-08T12:22:44.690962
| 2018-11-22T22:51:57
| 2018-11-22T22:51:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,598
|
rd
|
ISOCitation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOCitation.R
\docType{class}
\name{ISOCitation}
\alias{ISOCitation}
\title{ISOCitation}
\format{\code{\link{R6Class}} object.}
\usage{
ISOCitation
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO Citation
}
\description{
ISOCitation
}
\section{Fields}{
\describe{
\item{\code{presentationForm}}{}
}}
\section{Methods}{
\describe{
\item{\code{new(xml)}}{
This method is used to instantiate an ISOCitation
}
\item{\code{setTitle(title)}}{
Sets the title
}
\item{\code{setAlternateTitle(alternateTitle)}}{
Sets an alternate title
}
\item{\code{addDate(date)}}{
Adds the date (ISODate object containing date and dateType)
}
\item{\code{setEdition(edition)}}{
Sets the edition
}
\item{\code{setEditionDate(editionDate)}}{
Sets the edition date, either an ISODate object containing date and dateType or
a simple R date "POSIXct"/"POSIXt" object. For thesaurus citations, an ISODate
should be used while for the general citation of \code{ISODataIdentification},
a simple R date should be used.
}
\item{\code{setIdentifier(identifier)}}{
Sets the identifier as object of class 'ISOMetaIdentifier'
}
\item{\code{setCitedResponsibleParty(rp)}}{
Sets the cited responsible party
}
\item{\code{setPresentationForm}}{
Sets the presentation form
}
}
}
\examples{
#create ISOCitation
md <- ISOCitation$new()
md$setTitle("sometitle")
md$setEdition("1.0")
md$setEditionDate(ISOdate(2015,1,1))
md$setIdentifier(ISOMetaIdentifier$new(code = "identifier"))
md$setPresentationForm("mapDigital")
#add a cited responsible party
rp <- ISOResponsibleParty$new()
rp$setIndividualName("someone")
rp$setOrganisationName("somewhere")
rp$setPositionName("someposition")
rp$setRole("pointOfContact")
contact <- ISOContact$new()
phone <- ISOTelephone$new()
phone$setVoice("myphonenumber")
phone$setFacsimile("myfacsimile")
contact$setPhone(phone)
address <- ISOAddress$new()
address$setDeliveryPoint("theaddress")
address$setCity("thecity")
address$setPostalCode("111")
address$setCountry("France")
address$setEmail("someone@theorg.org")
contact$setAddress(address)
res <- ISOOnlineResource$new()
res$setLinkage("http://www.somewhereovertheweb.org")
res$setName("somename")
contact$setOnlineResource(res)
rp$setContactInfo(contact)
md$setCitedResponsibleParty(rp)
xml <- md$encode()
}
\references{
ISO 19115:2003 - Geographic information -- Metadata
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{citation}
|
82b34ea8535ae4837a2bfd747da4a2f5f634088f
|
c98f1fae6230551046ed2ddee74fa64a267509d2
|
/R/aws.R
|
8941ddbb5933247076d7261840b6faa27ae46eae
|
[] |
no_license
|
datacamp/r-package-parser
|
21a870b4af8725347bd499fbf7ac7dcb5de8b7d8
|
63e0062d0cb53a9fd325f24b8264c6891d1a3d06
|
refs/heads/master
| 2022-08-27T14:41:24.409045
| 2022-08-15T14:29:31
| 2022-08-15T14:29:31
| 81,352,913
| 2
| 2
| null | 2022-08-15T14:29:32
| 2017-02-08T16:54:11
|
R
|
UTF-8
|
R
| false
| false
| 3,302
|
r
|
aws.R
|
#' @importFrom jsonlite write_json
# Serialize a package's DESCRIPTION and help topics to JSON under
# <getwd()>/<package>/<version>, stage its figures, vignettes and R sources
# alongside them, sync the whole directory to the rdocumentation S3 bucket
# via the aws CLI, then remove the local staging directory.
#
# description: parsed DESCRIPTION list; must contain $Package and $Version.
# topics: list of parsed help topics; each element must have a $name.
# NOTE(review): assumes the aws CLI is on PATH with working credentials and
# that the extracted package sources live under ./packages/<package> --
# confirm against the calling pipeline.
dump_jsons_on_s3 <- function(description, topics) {
pkg_name <- description$Package
pkg_version <- description$Version
local <- file.path(getwd(), pkg_name, pkg_version)
remote <- file.path("s3://assets.rdocumentation.org/rpackages/unarchived", pkg_name, pkg_version)
dir.create(local, recursive = TRUE)
# copy everything from man/figures to local/figures
pkg_folder <- file.path("packages", pkg_name)
figures_path <- file.path(pkg_folder, "man", "figures")
copy_local(local, figures_path, "figures")
# copy everything from _vignettes to local/vignettes
vignettes_path <- file.path(pkg_folder, "_vignettes")
copy_local(local, vignettes_path, "vignettes")
# copy everything from R to local/R
r_path <- file.path(pkg_folder, "R")
copy_local(local, r_path, "R")
# write files to disk
write_json(description, auto_unbox = TRUE, path = file.path(local, "DESCRIPTION.json"))
lapply(topics, function(x) write_json(x, auto_unbox = TRUE, path = file.path(local, paste0(x$name, ".json"))))
# do the sync (shells out; output goes to the console)
system(sprintf("aws --region us-east-1 s3 sync %s %s", local, remote))
# clean up again
unlink(file.path(getwd(), pkg_name), recursive = TRUE)
}
# Copy a package sub-directory into the local staging directory.
#
# Helper for dump_jsons_on_s3(): if `path` is non-NULL and exists on disk,
# its contents are copied into file.path(local, dirname). A NULL or missing
# directory is silently a no-op.
#
# local: staging directory for the package/version being synced.
# path: source directory to copy (may be NULL or absent).
# dirname: name of the sub-directory to create under `local`.
copy_local <- function(local, path, dirname){
  # Bug fix: is.null() must be checked BEFORE file.exists(). The original
  # order evaluated file.exists(NULL) first, which returns a zero-length
  # logical and makes `&&` fail instead of short-circuiting.
  if (!is.null(path) && file.exists(path)) {
    out_path <- file.path(local, dirname)
    dir.create(out_path)
    pkgdown:::copy_dir(path, out_path)
  }
}
# Send one or more messages to an AWS SQS queue.
#
# msg: character vector of message bodies. With length > 1 the messages go
# through the SendMessageBatch action in chunks of 10 (the SQS batch
# limit); a single message uses SendMessage.
# query: extra query parameters; in batch mode each entry is replicated for
# every message in the chunk and prefixed with its batch-entry index.
# attributes / delay: accepted but not referenced in the body --
# NOTE(review): confirm whether they were meant to be forwarded.
# ...: forwarded to aws.sqs:::sqsHTTP.
#
# NOTE(review): in batch mode the structure() value built inside the for
# loop is discarded (a for loop returns NULL), so callers only receive a
# value on error or in single-message mode -- confirm this is intended.
send_msg <- function(queue, msg, query = list(), attributes = NULL, delay = NULL, ...) {
# Resolve the queue name to its URL.
queue <- aws.sqs:::.urlFromName(queue)
if(length(msg) > 1) {
# batch mode
batchs <- split(msg, ceiling(seq_along(msg)/10))
for (batch in batchs) {
l <- length(batch)
n <- 1:l
id <- paste0("msg", n)
a <- as.list(c(id, batch))
# Build SendMessageBatchRequestEntry.<i>.Id / .MessageBody pairs.
names(a) <- c(paste0("SendMessageBatchRequestEntry.",n,".Id"),
paste0("SendMessageBatchRequestEntry.",n,".MessageBody"))
query_args <- list(Action = "SendMessageBatch")
# Replicate user-supplied query params once per message in the chunk.
query_mult <- rep(query, each = l)
front <- c(paste0("SendMessageBatchRequestEntry.",n, "."))
back <- rep(names(query), each = l)
names(query_mult) <- paste0(front, back)
body <- c(a, query_mult, query_args)
out <- aws.sqs:::sqsHTTP(url = queue, query = body, ...)
# Abort on the first failed chunk, returning the error object.
if (inherits(out, "aws-error") || inherits(out, "unknown")) {
return(out)
}
structure(out$SendMessageBatchResponse$SendMessageBatchResult,
RequestId = out$SendMessageBatchResponse$ResponseMetadata$RequestId)
}
} else {
# single mode
query_args <- append(query, list(Action = "SendMessage"))
query_args$MessageBody = msg
out <- aws.sqs:::sqsHTTP(url = queue, query = query_args, ...)
if (inherits(out, "aws-error") || inherits(out, "unknown")) {
return(out)
}
structure(list(out$SendMessageResponse$SendMessageResult),
RequestId = out$SendMessageResponse$ResponseMetadata$RequestId)
}
}
# Post a job message to the queue, tagged with a string message attribute
# "type" = `value` so consumers can dispatch on the job type.
# json: already-serialized job payload used as the message body.
# NOTE(review): info() is assumed to be a logging helper defined elsewhere
# in this package.
post_job <- function(queue, json, value) {
info(sprintf("Posting %s job...", value))
send_msg(queue,
msg = json,
query = list(MessageAttribute.1.Name = "type",
MessageAttribute.1.Value.DataType ="String",
MessageAttribute.1.Value.StringValue = value))
}
|
76e61bca7a86b4fe6b3c07e3ff5e462e0e6b6056
|
60d17a32a7717f2ef63ad305137e491c8dbcd558
|
/R/classes.R
|
066c9b74030774d379bb107656274e4f015018c9
|
[] |
no_license
|
benstory/mitoClone2
|
d08e10575f82a375d1ef01844a0500adcf750b8b
|
aa8ef170defb943f2eeec4993a5fe9955c1381c7
|
refs/heads/main
| 2023-06-20T13:45:55.555113
| 2023-06-16T06:22:04
| 2023-06-16T06:22:04
| 378,751,189
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,997
|
r
|
classes.R
|
#'mutationCalls class
#'
#'To create this class from a list of bam files (where each bam file corresponds
#'to a single cell), use \code{\link{mutationCallsFromCohort}} or
#'\code{\link{mutationCallsFromExclusionlist}}. To create this class if you
#'already have the matrices of mutation counts, use its contstructor, i.e.
#'\code{mutationCallsFromMatrix(M = data1, N = data2)}.
#'
#'@slot M A matrix of read counts mapping to the \emph{mutant} allele. Columns
#' are genomic sites and rows and single cells.
#'@slot N A matrix of read counts mapping to the \emph{nonmutant} alleles.
#' Columns are genomic sites and rows and single cells.
#'@slot ternary Discretized version describing the mutational status of each
#' gene in each cell, where 1 signfiies mutant, 0 signifies reference, and ?
#' signifies dropout
#'@slot cluster Boolean vector of length \code{ncol(M)} specifying if the given
#' mutation should be included for clustering (\code{TRUE}) or only used for
#' annotation.
#'@slot metadata Metadata frame for annotation of single cells (used for
#' plotting). Row names should be the same as in \code{M}
#'@slot tree Inferred mutation tree
#'@slot cell2clone Probability matrix of single cells and their assignment to
#' clones.
#'@slot mut2clone Maps mutations to main clones
#'@slot mainClone Probability matrix of single cells and their assignment to
#' main clones
#'@slot treeLikelihoods Likelihood matrix underlying the inference of main
#' clones, see \code{\link{clusterMetaclones}}
#'@export
# S4 container for per-cell, per-site mutation evidence (see roxygen block
# above for slot semantics). Rows of M/N are cells, columns are sites.
mutationCalls <- setClass(
"mutationCalls",
slots = c(
M = "matrix",
N = "matrix",
metadata = "data.frame",
ternary = "matrix",
cluster = "logical",
tree = "list",
cell2clone = "matrix",
mut2clone = "integer",
mainClone = "matrix",
treeLikelihoods = "matrix"
),
# Invariant: mutant (M) and non-mutant (N) read-count matrices must line
# up cell-for-cell and site-for-site.
validity = function(object) {
if (!identical(dim(object@M), dim(object@N))) {
return("Matrices M and N must have identical dimensions")
}
return(TRUE)
}
)
#'mutationCalls constructor
#'
#'To be used when allele-specific count matrices are available.
#'@param M A matrix of read counts mapping to the \emph{mutant}
#'allele. Columns are genomic sites and rows and single cells.
#'@param N A matrix of read counts mapping to the \emph{referece}
#'allele. Columns are genomic sites and rows and single cells.
#'@param cluster If \code{NULL}, only mutations with coverage in 20
#'percent of the cells or more will be used for the clustering,
#'and all other mutations will be used for cluster annotation
#'only. Alternatively, a boolean vector of length \code{ncol(M)}
#'that specifies the desired behavior for each genomic site.
#'@param metadata A data.frame of metadata that will be transfered to
#'the final output where the \code{row.names(metadata)}
#'correspond to the the \code{row.names(M)}.
#'@param binarize Allele frequency threshold to define a site as
#'mutant (required for some clustering methods)
#'@return An object of class \code{\link{mutationCalls}}.
#'@examples load(system.file("extdata/example_counts.Rda",package = "mitoClone2"))
#' ## we have loaded the example.counts object
#' known.variants <- c("8 T>C","4 G>A","11 G>A","7 A>G","5 G>A","15 G>A","14 G>A")
#' known.subset <- pullcountsVars(example.counts, known.variants)
#' known.subset <- mutationCallsFromMatrix(t(known.subset$M), t(known.subset$N),
#' cluster = rep(TRUE, length(known.variants)))
#'@export
mutationCallsFromMatrix <- function(M,
                                    N,
                                    cluster = NULL,
                                    metadata = data.frame(row.names = rownames(M)),
                                    binarize = 0.05) {
  # Sanitize site names so they are syntactically valid R column names.
  colnames(M) <- make.names(colnames(M))
  colnames(N) <- make.names(colnames(N))
  # Discretize each site per cell: "?" = dropout (no reads at all),
  # "1" = mutant allele frequency above `binarize`, "0" = reference.
  discretize <- function(mut, ref) {
    af <- mut / (mut + ref)
    apply(af, 2, function(freq) {
      ifelse(is.na(freq), "?", ifelse(freq > binarize, "1", "0"))
    })
  }
  ternary <- discretize(M, N)
  # Default clustering set: sites with coverage in more than 20% of cells.
  if (is.null(cluster)) {
    site_coverage <- apply(ternary != "?", 2, mean)
    cluster <- site_coverage > 0.2
  }
  methods::new(
    "mutationCalls",
    M = M,
    N = N,
    metadata = metadata,
    ternary = ternary,
    cluster = cluster
  )
}
#'Plot clonal assignment of single cells
#'
#'Creates a heatmap of single cell mutation calls, clustered using
#' PhISCS.
#'@param mutcalls object of class \code{\link{mutationCalls}}.
#'@param what One of the following: \emph{alleleFreq}: The fraction of
#'reads mapping to the mutant allele or \emph{ternary}:
#'Ternarized mutation status
#'@param show boolean vector specifying for each mutation if it should
#'be plotted on top of the heatmap as metadata; defaults to
#'mutations not used for the clustering \code{!mutcalls@cluster}
#'@param ... any arguments passed to \code{\link[pheatmap]{pheatmap}}
#'@examples P1 <-
#'readRDS(system.file("extdata/sample_example1.RDS",package =
#'"mitoClone2"))
#'plotClones(P1)
#'@return Returns TRUE only used for generating a PostScript tree
#'image of the putative mutation tree
#'@export
plotClones <- function(mutcalls,
what = c("alleleFreq", "ternary"),
show = c(),
...) {
what <- match.arg(what)
# Build the cell-by-site data to plot: raw mutant allele frequencies, or a
# numeric recoding of the ternary calls (1 = mutant, 0 = dropout, -1 = ref).
if (what == "alleleFreq")
plotData <- mutcalls@M / (mutcalls@M + mutcalls@N)
if (what == "ternary")
plotData <-
apply(mutcalls@ternary, 2, function(x)
ifelse(x == "1", 1, ifelse(x == "?", 0,-1)))
# Order sites by their position in the mutation tree (root dropped), then
# transpose so heatmap rows are sites and columns are cells.
plotData <-
t(plotData[, getNodes(mutcalls@tree)[-1]]) #how to order rows?
# Column annotations: requested mutations' ternary status plus metadata.
if (length(show) > 1)
annos <-
data.frame(row.names = rownames(mutcalls@M),
mutcalls@ternary[, show],
mutcalls@metadata)
if (length(show) == 1) {
annos <-
data.frame(row.names = rownames(mutcalls@M),
ann = mutcalls@ternary[, show],
mutcalls@metadata)
colnames(annos)[2] <- show
}
if (length(show) == 0)
annos <-
data.frame(row.names = rownames(mutcalls@M), mutcalls@metadata)
# If clusterMetaclones() has been run (mut2clone non-empty), annotate each
# cell with its most likely main clone and sort cells by clone.
if (length(mutcalls@mut2clone) > 0) {
annos$mainClone <-
as.factor(apply(mutcalls@mainClone, 1, which.max))
annos$confidence <- apply(mutcalls@mainClone, 1, max)
plotData <- plotData[, order(annos$mainClone)]
}
# Fixed row/column order (no re-clustering by pheatmap).
pheatmap::pheatmap(
plotData,
cluster_cols = FALSE,
cluster_rows = FALSE,
show_colnames = FALSE,
color = colorRampPalette(rev(
c("#9B0000", "#FFD72E", "#FFD72E", "#00009B")
))(100),
annotation_col = annos,
...
)
}
#'mutationCalls accessors
#'
#'Retrieves the full matrix of likelihoods associating single cells
#' with clones
#'@param mutcall object of class \code{\link{mutationCalls}}.
#'@param mainClones Retrieve likelihoods associated with the main
#'Clones. Defaults to \code{TRUE} if
#'\code{\link{clusterMetaclones}} has been run.
#'@return Return \code{TRUE} if \code{\link{clusterMetaclones}} has
#'been run otherwise returns the cell by clone matrix of
#'likelihood associating each cell to a given clone.
#'@examples load(system.file("extdata/LudwigFig7.Rda",package =
#'"mitoClone2"))
#'likelihood_matrix <- getCloneLikelihood(LudwigFig7)
#'@export
getCloneLikelihood <- function(mutcall,
mainClones = length(mutcall@mut2clone) > 0)
mutcall@cell2clone # NOTE(review): mainClones is computed but never used here; cell2clone is returned regardless -- confirm whether mainClone was intended when TRUE.
#' @describeIn getCloneLikelihood Retrieve the most likely clone
#'associate with each cell.
getMainClone <-
function(mutcall,
mainClones = length(mutcall@mut2clone) > 0)
as.factor(apply(
getCloneLikelihood(mutcall, mainClones = mainClones),
1,
which.max
))
#' @describeIn getCloneLikelihood Retrieve the likelihood of the most
#'likely clone for each cell.
getConfidence <-
function(mutcall,
mainClones = length(mutcall@mut2clone) > 0)
as.factor(apply(getCloneLikelihood(mutcall,mainClones = mainClones),
1,
max)) # NOTE(review): wrapping a numeric likelihood in as.factor() looks unintended -- confirm callers expect a factor here.
#' @describeIn getCloneLikelihood Retrieve the assignment of mutations
#'to clones, once \code{\link{clusterMetaclones}} has been run.
getMut2Clone <- function(mutcall)
mutcall@mut2clone
#'mutationCalls cluster accessor
#'
#'Extracts all the putative variants that we want to use for
#' clustering
#'@param mutcall object of class \code{\link{mutationCalls}}.
#'@examples load(system.file("extdata/LudwigFig7.Rda",package =
#'"mitoClone2"))
#'mutations_to_cluster <- getVarsCandidate(LudwigFig7)
#'@return Returns a character vector including all the variants to be
#'used for clustering
#'@export
# Accessor: logical vector (one element per site) flagging the variants
# selected for clustering.
getVarsCandidate <- function(mutcall)
mutcall@cluster
#'mutationCalls cluster setter
#'
#'Sets the putative variants that we want to use for clustering
#'@param mutcall object of class \code{\link{mutationCalls}}.
#'@param varlist vector of booleans with the names set to the variants
#'to use for clustering
#'@examples load(system.file("extdata/LudwigFig7.Rda",package =
#'"mitoClone2"))
#'mutations_to_cluster <- getVarsCandidate(LudwigFig7)
#'mutations_to_cluster[] <- rep(c(TRUE,FALSE),each=19)
#'LudwigFig7 <- setVarsCandidate(LudwigFig7,mutations_to_cluster)
#'@return Sets the cluster slot on a mutationCalls object
#'@export
setVarsCandidate <- function(mutcall, varlist) {
  # Assign the clustering flags and hand the modified object back
  # (S4 objects have copy semantics, so the caller must capture the result).
  mutcall@cluster <- varlist
  mutcall
}
#'mutationCalls counts accessor
#'
#'Extracts the counts of allele for either the mutant or all the
#' non-mutant alleles
#'@param mutcall object of class \code{\link{mutationCalls}}.
#'@param type character that is either `mutant` or `nonmutant`
#'depending on which allele count the user wants to access
#'@examples load(system.file("extdata/LudwigFig7.Rda",package = "mitoClone2"))
#'mutantAllele_count <- getAlleleCount(LudwigFig7,type='mutant')
#'@return Returns matrix of either mutant or non-mutant allele counts
#'@export
# Accessor: return the mutant (slot M) or non-mutant (slot N) read-count
# matrix of a mutationCalls object.
# mutcall: object of class mutationCalls.
# type: "mutant" or "nonmutant"; defaults to "mutant".
getAlleleCount <- function(mutcall, type = c('mutant', 'nonmutant')) {
  # Validate/resolve `type`: calling without an argument now defaults to
  # 'mutant' instead of erroring (switch() cannot take a length-2 vector),
  # and an unrecognized value fails with an informative match.arg error
  # rather than switch() silently returning NULL.
  type <- match.arg(type)
  message('Extracting sum of all ', type, ' alleles')
  pulledslot <- switch(type, "mutant" = "M", "nonmutant" = "N")
  return(methods::slot(mutcall, pulledslot))
}
|
0195bb2fdc226fa9022c38a446d1a1ef37be53a2
|
f56e47d46acb433fb720c3b57e5889ba761873f9
|
/data_france/Roadmap.R
|
581854647bb16444c699a2e15b94742b52433324
|
[] |
no_license
|
matthiasmace/coronavirus
|
b97b84c7280e8ff200000be6a7e7d96ee105b00b
|
aa5a344488f7bdd5ce48d18b061d5c3c8ecae898
|
refs/heads/master
| 2021-04-21T15:57:18.426905
| 2020-11-26T13:20:16
| 2020-11-26T13:20:16
| 249,794,137
| 0
| 0
| null | 2020-03-24T19:04:02
| 2020-03-24T19:04:00
| null |
UTF-8
|
R
| false
| false
| 8,574
|
r
|
Roadmap.R
|
## data France
, fluidRow(
column(12,
h1("CovId-19 for People", align="center")
, h2("SARS-CoV-2 pandemics data display & analysis Webpage for the people", align="center")
, p("Données Source",
a("INSEE ???",
href="https://ourworldindata.org/coronavirus"),
"| Link to the dataset (last updated ",
modifdate,
"):",
a("https://covid.ourworldindata.org/data/ecdc/full_data.csv",
href = "https://covid.ourworldindata.org/data/ecdc/full_data.csv")
, "&"
, a("WorldBank"
, href="https://data.worldbank.org")
, "| Shiny app by Tomasz Suchan & Matthias Mace"
, a("@tomaszsuchan",
href="https://twitter.com/tomaszsuchan")
, a("| Matthias FB",
href="https://www.facebook.com/matthias.mace.5"),
align = "center"
)
)
)
, fluidRow(
sidebarLayout(
sidebarPanel(width = 3
, radioButtons(inputId = "data_column"
, label = "Data to show:"
, choices = c("Hospitalisés" = "hosp"
, "Réanimation" = "rea"
, "Sorties" = "rad"
, "Décédés" = "dec"
)
, selected = "hosp"
)
, selectInput(inputId = "dep_sel"
, label = "Départements (with at least 1 case):"
, list('Occitanie' = unique(france.df[france.df$region == 'Occitanie',]$dep),
# 'Africa' = unique(covdat[covdat$continent == 'Africa',]$location),
# 'Americas' = unique(covdat[covdat$continent == 'Americas',]$location),
# 'Asia' = unique(covdat[covdat$continent == 'Asia',]$location),
# 'Oceania' = unique(covdat[covdat$continent == 'Oceania',]$location)
)
, selected = c(66, 31, 47, 11, 75, 67, 68)
, multiple = TRUE
)
, strong("Plot options:")
, em("For curves (multiple selections allowed)")
, checkboxInput(inputId="log"
, label = "Plot y axis on log scale", value = FALSE)
, checkboxInput(inputId="percapita",
label = "Correct for population size", value = FALSE)
, checkboxInput(inputId="dailyscale",
label = "Plot daily breaks on x axis", value = FALSE)
, checkboxInput(inputId="sync",
label = "Synchronize national epidemics (minimal cases/deaths to begin with)", value = FALSE)
, numericInput(inputId = "num.min"
, label = ""
, value = 10
)
, hr(style="border-color: black")
, checkboxInput(inputId="R0",
label = "Sliding R0 computation (select 'new_cases' or 'new_deaths') \n (remove South Korea & China before)"
#(choose the computing window in days)
, value = FALSE)
, column(5
, numericInput(inputId = "SI.min"
, label = "Serial Interval Min"
, value = 4
)
)
, column(5
, numericInput(inputId = "SI.max"
, label = "Serial Interval Max"
, value = 8
)
)
, numericInput(inputId = "window.R0"
, label = ""
, value = 3
)
, hr(style="border-color: black")
, strong("Select Socio-Economic Variable to Compare")
, selectizeInput(inputId = "socialvar"
, label = "Select variable"
, choices = c("NONE",
names(map.df.2)[-c(1:3)]
)
, selected = c("NONE")
)
, checkboxInput(inputId="map"
, label = "World Map (select one WorldBank data)"
, value = FALSE)
, checkboxInput(inputId="xyplot"
, label = "XY-plot (select one WorldBank data)"
, value = FALSE)
, checkboxInput(inputId="corrmap",
label = "Cross-Correlations (all WorldBank data)", value = FALSE)
),
mainPanel(width = 9,
fluidRow(
plotOutput(outputId = "distPlot", width="100%", height=750)
),
fluidRow(
sliderInput(inputId="dates",
label="Dates:",
min = mindate,
max = maxdate,
value = c(as.Date("2020-02-15", format = "%Y-%m-%d"),maxdate),
timeFormat = "%F",
width="100%")
)
)
)
)
# Load the French departmental hospital dataset (semicolon-separated export;
# the filename is the raw data.gouv.fr resource hash).
france.df <- as.data.frame(read.csv("63352e38-d353-4b54-bfd1-f1b3ee1cabd7", header = T, sep =";"))
france.df$jour <- as.Date(france.df$jour)
# Normalize department codes (drops leading zeros via the numeric round-trip).
france.df$dep <- as.character(as.numeric(france.df$dep))
# Mimic the Shiny `input` object so the reactive code below can be run
# interactively. NOTE(review): "75" appears twice in dep_sel -- confirm
# whether a different department was intended.
input <- list(data_column = "nouveaux_deces"
, dep_sel = c("68", "75", "31", "75", "13", "66")
, SI.min = 4
, SI.max = 8
, num_min = 1
)
require(EpiEstim)
# Select the incidence series for the chosen departments: columns 1/3 plus
# either the new-cases or new-deaths column. Rows with sex == 0 are kept
# (the all-sexes aggregate in this dataset). NOTE(review): `$sex` relies on
# partial matching if the actual column is named "sexe" -- confirm.
if(input$data_column == "nouveaux_cas"){
DAT.0 = france.df[france.df$dep %in% input$dep_sel & france.df$sex ==0, c(1, 3, 4)]
} else if(input$data_column == "nouveaux_deces") {
DAT.0 = france.df[france.df$dep %in% input$dep_sel & france.df$sex ==0, c(1, 3, 5)]
} else {stop(safeError(("Incompatible Data to show / plot option combination")))}
#
names(DAT.0) <- c("location", "date", "data")
#
print(DAT.0)
#
RES <- list()
#
# EpiEstim configuration: serial interval treated as uncertain, with its
# mean bounded by the user-supplied SI.min/SI.max.
config <- make_config(list(mean_si = (mean(c(input$SI.min, input$SI.max))), std_mean_si = 1,
min_mean_si = input$SI.min, max_mean_si = input$SI.max,
std_si = 1.5, std_std_si = 0.5,
min_std_si = 0.5, max_std_si = 2.5))
#
#window = input$window.R0
#
# Estimate the time-varying reproduction number per department with EpiEstim,
# then NA-pad the shorter series so all departments align on a common length.
# NOTE(review): the loop variable shadows base::c (harmless -- R skips
# non-function bindings when calling c()), and the padding uses a strict
# `>` threshold while DAT.1 is filtered with `>=`; if they ever disagree the
# replicate() count can go negative -- confirm.
for(c in unique(DAT.0$location)){
DAT.1 <- DAT.0[DAT.0$location == c & (DAT.0$data >= input$num_min), ]
rownames(DAT.1) <- DAT.1$date
# Keep only the incidence column (drops to a plain vector).
DAT.1 <- DAT.1[, -c(1, 2)]
es_uncertain_si <- estimate_R(DAT.1,
method = "uncertain_si",
config = config)
#
# Length of the longest series across departments, used for NA padding.
max.length <- max(table(DAT.0[DAT.0$data > input$num_min, c("location")]))
df <- rbind(do.call("rbind"
, replicate(n = (max.length - length(DAT.1))
, rep(c(NA), times = dim(es_uncertain_si$R)[2])
, simplify = FALSE)
)
, as.matrix(es_uncertain_si$R)
)
# NOTE(review): `"J" <- seq(...)` assigns a global object named J and
# passes the value unnamed; the auto-generated column name is repaired by
# names(RES)[1] <- "J" below -- probably `"J" = seq(...)` was intended.
RES[[c]] <- data.frame("J" <- seq(dim(df)[1])
, "BEGIN" = df[, "t_start"]
, "END" = df[, "t_end"]
, "R0_point" = df[, "Median(R)"]
, "R0_low" = df[, "Quantile.0.05(R)"]
, "R0_high" = df[, "Quantile.0.95(R)"]
)
#rownames(RES[[c]]) <- sort(unique(DAT.0$date))
}
# Tag each per-department frame with its location and stack them.
for(c in names(RES)){
RES[[c]]$location <- c
}
RES <- do.call("rbind", RES)
names(RES)[1] <- "J"
RES$J <- RES$J - length(unique(RES$J)) ## reverse timescale
#######
# Plot R estimates per department over (reversed) time, with the 5-95%
# credible ribbon and the epidemic-control threshold R = 1.
# NOTE(review): xlim() is called twice; the second call replaces the first
# (ggplot2 warns about the duplicated x scale) -- the xlim(0, NA) is dead.
ggplot(data = RES, aes(x = J, y = R0_point, colour = location)) +
geom_line(size = 3)+
geom_ribbon(aes(ymin=R0_low, ymax=R0_high, colour = location), linetype=2, alpha=0.2)+
xlim(0, NA)+
geom_hline(
yintercept = 1,
)+
labs(x = "Time in days", y = "Basic Reproduction Number (R) estimates")+
xlim(-length(unique(RES$J)), 0)+
theme_minimal()
|
70a1ea65d77240163ae85e563e6863f5b3e0de22
|
cff3dad31f34070a0459506762e97accba287fad
|
/Pmetrics/R/makePTA.R
|
e62ef4032b5f88f2284cc43be774adfe951d958b
|
[] |
no_license
|
nickibeaks/Pmetrics
|
0264959e0f4a6281a4e5e1ec43934be481ffb4f8
|
1d10559c754e6a5bd85cda749fc4fda5045dc606
|
refs/heads/master
| 2020-05-29T12:24:45.659832
| 2014-07-23T18:33:41
| 2014-07-23T18:33:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,485
|
r
|
makePTA.R
|
#' Calculates the Percent Target Attainment (PTA)
#'
#' \code{makePTA} will calculate the PTA for any number of simulations, targets and definitions of success.
#' Simulations typically differ by dose, but may differ by other features such as children vs. adults.
#'
#' @title Calculation of PTAs
#' @param simdata A vector of simulator output filenames, e.g. c(\dQuote{simout1.txt},\dQuote{simout2.txt}),
#' with wildcard support, e.g. \dQuote{simout*} or \dQuote{simout?}, or
#' a list of PMsim objects made by \code{\link{SIMparse}} with suitable simulated doses and observations. The number and times of simulated
#' observations does not have to be the same in all objects.
#' @param targets A vector of pharmacodynamic targets, such as Minimum Inhibitory Concentrations (MICs), e.g. c(0.25, 0.5,1,2,4,8,16,32)
#' @param target.type A numeric or character vector, length 1. If numeric, must correspond to an observation time common to all PMsim objects in
#' \code{simdata}, rounded to the nearest hour. In this case, the target statistic will be the ratio of observation at time \code{target.type} to target. This enables
#' testing of a specific timed concentration (e.g. one hour after a dose or C1) which may be called a peak, but is not actually the maximum drug
#' concentration. Be sure that the time in the simulated data is used, e.g. 122 after a dose given at 120. Character values may be one of
#' \dQuote{time}, \dQuote{auc}, \dQuote{peak}, or \dQuote{min}, for, respectively, percent time above target within the time range
#' specified by \code{start} and \code{end}, ratio of area under the curve within the time range to target, ratio of peak concentration within the time range
#' to target, or ratio of minimum concentration within the time range to target.
#' @param success A single value specifying the success statistic, e.g. 0.4 for proportion time (end-start) above target, or 100 for peak:target.
#' @param outeq An integer specifying the number of the simulated output equation to use. Default is 1.
#' @param free.fraction Proportion of free, active drug. Default is 1, i.e. 100\% free drug or 0\% protein binding.
#' @param start Specify the time to begin PTA calculations. Default is a vector with the first observation time for subjects
#' in each element of \code{simdata}, e.g. dose regimen. If specified as a vector, values will be recycled as necessary.
#' @param end Specify the time to end PTA calculations so that PTA is calculated
#' from \code{start} to \code{end}. Default for end is the maximum observation
#' time for subjects in each element of \code{simdata}, e.g. dose regimen. If specified as a vector, values will be recycled
#' as necessary. Subjects with insufficient data (fewer than 5 simulated observations) for a specified interval will trigger a warning.
#' Ideally then, the simulated datset should contain sufficient observations within the interval specified by \code{start} and \code{end}.
#' @return The output of \code{makePTA} is a list of class \emph{PMpta},
#' which has 2 objects:
#' \item{results }{A data frame with the following columns: simnum,id,target,ratio.
#' \emph{simnum} is the number of the simulation; \emph{id} is the simulated profile number
#' within each simulation; \emph{target} is the specified target; and \emph{ratio} is
#' the target ratio, e.g. time > target, auc:target, etc.}
#' \item{outcome }{A data frame summarizing the results with the following columns: simnum, target, success, meanratio, and sdratio.
#' \emph{simnum} and \emph{target} are as for \code{outcome}. The \emph{prop.success} column has the proportion with a ratio > \code{success},
#' as specified in the function call. The \emph{mean.stat} and \emph{sd.stat} columns have the
#' mean and standard deviation of the target statistic (e.g. proportion end-start above target, ratio of Cmax to target) for each simulation and target.}
#' @author Michael Neely
#' @seealso \code{\link{plot.PMpta}}, \code{\link{SIMparse}}
makePTA <- function(simdata,targets,target.type,success,outeq=1,free.fraction=1,start,end){
  #--- argument validation (scalar tests, so short-circuit && / || are used) ---
  if(missing(simdata) || missing(target.type)) stop("Simulation output and target.type must be specified.\n")
  if(is.character(target.type) && !target.type %in% c("time","auc","peak","min")) stop("Please specify target.type as a numerical value corresponding to a common\ntime in all simulated datasets, or a character value of 'time', 'auc', 'peak' or 'min'.\n")
  if(!inherits(simdata,"list")){ #so we are dealing with names of files
    simfiles <- Sys.glob(simdata)
    if(length(simfiles)==0) stop("There are no files matching \"",simdata,"\".\n",sep="")
    simdata <- list()
    for(i in seq_along(simfiles)){
      simdata[[i]] <- tryCatch(SIMparse(simfiles[i]),
                               error=function(e) stop(paste(simfiles[i],"is not a PMsim object.\n")))
    }
  }
  #check for one PMsim object only, and if so, make it a one-item list
  if(!inherits(simdata[[1]],"PMsim")) {simdata <- list(simdata);class(simdata) <- c("PMsim","list")}
  #number of sims
  nsim <- length(simdata)
  #replicate start and end times if supplied for each simulation
  if(!missing(start)) {start <- rep(start,nsim)}
  if(!missing(end)) {end <- rep(end,nsim)}
  #number of targets
  ntarg <- length(targets)
  #the list to hold the PTA results
  results <- list()
  cat("\nCalculating PTA for each simulation and target...\n")
  flush.console()
  #the "time" method updates the progress bar once per (simulation, target) pair;
  #all other methods update it once per simulation
  if(target.type=="time"){
    maxpb <- nsim*ntarg
  } else {maxpb <- nsim}
  pb <- txtProgressBar(min = 0, max = maxpb, style = 3)
  #loop through each simulation, calculating PTA
  for(simnum in seq_len(nsim)){
    #get the simulated data for sim
    wrk.sim <- simdata[[simnum]]$obs
    #get the correct outeq
    wrk.sim <- wrk.sim[wrk.sim$outeq==outeq,]
    #take out missing observations
    wrk.sim <- wrk.sim[!is.na(wrk.sim$out),]
    #multiply by free fraction
    wrk.sim$out <- wrk.sim$out*free.fraction
    #simulated times
    wrk.times <- unique(wrk.sim$time)
    #if start and end times missing, set them to min/max, else use those supplied
    if(missing(start)) {wrk.start <- min(wrk.times)} else {wrk.start <- start[simnum]}
    if(missing(end)) {wrk.end <- max(wrk.times)} else {wrk.end <- end[simnum]}
    #BUGFIX: message previously contained a literal "/n" instead of a newline
    if(wrk.start>=wrk.end) {stop(paste("For simulation ",simnum,", start is not less than end\n",sep=""))}
    #filter simulated data by start/end times
    wrk.sim <- wrk.sim[wrk.sim$time>=wrk.start & wrk.sim$time<=wrk.end,]
    #BUGFIX: wrk.sim is a data frame, so test the number of rows; length()
    #returns the (unchanged) number of columns and was never 0, making this
    #guard unreachable before
    if(nrow(wrk.sim)==0){
      cat(paste("Note: Simulation ",simnum," omitted because no simulated observations fell within the time window defined by start and end.\n",sep=""))
      next
    }
    #recheck times after filtering
    wrk.times <- unique(wrk.sim$time)
    #number of observations
    wrk.nobs <- length(wrk.times)
    if(wrk.nobs<5) warning(paste("Only ",wrk.nobs," simulated observations available for simulation ",simnum,".\nThis can compromise estimates of target attainment.\nStrongly consider increasing the number of simulated observations.\n",sep=""))
    #time above target
    if(target.type=="time"){
      #function to calculate time above target for a pair of (time, out) points;
      #crossings of the target are located by linear interpolation via lm()
      timeabove <- function(times,outs,targ){
        #both outs are below target
        if(outs[1]<targ & outs[2]<targ) interval <- 0
        #both outs are at or above target
        if(outs[1]>=targ & outs[2]>=targ) interval <- times[2]-times[1]
        #first is below, second is at or above
        if(outs[1]<targ & outs[2]>=targ){
          lm.1 <- lm(times~outs)
          cross1 <- predict(lm.1,data.frame(outs=targ))
          interval <- times[2]-cross1
        }
        #first is at or above, second is below
        if(outs[1]>=targ & outs[2]<targ){
          lm.1 <- lm(times~outs)
          cross1 <- predict(lm.1,data.frame(outs=targ))
          interval <- cross1-times[1]
        }
        return(interval) #the time above target
      }
      #function to split data into overlapping blocks of 2 rows
      pairUp <- function(sim){
        outs <- lapply(seq_len(nrow(sim)-1),function(x) c(sim$out[x],sim$out[x+1]))
        times <- lapply(seq_len(nrow(sim)-1),function(x) c(sim$time[x],sim$time[x+1]))
        return(list(times,outs))
      }
      #function to calculate cumulative time above target for one profile
      cumTime <- function(sim,targ){
        pairs <- pairUp(sim)
        npairs <- length(pairs[[1]])
        interval <- sum(unlist(lapply(seq_len(npairs),function(x) timeabove(times=pairs[[1]][[x]],outs=pairs[[2]][[x]],targ=targ))))
        #divide total time in the interval by the end-start interval
        return(interval/(wrk.end-wrk.start))
      }
      #get the results, which is initially a list [[ntarg]][nsim]
      pta <- list()
      for(t in seq_len(ntarg)){
        targ <- targets[t]
        pta[[t]] <- by(wrk.sim,wrk.sim$id,function(x) cumTime(x,targ=targ))
        setTxtProgressBar(pb, (simnum-1)*ntarg + t)
      }
      #get results into a format consistent with the others, i.e. matrix [ntarg,nsim]
      results[[simnum]] <- do.call(rbind,pta)
      if(ntarg==1) results[[simnum]] <- matrix(results[[simnum]],nrow=1)
    }
    #auc above target
    if(target.type=="auc"){
      auc <- by(wrk.sim,wrk.sim$id,function(x) makeAUC(x,out~time)[,2])
      results[[simnum]] <- sapply(auc,function(x) x/targets) #matrix [ntarg,nsim]
      if(ntarg==1) results[[simnum]] <- matrix(results[[simnum]],nrow=1)
      setTxtProgressBar(pb, simnum)
    }
    #peak above target
    if(target.type=="peak"){
      peak <- tapply(wrk.sim$out,wrk.sim$id,max)
      results[[simnum]] <- sapply(peak,function(x) x/targets) #matrix [ntarg,nsim]
      if(ntarg==1) results[[simnum]] <- matrix(results[[simnum]],nrow=1)
      setTxtProgressBar(pb, simnum)
    }
    #min above target
    if(target.type=="min"){
      minobs <- tapply(wrk.sim$out,wrk.sim$id,min)
      results[[simnum]] <- sapply(minobs,function(x) x/targets) #matrix [ntarg,nsim]
      if(ntarg==1) results[[simnum]] <- matrix(results[[simnum]],nrow=1)
      setTxtProgressBar(pb, simnum)
    }
    #specific obs above target
    if(is.numeric(target.type)){ #specific timed sample
      timed <- by(wrk.sim,wrk.sim$id,function(x) x$out[round(x$time,2)==target.type])
      results[[simnum]] <- sapply(timed,function(x) x/targets) #matrix [ntarg,nsim]
      if(ntarg==1) results[[simnum]] <- matrix(results[[simnum]],nrow=1)
      setTxtProgressBar(pb, simnum)
    }
  } #close simnum for loop
  close(pb)
  #reshape2 is only needed here; fail with a clear message if it is absent.
  #require() would have returned FALSE silently and produced a cryptic
  #"could not find function melt" error instead.
  if(!requireNamespace("reshape2",quietly=TRUE)) stop("The reshape2 package is required to summarize PTA results.\n")
  resultDF <- reshape2::melt(results)
  names(resultDF) <- c("target","id","ratio","simnum")
  #melt() encodes the target as its index; map back to the target value
  resultDF$target <- targets[resultDF$target]
  resultDF <- resultDF[,c("simnum","id","target","ratio")]
  #proportion of profiles attaining the success ratio, per (target, simulation)
  succSimXtarg <- tapply(resultDF$ratio,list(resultDF$target,resultDF$simnum),
                         function(x) sum(x>=success)/sum(!is.na(x)))
  meanratio <- tapply(resultDF$ratio,list(resultDF$target,resultDF$simnum),mean,na.rm=TRUE)
  sdratio <- tapply(resultDF$ratio,list(resultDF$target,resultDF$simnum),sd,na.rm=TRUE)
  pta.outcome <- data.frame(simnum=rep(1:nsim,each=ntarg),
                            target=rep(targets,nsim),
                            prop.success=c(succSimXtarg),
                            mean.stat=c(meanratio),
                            sd.stat=c(sdratio))
  rval <- list(results=resultDF,outcome=pta.outcome)
  class(rval) <- c("PMpta","list")
  return(rval)
}
|
fe8fb1e714093b0af43cfa567122d818500a74a1
|
958717071388748f12f69d7015cee40adf1dca83
|
/plot4.R
|
2b547388325467131c492c1d1fa634e6df1cc2e3
|
[] |
no_license
|
srishtigarg3/exploratory-data-analysis
|
8e5f15629a7e2ee7f25d95c30ed864a053bd1ce3
|
0f3d4364270981e5fc12f11e4569ac0556ab485e
|
refs/heads/master
| 2020-06-30T23:46:36.736428
| 2016-11-23T09:37:10
| 2016-11-23T09:37:10
| 74,561,625
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,045
|
r
|
plot4.R
|
# NOTE(review): hard-coded absolute path; adjust to your local copy of the dataset.
# '?' encodes missing values in this dataset, so read them as NA explicitly
# (otherwise as.numeric() below produces NAs with a coercion warning).
data <- read.table("C:/Users/Srishti/Desktop/household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".", na.strings="?")

# keep only the two days of interest (1-2 Feb 2007)
subdata <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
globalap <- as.numeric(subdata$Global_active_power)
datetime <- strptime(paste(subdata$Date, subdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")

# 2x2 panel of line plots written to a 480x480 png
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))

#first plot: global active power over time
plot(datetime, globalap, xlab="", ylab="Global Active Power (kilowatts)", type="l")

#second plot: voltage over time
plot(datetime, subdata$Voltage, ylab="Voltage", type="l")

#third plot: the three sub-metering series
plot(datetime,c(subdata$Sub_metering_1),col="black", type="l",xlab="",ylab="Energy sub metering")
lines(datetime,c(subdata$Sub_metering_2),col="red", type="l")
lines(datetime,c(subdata$Sub_metering_3),col="blue", type="l")
# BUGFIX: 'lty=1' draws the line samples in the legend; the original 'lty='
# passed a missing value
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=0.5, col=c("black", "red", "blue"))

#fourth plot: global reactive power
# BUGFIX: ylab had a stray parenthesis ("Global_reactive)_power")
plot(datetime, subdata$Global_reactive_power, ylab="Global_reactive_power", type="l")
dev.off()
|
4fad96ef622347d200df63680892cfefb38a2420
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/dfg_for_kilimanjaro/ndvi_kilimanjaro/src/gimms/gimmsNdviHarmonics.R
|
f5f00e5a11b27eec433e822682b87619250cf048
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,106
|
r
|
gimmsNdviHarmonics.R
|
# # Working directory
# switch(Sys.info()[["sysname"]],
# "Linux" = {path.wd <- "/media/envin/XChange/kilimanjaro/ndvi"},
# "Windows" = {path.wd <- "D:/kilimanjaro/ndvi"})
# setwd(path.wd)
# Packages and functions
lib <- c("raster", "rgdal", "TSA", "RColorBrewer")
# library() stops with an error when a package is missing; the previous
# sapply(..., require(...)) returned FALSE silently and deferred the failure
# to the first use of the missing package
for (pkg in lib) library(pkg, character.only = TRUE)

# project-local analysis functions used below
source("../../ndvi/src/cellHarmonics.R")
source("../../ndvi/src/ndviPhaseShift.R")

# # Research plots
# plots <- readOGR(dsn = "data/coords/",
#                  layer = "PlotPoles_ARC1960_mod_20140807_final")

## Data import

# DEM (used for contour overlays in the plots further down)
dem <- raster("data/DEM_ARC1960_30m_Hemp.tif")

# st <- "198201"
# nd <- "201112"

# 1-km GIMMS NDVI data (1982-2011)
fls_ndvi <- "data/rst/whittaker/gimms_ndvi3g_dwnscl_8211.tif"
rst_ndvi <- stack(fls_ndvi)
# ndvi.dates <- substr(basename(ndvi.fls), 5, 11)
# ndvi.years <- unique(substr(basename(ndvi.fls), 5, 8))
#
# # Setup time series
# ndvi.ts <- do.call("c", lapply(ndvi.years, function(i) {
# # seq(as.Date(paste(i, "01", ifelse(h == "MOD13Q1", "01", "09"), sep = "-")),
# # as.Date(paste(i, "12", "31", sep = "-")), 16)
# seq(as.Date(paste(i, "01", "09", sep = "-")),
# as.Date(paste(i, "12", "31", sep = "-")), 16)
# }))
#
# # Merge time series with available NDVI files
# ndvi.ts.fls <- merge(data.frame(date = ndvi.ts),
# data.frame(date = as.Date(ndvi.dates, format = "%Y%j"),
# file = ndvi.fls, stringsAsFactors = F),
# by = "date", all.x = T)
#
# # Import raster files and convert to matrices
# ndvi.rst <- foreach(i = seq(nrow(ndvi.ts.fls)), .packages = lib) %dopar% {
# if (is.na(ndvi.ts.fls[i, 2])) {
# NA
# } else {
# raster(ndvi.ts.fls[i, 2])
# }
# }
# ###
# ## KZA evaluation
# # List available files, dates and years
# ndvi.fls <- list.files("data/quality_control", pattern = h, full.names = T)
#
# ndvi.dates <- substr(basename(ndvi.fls), 13, 19)
# ndvi.years <- unique(substr(basename(ndvi.fls), 13, 16))
#
# # Setup time series
# ndvi.ts <- do.call("c", lapply(ndvi.years, function(i) {
# seq(as.Date(paste(i, "01", ifelse(h == "MOD13Q1", "01", "09"), sep = "-")),
# as.Date(paste(i, "12", "31", sep = "-")), 16)
# }))
#
# # Merge time series with available NDVI files
# ndvi.ts.fls <- merge(data.frame(date = ndvi.ts),
# data.frame(date = as.Date(ndvi.dates, format = "%Y%j"),
# file = ndvi.fls, stringsAsFactors = F),
# by = "date", all.x = T)
#
# # Import raster files and convert to matrices
# ndvi.rst.qa <- foreach(i = seq(nrow(ndvi.ts.fls)), .packages = lib) %dopar% {
# if (is.na(ndvi.ts.fls[i, 2])) {
# NA
# } else {
# raster(ndvi.ts.fls[i, 2])
# }
# }
#
# tmp.qa <- as.numeric(unlist(sapply(ndvi.rst.qa, function(i) {
# if (is.logical(i)) NA else i[cellFromXY(ndvi.rst[[20]], plots[67, ])]
# })))
# tmp.gf <- as.numeric(unlist(sapply(ndvi.rst, function(i) {
# if (is.logical(i)) NA else i[cellFromXY(ndvi.rst[[20]], plots[67, ])]
# })))
#
# ###
# ndvi.mat <- foreach(i = fls_ndvi, .packages = lib) %dopar% as.matrix(i)
#
# # Aggregate rasters on a monthly basis
# ndvi.months <- substr(ndvi.ts.fls[, 1], 1, 7)
#
# ndvi.rst.agg <- foreach(i = unique(ndvi.months), .packages = lib) %dopar% {
#
# # Rasters of current month
# index <- which(ndvi.months %in% i)
# # Dates with no available NDVI files
# navl <- sapply(ndvi.rst[index], is.logical)
#
# # Overlay non-missing data
# if (all(navl)) {
# return(NA)
# } else {
# if (sum(!navl) == 2) {
# Reduce(function(x, y) overlay(x, y, fun = function(...) {
# mean(..., na.rm = T)
# }), ndvi.rst[index[!navl]])
# } else {
# ndvi.rst[[index[!navl]]]
# }
# }
# }
# # Mean NDVI per month
# ndvi.rst.monthly_mean <- foreach(i = 1:12, .packages = lib, .combine = "stack") %dopar% {
# tmp <- ndvi.rst.agg[seq(i, length(ndvi.rst.agg), 12)]
# overlay(stack(tmp[!sapply(tmp, is.logical)]),
# fun = function(...) round(mean(..., na.rm = T) / 10000, digits = 2))
# }
# names(ndvi.rst.monthly_mean) <- month.abb
#
# ndvi.mat.monthly_mean <- as.matrix(ndvi.rst.monthly_mean)
#
# index <- cellFromXY(ndvi.rst.monthly_mean, plots)
# write.csv(data.frame(PlotID = plots$PlotID, ndvi.mat.monthly_mean[index, ]),
# "out/plots_mean_ndvi_filled.csv", quote = F, row.names = F)
# # Value extraction
# ndvi.start <- substr(unique(substr(ndvi.ts.fls[, 1], 1, 7)), 1, 4) %in%
# ndvi.years[2:4]
# ndvi.end <- substr(unique(substr(ndvi.ts.fls[, 1], 1, 7)), 1, 4) %in%
# ndvi.years[9:11]
# Temporal subsetting
# Split the 30-year monthly series (1982-2011) into the first and the last
# 15-year window (1982-1996 vs 1997-2011) for the harmonic comparison.
st_year <- 1982
nd_year <- 2011
n_years <- 15
n_months <- n_years * 12
rst.st <- rst_ndvi[[1:n_months]]
rst.nd <- rst_ndvi[[(nlayers(rst_ndvi)-n_months+1):nlayers(rst_ndvi)]]
# Per-cell harmonic regression for both periods (project function; returns a
# list of two raster stacks -- one per period. Indexing below assumes the
# layers are [month of max, max value, month of min, min value] -- TODO confirm
# against cellHarmonics.R)
rst.har <- cellHarmonics(st = rst.st,
                         nd = rst.nd,
                         st.start = c(st_year, 1), st.end = c(st_year+n_years-1, 12),
                         nd.start = c(nd_year-n_years+1, 1), nd.end = c(nd_year, 12),
                         product = "GIMMS",
                         path.out = "data/rst/harmonic_8296_9711", n.cores = 3)
# Start variance (maximum - minimum)
st_diff_max_min <- rst.har[[1]][[2]]-rst.har[[1]][[4]]
# End variance (maximum - minimum)
nd_diff_max_min <- rst.har[[2]][[2]]-rst.har[[2]][[4]]
# Shift in maximum NDVI (end period minus start period)
diff_max_y <- overlay(rst.har[[1]][[2]], rst.har[[2]][[2]], fun = function(x, y) {
  return(y - x)
})
# Shift in minimum NDVI
diff_min_y <- overlay(rst.har[[1]][[4]], rst.har[[2]][[4]], fun = function(x, y) {
  return(y - x)
})
# Shift in months regarding NDVI maximum; cells with low seasonal amplitude
# (variance below varThreshold) are rejected inside ndviPhaseShift()
diff_max_x <- overlay(rst.har[[1]][[1]], rst.har[[2]][[1]],
                      st_diff_max_min, nd_diff_max_min,
                      fun = function(x, y, z_max, z_min)
                        ndviPhaseShift(x, y, z_max, z_min,
                                       rejectLowVariance = TRUE,
                                       varThreshold = .04))
# diverging brown-blue-green palette for the phase-shift maps
cols_div <- colorRampPalette(brewer.pal(5, "BrBG"))
p_diff_max_x <-
  spplot(diff_max_x, col.regions = cols_div(100), scales = list(draw = TRUE),
         xlab = "x", ylab = "y", at = seq(-2.5, 2.5, 1),
         sp.layout = list("sp.lines", rasterToContour(dem), col = "grey65"))
# png("out/harmonic/harmonic_modis_diff_max_x_0306_1013.png", width = 20,
#     height = 17.5, units = "cm", pointsize = 15, res = 300)
# print(p_diff_max_x)
# dev.off()
# Shift in months regarding NDVI minimum
diff_min_x <- overlay(rst.har[[1]][[3]], rst.har[[2]][[3]],
                      st_diff_max_min, nd_diff_max_min,
                      fun = function(x, y, z_max, z_min)
                        ndviPhaseShift(x, y, z_max, z_min,
                                       rejectLowVariance = TRUE,
                                       varThreshold = .04))
p_diff_min_x <-
  spplot(diff_min_x, col.regions = cols_div(100), scales = list(draw = TRUE),
         xlab = "x", ylab = "y", at = seq(-2.5, 2.5, 1),
         sp.layout = list("sp.lines", rasterToContour(dem), col = "grey65"))
# write the four difference rasters to GeoTiff files
foreach(i = list(diff_max_x, diff_min_x, diff_max_y, diff_min_y),
        j = list("diff_max_x", "diff_min_x", "diff_max_y", "diff_min_y")) %do%
  writeRaster(i, paste0("data/rst/harmonic_8296_9711/", j), format = "GTiff", overwrite = TRUE)
### Visualization
# hcl colorspace: hue encodes the phase shift, luminance the NDVI-max shift
df_hcl <- data.frame(cell = 1:ncell(diff_max_x),
                     h = 90 + diff_max_x[] * 10,
                     c = 50, # increasing chroma with higher values
                     l = 50 + diff_max_y[] * 100) # decreasing luminance with higher values
# clamp negative chroma (col 3) and luminance (col 4) values to 0 so hcl()
# receives valid inputs
for (i in c(3, 4)) {
  if (any(df_hcl[, i] < 0))
    df_hcl[which(df_hcl[, i] < 0), i] <- 0
}
df_hcl_cc <- df_hcl[complete.cases(df_hcl), ]
template <- rasterToPolygons(diff_max_x)
plot(template, col = hcl(h = df_hcl_cc[, 2], c = df_hcl_cc[, 3], l = df_hcl_cc[, 4]),
     border = "transparent")
|
934adf374b741d8505cad8433e09db094c4ba47c
|
fbd1b4d98cad1db8c1aefbd55d16bf8dc3cd18c1
|
/plot2.R
|
8dd16aa39ce4de17329f4e63bd40e9c2aea28c79
|
[] |
no_license
|
adtai/ExData_Plotting1
|
c389a57eee27b04f5063e1d2127d3c65a7927e64
|
8403190efe12ae2f808c66c4612b1236ed04bba9
|
refs/heads/master
| 2020-05-19T16:35:36.718743
| 2015-04-12T02:09:40
| 2015-04-12T02:09:40
| 33,796,339
| 0
| 0
| null | 2015-04-11T23:32:18
| 2015-04-11T23:32:18
| null |
UTF-8
|
R
| false
| false
| 1,194
|
r
|
plot2.R
|
library(lubridate)

# Load the data file
# fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"

# Download the zip archive and extract the data file into the working directory.
# BUGFIX: unzip() actually extracts the file; the previous unz() call only
# created a connection object that was immediately discarded, so nothing was
# ever extracted and read.table() relied on a pre-existing copy of the file.
temp <- tempfile()
download.file(fileUrl, temp)
filepath <- "household_power_consumption.txt"
unzip(temp, files = filepath)
unlink(temp)

# '?' encodes missing values in this dataset
data <- read.table(file=filepath, header=TRUE, sep=";", na.strings="?")

# parse date/time columns with lubridate
data$Datetime <- dmy_hms(paste(data$Date, data$Time))
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- hms(data$Time)
data$Global_active_power <- as.numeric(data$Global_active_power)

# keep only 1-2 Feb 2007
data2007 <- data[year(data$Date) == 2007, ]
data2007feb <- data2007[month(data2007$Date) == 2, ]
feb1data <- data2007feb[day(data2007feb$Date) == 1, ]
feb2data <- data2007feb[day(data2007feb$Date) == 2, ]
finaldata <- rbind(feb1data, feb2data)

# Plot 2
png(filename = "plot2.png", width=480, height=480)
plot(finaldata$Datetime, finaldata$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
542dbe325baf834b2719cf604a4e1417f5a3ddf4
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.internet.of.things/man/iot1clickprojects_delete_placement.Rd
|
7c45e2659261739214ff09a555a79577ee642fea
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 819
|
rd
|
iot1clickprojects_delete_placement.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot1clickprojects_operations.R
\name{iot1clickprojects_delete_placement}
\alias{iot1clickprojects_delete_placement}
\title{Deletes a placement}
\usage{
iot1clickprojects_delete_placement(placementName, projectName)
}
\arguments{
\item{placementName}{[required] The name of the empty placement to delete.}
\item{projectName}{[required] The project containing the empty placement to delete.}
}
\value{
An empty list.
}
\description{
Deletes a placement. To delete a placement, it must not have any devices
associated with it.
When you delete a placement, all associated data becomes irretrievable.
}
\section{Request syntax}{
\preformatted{svc$delete_placement(
placementName = "string",
projectName = "string"
)
}
}
\keyword{internal}
|
f4d46c75ecb279aaab9ca5a9bc4f5bf7304ac328
|
873f2f21ba9477f77fbd63471f68fb74f0096fa7
|
/global.R
|
0cb6de84d2ecee392a15b88e407bbe1841b3712b
|
[] |
no_license
|
cashoes/shinyDIABLO
|
9724036ecbc28bf0944873cec712e5cdce90b1aa
|
6849c1dffa10db61f497ed8b94aeba85dc2ce4dd
|
refs/heads/master
| 2021-09-06T00:15:44.371020
| 2018-01-31T22:21:46
| 2018-01-31T22:21:46
| 119,751,598
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
global.R
|
# Define globally available objects
# (shiny global.R: everything here is evaluated once and shared by ui/server)
# imports -----------------------------------------------------------------
# NOTE: attach order matters for masking (tidyverse last so its verbs win
# over sna/igraph where names clash)
library(shiny)
library(shinycssloaders)
library(shinyBS)
library(shinydashboard)
library(shinythemes)
library(plotly)
library(network)
library(sna)
library(igraph)
library(intergraph)
library(visNetwork)
library(mixOmics)
library(ggmixOmics)
library(tidyverse)
library(cowplot)
library(GGally)
library(ggnetwork)
# project-local helper functions (defines getCorMat, among others -- see helpers.R)
source('helpers.R')
# data ----
# pre-fitted DIABLO/mixOmics model shipped with the app
M <- readRDS('data/TCGA model.rds')
corMat <- abs(getCorMat(M))
# both model slots start out pointing at the same fitted model
model1 <- M
model2 <- M
# Get component names
dataNames <- names(M$X)
nEntries <- length(dataNames)
nComp <- unique(M$ncomp)
# Params ----
# feature toggles for optional app panels
geneEnrichment <- TRUE
PPIIntegration <- FALSE
# layout widths -- presumably Bootstrap 12-column grid units (TODO confirm)
quarterWidth <- 3
halfWidth <- 6
tQuarterWidth <- 9
fullWidth <- 12
|
d803b3e57c4c5f3a756a79abb26f50673ac9a86d
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842016-test.R
|
6b5da9b6d4a174c6b0080c0805ca9c6a0c6acce3
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
1615842016-test.R
|
# Presumably an auto-generated fuzz/regression test for the internal
# meteor:::ET0_PenmanMonteith wrapper (the path mentions valgrind): most
# arguments are empty vectors and temp carries extreme double values to
# probe for crashes/invalid memory access rather than numeric correctness.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(-3.17097179177133e+148, -1.43300663669206e+306, 2.30235576924981e-92, -1.13144054336032e+193, -2.14555482385481e+110, -2.14555482385487e+110, -2.14555482385487e+110, -2.27293144816056e+197, 0.000350993746596763, -1.13907927756096e+193, -1.96882320459714e+208, -1.10977479388879e+44, -3.99165370868866e+148, 1.01992727967479e-306, 6.35413274475076e+306, 8.28904556439245e-317, 0, 0, 0))
# invoke the function with the fuzzed argument list
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
# print the result structure so any change in output shape shows in the log
str(result)
|
eee8605acfa3fde8c65a632f7151dae13b04e79b
|
a8244362d0abccf33407e925d8d49251a03e4ed4
|
/code/01-data-cleaning-scripts/02-biodepth-data.R
|
843f712babfaca999ea498f08eb6b6471f48e9aa
|
[] |
no_license
|
FabianRoger/Multifunc_Lund
|
110b15a1d1bfb10703e059a3eb697b9abf5ee5a3
|
d28ed79f9b05a2dac5622f5d8c9d1a99a29b4fbd
|
refs/heads/master
| 2023-08-17T02:38:50.077334
| 2023-08-16T14:39:23
| 2023-08-16T14:39:23
| 205,349,586
| 1
| 6
| null | 2020-09-18T09:19:38
| 2019-08-30T09:25:31
|
R
|
UTF-8
|
R
| false
| false
| 2,039
|
r
|
02-biodepth-data.R
|
#'
#' @title Download and clean the BIODEPTH data
#'
#' @description Load the BIODEPTH data taken from Byrnes et al. (2014, Methods
#' in Ecology and Evolution), clean it and output a cleaned version for
#' further analyses
#'

# load relevant libraries
library(dplyr)
library(multifunc)

# get the BIODEPTH data
data("all_biodepth")

# check the downloaded data
head(all_biodepth)
summary(all_biodepth)

# make a vector of the relevant function data
all_vars <- c("biomassY3", "root3", "N.g.m2", "light3", "N.Soil", "wood3", "cotton3")

# check the possible locations
unique(all_biodepth$location)

# Helper: subset one BIODEPTH site, keep the ecosystem-function columns that
# pass the data-availability threshold and the species columns with at least
# some non-zero values, then save the result as an .rds file.
# (The original script duplicated this logic verbatim for Sweden and Portugal.)
# NOTE(review): a previous comment claimed "> 2/3 of the values not NA", but
# thresh = 0 is what was actually used -- confirm the intended threshold.
clean_site <- function(data, site, fun_vars, out_file) {
  # subset the requested site
  site_dat <- data |> dplyr::filter(location == site)
  # ecosystem-function columns that pass the availability threshold
  fun_id <- which(names(site_dat) %in% multifunc::whichVars(site_dat, fun_vars, thresh = 0))
  # species columns (from column 26 onwards) with at least some values > 0
  site_sp <- multifunc::relevantSp(site_dat, 26:ncol(site_dat))
  sp_id <- which(names(site_dat) %in% site_sp)
  # keep the design columns (1:14), relevant species and relevant functions
  site_dat <- site_dat[, c(1:14, sp_id, fun_id)]
  # write the cleaned subset into an .rds file
  saveRDS(object = site_dat, file = out_file)
  invisible(site_dat)
}

# clean and export the Sweden and Portugal subsets
clean_site(all_biodepth, "Sweden", all_vars, "data/biodepth_swe_data.rds")
clean_site(all_biodepth, "Portugal", all_vars, "data/biodepth_prt_data.rds")

### END
|
6616aebc610c54a2212ba9353813b019bafe9212
|
809619e09165bb59d4b068eb8bad833d0a30c411
|
/R/reportRelatedFunctions.R
|
7cf287e45cb23ef0678297a22a1d50eda65cfedd
|
[] |
no_license
|
cran/GWASinspector
|
2910c12799e24c0c7e9f34df871f7d19c658c36a
|
5fabba85bf8d9ce8eb30c51344be4cb4a59489fe
|
refs/heads/master
| 2023-05-24T16:53:12.048188
| 2023-05-15T17:30:02
| 2023-05-15T17:30:02
| 236,609,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,675
|
r
|
reportRelatedFunctions.R
|
create_report_files <- function() {
  # Entry point for report generation: html reports (if pandoc is available)
  # plus the Excel report, with warnings for missing optional tooling.

  # print_and_log('\n','info')
  print_and_log('============== Creating Report Files ==============',
                'info')

  if(!.QC$pandoc.exists)
    print_and_log('pandoc module is required for converting report to Html format! check the manual on how to install.','warning',display=.QC$config$debug$verbose)

  if(!.QC$kableExtra.package.exists)
    print_and_log('kableExtra package is suggested for pretty Html format! check the manual on how to install.','warning',display=.QC$config$debug$verbose)

  if(.QC$pandoc.exists){
    # BUGFIX: the error messages contained a leftover '%s' sprintf placeholder
    # although they are built with paste0(); the literal "%s" was printed.
    tryCatch(
      # multi file comparison report - html
      writeMainReportFile(), #reportRelatedFunctions.R
      error = function(err) print_and_log(paste0('Error in converting main report to html format. ',err$message))
    )

    tryCatch(
      # file specific report - html
      writeStudyReportFile(), #reportRelatedFunctions.R
      error = function(err) print_and_log(paste0('Error in converting input file report to html format. ',err$message))
    )
  }
  else
    print_and_log('Writing Html report is skipped! required packages not found.','warning',display=.QC$config$debug$verbose)

  # EXCEL
  writeExcelReportFile()
}
writeMainReportFile <- function() {
  # Renders the multi-file comparison report as html.
  # The comparison is meaningless for a single input file, so do nothing then.
  if(length(.QC$qc.study.list) == 1)
    return(NULL)

  # check if template file exists and get the path
  multi.file.report.template <- get_multi_file_report_template()

  # if user wants the report file and template file exists
  if(.QC$config$output_parameters$html_report && !is.null(multi.file.report.template))
  {
    # path of html file
    report.output.path <- .QC$config$paths$html.report

    # BUGFIX/cleanup: the tryCatch expression previously ended with
    # return(TRUE), which returned from the whole function and left the
    # success log below as dead code (risking a duplicated message if the
    # control flow ever changed). Now the block just yields TRUE/FALSE and
    # the success message is logged exactly once.
    render.success <- tryCatch({
      # clear cache and RAM
      knitr::knit_meta(class=NULL, clean = TRUE)
      invisible(gc())

      render(multi.file.report.template,
             output_dir = .QC$config$paths$dir_output,
             output_file = report.output.path,
             quiet = TRUE)
      TRUE
    },
    error=function(err){
      print_and_log(paste('---[ERROR saving main html file!---]\nThe result is also saved as txt and is in the output folder.',err$message),
                    'warning',display=.QC$config$debug$verbose)
      FALSE
    }
    )

    if(render.success)
      print_and_log(sprintf('HTML report file saved as %s!',report.output.path),
                    'info')
  }else
  {
    print_and_log('Writing the report file is skipped!','info')
  }
}
writeStudyReportFile <- function(){
  # Renders one html report per input study file.

  # check if template file exists and get the path
  report.template <- get_study_specific_report_template()

  # if user wants the report file and template file exists
  if(.QC$config$output_parameters$html_report && !is.null(report.template))
  {
    # plain for loop: the iteration is run purely for its side effects
    # (rendering files), so sapply's collected return value was never used
    for(study in .QC$qc.study.list){
      tryCatch({
        # the rmd template reads the current study from the package environment
        .QC$thisStudy <- study

        # path of html file
        report.output.path <- study$html.report.path

        # clear cache and RAM
        knitr::knit_meta(class=NULL, clean = TRUE)
        invisible(gc())

        render(report.template,
               output_dir = .QC$config$paths$dir_output,
               output_file = report.output.path,
               quiet = TRUE)

        print_and_log(sprintf('HTML report file saved as %s!',report.output.path),
                      'info')
      }
      ,error=function(err){
        print_and_log(paste('---[ERROR saving study html file!---]\nThe result is also saved as txt and is in the output folder.',err$message),
                      'warning',display=.QC$config$debug$verbose)
      }
      )
    }
  }else
  {
    print_and_log('Writing the report file is skipped!','info')
  }
}
get_study_specific_report_template <- function() {
  # Pick the rich template when kableExtra is installed, the plain one otherwise,
  # and resolve it inside the installed package.
  template.name <- if (.QC$kableExtra.package.exists) "mainReport_extra.rmd" else "mainReport.rmd"
  report.template.file <- system.file("rmd", template.name, package = "GWASinspector")

  # Guard clause: bail out with NULL when the template is missing from the
  # package (system.file() returns "" then, which file.exists() rejects).
  if (!file.exists(report.template.file))
  {
    print_and_log('Report template file is not found in package! try re-installing GWASinspector package.','warning',display=.QC$config$debug$verbose)
    print_and_log('Writing the report file is skipped!','info')
    return(NULL)
  }

  report.template.file
}
get_multi_file_report_template <- function() {
  # Pick the rich template when kableExtra is installed, the plain one otherwise,
  # and resolve it inside the installed package.
  template.name <- if (.QC$kableExtra.package.exists) "multiFileReport_extra.rmd" else "multiFileReport.rmd"
  report.template.file <- system.file("rmd", template.name, package = "GWASinspector")

  # Guard clause: bail out with NULL when the template is missing from the
  # package (system.file() returns "" then, which file.exists() rejects).
  if (!file.exists(report.template.file))
  {
    print_and_log('Main-Report template file is not found in package! try re-installing GWASinspector package.','warning',display=.QC$config$debug$verbose)
    print_and_log('Writing the main-report file is skipped!','info')
    return(NULL)
  }

  report.template.file
}
# display a report table to user for each input file
report_to_txt_file <- function(study) {
# remove old report file if exists
if(file.exists(study$txt.report.path))
file.remove(study$txt.report.path)
# report intro
writeTXTreport('============================================================')
writeTXTreport(sprintf('================= %s v.%s ==================',
.QC$package.name,
.QC$script.version))
writeTXTreport('============================================================')
writeTXTreport(' ')
# writeTXTreport(paste('Script version:', .QC$script.version))
writeTXTreport(paste('System Information:', .QC$r.version))
writeTXTreport(sprintf('Start Time: %s', format(study$starttime, "%b %d %Y - %X")))
writeTXTreport(sprintf('End Time: %s', format(study$endtime, "%b %d %Y - %X")))
writeTXTreport(' ')
### ==================================
writeTXTreport(' ')
writeTXTreport('==========================================================')
writeTXTreport('==================== User preferences ====================')
writeTXTreport('==========================================================')
writeTXTreport(' ')
writeTXTreport(sprintf('Alterative header file: %s', basename(.QC$config$supplementaryFiles$header_translations)))
writeTXTreport(sprintf('Allele frequency standard reference dataset: %s', basename(.QC$config$supplementaryFiles$allele_ref_std)))
if(!is.na(.QC$config$supplementaryFiles$allele_ref_alt))
writeTXTreport(sprintf('Allele frequency alternate reference dataset: %s', basename(.QC$config$supplementaryFiles$allele_ref_alt)))
if(!is.na(.QC$config$supplementaryFiles$beta_ref_std))
writeTXTreport(sprintf('Effect size reference dataset: %s', basename(.QC$config$supplementaryFiles$beta_ref_std)))
writeTXTreport(' ')
# ===================================
writeTXTreport('==========================================================')
writeTXTreport('= Filter values for selecting High-Quality (HQ) variants =')
filter.table <- data.table(
"Allele frequency" = format(.QC$config$filters$HQfilter_FRQ,scientific = FALSE))
if("HWE_PVAL" %in% study$renamed.File.Columns.sorted)
filter.table <- cbind(filter.table, "HWE p-value" = format(.QC$config$filters$HQfilter_HWE,scientific = FALSE))
else
filter.table <- cbind(filter.table, "HWE p-value" = "Not included")
if("CALLRATE" %in% study$renamed.File.Columns.sorted)
filter.table <- cbind(filter.table, "Call-rate" = format(.QC$config$filters$HQfilter_cal,scientific = FALSE))
else
filter.table <- cbind(filter.table, "Call-rate" = "Not included")
if("IMP_QUALITY" %in% study$renamed.File.Columns.sorted)
filter.table <- cbind(filter.table, "Imputation quality" = format(.QC$config$filters$HQfilter_imp,scientific = FALSE))
else
filter.table <- cbind(filter.table, "Imputation quality" = "Not included")
# filter.table <- t(data.table(
# "Allele frequency" = format(.QC$config$filters$HQfilter_FRQ,scientific = FALSE),
# {
#
# },
# {
# if("CALLRATE" %in% study$renamed.File.Columns.sorted)
# "Call-rate" = format(.QC$config$filters$HQfilter_cal,scientific = FALSE)
# },
# {
# if("IMP_QUALITY" %in% study$renamed.File.Columns.sorted)
# "Imputation quality" = format(.QC$config$filters$HQfilter_imp, scientific = FALSE)
# }
# ))
filter.table <- t(filter.table)
colnames(filter.table) <- 'Value'
writeTXTreport(kable(filter.table,format = "rst"))
writeTXTreport(' ')
writeTXTreport(paste('Effect type:', .QC$config$input_parameters$effect_type))
### ==================================
writeTXTreport(' ')
writeTXTreport(' ')
writeTXTreport('==========================================================')
writeTXTreport('================= Input file description =================')
writeTXTreport('==========================================================')
writeTXTreport(' ')
# input file spec
writeTXTreport(sprintf('Input file name: %s', basename( study$file.path)))
writeTXTreport(sprintf('Input file line count (including header): %s', study$file.line.count))
writeTXTreport(sprintf('Input file ends with a new line: %s', study$file.endsWithNewLine))
# writeTXTreport(sprintf('Duplicated lines: %s', format(.QC$thisStudy$dup_lines_count,big.mark = ',',scientific = FALSE)))
# it is mentioned in log file
# if(study$hanNoneBaseAlleles)
# writeTXTreport('WARNING: Input file has unknown character for INDEL variants!')
writeTXTreport(' ')
## column names and translations
writeTXTreport(' ')
writeTXTreport('========== Column names and translations ================')
writeTXTreport(' ')
column.tbl <- rbind(.QC$thisStudy$original.File.Columns.sorted,
.QC$thisStudy$renamed.File.Columns.sorted)
rownames(column.tbl) <- c('Original', 'Renamed')
writeTXTreport(kable(t(column.tbl),format = "rst"))
writeTXTreport(' ')
writeTXTreport(' ')
writeTXTreport('================== Column report ======================')
writeTXTreport(' ')
### invalid items
b <- t(data.frame('CHR' = c(abs(study$column.NA.list$CHR - study$column.INVALID.list$CHR) ,
study$column.INVALID.list$CHR,
' '),
'POSITION' = c(abs(study$column.NA.list$POSITION - study$column.INVALID.list$POSITION) ,
study$column.INVALID.list$POSITION,
' '),
'EFFECT_ALL' = c(abs(study$column.NA.list$EFFECT_ALL - study$column.INVALID.list$EFFECT_ALL) ,
study$column.INVALID.list$EFFECT_ALL,
' '),
'OTHER_ALL' = c(abs(study$column.NA.list$OTHER_ALL - study$column.INVALID.list$OTHER_ALL) ,
study$column.INVALID.list$OTHER_ALL,
' '),
'EFFECT' = c(abs(study$column.NA.list$EFFECT - study$column.INVALID.list$EFFECT) ,
# study$column.INVALID.list$EFFECT,
' ',
' '),
'STDERR' = c(abs(study$column.NA.list$STDERR - study$column.INVALID.list$STDERR - study$column.INVALID.list$zero.STDERR) ,
study$column.INVALID.list$STDERR,
study$column.INVALID.list$zero.STDERR),
'EFF_ALL_FREQ' = c(abs(study$column.NA.list$EFF_ALL_FREQ - study$column.INVALID.list$EFF_ALL_FREQ - study$column.INVALID.list$minusone.EFF_ALL_FREQ),
study$column.INVALID.list$EFF_ALL_FREQ,
study$column.INVALID.list$minusone.EFF_ALL_FREQ),
'HWE_PVAL' = c(abs(study$column.NA.list$HWE_PVAL - study$column.INVALID.list$HWE_PVAL - study$column.INVALID.list$minusone.HWE_PVAL) ,
study$column.INVALID.list$HWE_PVAL,
study$column.INVALID.list$minusone.HWE_PVAL),
'PVALUE' = c(abs(study$column.NA.list$PVALUE - study$column.INVALID.list$PVALUE - study$column.INVALID.list$minusone.PVALUE) ,
study$column.INVALID.list$PVALUE,
study$column.INVALID.list$minusone.PVALUE),
'IMPUTED' = c(abs(study$column.NA.list$IMPUTED - study$column.INVALID.list$IMPUTED),
study$column.INVALID.list$IMPUTED,
' '),
'IMP_QUALITY' = c(abs(study$column.NA.list$IMP_QUALITY - study$column.INVALID.list$IMP_QUALITY) ,
study$column.INVALID.list$IMP_QUALITY,
' '),
'MARKER' = c(abs(study$column.NA.list$MARKER - study$column.INVALID.list$MARKER) ,
' ',
' '),
'N_TOTAL' = c(abs(study$column.NA.list$N_TOTAL - study$column.INVALID.list$N_TOTAL) ,
study$column.INVALID.list$N_TOTAL,
' '),
'STRAND' = c(abs(study$column.NA.list$STRAND - study$column.INVALID.list$STRAND) ,
study$column.INVALID.list$STRAND,
' '),
'CALLRATE' = c(abs(study$column.NA.list$CALLRATE - study$column.INVALID.list$CALLRATE - study$column.INVALID.list$minusone.CALLRATE),
study$column.INVALID.list$CALLRATE ,
study$column.INVALID.list$minusone.CALLRATE)
))
colnames(b) <- c('NA values','Invalid values','Uncertain values')
writeTXTreport(kable(b,format = "rst"))
## ===================================
writeTXTreport(' ')
writeTXTreport('=======================================================')
writeTXTreport('================= Variant processing ==================')
writeTXTreport('=======================================================')
writeTXTreport('* step1: removing variants with missing crucial values and duplicated lines.')
writeTXTreport('** step2: removing monomorphic variants and specified chromosomes.')
writeTXTreport('*** step3: removing mismatched, ambiguous and multi-allelic variants that could not be verified.')
writeTXTreport(' ')
count.table <- t(data.table(
"input variant count" = format(study$input.data.rowcount, big.mark="," , scientific = FALSE),
'Missing crucial variable' = calculatePercent(study$missing.crucial.rowcount,
study$input.data.rowcount,
pretty = TRUE),
'Duplicated variants' = calculatePercent(study$duplicate.count,
study$input.data.rowcount,
pretty = TRUE),
"variant count after step 1 *"= calculatePercent(study$rowcount.step1,
study$input.data.rowcount,
decimal.place=3,
pretty = TRUE),
'Monomorphic variants' = calculatePercent(study$monomorphic.count,
study$input.data.rowcount,
pretty = TRUE),
"variant count after step 2 **"= calculatePercent(study$rowcount.step2,
study$input.data.rowcount,
decimal.place=3,
pretty = TRUE),
"variant count after step 3 ***"= calculatePercent(study$rowcount.step3,
study$input.data.rowcount,
decimal.place=3,
pretty = TRUE)))
colnames(count.table) <- 'count'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(' ')
writeTXTreport('NOTE: All further reports are based on variants after step3 (which will be saved as output file).')
writeTXTreport(' ')
writeTXTreport(' ')
##==============================================
writeTXTreport('==================================================')
writeTXTreport('============ Description of variants =============')
count.table <- t(data.table(
'High Quality variants' = calculatePercent(study$HQ.count,
study$rowcount.step3,
pretty = TRUE),
'Low Quality variants' = calculatePercent(study$LQ.count,
study$rowcount.step3,
pretty = TRUE),
'Palindromic variants' = calculatePercent(study$palindromic.rows,
study$rowcount.step3,
pretty = TRUE),
'Non-Palindromic variants' = calculatePercent(study$non.palindromic.rows,
study$rowcount.step3,
pretty = TRUE),
'variants +' = calculatePercent(study$palindormicHighDiffEAF,
study$palindromic.rows,
pretty = TRUE),
'variants ++' = calculatePercent(study$nonpalindormicHighDiffEAF ,
study$non.palindromic.rows,
pretty = TRUE),
'variants +++' = calculatePercent(study$palindormicExtremeDiffEAF ,
study$palindromic.rows,
pretty = TRUE)))
colnames(count.table) <- 'count'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(sprintf('+ Palindromic variants with high allele frequency difference (> %s)',
.QC$config$filters$threshold_diffEAF))
writeTXTreport(sprintf('++ Non-palindromic variants with high allele frequency difference (> %s)',
.QC$config$filters$threshold_diffEAF))
writeTXTreport('+++ Palindromic variants with opposite allele frequency "compared to the reference" (> 0.65 for the input file and < 0.35 for the reference, or vice versa)')
writeTXTreport(' ')
###
writeTXTreport(' ')
writeTXTreport(paste('Negative strand variants:',study$neg.strand.count))
writeTXTreport(' ')
writeTXTreport(paste('Allele frequency = 0 :',study$column.INVALID.list$zero.EFF_ALL_FREQ))
writeTXTreport(' ')
writeTXTreport(paste('Allele frequency = 1 :',study$column.INVALID.list$one.EFF_ALL_FREQ))
writeTXTreport(' ')
### imputation table
writeTXTreport('Imputation status')
tbl = study$tables$imputed.tbl
colnames(tbl) <- c('','Count')
writeTXTreport(kable(tbl, align = "l",format = "rst"))
writeTXTreport(' ')
writeTXTreport(' ')
writeTXTreport('========================================================')
writeTXTreport('= Result from matching with standard reference dataset =')
writeTXTreport('========================================================')
## not helpful anymore
# match.table1 <- study$tables$match.ref.table
# colnames(match.table1)[colnames(match.table1) == 'Std_ref'] <- 'Standard Reference'
#
#
#
# match.table <- data.table(apply(match.table1,2, function(x)
# return(calculatePercent(x,
# study$rowcount.step2,
# pretty = TRUE,
# decimal.place = 3)
# )
# ))
#
# match.table <- cbind(colnames(match.table1),match.table)
# colnames(match.table) <- c('Reference' ,'Count')
#
#
# writeTXTreport(kable(match.table,format = "rst"))
writeTXTreport(' ')
#writeTXTreport('Variant types after matching with reference datasets\n')
writeTXTreport(kable(study$tables$multi_allele_count_preProcess,format = "rst"))
writeTXTreport(' ')
##========================================
# print_and_log('--------[Result from matching with standard reference file!]--------','info', cat = FALSE)
writeTXTreport(' ')
# writeTXTreport('========================================================')
# writeTXTreport('= Result from matching with standard reference dataset =')
# writeTXTreport('========================================================')
count.table <- t(data.table(
'Verified variants' = calculatePercent(study$found.rows.std,
study$rowcount.step2,
decimal.place=3,
pretty=TRUE),
'Not-found variants' = calculatePercent(study$not.found.rows.std,
study$rowcount.step2,
decimal.place=3,
pretty=TRUE),
# 'Mismatch variants' = calculatePercent(study$mismatched.rows.std,
# study$found.rows.std,
# decimal.place=3,
# pretty=TRUE),
# 'Non-verified multiallelic variants' = calculatePercent(study$multiAlleleVariants.rowcount,
# study$found.rows.std,
# decimal.place=3,
# pretty=TRUE),
# 'Ambiguous variants' = calculatePercent(study$ambiguos.rows,
# study$found.rows.std,
# pretty=TRUE),
'Flipped variants' = calculatePercent(study$flipped.rows.std,
study$found.rows.std,
pretty=TRUE),
'Switched variants' = calculatePercent(study$switched.rows.std,
study$found.rows.std,
pretty=TRUE),
'============================' ='==============',
'Allele frequency correlation' = '',
' r (all variants)' = study$AFcor.std_ref,
' r (palindromic)' = study$AFcor.palindromic.std_ref,
' r (non-palindromic)' = study$AFcor.non.palindromic.std_ref,
' r (INDEL)' = study$AFcor.std_ref.indel))
colnames(count.table) <- 'count'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(' ')
##=========================================
if(!is.na(.QC$config$supplementaryFiles$allele_ref_alt))
{
# print_and_log('-------[Result from matching with alternate reference file!]-------','info', cat = FALSE)
writeTXTreport(' ')
writeTXTreport('=========================================================')
writeTXTreport('= Result from matching with alternate reference dataset =')
writeTXTreport('=========================================================')
count.table <- t(data.table(
'Verified variants' = calculatePercent(study$found.rows.alt ,
study$not.found.rows.std,
decimal.place=3,
pretty=TRUE),
'Not-found variants' = calculatePercent(study$not.found.rows.alt ,
study$not.found.rows.std,
decimal.place=3,
pretty=TRUE),
# 'Mismatch variants' = calculatePercent(study$mismatched.rows.alt ,
# study$found.rows.alt,
# decimal.place=3,
# pretty=TRUE),
'Flipped variants' = calculatePercent(study$flipped.rows.alt ,
study$found.rows.alt,
pretty=TRUE),
'Switched variants' = calculatePercent(study$switched.rows.alt ,
study$found.rows.alt,
pretty=TRUE),
'============================' ='==============',
'Allele frequency correlation' = '',
' r (all variants)' = study$AFcor.alt_ref,
' r (palindromic)' = study$AFcor.palindromic.alt_ref,
' r (non-palindromic)' = study$AFcor.non.palindromic.alt_ref))
colnames(count.table) <- 'count'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(' ')
}
##========================================
writeTXTreport(' ')
writeTXTreport('AF correlation for each chromosome')
writeTXTreport(kable(study$AFcor.std_ref.CHR ,format = "rst",align = "c"))
##=========================================
# print_and_log('-------[Calculated variables]-------','info', cat = FALSE)
writeTXTreport(' ')
writeTXTreport('==============================================')
writeTXTreport('============ QC summary statistics ===========')
writeTXTreport('==============================================')
writeTXTreport(' ')
writeTXTreport('Pvalue correlation (observed vs expected)')
writeTXTreport('Note: Only variants with a valid P-value are used for P-value correlation calculation.')
count.table <- t(data.table(
'included variants' = calculatePercent(study$rownum.PVcor,
study$rowcount.step3,
pretty = TRUE),
' r' = study$PVcor
))
colnames(count.table) <- 'value'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(' ')
writeTXTreport(' ')
count.table <- t(data.table(
'Skewness' = study$skewness,
'Skewness (HQ)' = study$skewness.HQ,
'Kurtosis' = study$kurtosis,
'Kurtosis (HQ)'= study$kurtosis.HQ,
"Visscher's stat" = study$Visschers.stat ,
"Visscher's stat (HQ)" = study$Visschers.stat.HQ,
"Lambda - total" = study$lambda ,
'Lambda - genotyped' = study$lambda.gen,
'Lambda - imputed' = study$lambda.imp,
'============================' = '==============',
'Sample Size (Max)' = study$MAX_N_TOTAL,
"Fixed HWE P-value" = study$fixed.hwep,
"Fixed Imputation Quality" = study$fixed.impq,
"Fixed Call Rate" = study$fixed.callrate,
"Fixed Sample Size" = study$fixed.n_total
))
colnames(count.table) <- 'value'
writeTXTreport(kable(count.table,format = "rst"))
writeTXTreport(' ')
##=========================================
# print_and_log('-------[Calculated variables]-------','info', cat = FALSE)
writeTXTreport(' ')
writeTXTreport('==============================================')
writeTXTreport('========== Distribution statistics ==========')
writeTXTreport('==============================================')
writeTXTreport(' ')
writeTXTreport(sprintf('All variants (%s)' , prettyNum(.QC$thisStudy$rowcount.step3,big.mark = ",")))
writeTXTreport(kable(t(study$tables$variable.summary), format = "rst"))
writeTXTreport(' ')
if(nrow(study$tables$variable.summary.HQ ) > 0 & study$HQ.count != study$rowcount.step3)
{
writeTXTreport(sprintf('HQ variants (%s)' , prettyNum(.QC$thisStudy$HQ.count,big.mark = ",")))
writeTXTreport(kable(t(study$tables$variable.summary.HQ), format = "rst"))
writeTXTreport(' ')
}
##========================================
# writeTXTreport(' ')
# writeTXTreport('==============================================')
# writeTXTreport('============= Column statistics =============')
# writeTXTreport(' ')
### chromosome table
if(!all(is.na(study$tables$CHR.tbl)))
{
writeTXTreport(' ')
writeTXTreport('Variant count for each chromosome')
tbl = study$tables$CHR.tbl
colnames(tbl) <- c('Chromosome','Variant count')
writeTXTreport(kable(tbl, align = "c",format = "rst"))
}
if(length(study$missing_chromosomes) >0 )
{
writeTXTreport(' ')
writeTXTreport(sprintf("%s %s","Missing chromosome(s) number",paste(.QC$thisStudy$missing_chromosomes,collapse = ", ")))
}
writeTXTreport(' ')
### alleles
writeTXTreport(' ')
writeTXTreport('Effect allele distribution in SNP variants')
tbl = merge(study$tables$EFFECT_ALL.tbl,
study$tables$EFFECT_ALL.post.matching.tbl,
by="EFFECT_ALL",
all = TRUE)
tbl = t(tbl)
rownames(tbl) <- c('Allele','Count (input file)','Count (post-matching)')
colnames(tbl) <- tbl[1,]
writeTXTreport(kable(tbl[-1,], align = "c",format = "rst"))
writeTXTreport(' ')
###
writeTXTreport(' ')
writeTXTreport('Other allele distribution in SNP variants')
tbl = merge(study$tables$OTHER_ALL.tbl,
study$tables$OTHER_ALL.post.matching.tbl,
by="OTHER_ALL",
all = TRUE)
tbl = t(tbl)
rownames(tbl) <- c('Allele','Count (input file)','Count (post-matching)')
colnames(tbl) <- tbl[1,]
writeTXTreport(kable(tbl[-1,], align = "c",format = "rst"))
##
## END OF REPORT
# =============
print_and_log(sprintf('Report file saved as \'%s\'',study$txt.report.path),
'info')
}
# Persist the per-study object as an RDS file so that results from files
# processed in separate runs can be compared against each other later.
save_rds_file <- function(study) {
  tryCatch(
    {
      # Only write the object when the configuration asked for it.
      if (.QC$config$output_parameters$object_file) {
        saveRDS(object = study, file = study$rds.study.rds.path, version = '2')
      }
    },
    error = function(err) {
      # A failed save is non-fatal: log a warning and continue.
      print_and_log(paste('Could not save study RDS object file:', err$message),
                    'warning', display = .QC$config$debug$verbose)
    }
  )
}
|
b4e79811d94acb0496ac8534e62784428959f1f0
|
08b6d63a87add543e5aab98aab3386ece7aeef1c
|
/helpers.R
|
036c5283d81ad47992da0673e431a54ba24eaa80
|
[] |
no_license
|
cyn2903flo/data_r
|
5bc148a86fb70eae013396ae1fc4c185ef586101
|
8ca6f64a91401d64bf2d29d9c06f60148f88baea
|
refs/heads/master
| 2022-12-28T17:07:03.395867
| 2020-10-14T21:19:11
| 2020-10-14T21:19:11
| 295,245,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,270
|
r
|
helpers.R
|
# Form a one-sentence (Spanish) summary from one year of annual
# visitation data for a single park.
summarize_park <- function(one_year){
  # label_comma() returns a formatter; it is referenced inside the template.
  comma <- scales::label_comma()
  glue::glue_data(
    one_year,
    "En { year }, { park_name } se tuvieron { comma(recreation_visits) } visitas."
  )
}
# Plot yearly recreation visits as a line, marking one highlighted year
# with a point.
plot_annual <- function(annual_data, highlight_year = 2019){
  ggplot(annual_data, aes(year, recreation_visits)) +
    # point layer is restricted to the highlighted year only
    geom_point(data = function(d) filter(d, year == highlight_year)) +
    geom_line() +
    scale_y_continuous(labels = scales::label_comma()) +
    labs(x = "", y = "Visitas")
}
# Plot the within-year monthly distribution of visits: one faint line per
# year, the highlighted year emphasized, optionally overlaid with the
# across-year monthly mean.
plot_monthly <- function(monthly_data, highlight_year = 2019,
                         display_average = TRUE){
  base_plot <- monthly_data %>%
    ggplot(aes(month, recreation_visits_proportion)) +
    geom_line(aes(group = year), alpha = 0.1) +
    # draw the highlighted year on top of the faint background lines
    geom_line(data = function(d) filter(d, year == highlight_year)) +
    scale_x_continuous(breaks = 1:12, labels = month.abb) +
    scale_y_continuous(labels = scales::label_percent()) +
    labs(x = "", y = "Visitas anuales")
  if(display_average) {
    base_plot <- base_plot + stat_summary(fun = mean,
                                          geom = "line", color = "#325D88", size = 3.5)
  }
  base_plot
}
|
4ee319ce8caad1e2046d44a5c2ae83453f0a6661
|
f9ee0159033cfecdf34c94b1cea99db0cc9f88b6
|
/inference/collate-comparisons.R
|
c98f1aee692c87d418a140840f041f8df33dd0bb
|
[] |
no_license
|
petrelharp/tortoisescape
|
75cce4bea5c921f261506e6e382dadd15456f750
|
4e769efdaabf16e13c690510719bf26228880e58
|
refs/heads/master
| 2020-04-15T22:03:47.835144
| 2018-08-10T04:32:42
| 2018-08-10T04:32:42
| 23,332,197
| 1
| 1
| null | 2017-06-12T22:00:46
| 2014-08-25T23:33:02
|
HTML
|
UTF-8
|
R
| false
| false
| 5,186
|
r
|
collate-comparisons.R
|
#!/usr/bin/Rscript
# Command-line driver: collate .RData result files from comparison-results.R
# into a single .RData object and a CSV summary.
usage <- "Collate results produced by comparison-results.R, stored in the .RData files passed in as arguments.
Usage:
Rscript collate-comparisons.R (outfile) ( file names )
"
# When run interactively, read arguments from stdin; otherwise use the
# command line. First argument = output file, the rest = input files.
argvec <- if (interactive()) { scan(what='char') } else { commandArgs(TRUE) }
if (length(argvec) < 2) { stop(usage) }
outfile <- argvec[1]
infiles <- argvec[-1]
# Missing inputs are warned about and skipped later (see infiles[readable]).
readable <- file.exists(infiles)
for (k in which(!readable)) { warning(infiles[k], " does not exist.\n") }
# resistance-fns.R supplies project helpers (presumably weighted.median and
# read.json.config used below -- confirm).
source("resistance-fns.R")
# NOTE(review): require() returns FALSE instead of erroring when raster is
# absent; library() would fail fast.
require(raster)
# Build a symmetric pairwise-distance matrix aligned to pimat's dimnames.
#
# geodist.tab : long-format table; column 1 = first sample name, column 2 =
#               second sample name, column 3 = the distance between them.
# pimat       : square matrix whose dimnames and shape define the output;
#               its values are not used.
# Returns a matrix shaped like pimat with distances filled in symmetrically;
# pairs absent from the table (in both orientations) remain NA.
gmat <- function (geodist.tab,pimat) {
    geodist <- pimat
    geodist[] <- NA
    geodist.inds <- cbind( match(geodist.tab[,1],rownames(geodist)), match(geodist.tab[,2],colnames(geodist)) )
    # keep only table rows whose both names are present in pimat's dimnames
    usethese <- apply( !is.na(geodist.inds), 1, all )
    # BUG FIX: drop = FALSE keeps a single matching row as a 2-column index
    # matrix. Without it, one row collapses to a length-2 vector and R
    # performs LINEAR indexing, writing the value into the wrong cells.
    geodist[ geodist.inds[usethese, , drop = FALSE] ] <- geodist.tab[usethese,3]
    # mirror the known entries onto the opposite triangle
    geodist[is.na(geodist)] <- t(geodist)[is.na(geodist)]
    geodist
}
# null model fit
# Baseline: regress pairwise divergence (pimat) on geographic distance alone,
# and summarize the residuals with the same weighted statistics used for the
# fitted models below, so the two are directly comparable.
null.config.file <- "summaries/all/config.json"
null.config <- read.json.config(null.config.file)
# Load the setup .RData (which presumably provides pimat -- confirm) into a
# private environment so it does not clobber the global workspace.
null.env <- new.env()
load(file.path(dirname(null.config.file),null.config$setup_files),envir=null.env)
assign("geodist.tab", read.csv( file.path(dirname(null.config.file),dirname(null.config$sample_locs),"geog_distance.csv"), header=TRUE, stringsAsFactors=FALSE ), null.env )
assign("pcs", read.csv(file.path(dirname(null.config.file),dirname(null.config$divergence_file),"pcs.csv"),header=TRUE), null.env )
assign("geodist", with(null.env, { gmat(get("geodist.tab",null.env),pimat) } ), null.env )
null.results <- with( null.env, {
# down-weight samples with many close (< 25 km) neighbors so dense clusters
# do not dominate the fit
nearby.weights <- 1 / rowSums( geodist < 25e3 )
pairwise.weights <- outer(nearby.weights,nearby.weights,"*")
# drop pairs whose PC1 signs differ (opposite sides of the main genetic split)
omit.comparisons <- ( pcs$PC1[match(rownames(pimat),pcs$etort)][row(pimat)] * pcs$PC1[match(colnames(pimat),pcs$etort)][col(pimat)] < 0 )
# drop the two known duplicated samples
dup.inds <- match( c( "etort-296", "etort-297" ), rownames(pimat) )
omit.comparisons <- ( omit.comparisons | (row(pimat) %in% dup.inds) | (col(pimat) %in% dup.inds) )
# and omit self comparisons and ONLY UPPER TRIANGLE
omit.comparisons <- ( omit.comparisons | (row(pimat) < col(pimat)) )
resids <- resid( lm( pimat[!omit.comparisons] ~ geodist[!omit.comparisons] ) )
# weighted median abs( residual )
# weighted.median is presumably defined in resistance-fns.R -- confirm
w.mad <- weighted.median( abs(resids), pairwise.weights[!omit.comparisons] )
w.mse <- sqrt( weighted.mean( resids^2, pairwise.weights[!omit.comparisons], na.rm=TRUE ) )
# same fields as the per-file results below so they can be rbind-ed
list(
summary="null",
mad=w.mad,
mse=w.mse,
converged=NA,
n.refs=NA,
file=NA
)
} )
# For each readable input file, load its saved objects (load() injects
# pimat, hts, config.file, config, local.config, trust.optim.results -- names
# inferred from usage, confirm against comparison-results.R) and compute the
# same weighted residual summaries as the null model. The null result is
# prepended so all models appear in one list.
results <- c( list(null.results), lapply( infiles[readable], function (infile) {
load(infile)
pcs <- read.csv(file.path(dirname(config.file),dirname(config$divergence_file),"pcs.csv"),header=TRUE)
# drop pairs straddling the main PC1 split (opposite signs)
omit.comparisons <- ( pcs$PC1[match(rownames(pimat),pcs$etort)][row(pimat)] * pcs$PC1[match(colnames(pimat),pcs$etort)][col(pimat)] < 0 )
# colors by PC1 side; computed but unused here (kept for parity with the
# plotting script -- confirm)
pc.cols <- adjustcolor( ifelse( pcs$PC1[match(rownames(pimat),pcs$etort)][row(pimat)] < 0, "purple", "blue" ), 0.5 )
# remove duplicates: these are (etort-156 / etort-296 ) and (etort-143 / etort-297)
dup.inds <- match( c( "etort-296", "etort-297" ), rownames(pimat) )
omit.comparisons <- ( omit.comparisons | (row(pimat) %in% dup.inds) | (col(pimat) %in% dup.inds) )
# and omit self comparisons
omit.comparisons <- ( omit.comparisons | (row(pimat) == col(pimat)) )
# hts is non-numeric when the fit failed; summaries are NA in that case
if (is.numeric(hts)) {
# symmetrize hitting times and add the fitted intercept
fitted <- paramvec(local.config)[1] + (hts+t(hts))/2
resids <- (fitted - pimat)
resids[omit.comparisons] <- NA
fitted[omit.comparisons] <- NA
# weight residuals by 1 / number of other samples within 25km
geodist.tab <- read.csv( file.path(dirname(config.file),dirname(config$divergence_file),"geog_distance.csv"), header=TRUE, stringsAsFactors=FALSE )
geodist <- gmat(geodist.tab,pimat)
nearby.weights <- 1 / rowSums( geodist < 25e3 )
pairwise.weights <- outer(nearby.weights,nearby.weights,"*")
ut <- upper.tri(pimat,diag=FALSE)
# weighted median abs( residual )
w.mad <- weighted.median( abs(resids)[ut], pairwise.weights[ut] )
w.mse <- sqrt( weighted.mean( resids[ut]^2, pairwise.weights[ut], na.rm=TRUE ) )
} else {
w.mad <- w.mse <- NA
}
# one record per input file; same fields as null.results
return( list(
summary=basename(dirname(config.file)),
mad=w.mad,
mse=w.mse,
converged=trust.optim.results$converged,
n.refs=length(local.config$reference_inds),
file=infile
) )
} ) )
# Persist the collated results: a binary .RData copy plus a CSV summary
# sorted by the weighted median absolute residual (MAD), best model first.
cat("Saving results to ", outfile, "\n")
save(results, file = outfile)
csvfile <- gsub("RData$", "csv", outfile)
cat(" and writing to ", csvfile, "\n")
collated <- do.call(rbind, lapply(results, as.data.frame))
collated <- collated[order(collated[, "mad"]), ]
write.csv(collated, file = csvfile, row.names = FALSE)
|
b3a3abf3902002dc4cfc2574ce67d886aaeb5ac8
|
5419f18469d8308f34a37a1c74d7156130b45573
|
/data_analysis/data_analysis.r
|
6632cb4efbac1f39447577dca0ebb1933da738bf
|
[] |
no_license
|
i-pan/i2b2-HST2014
|
aa95e153321c1f737f8c4cd3cbf84706dcc4e4d7
|
6ba11a648d3a917886f08a9b77721cf129b29930
|
refs/heads/master
| 2021-01-23T07:30:05.188861
| 2015-01-23T21:16:28
| 2015-01-23T21:16:28
| 29,753,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,639
|
r
|
data_analysis.r
|
##### RUNTIME #####
# Start a timer; total runtime can be reported later via proc.time() - ptm.
ptm <- proc.time()
##### LOAD LIBRARIES #####
library(survival)  # clogit() for conditional logistic regression
library(locfdr)    # local false-discovery-rate estimation
set.seed(10)  # fixed seed: the control-matching step below uses sample()
##### LOAD DATA #####
dem <- read.csv('dem.csv') # demographics file
dx <- read.csv('dx.csv') # diagnostics file
# phewas groupings, obtained from:
# http://knowledgemap.mc.vanderbilt.edu/research/content/phewas
phewas.code <- read.table("phewas_code.txt", header = T)
phewas.code <- as.matrix(phewas.code)
# strip embedded spaces from the PheWAS code strings so lookups match exactly
phewas.code[, 'phewas_code'] <- gsub(" ", "", phewas.code[, 'phewas_code'])
##### ORGANIZE DATA #####
# split into cases and controls
case <- dem[dem$grp == "Case", ]
cont <- dem[dem$grp == "Control", ]
case <- cbind(case, status = rep(1, length = nrow(case)))
cont <- cbind(cont, status = rep(0, length = nrow(cont)))
# create age bins (X-year bins) i.e. 10-year bins = 0-10, 10-20, etc.
year.bins <- 10
case <- cbind(case,
age_bin = as.numeric(cut(case$age,
breaks = seq(0, 11*year.bins,
by = year.bins),
labels = c(seq(0, year.bins, by = 1)),
right = TRUE)))
cont <- cbind(cont,
age_bin = as.numeric(cut(cont$age,
breaks = seq(0, 11*year.bins,
by = year.bins),
labels = c(seq(0, year.bins, by = 1)),
right = TRUE)))
# create facts bins
fact.bins <- 20
fact.qtile <- c(quantile(case$num_facts, probs = seq(0, 1, by = 1/fact.bins)))
case <- cbind(case,
fact_bin = as.numeric(cut(case$num_facts,
breaks = fact.qtile,
labels = seq(1, fact.bins,
by = 1),
include.lowest = TRUE)))
cont <- cbind(cont,
fact_bin = as.numeric(cut(cont$num_facts,
breaks = fact.qtile,
labels = seq(1, fact.bins,
by = 1),
include.lowest = TRUE)))
# sort by gender, race, age, facts
case <- case[order(case$gender, case$race, case$age_bin, case$fact_bin,
case$num_facts), ]
cont <- cont[order(cont$gender, cont$race, cont$age_bin, cont$fact_bin,
cont$num_facts), ]
##### MATCH CASES TO CONTROLS (1-TO-1) #####
index <- vector("list", nrow(case))
index.sample <- vector("numeric", nrow(case))
for(each.case in 1:nrow(case)) {
print(paste("Matching case", each.case,
"based on gender, race, exact age, exact facts ..."))
logic.case <- cont$gender == case[each.case, ]$gender &
cont$race == case[each.case, ]$race &
cont$age == case[each.case, ]$age &
cont$num_facts == case[each.case, ]$num_facts
if(!(TRUE %in% logic.case)) {
print("Could not find exact match ...")
print(paste("Matching case", each.case,
"based on gender, race, age bin, exact facts ..."))
logic.case <- cont$gender == case[each.case, ]$gender &
cont$race == case[each.case, ]$race &
cont$age_bin == case[each.case, ]$age_bin &
cont$num_facts == case[each.case, ]$num_facts
if(!(TRUE %in% logic.case)) {
print("Could not find exact match ...")
print(paste("Matching case", each.case,
"based on gender, race, age bin, fact bin ..."))
logic.case <- cont$gender == case[each.case, ]$gender &
cont$race == case[each.case, ]$race &
cont$age_bin == case[each.case, ]$age_bin &
cont$fact_bin == case[each.case, ]$fact_bin
if(!(TRUE %in% logic.case)) {
print("Could not find exact match ...")
print(paste("Matching case", each.case,
"based on gender, age bin, fact bin ..."))
logic.case <- cont$gender == case[each.case, ]$gender &
cont$age_bin == case[each.case, ]$race &
cont$fact_bin == case[each.case, ]$fact_bin
if(!(TRUE %in% logic.case)) {
print("Could not find exact match ...")
print(paste("Matching case", each.case,
"based on age bin, fact bin ..."))
logic.case <- cont$age_bin == case[each.case, ]$age_bin &
cont$fact_bin == case[each.case, ]$fact_bin
if(!(TRUE %in% logic.case)) {
print("Could not find exact match ...")
print(paste("Matching case", each.case, "based on fact bin ..."))
logic.case <- cont$fact_bin == case[each.case, ]$fact_bin
}
}
}
}
}
index[[each.case]] <- which(logic.case)
if(length(index[[each.case]]) == 1) {
index.sample[each.case] <- index[[each.case]]
} else {
index.sample[each.case] <- sample(index[[each.case]], 1)
}
}
cont.sample <- cont[index.sample, ]
# combine cases and controls
case.cont <- rbind(case, cont.sample)
##### OPTIONAL CODE TO PheWAS CODES BASED ON ROLLUP BOOLEAN VALUE #####
# for(each.code in 1:nrow(phewas.code)) {
# if(phewas.code[each.code, ]$rollup_bool == 1) {
# phewas.code[each.code, ]$phewas_code <-
# trunc(phewas.code[each.code, ]$phewas_code)
# }
# }
##### ASSIGN PheWAS CODES TO RESPECTIVE ICD-9 CODES #####
case.cont.ID <- case.cont$patient_num
case.cont.dx <- dx[which(dx$patient_num %in% case.cont.ID), ]
case.cont.dx <- as.matrix(case.cont.dx)
case.cont.dx <- cbind(case.cont.dx,
phewas_code = rep(0, length = nrow(case.cont.dx)),
exclude_range = rep(0, length = nrow(case.cont.dx)))
tmp.dx.index <- which(case.cont.dx[, 2] %in% phewas.code[, 'icd9'])
case.cont.dx <- case.cont.dx[tmp.dx.index, ]
store.phewas.code <- character(length=nrow(case.cont.dx))
store.exclude.range <- character(length=nrow(case.cont.dx))
icd9.codes <- data.frame(table(case.cont.dx[, 2]))
icd9.codes <- as.character(icd9.codes$Var1)
store.phewas.code <- character(length=nrow(case.cont.dx))
store.exclude.range <- character(length=nrow(case.cont.dx))
for(i in 1:length(icd9.codes)) {
print(paste("Retrieving PheWAS code for ICD-9 code", i, "of", length(icd9.codes)))
temp.phewas.code <- phewas.code[phewas.code[, 'icd9'] == icd9.codes[i], 'phewas_code']
temp.exclude.range <- phewas.code[phewas.code[, 'icd9'] == icd9.codes[i], 'exclude_range']
store.index <- which(case.cont.dx[, 2] == icd9.codes[i])
store.phewas.code[store.index] <- temp.phewas.code
store.exclude.range[store.index] <- temp.exclude.range
}
case.cont.dx[, 3] <- store.phewas.code
case.cont.dx[, 4] <- store.exclude.range
# index <- 1
# for(each.code in case.cont.dx[, 2]) {
# print(paste("Retrieving PheWAS code for row", index, "of row", nrow(case.cont.dx), "..."))
# store.phewas.code[index] <- phewas.code[phewas.code[, 'icd9'] == each.code, 'phewas_code']
# store.exclude.range[index] <- phewas.code[phewas.code[, 'icd9'] == each.code, 'exclude_range']
# index <- index + 1
# }
# case.cont.dx[, 3] <- store.phewas.code
# case.cont.dx[, 4] <- store.exclude.range
##### CREATE A DISEASE MATRIX BASED ON PheWAS GROUPINGS #####
##### IF ELEMENT i,j = 1, THEN CASE i HAS DISEASE j, 0 OTHERWISE #####
case.cont.dx <- case.cont.dx[complete.cases(case.cont.dx), ]
# drop diagnoses in the 555-564.99 exclusion range (overlaps the phenotype)
case.cont.dx <- case.cont.dx[case.cont.dx[, 4] != "555-564.99", ]
case.cont.dx[, 1] <- as.integer(case.cont.dx[, 1])
dis.names <- as.character(as.data.frame(table(case.cont.dx[, 3]))$Var1)
# one row per matched subject (cases first, then the sampled controls),
# one column per PheWAS code, plus a leading patient_num column
dis.matrix <- matrix(0, nrow = nrow(case.cont), ncol = length(dis.names))
colnames(dis.matrix) <- dis.names
dis.matrix <- cbind(patient_num = case.cont.ID, dis.matrix)
for(i in 1:nrow(case.cont.dx)) {
  print(paste("Operating on row", i, "of row", nrow(case.cont.dx), "..."))
  tmp.ID <- as.integer(case.cont.dx[i, 1])
  tmp.phewas <- case.cont.dx[i, 3]
  # mark the subject/disease cell (column addressed by PheWAS code name)
  dis.matrix[dis.matrix[, 1] == tmp.ID, tmp.phewas] <- 1
}
dis.matrix.case <- dis.matrix[1:nrow(case), ]
# BUG FIX: the control block has nrow(cont.sample) (= nrow(case)) rows, not
# nrow(cont). Indexing with nrow(cont) over-runs the matrix whenever the
# control pool is larger than the case set.
dis.matrix.cont <- dis.matrix[(nrow(case)+1):nrow(dis.matrix), ]
# per-disease incidence counts in each group (skip patient_num column)
dis.matrix.col <- ncol(dis.matrix)-1
dis.incid.case <- numeric(dis.matrix.col)
dis.incid.cont <- numeric(dis.matrix.col)
for(each.dis in 1:dis.matrix.col) {
  dis.incid.case[each.dis] <- sum(dis.matrix.case[, each.dis+1])
  dis.incid.cont[each.dis] <- sum(dis.matrix.cont[, each.dis+1])
}
##### BEGIN COMPUTING ODDS RATIOS #####
# establish threshold for disease incidence
thres <- 5
reach.thres.case <- which(dis.incid.case >= thres) + 1
reach.thres.cont <- which(dis.incid.cont >= thres) + 1
reach.thres <- intersect(reach.thres.case, reach.thres.cont)
dis.matrix.thres <- dis.matrix[, reach.thres]
# drop Crohn's disease, if present
dis.matrix.thres <- dis.matrix.thres[, !(colnames(dis.matrix.thres) %in% "555.10")]
case.cont.data <- cbind(case.cont, dis.matrix.thres)
# compute logistic regression
dis.matrix.thres.col <- ncol(dis.matrix.thres)
coefs.nofacts <- numeric(dis.matrix.thres.col)
coefs.facts <- numeric(dis.matrix.thres.col)
pvalues.nofacts <- numeric(dis.matrix.thres.col)
pvalues.facts <- numeric(dis.matrix.thres.col)
zvalues.nofacts <- numeric(dis.matrix.thres.col)
zvalues.facts <- numeric(dis.matrix.thres.col)
dis.matrix.thres <- cbind(case.cont.data$status, case.cont.data$num_facts,
dis.matrix.thres)
for(i in 1:dis.matrix.thres.col) {
print(paste("Fitting logistic regression for disease", i, "..."))
fit <- glm(dis.matrix.thres[, i+2] ~ dis.matrix.thres[, 1] +
dis.matrix.thres[, 2],
family=binomial) # facts in model
fit2 <- glm(dis.matrix.thres[, i+2] ~ dis.matrix.thres[, 1],
family=binomial) # no facts
pvalues.facts[i] <- summary(fit)$coefficients[, 4][2]
pvalues.nofacts[i] <- summary(fit2)$coefficients[, 4][2]
coefs.facts[i] <- coef(fit)[2]
coefs.nofacts[i] <- coef(fit2)[2]
zvalues.facts[i] <- coef(summary(fit))[, "z value"][2]
zvalues.nofacts[i] <- coef(summary(fit2))[, "z value"][2]
}
# compute odds ratios
odds.facts <- exp(coefs.facts)
odds.nofacts <- exp(coefs.nofacts)
# write out values to a table
temp.df <- data.frame(odds_facts = odds.facts, odds_nofacts = odds.nofacts,
coefs_facts = coefs.facts, coefs_nofacts = coefs.nofacts,
pvalues_facts = pvalues.facts, pvalues_nofacts = pvalues.nofacts,
zvalues_facts = zvalues.facts, zvalues_nofacts = zvalues.nofacts)
write.csv(temp.df, "values_matched.csv")
# Local false discovery rate on the unadjusted z-values. Fit the locfdr
# model ONCE and reuse it (the original fitted it twice, once for $fdr and
# once for $Efdr, doubling the work for identical results).
locfdr.fit <- locfdr(temp.df$zvalues_nofacts, bre = 100)
fdr <- locfdr.fit$fdr
Efdr <- locfdr.fit$Efdr
# Significant comorbidities: locally significant fdr AND odds ratio > 1
# (i.e., enriched in cases).
fdr.sig.index <- which(fdr < Efdr[3])
odds.sig.index <- which(temp.df$odds_nofacts > 1)
sig.index <- intersect(fdr.sig.index, odds.sig.index)
# Skip the two prepended covariate columns; the rest are disease codes.
all.disease.names <- colnames(dis.matrix.thres)[3:ncol(dis.matrix.thres)]
# Map significant PheWAS codes to human-readable names (vapply avoids
# growing a character vector element by element).
comorbs <- vapply(
  all.disease.names[sig.index],
  function(code) {
    as.character(phewas.code[phewas.code[, "phewas_code"] == code,
                             "phewas_string"][1])
  },
  character(1),
  USE.NAMES = FALSE
)
comorbs.df <- data.frame(Comorbidities = comorbs,
                         Odds_Ratios = temp.df$odds_nofacts[sig.index],
                         PheWAS_Codes = all.disease.names[sig.index])
write.csv(comorbs.df, file = "comorbidities_odds_ratios_long_nofacts_5.csv")
# conditional logistic regression
# Stratify on binned fact counts so each stratum compares subjects with
# similar healthcare utilization.
cond.coefs <- numeric(dis.matrix.thres.col)
cond.pvalues <- numeric(dis.matrix.thres.col)
# BUG FIX: cond.zvalues was never preallocated in the original.
cond.zvalues <- numeric(dis.matrix.thres.col)
for (i in seq_len(dis.matrix.thres.col)) {
  print(paste("Fitting conditional logistic regression for disease", i,
              "..."))
  cond.fit <- clogit(dis.matrix.thres[, i + 2] ~ dis.matrix.thres[, 1] +
                       dis.matrix.thres[, 2] +
                       strata(case.cont.data$fact_bin))
  cond.pvalues[i] <- summary(cond.fit)$logtest[3]
  cond.coefs[i] <- coef(cond.fit)[1]
  # BUG FIX: the original line was an incomplete assignment
  # ("cond.zvalues[i] <-" with no right-hand side), a syntax error that
  # aborts the whole script. Store the Wald z statistic of the status term
  # (first row, "z" column of the coxph-style coefficient table).
  cond.zvalues[i] <- coef(summary(cond.fit))[1, "z"]
}
# compute odds ratios
# Conditional-model odds ratios (exponentiated log-odds).
cond.odds <- exp(cond.coefs)
# plot histograms and densities
# Overlay the odds-ratio distributions from all three models on one plot:
# red = LR with facts, blue = LR without facts, green = conditional LR.
# NOTE(review): the y-limit is scaled from the red density only — the other
# two curves may be clipped if they peak higher; confirm this is intended.
png.file.name <- paste("Odds_Ratios_", thres, ".png", sep = "")
png(png.file.name)
odds.density <- density(odds.facts)
hist(odds.facts, col=rgb(1,0,0,1/4),
     prob=T,
     breaks=20,
     main="Distribution of Odds Ratios",
     xlab="Odds Ratios",
     xaxt="n",          # suppress default x axis; drawn manually below
     xlim=c(0,4),
     ylim=c(0, max(odds.density$y)*1.5))
axis(side=1, at=seq(0, max(odds.facts)+1, by = 0.5))
lines(odds.density, col=rgb(1,0,0,1), lwd=3)
# Overlay (add=T) the unadjusted-model distribution in blue.
hist(odds.nofacts, col=rgb(0,0,1,1/4), prob=T, breaks=20, add=T)
lines(density(odds.nofacts), col=rgb(0,0,1,1), lwd=3)
# Overlay the conditional-model distribution in green.
hist(cond.odds, col=rgb(0,1,0,1/4), prob=T, breaks=20, add=T)
lines(density(cond.odds), col=rgb(0,1,0,1), lwd=3)
# Close the png device; capture dev.off()'s return value to keep it quiet.
garbage <- dev.off()
##### IDENTIFY COMORBIDITIES OBTAINED FROM EACH MODEL #####
# A disease counts as a comorbidity under a model when its p-value clears
# alpha AND its odds ratio exceeds 1. Combining both conditions in a single
# which() is equivalent to intersecting the two separate which() index sets.
alpha <- 0.05
cond.dis <- which(cond.pvalues < alpha & cond.odds > 1)
dis.facts <- which(pvalues.facts < alpha & odds.facts > 1)
dis.nofacts <- which(pvalues.nofacts < alpha & odds.nofacts > 1)
dis.included.names <- colnames(case.cont.data)
# Size the results table for the model with the most hits; shorter columns
# remain NA-padded.
most.comorbs <- max(length(cond.dis), length(dis.facts), length(dis.nofacts))
comorb.matrix <- matrix(NA, nrow = most.comorbs, ncol = 6)
colnames(comorb.matrix) <- c("CLR_Facts", "CLR_OR", "LR_Facts", "LR_Facts_OR",
                             "LR_Simple", "LR_Simple_OR")
# Fill one (name, odds-ratio) column pair of comorb.matrix for a model's
# significant diseases, replacing three near-identical copies of the same
# loop in the original.
#   indices  : disease indices significant under the model
#   odds     : that model's full odds-ratio vector
#   name.col : matrix column for the human-readable disease name
#   or.col   : matrix column for the rounded odds ratio
# Disease columns start at position 12 of case.cont.data, hence the +11
# offset into dis.included.names.
fill.comorb.cols <- function(comorb.matrix, indices, odds, name.col, or.col) {
  row <- 1
  for (each.dis in indices) {
    tmp.dis.name <- dis.included.names[11 + each.dis]
    tmp.comorb <- phewas.code[phewas.code[, "phewas_code"] == tmp.dis.name,
                              "phewas_string"][1]
    comorb.matrix[row, name.col] <- as.character(tmp.comorb)
    comorb.matrix[row, or.col] <- round(odds[each.dis], digits = 2)
    row <- row + 1
  }
  comorb.matrix
}
# comorbidities using conditional logistic regression
comorb.matrix <- fill.comorb.cols(comorb.matrix, cond.dis, cond.odds, 1, 2)
# comorbidities using logistic regression with facts in model
comorb.matrix <- fill.comorb.cols(comorb.matrix, dis.facts, odds.facts, 3, 4)
# comorbidities using simple logistic regression
comorb.matrix <- fill.comorb.cols(comorb.matrix, dis.nofacts, odds.nofacts,
                                  5, 6)
# write out table
# Persist the side-by-side comorbidity table for all three models.
comorb.df <- as.data.frame(comorb.matrix)
write.csv(comorb.df, file = "comorbidities.csv")
##### RUNTIME #####
# Elapsed time since the `ptm` timestamp taken earlier in the script
# (presumably at the top via proc.time() — confirm against the full file).
runtime <- proc.time() - ptm
print("Runtime:")
print(runtime)
# (end of script)