blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a7868cb705c22469503272418e7f91f6fde6d79
|
244393a89b3f8a836ee5afdd2ec9c91f5e52a6cd
|
/R_Inference_and_Modeling/margin_of_error.R
|
a63643611ac17b211df3b588b657847288415005
|
[] |
no_license
|
mjchenko/R_for_Data_Science
|
c33e470bb7b054ba5255df99aa06f60c2940976d
|
a2d228b738400a80fa2ab6fbf9df7af40a2ad83e
|
refs/heads/main
| 2023-02-01T13:39:57.324999
| 2020-12-18T20:27:20
| 2020-12-18T20:27:20
| 322,691,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
margin_of_error.R
|
# Margin of error: notes on why it equals 2 * SE(Xbar).
#
# Pr(|Xbar - p| <= 2 * SE(Xbar)) reduces, after standardizing, to
# Pr(Z <= 2) - Pr(Z <= -2):
pnorm(2) - pnorm(-2)
# So there is roughly a 95% chance that Xbar lands within
# 2 standard errors of p.
#
# Spread between two parties: p - (1 - p) = 2p - 1.
# Estimate it with 2 * Xbar - 1, whose standard error is
# 2 * SE(Xbar) = 2 * sqrt(p * (1 - p) / N).
|
62f15834398fd965e4a88ebf6ccc5af440ca9dab
|
1eee16736f5560821b78979095454dea33b40e98
|
/thirdParty/HiddenMarkov.mod/demo/beta.R
|
312f994bf4321c371ceb6b9fef9e8dea4619e780
|
[] |
no_license
|
karl616/gNOMePeaks
|
83b0801727522cbacefa70129c41f0b8be59b1ee
|
80f1f3107a0dbf95fa2e98bdd825ceabdaff3863
|
refs/heads/master
| 2021-01-21T13:52:44.797719
| 2019-03-08T14:27:36
| 2019-03-08T14:27:36
| 49,002,976
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,423
|
r
|
beta.R
|
# Demo: hidden Markov models with beta-distributed observations
# (dthmm / simulate / BaumWelch come from the HiddenMarkov package).
# Two scenarios: first "shape1" is Markov dependent and "shape2" is
# time dependent, then the roles are swapped.
if (interactive()) par.default <- par(ask=TRUE)
#----- Test Beta Distribution -----
# "shape1" Markov dependent
# "shape2" time dependent
# Transition probability matrix of the 2-state Markov chain.
Pi <- matrix(c(0.8, 0.2,
0.3, 0.7),
byrow=TRUE, nrow=2)
x <- seq(0.01, 0.99, 0.01)
# Overlay the two candidate state-conditional beta densities.
# NOTE(review): the red curve uses shape1=8 while the model below uses
# shape1=c(0.5, 6) -- presumably an intentional rough sketch; confirm.
plot(x, dbeta(x, shape1=0.5, shape2=2), type="l",
col="blue", ylab="Density")
points(x, dbeta(x, shape1=8, shape2=2), type="l", col="red")
n <- 1000
# pm: Markov-dependent parameters; pn: time-dependent parameters.
x <- dthmm(NULL, Pi, c(0,1), "beta", pm=list(shape1=c(0.5, 6)),
pn=list(shape2=rep(2, n)))
# Simulate n observations, then refit by EM starting from the true values.
x <- simulate(x, nsim=n)
# use above parameter values as initial values
y <- BaumWelch(x)
# check parameter estimates
print(summary(y))
# Sanity checks: delta sums to 1; each row of Pi sums to 1.
print(sum(y$delta))
print(y$Pi %*% rep(1, ncol(y$Pi)))
#----- Test Beta Distribution -----
# "shape2" Markov dependent
# "shape1" time dependent
Pi <- matrix(c(0.8, 0.2,
0.3, 0.7),
byrow=TRUE, nrow=2)
x <- seq(0.01, 0.99, 0.01)
plot(x, dbeta(x, shape1=2, shape2=6), type="l",
col="blue", ylab="Density")
points(x, dbeta(x, shape1=2, shape2=0.5), type="l", col="red")
n <- 1000
# Same exercise with shape2 Markov dependent and shape1 time dependent.
x <- dthmm(NULL, Pi, c(0,1), "beta", pm=list(shape2=c(0.5, 6)),
pn=list(shape1=rep(2, n)))
x <- simulate(x, nsim=n)
# use above parameter values as initial values
y <- BaumWelch(x)
# check parameter estimates
print(summary(y))
print(sum(y$delta))
print(y$Pi %*% rep(1, ncol(y$Pi)))
# Restore the graphics prompt setting saved at the top.
if (interactive()) par(par.default)
|
e10a2c353119a19f89d09b21ac4c812340897ea1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stringi/examples/stri_stats_general.Rd.R
|
994fc0bfb861f775688325d43847b8c2b8e02fd3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
stri_stats_general.Rd.R
|
# Example script extracted from the stringi manual page for
# stri_stats_general().
library(stringi)
### Name: stri_stats_general
### Title: General Statistics for a Character Vector
### Aliases: stri_stats_general
### ** Examples
# Sample character vector; the last element is an empty string.
s <- c("Lorem ipsum dolor sit amet, consectetur adipisicing elit.",
"nibh augue, suscipit a, scelerisque sed, lacinia in, mi.",
"Cras vel lorem. Etiam pellentesque aliquet tellus.",
"")
# Compute general statistics (see ?stri_stats_general for the fields).
stri_stats_general(s)
|
55171b2543bbd288e0d1957d5b0c31c114a07401
|
9e8936a8cc7beae524251c8660fa755609de9ce5
|
/man/details_poisson_reg_h2o.Rd
|
bc8147560c77fd3db056360831ca0e6cd02ce52e
|
[
"MIT"
] |
permissive
|
tidymodels/parsnip
|
bfca10e2b58485e5b21db64517dadd4d3c924648
|
907d2164a093f10cbbc1921e4b73264ca4053f6b
|
refs/heads/main
| 2023-09-05T18:33:59.301116
| 2023-08-17T23:45:42
| 2023-08-17T23:45:42
| 113,789,613
| 451
| 93
|
NOASSERTION
| 2023-08-17T23:43:21
| 2017-12-10T22:48:42
|
R
|
UTF-8
|
R
| false
| true
| 3,998
|
rd
|
details_poisson_reg_h2o.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poisson_reg_h2o.R
\name{details_poisson_reg_h2o}
\alias{details_poisson_reg_h2o}
\title{Poisson regression via h2o}
\description{
\code{h2o::h2o.glm()} uses penalized maximum likelihood to fit a model for
count data.
}
\details{
For this engine, there is a single mode: regression
\subsection{Tuning Parameters}{
This model has 2 tuning parameters:
\itemize{
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see
below)
\item \code{penalty}: Amount of Regularization (type: double, default: see below)
}
By default, when not given a fixed \code{penalty},
\code{\link[h2o:h2o.glm]{h2o::h2o.glm()}} uses a heuristic approach to select
the optimal value of \code{penalty} based on training data. Setting the
engine parameter \code{lambda_search} to \code{TRUE} enables an efficient version
of the grid search, see more details at
\url{https://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/algo-params/lambda_search.html}.
The choice of \code{mixture} depends on the engine parameter \code{solver}, which
is automatically chosen given training data and the specification of
other model parameters. When \code{solver} is set to \code{'L-BFGS'}, \code{mixture}
defaults to 0 (ridge regression) and 0.5 otherwise.
}
\subsection{Translation from parsnip to the original package}{
\code{\link[agua:h2o_train]{agua::h2o_train_glm()}} for \code{poisson_reg()} is
a wrapper around \code{\link[h2o:h2o.glm]{h2o::h2o.glm()}} with
\code{family = 'poisson'}.
The \strong{agua} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(poissonreg)
poisson_reg(penalty = double(1), mixture = double(1)) \%>\%
set_engine("h2o") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Poisson Regression Model Specification (regression)
##
## Main Arguments:
## penalty = double(1)
## mixture = double(1)
##
## Computational engine: h2o
##
## Model fit template:
## agua::h2o_train_glm(x = missing_arg(), y = missing_arg(), weights = missing_arg(),
## validation_frame = missing_arg(), lambda = double(1), alpha = double(1),
## family = "poisson")
}\if{html}{\out{</div>}}
}
\subsection{Preprocessing requirements}{
Factor/categorical predictors need to be converted to numeric values
(e.g., dummy or indicator variables) for this engine. When using the
formula method via \code{\link[=fit.model_spec]{fit()}}, parsnip will
convert factor columns to indicators.
Predictors should have the same scale. One way to achieve this is to
center and scale each so that each predictor has mean zero and a
variance of one.
By default, \code{h2o::h2o.glm()} uses the argument \code{standardize = TRUE} to
center and scale all numerical columns.
}
\subsection{Initializing h2o}{
To use the h2o engine with tidymodels, please run \code{h2o::h2o.init()}
first. By default, this connects R to the local h2o server. This needs
to be done in every new R session. You can also connect to a remote h2o
server with an IP address, for more details see
\code{\link[h2o:h2o.init]{h2o::h2o.init()}}.
You can control the number of threads in the thread pool used by h2o
with the \code{nthreads} argument. By default, it uses all CPUs on the host.
This is different from the usual parallel processing mechanism in
tidymodels for tuning, while tidymodels parallelizes over resamples, h2o
parallelizes over hyperparameter combinations for a given resample.
h2o will automatically shut down the local h2o instance started by R
when R is terminated. To manually stop the h2o server, run
\code{h2o::h2o.shutdown()}.
}
\subsection{Saving fitted model objects}{
Models fitted with this engine may require native serialization methods
to be properly saved and/or passed between R sessions. To learn more
about preparing fitted models for serialization, see the bundle package.
}
}
\keyword{internal}
|
25b49830ee0bd3489b882e1266e3362b8c2d20d8
|
acc80e88c26433639557f85c8b5f08cd3cdbe40a
|
/R/RunYAPSAAttributionOnly.R
|
3975f78e44385f2bb9de08f4025d8358371554c2
|
[] |
no_license
|
WuyangFF95/SynSigRun
|
32cb430ee5ea30584908f2254be459afb8c9ca81
|
62655260a460ea120de1baa623d0987d45a5a6bc
|
refs/heads/master
| 2022-11-11T10:06:45.160948
| 2021-12-30T06:40:08
| 2021-12-30T06:40:08
| 222,591,376
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,359
|
r
|
RunYAPSAAttributionOnly.R
|
#' Install the YAPSA package from Bioconductor.
#'
#' Bootstraps BiocManager first when it is absent, then installs YAPSA.
#'
#' @keywords internal
InstallYAPSA <- function() {
  message("Installing YAPSA from Bioconductor...\n")
  if (!requireNamespace("BiocManager", quietly = TRUE)) {
    utils::install.packages("BiocManager")
  }
  BiocManager::install("YAPSA")
}
#' Run YAPSA attribution on a spectra catalog file
#' and known signatures.
#'
#' @param input.catalog File containing input spectra catalog.
#'   Columns are samples (tumors), rows are mutation types.
#'
#' @param gt.sigs.file File containing input mutational signatures.
#'   Columns are signatures, rows are mutation types.
#'
#' @param out.dir Directory that will be created for the output;
#'   abort if it already exists. Log files will be in
#'   \code{paste0(out.dir, "/tmp")}.
#'
#' @param seedNumber Specify the pseudo-random seed number
#'   used to run YAPSA. Setting seed can make the
#'   attribution of YAPSA repeatable.
#'   Default: 1.
#'
#' @param signature.cutoff A numeric vector of values less than 1.
#'   Signatures from within W with an overall exposure
#'   less than the respective value in \code{in_cutoff_vector}
#'   will be discarded.
#'   Default: vector length of number of sigs with all zeros
#'
#' @param test.only If TRUE, only analyze the first 10 columns
#'   read in from \code{input.catalog}.
#'   Default: FALSE
#'
#' @param overwrite If TRUE, overwrite existing output.
#'   Default: FALSE
#'
#' @return The inferred exposure of \code{YAPSA}, invisibly.
#'
#' @details Creates several
#'   files in \code{paste0(out.dir, "/sa.output.rdata")}. These are
#'   TODO(Steve): list the files
#'
#' @importFrom utils capture.output
#'
#' @export
#'
RunYAPSAAttributeOnly <-
  function(input.catalog,
           gt.sigs.file,
           out.dir,
           seedNumber = 1,
           signature.cutoff = NULL,
           test.only = FALSE,
           overwrite = FALSE) {
    # Install YAPSA from Bioconductor if its namespace is not available.
    if (!requireNamespace("YAPSA", quietly = TRUE)) {
      InstallYAPSA()
    }

    # Set the seed, and record the seed and RNG kind actually used so the
    # pseudo-random series can be restored later.
    set.seed(seedNumber)
    seedInUse <- .Random.seed
    RNGInUse <- RNGkind()

    # Read the spectra catalog (ICAMS format: rows are mutation types,
    # columns are tumors).
    spectra <- ICAMS::ReadCatalog(input.catalog,
                                  strict = FALSE)
    if (test.only) spectra <- spectra[, 1:10]

    # Read the ground-truth signatures (ICAMS format: columns are signatures).
    gtSignatures <- ICAMS::ReadCatalog(gt.sigs.file)

    # Create the output directory; refuse to clobber an existing one
    # unless overwrite = TRUE.
    if (dir.exists(out.dir)) {
      if (!overwrite) stop(out.dir, " already exists")  # fixed typo: "exits"
    } else {
      dir.create(out.dir, recursive = TRUE)
    }

    # Default presence cutoff: zero for every signature.
    if (is.null(signature.cutoff)) {
      signature.cutoff <- rep(0, times = ncol(gtSignatures))
    }

    # Strip the ICAMS catalog attributes so YAPSA sees plain matrices.
    in_signatures_df <- gtSignatures
    class(in_signatures_df) <- "matrix"
    attr(in_signatures_df, "catalog.type") <- NULL
    attr(in_signatures_df, "region") <- NULL

    in_mutation_catalogue_df <- spectra
    class(in_mutation_catalogue_df) <- "matrix"
    attr(in_mutation_catalogue_df, "catalog.type") <- NULL
    attr(in_mutation_catalogue_df, "region") <- NULL

    # Attribute exposures by Linear Combination Decomposition.
    # The YAPSA authors recommend LCD_complex_cutoff() (a wrapper around
    # LCD()) because it supports a separate presence cutoff per signature.
    # in_rescale = TRUE rescales exposures so each tumor's exposure sum
    # matches the mutation count of its original spectrum.
    LCD_complex_object <- YAPSA::LCD_complex_cutoff(
      in_mutation_catalogue_df,
      in_signatures_df,
      in_cutoff_vector = signature.cutoff,
      in_rescale = TRUE)

    # LCD_complex_object is a list; the fields used here are:
    #   $exposures      -- exposure counts (sums to total mutation count)
    #   $norm_exposures -- per-tumor exposure probabilities
    # See the YAPSA user manual for the remaining fields.
    exposureCounts <- LCD_complex_object$exposures

    # Copy the ground-truth signatures next to the results for provenance.
    file.copy(from = gt.sigs.file,
              to = paste0(out.dir, "/ground.truth.signatures.csv"),
              overwrite = overwrite)

    # Write inferred exposures into a SynSig-formatted exposure file.
    SynSigGen::WriteExposure(exposureCounts,
                             paste0(out.dir, "/inferred.exposures.csv"))

    # Save session, seed, and RNG information for reproducibility.
    capture.output(sessionInfo(), file = paste0(out.dir, "/sessionInfo.txt"))
    write(x = seedInUse, file = paste0(out.dir, "/seedInUse.txt"))
    write(x = RNGInUse, file = paste0(out.dir, "/RNGInUse.txt"))

    # Return the exposures inferred, invisibly.
    invisible(exposureCounts)
  }
|
07276ca1613cca53f905488bc28a919bfbab4aaa
|
5c2784e614d0acec87c3b10a609291134d2fd927
|
/supplement_7.RCircos.Plot.R
|
b226e6d43871a2e7eee3e563ebe98366930f0435
|
[] |
no_license
|
qindan2008/BiocIntegrativeCancerVis
|
d57d050c438e6d34d7ebf6810bce1bc523e90087
|
02bf4b0d0ecbde359fb2e0e643358b98fd3de81f
|
refs/heads/master
| 2020-06-02T23:55:38.206847
| 2016-08-28T17:33:39
| 2016-08-28T17:33:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,577
|
r
|
supplement_7.RCircos.Plot.R
|
#
# RCircos plot to show:
#
# 1. Difference of gene expression (mean) between tumor and normal
# Difference of miRNA expression (mean) between tumor and normal
# Difference of methylation (mean) between tumor and normal
# Link between miRNAs and their most negative linked genes
#
# 2. for one sample pair (TCGA-BC-A10Q-01A and TCGA-BC-A10Q-11A):
#
# Gene expression, miRNA expression, methylation, CNV
# ________________________________________________________________________
# <RCircos><RCircos><RCircos><RCircos><RCircos><RCircos><RCircos><RCircos>
# Load plot data
# ==========================================================
#
# Each .RData file defines the object of the same base name used below.
load("RCicos.Plot/TCGA.LIHC.RNAseq.RCircos.Plot.Data.RData")
load("RCicos.Plot/TCGA.LIHC.miRNAseq.RCircos.Plot.Data.RData")
load("RCicos.Plot/TCGA.LIHC.Methyl.RCircos.Plot.Data.RData")
load("RCicos.Plot/TCGA.LIHC.CNV.RCircos.Plot.Data.RData")
load("RCicos.Plot/TCGA.LIHC.RNA_miRNA.link.RData")
load("RCicos.Plot/UCSC.HG38.Human.CytoBandIdeogram.RData")
library(RCircos)
# Plot 1
# ========================================================
# data to be plotted. Means of RNAseq, miRNAseq, methylation
# of tumor and normal tissue, gene lables, and link lines
#
# NOTE(review): columns 5:30 appear to be tumor samples and 31:56 normal
# samples (26 each) -- confirm against the loaded data objects.
RNAseqData <- TCGA.LIHC.RNAseq.RCircos.Plot.Data
RNAseq <- data.frame(RNAseqData[, 1:4],
tumorExpr=rowMeans(as.matrix(RNAseqData[, 5:30])),
normalExpr=rowMeans(as.matrix(RNAseqData[, 31:56]))
)
miRNAseqData <- TCGA.LIHC.miRNAseq.RCircos.Plot.Data
miRNAseq <- data.frame(miRNAseqData[, 1:4],
tumorMIR=rowMeans(as.matrix(miRNAseqData[, 5:30])),
normalMIR=rowMeans(as.matrix(miRNAseqData[, 31:56]))
)
methylData <- TCGA.LIHC.Methyl.RCircos.Plot.Data
methylation <- data.frame(methylData[,1:4],
tumorMethy=rowMeans(as.matrix(methylData[, 5:30])),
normalMethy=rowMeans(as.matrix(methylData[, 31:56]))
)
# Build label/link tables: gene anchors (cols 2:4) and miRNA anchors
# (cols 6:8), de-duplicated by miRNA name; "hsa-" prefix stripped for
# shorter labels.
miRNA2RNA <- TCGA.LIHC.RNA_miRNA.link
geneAxis <- miRNA2RNA[,c(2:4,1)]
mirAxis <- miRNA2RNA[, c(6:8,5)]
mirAxis <- mirAxis[-which(duplicated(mirAxis[,4])),]
colnames(mirAxis) <- colnames(geneAxis)
mirAxis$Gene <- gsub("hsa-", "", mirAxis$Gene)
geneLabelData <- rbind(geneAxis, mirAxis)
geneLabelData["PlotColor"] <- c(rep("blue", nrow(geneAxis)),
rep("red", nrow(mirAxis)))
geneLink <- miRNA2RNA[, -c(1,5)]
# NOTE(review): the color runs (3+4+4+3 = 14) must match nrow(geneLink);
# verify if the link table changes.
geneLink["PlotColor"] <- c(rep("blue", 3), rep("red", 4),
rep("green", 4), rep("magenta", 3))
# RCircos plot
#
# Restrict the ideogram to the standard chromosomes chr1-chr22, chrX, chrY.
hg38 <- UCSC.HG38.Human.CytoBandIdeogram
chromosomes <- paste0("chr", c(1:22, "X", "Y"))
hg38 <- hg38[which(hg38$Chromosome %in% chromosomes), ]
RCircos.Set.Core.Components(hg38, chr.exclude=NULL,
tracks.inside=14, tracks.outside=0)
params <- RCircos.Get.Plot.Parameters()
params$heatmap.width <- 400
params$hist.width <- 400
RCircos.Reset.Plot.Parameters(params)
# Output devices kept commented out; the plot goes to the active device.
# pdf("RCicos.Plot/TCGA.LIHC.Data.RCircos.Plot.pdf", heigh=8, width=8)
# tiff("RCicos.Plot/TCGA.LIHC.Data.RCircos.Plot.tiff",
# heigh=8, width=8,unit="in", res=300, type="windows")
RCircos.Set.Plot.Area()
RCircos.Chromosome.Ideogram.Plot()
# Tracks from outside in: RNAseq, miRNAseq, methylation, labels, links.
# Some calls pass "in" positionally where others name side= -- same effect.
RCircos.Heatmap.Plot(RNAseq, data.col=5, track.num=1, side="in")
RCircos.Heatmap.Plot(RNAseq, data.col=6, track.num=2, "in")
RCircos.Heatmap.Plot(miRNAseq, data.col=5, track.num=3.5, "in")
RCircos.Heatmap.Plot(miRNAseq, data.col=6, track.num=4.5, "in")
RCircos.Histogram.Plot(methylation, data.col=5, track.num=6, "in")
RCircos.Histogram.Plot(methylation, data.col=6, track.num=7, "in")
RCircos.Gene.Connector.Plot(geneLabelData, track.num=8, "in")
RCircos.Gene.Name.Plot(geneLabelData, name.col=4, track.num=9, "in")
RCircos.Link.Plot(geneLink, track.num=11, by.chromosome=FALSE)
textLabel <- c("From outside to center:\n",
"Chromosome ideogram\n",
"RNAseq: tumor, normal\n",
"miRNAseq: tumor, normal\n",
"Methylation: tumor, normal\n",
"Gene names\n",
"miRNA RNA link")
legend("topright", legend=textLabel, cex=0.4)
# dev.off()
# Plot 2
# =============================================================
# Plot data form one sample pair
#
sampleID <- c("TCGA.BC.A10Q.01A", "TCGA.BC.A10Q.11A")
RNAseqData <- TCGA.LIHC.RNAseq.RCircos.Plot.Data
colNum <- which(colnames(RNAseqData) %in% sampleID)
RNAseq <- RNAseqData[, c(1:4, colNum)]
miRNAseqData <- TCGA.LIHC.miRNAseq.RCircos.Plot.Data
colNum <- which(colnames(miRNAseqData) %in% sampleID)
miRNAseq <- miRNAseqData[, c(1:4, colNum)]
methylData <- TCGA.LIHC.Methyl.RCircos.Plot.Data
colNum <- which(colnames(methylData) %in% sampleID)
methylation <- methylData[,c(1:4, colNum)]
CNVData <- TCGA.LIHC.CNV.RCircos.Plot.Data
colNum <- which(colnames(CNVData) %in% sampleID)
cnvData <- CNVData[,c(1:4, colNum)]
# Label genes strongly down in tumor (fold change < -12) and miRNAs with
# |fold change| >= 4 between the paired samples.
geneFC <- RNAseq[,5]-RNAseq[,6]
geneRow <- which(geneFC < -12)
mirFC <- miRNAseq[,5] -miRNAseq[,6]
mirRow <- which(abs(mirFC)>=4)
geneLabelData <- rbind(RNAseq[geneRow,1:4], miRNAseq[mirRow,1:4])
geneLabelData[,4] <- gsub("hsa-", "", geneLabelData[,4])
# RCircos plot
#
RCircos.Set.Core.Components(hg38, chr.exclude=NULL,
tracks.inside=15, tracks.outside=0)
params <- RCircos.Get.Plot.Parameters()
params$heatmap.width <- 400
params$hist.width <- 400
params$point.size <- 1.5
params$track.background <- NA
RCircos.Reset.Plot.Parameters(params)
# NOTE(review): "heigh" works only via R's partial argument matching of
# "height", and type="windows" makes this tiff() call Windows-only.
# pdf("RCicos.Plot/TCGA.LIHC.Data.RCircos.Plot.2.pdf", heigh=8, width=8)
tiff("RCicos.Plot/TCGA.LIHC.Data.RCircos.Plot.2.tiff",
heigh=8, width=8,unit="in", res=300, type="windows")
RCircos.Set.Plot.Area()
RCircos.Chromosome.Ideogram.Plot()
RCircos.Gene.Connector.Plot(geneLabelData, track.num=1, side="in")
RCircos.Gene.Name.Plot(geneLabelData, name.col=4, track.num=2, side="in")
RCircos.Heatmap.Plot(RNAseq, data.col=5, track.num=6, side="in")
RCircos.Heatmap.Plot(RNAseq, data.col=6, track.num=7, side="in")
RCircos.Heatmap.Plot(miRNAseq, data.col=5, track.num=8.5, side="in")
RCircos.Heatmap.Plot(miRNAseq, data.col=6, track.num=9.5, side="in")
RCircos.Histogram.Plot(methylation, data.col=5, track.num=11, side="in")
RCircos.Histogram.Plot(methylation, data.col=6, track.num=12, side="in")
RCircos.Scatter.Plot(cnvData, data.col=5, track.num=13.5, side="in", by.fold=1)
RCircos.Scatter.Plot(cnvData, data.col=6, track.num=14.5, side="in", by.fold=1)
dev.off()
# End of RCircos.Plot.RCicos
# =======================================================================
|
6cebfe84138258d1a8140b331d6fdd642c777e67
|
1d34ee4d22faf1b8e099995ca5d4d2e8fdbd8a94
|
/Fourth_week/rankhospital.r
|
4c7b011e059f888557e1eab81f05fd40d37d90c7
|
[] |
no_license
|
schaeferrodrigo/R_course_Coursera
|
e8092b3b16eca7156f7d1c4ea70da24df3d00e77
|
7a48a7fd83576871a511273207329a3fceeaf55b
|
refs/heads/master
| 2020-12-25T11:15:18.295221
| 2016-10-31T09:51:11
| 2016-10-31T09:51:11
| 61,059,081
| 0
| 0
| null | 2016-10-31T09:51:12
| 2016-06-13T18:14:13
|
R
|
UTF-8
|
R
| false
| false
| 961
|
r
|
rankhospital.r
|
# Return the hospital in `state` holding rank `num` for 30-day mortality on
# the given `outcome` ("heart attack", "heart failure", or "pneumonia").
# num may also be the string 'best' or 'worst', which delegates to the
# corresponding helper scripts. Returns NA (character) when `num` exceeds
# the number of ranked hospitals.
rankhospital <- function(state, outcome, num) {
  if (is.character(num)) {
    if (num == 'best') {
      source('best.R')
      best(state, outcome)  # bug fix: was best(stante, outcome)
    } else {
      # Any character value other than 'best' falls through to worst(),
      # preserving the original dispatch behavior.
      source('worst.R')
      worst(state, outcome)
    }
  } else {
    data <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", stringsAsFactors = FALSE)
    # Columns: 2 = hospital name, 7 = state, and the outcome-specific
    # 30-day mortality column (11/17/23).
    if (outcome == "heart attack") {
      my_data <- data[, c(2, 7, 11)]
    } else if (outcome == "heart failure") {
      my_data <- data[, c(2, 7, 17)]
    } else if (outcome == 'pneumonia') {
      my_data <- data[, c(2, 7, 23)]
    } else {
      stop('invalid outcome')
    }
    names(my_data) <- c('hospital', 'state', 'outcome')
    # Keep only rows for the requested state with a non-missing outcome.
    data_by_state <- my_data[my_data$state == state & !is.na(my_data$outcome), c('hospital', 'outcome')]
    # Bug fix: an empty result here means the state matched nothing,
    # so report "invalid state" (the old message said "Invalid outcome").
    if (nrow(data_by_state) == 0) stop("invalid state")
    # Rank by outcome, breaking ties alphabetically by hospital name.
    # NOTE(review): if the outcome column is read as character, order()
    # sorts lexically -- confirm the column is numeric in the CSV.
    best_hospitals <- data_by_state[order(data_by_state[, 2], data_by_state[, 1]), ]
    if (nrow(best_hospitals) >= num) {
      best_hospitals[num, 'hospital']
    } else {
      # Bug fix: return a real missing value rather than the string 'NA'.
      NA_character_
    }
  }
}
|
62307844fb8e7e9e774fbfc12d42c00809d80b15
|
ba1edf30bca6e023562e4aed21c0ca009d22f431
|
/visualization/R/ciEnvelope.R
|
b769541c003ad649e5938ffea7961202cbe63438
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rgknox/pecan
|
79f080e77637dfb974ebb29313b5c63d9a53228e
|
5b608849dccb4f9c3a3fb8804e8f95d7bf1e4d4e
|
refs/heads/master
| 2020-12-27T20:38:35.429777
| 2014-05-06T13:42:52
| 2014-05-06T13:42:52
| 19,548,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 227
|
r
|
ciEnvelope.R
|
# Draw a shaded confidence envelope around an x-y curve (e.g. a time
# series): the region between ylo and yhi over x, with no border.
# Extra arguments (...) are forwarded to polygon(), e.g. col=.
ciEnvelope <- function(x, ylo, yhi, ...) {
  xs <- c(x, rev(x), x[1])
  ys <- c(ylo, rev(yhi), ylo[1])
  polygon(cbind(xs, ys), border = NA, ...)
}
|
a8b16bdadc61ac6547183536e0fcc1e2e882cc84
|
d108d0dc2e007110ef1b1f11ebaee05cf0a41e4d
|
/man/tslars.Rd
|
df42d23d9e92a06cfd86b911409ed74ed4c2c958
|
[] |
no_license
|
cran/tslars
|
32dbe6a2c4831ea1d0e33430d0109976b232d5a1
|
ce1f40b09b6d6886dec2aab71dc6e4803c2207c5
|
refs/heads/master
| 2016-09-16T11:33:07.687781
| 2009-01-03T00:00:00
| 2009-01-03T00:00:00
| 17,700,637
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,675
|
rd
|
tslars.Rd
|
\name{tslars}
\alias{tslars}
\alias{tslars.p}
\alias{coef.tslars}
\alias{predict.tslars}
\alias{print.summary.tslars}
\alias{print.tslars}
\alias{summary.tslars}
\title{Function to obtain the selected model according to the time series LARS algorithm}
\description{
The \code{tslars} function applies a dynamic variable selection procedure. It is an extension of the LARS
algorithm of Efron et al (2004) which is designed for time series analysis. It provides a ranking of the
predictors and a selection of which predictors to include in the final model as well as a selection of the
appropriate lag length.
}
\usage{
tslars(formula, h = 1, p.max = 5, max.x = 10, nr.rank = NA)
}
\arguments{
\item{formula}{a formula describing the model to be fitted}
\item{h}{the forecast horizon, defaults to 1.}
\item{p.max}{the maximal number of lags to allow, defaults to 5.}
\item{max.x}{the maximal number of predictors to include in the final model, defaults to 10.}
\item{nr.rank}{the number of predictors to be ranked. This is especially interesting if the
total number of predictors is really large.}
}
\value{
A \code{tslars}-object is returned, for which \code{print()}, \code{summary()}, \code{predict()} and \code{coef()} are available.
A \code{tslars} object is a list containing the following components:
\item{active}{the active set, a vector giving the TS-LARS ordering of the predictors, '0' indicates lagged values of the response.}
\item{fixedp}{indicates whether the lag length was prespecified (\code{TRUE}) or not (\code{FALSE}).}
\item{laglength.opt}{if \code{fixedp} is \code{TRUE}, the prespecified lag length. If \code{fixedp} is \code{FALSE}, the optimal lag length selected according to BIC.}
\item{nrvar.opt}{the optimal number of predictors to include in the final model, according to the BIC.}
\item{bic}{the BIC values for the nested models.}
\item{h}{the forecast horizon used.}
\item{call}{the matched call.}
\item{response}{the response used.}
\item{predictors}{the predictors used.}
}
\references{Gelper, S. and Croux, C. (2009) Time series least angle regression for selecting predictive economic sentiment series.
www.econ.kuleuven.be/sarah.gelper/public}
\author{Sarah Gelper}
\examples{
n <- 100
m <- 10 #m>5
x <- matrix(rnorm(n*m), ncol=m)
coefs <- c(rep(1,5),rep(0,m-5))
y <- c(rnorm(1),crossprod(t(x[1:(n-1),]),coefs) + rnorm(n-1))
mytslars <- tslars(y~x)
summary(mytslars)
# To obtain an h-step-ahead prediction of the response using the selected model fitted by OLS:
myprediction <- predict(mytslars)
}
\keyword{regression}
\keyword{ts}
|
0ae9b332d1b02e5f0b6b4fe82531137711906207
|
e1e9a355b4f2dbc3616b9ac34266da4a6ab7f6cf
|
/phi_analysis.R
|
30bfd0d3b0e9a6fb30e9b069295773aac0e417a3
|
[] |
no_license
|
faustovrz/kingraph
|
ed30ff94d0d01af44bc1501020ffb78564dee43d
|
17428dde4ccbfb0030806004fe3cf6eeb8ec9cbd
|
refs/heads/master
| 2021-07-11T21:05:41.444079
| 2017-10-09T19:58:41
| 2017-10-09T19:58:41
| 105,703,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
phi_analysis.R
|
#!/usr/local/bin/Rscript --verbose
# CLI driver: run kinship.analysis() (from kingraph.R) on each input file
# at each phi (kinship coefficient) threshold, appending one summary row
# per (file, phi) pair to results/summary.tab.
library(optparse)
source('kingraph.R')
# Timestamp to stderr so logs show when the run started.
cat(as.character(Sys.time()),"\n\n", file = stderr())
phi.help <- "comma separated string defining phi values: [default= %default]"
option_list <- list(
make_option("--phi", type = "character", default = "0.177,0.0442",
help= phi.help,
metavar = "character"),
make_option(c("-c", "--col"), type = "character",
help="collection table with 2 columns: name collection",
metavar="character")
);
opt_parser <- OptionParser(usage = "%prog [options] file1 file2 ...",
option_list=option_list);
# parse_args2 separates options ($options) from positional args ($args).
args <- parse_args2(opt_parser)
opts <- args$options
input <- args$args
if (length(input) == 0) {
stop("No input file given.\n",print_help(opt_parser))
}
# Parse the comma-separated phi string into a numeric vector.
kin.coeff <- as.numeric(unlist(strsplit(opts$phi, ",")))
# Optional collection table; defaults to an empty data frame when absent.
if (!is.null(opts$col)) {
col.df <- read.table(file=opts$col, header=TRUE)
if (any(!(c('name','collection') %in% colnames(col.df)))){
stop(paste('Either `name` or `collection` headers missing from',
opts$col))
}
} else{
col.df <-data.frame(name = character(0), collection = character(0))
}
# NOTE(review): dir.create warns (does not stop) if "results" exists, and
# the summary file is overwritten below -- presumably intentional.
dir.create("results")
columns <- c("file.base", "phi.thresh", "related.fr", "connectivity",
"with.dup", "dup.independent", "dup.redundant", "dup.fr",
"n", "n.independent", "n.redundant")
# Write the header row, then append one row per analysis below.
write.table(t(columns), file= "results/summary.tab",row.names = FALSE,
quote=FALSE, sep="\t", col.names =FALSE)
for (file in input){
for(coeff in kin.coeff){
kin.summary <- kinship.analysis(file,
col= col.df,
ivs.thresh = coeff,
results.dir= "results")
write.table(kin.summary, file= "results/summary.tab",
quote=FALSE, sep="\t", append = TRUE,
row.names=FALSE, col.names = FALSE)
}
}
# Remove intermediate per-file directories; POSIX shell only.
system("rm -r results/*_files")
|
f5e0e9b2346ac92e98611d56aa5895f49b7eeef1
|
3477491fcd1527416c6f5cc25b8b629b89625d12
|
/Assignment/assignment2/assignment_2.R
|
7a465eacecd4fa9b54379cf5af7db58c936cd51f
|
[] |
no_license
|
jason2133/data_mining
|
68645af29de2b0b0310f9a99db43fc2bb503badb
|
4663d8c951505e0055ee941bc5290d1d4c6690b4
|
refs/heads/master
| 2023-03-16T06:00:18.270339
| 2022-12-06T05:02:48
| 2022-12-06T05:02:48
| 464,190,348
| 1
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,483
|
r
|
assignment_2.R
|
# Assignment 2: linear regression on hourly power-generation data
# (column 발전량 = power output). Fits a stepwise-selected linear model,
# evaluates on a held-out period, then estimates error by 10-fold CV.
# NOTE(review): rm(list = ls()) wipes the workspace; kept because the
# original script assumes a clean session, but avoid in reusable code.
rm(list = ls())
data <- read.csv('C:/Users/jason/바탕 화면/coding1/data_mining/Assignment/assignment2/kyungsang_univ_2.csv', header = TRUE)
head(data)
tail(data)

# Train/test split by row index; columns 3:23 are predictors + response.
train_data <- data[17521:26280, 3:23]
head(train_data)
tail(train_data)
test_data <- data[26281:35064, 3:23]
head(test_data)
tail(test_data)
summary(test_data)
data_train_plus_test <- data[17521:35064, 3:23]
summary(data_train_plus_test)
data_train_plus_test_only_y <- data[17521:35064, 23]
summary(data_train_plus_test_only_y)

# Model fitting: full linear model, then stepwise selection (both directions).
fit.all <- lm(발전량 ~ ., data = train_data)
fit.step <- step(fit.all, direction = 'both')
fit.step$anova
summary(fit.step)

# Predicting on the held-out test period.
yhat <- predict(fit.step, newdata = test_data, type = 'response')
head(yhat)
plot(test_data$발전량, yhat, xlim = c(0, 750), ylim = c(0, 750))
abline(a = 0, b = 1)

# Evaluating on the held-out test period.
mean((test_data$발전량 - yhat)^2)        # MSE
sqrt(mean((test_data$발전량 - yhat)^2))  # RMSE
mean(abs(test_data$발전량 - yhat))       # MAE
cor(test_data$발전량, yhat)              # PCC

### A cross-validation scheme suited to time series would be preferable.
# caret's createTimeSlices looked univariate-only, so it was judged
# unsuitable for this multivariate setting; the exploration is kept
# commented out, as in the original.
# library(caret)
# library(ggplot2)
# library(pls)
# set.seed(2017)
# summary(data_train_plus_test)
# nrow(data_train_plus_test)
# Step 1: Creating the timeslices for the index of the data
#timeSlices <- createTimeSlices(1:nrow(data_train_plus_test),
#                   initialWindow = 36, horizon = 12, fixedWindow = TRUE)

######################
# Cross-Validation
### Computing the CV error (10 random folds).
V <- 10
mse.train <- 0
mse.test <- 0
mae.train <- 0
mae.test <- 0
set.seed(2017)
id <- sample(1:V, nrow(data_train_plus_test), replace = TRUE)
for (i in 1:V) {
  print(i)
  # Data partitioning for fold i.
  test.index <- which(id == i)
  data.train <- data_train_plus_test[-test.index, ]  # training fold
  data.test <- data_train_plus_test[test.index, ]    # held-out fold
  # Fitting with stepwise variable selection.
  fit.reg <- lm(발전량 ~ ., data = data.train)
  fit.step.reg <- step(fit.reg, direction = 'both', trace = FALSE)
  # Predicting and evaluating on this fold.
  yhat.reg <- predict(fit.step.reg, newdata = data.test, type = 'response')
  # BUG FIX: the original accumulated errors against the outer
  # test_data/yhat objects, so every fold added the same constant.
  # Errors must be computed on this fold's data.test and yhat.reg.
  mse.test <- mse.test + mean((data.test$발전량 - yhat.reg)^2)  # MSE
  mae.test <- mae.test + mean(abs(data.test$발전량 - yhat.reg)) # MAE
}
cv.mse.test <- mse.test / V
cv.mae.test <- mae.test / V
cv.mse.test
sqrt(cv.mse.test)
cv.mae.test
############################
|
9004f8c12d8a079273d23fc030a6fca404ff33d3
|
9cd15df5fa2bb47e21bef95ed4635ee129a0532a
|
/tests/testthat/testdata/test_file.R
|
ee5e78cb501784250fd0c04025a3715f24fe0d9a
|
[
"MIT"
] |
permissive
|
ryanbthomas/hazmat
|
f6df3ac55bb2b4f3eb1da970ba66e43abc06a7da
|
837d6ab72191ff476db5ec18e7e482657eeb7b1c
|
refs/heads/master
| 2022-03-11T21:05:36.366242
| 2019-11-21T03:53:38
| 2019-11-21T03:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 118
|
r
|
test_file.R
|
# This is a test R file.
# NOTE(review): located under tests/testthat/testdata/, so this file is
# presumably a fixture deliberately containing "hazardous" calls (rm(),
# system()) for the package's detection tests to find -- do not "fix" the
# code below; the odd constructs appear to be the point. Confirm against
# the tests that read this file.
f1 <- function(x) {
# doubles x, then calls rm() with an unusual `ls = ls()` argument
x <- 2*x
rm(ls = ls())
}
f2 <- function(y) {
# shells out; `y` is unused
system('pwd')
}
|
640517d8cc538c969a44ca27f2e86d7342ab2a47
|
18389a860719d7541b209b7963b4ca512043978f
|
/afrodyn_paper_analyses/mantel/mantel.R
|
c3c01d30fc61bc06dbb15902f4ca6ea2db9f6dd4
|
[] |
no_license
|
Yunxia-li/afrodyn
|
e38f21af20de614d5ef3e84d07bde4ddfb4ddfc2
|
5d9da980c760e82fda1222b301329bfcd01915b5
|
refs/heads/master
| 2023-04-25T12:31:36.689079
| 2021-05-11T15:39:12
| 2021-05-11T15:39:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,309
|
r
|
mantel.R
|
# Mantel tests of isolation-by-distance for seven Afrodyn taxa:
# for each species, builds a Euclidean genetic distance matrix from a VCF
# (via genind/genpop), runs a Mantel test against pairwise geographic
# distance, and writes diagnostic plots to mantel.pdf. Also constructs a
# distance-from-refugia raster and summarizes pairwise sampling distances.
# NOTE(review): several library() calls are duplicated, rm(list=ls())/setwd()
# at the top are interactive-session habits, and clim_list / ref_mant_list /
# dist_ref_list are created but never filled in this chunk.
rm(list=ls())
setwd("~/Dropbox/afrodyn_data/mantel/")
library(distancetocoast)
library(raster)
library(maps)
library(GSIF)
library(assigner)
library(radiator)
library("adegenet")
library(codep)
library(adespatial)
library(car)
library(vcfR)
library(elevatr)
library(raster)
library(maps)
library(GSIF)
library("rnaturalearth")
library("rnaturalearthdata")
library("hierfstat")
library("pegas")
library(poppr)
library(vcfR)
library(scales)
par(mar=c(3,3,3,3))
##
# distance from refugia raster
##
#create empty raster
r <- raster(ncol=5000,nrow=5000)
crs(r) = '+proj=utm +zone=12 +datum=WGS84'
e<-extent(0,30,-10,10)
r<-crop(r,e)
#approximate midpoints of refugia
xy <- c(10,5)
xy<-rbind(xy,c(11,3))
xy<-rbind(xy,c(10.5,1))
xy<-rbind(xy,c(10.5,-2))
xy<-rbind(xy,c(11.5,-1.5))
xy<-rbind(xy,c(12.5,-4.5))
xy
#world shape for mask
world <- ne_countries(scale = "medium", returnclass = "sf")
#calculate distance from points for each cell
d2 <- distanceFromPoints(r, xy)
#d2<-mask(d2,world)
###
# read in data
###
#location data
lf <- list.files("~/Dropbox/afrodyn_data/genetic_diversity/locs/")
lff <-
list.files("~/Dropbox/afrodyn_data/genetic_diversity/locs/", full.names = T)
#genetic data
rd <-
list.files("~/Dropbox/afrodyn_data/genetic_diversity/data/", full.names = T)
## check closest refugia
foo<-read.csv(lff[5])
plot(d2,xlim=c(10,15),ylim=c(-5,5))
points(foo$long,foo$lat)
###
# Geographic locations
###
#species names
# NOTE(review): spe must stay aligned with the file order of lff/rd
spe <- c("anni", "anon", "green", "mona", "podo_a", "podo_b", "sclero")
library(vegan)
#empty lists for storage
gen_list<-list()
distgenEUCL_list<-list()
locs_list<-list()
clim_list<-list()
ref_mant_list<-list()
ibd_list<-list()
dist_ref_list<-list()
pdf("mantel.pdf")
for(i in 1:length(lff)){
par(mfrow=c(1,2))
#create location data df and list
locs<-read.csv(lff[i])
locs<-locs[,c("index","long","lat")]
locs<-locs[order(locs$index),]
locs$sp<-rep(spe[i],length(locs$long))
if(i == 1){
all_locs<-locs
locs_list[[i]]<-locs
} else {
all_locs<-rbind(all_locs,locs)
locs_list[[i]]<-locs
}
#read in table with index/diversity stat
# special case: species 5 (podo_a) drops its last 3 individuals, which
# have no coordinates
if(i == 5){
load(rd[i])
#make genind
gen<-vcfR2genind(vcf)
#podo a has 3 inds with no coords
gen<-gen[1:(length(indNames(gen))-3)]
#1 pop per individual
pop(gen)<-indNames(gen)
#create genpop object
gp<-genind2genpop(gen)
#store in list
gen_list[[i]]<-gp
} else {
load(rd[i])
#make genind
gen<-vcfR2genind(vcf)
#1 pop per individual
pop(gen)<-indNames(gen)
#create genpop object
gp<-genind2genpop(gen)
#store in list
gen_list[[i]]<-gp
}
#calculate genetic distance (euclidean)
distgenEUCL_list[[i]] <- dist(gen_list[[i]],method = "euclidean", diag = FALSE, upper = FALSE, p = 2)
par(mar=c(4,4,4,4))
#mantel test (vegan::mantel), 9999 permutations
ibd_list[[i]]<-mantel(distgenEUCL_list[[i]], dist(locs_list[[i]][,c(2:3)]), method="pearson", permutations=9999)
#plot histogram of the permutation null distribution
hist(ibd_list[[i]]$perm,
main=paste(spe[i],"genetic distance vs geographic distance"),
xlim=c((min(ibd_list[[i]]$statistic,ibd_list[[i]]$perm)),(max(ibd_list[[i]]$statistic,ibd_list[[i]]$perm)+0.1)),
col="lightgrey",
xlab="mantel statistic",ylab="frequency",
breaks=30,
border=F,
xaxt = "n",
cex.main=0.5,
cex.axis=0.5)
axis(1, at = seq(round((min(ibd_list[[i]]$statistic,ibd_list[[i]]$perm))-0.1,1),(max(ibd_list[[i]]$statistic,ibd_list[[i]]$perm)+0.1),0.1))
#line showing empirical value
abline(v=ibd_list[[i]]$statistic,lty=2,col=alpha(2,0.5),lwd=2)
#plot pairwise genetic distance vs pairwise spatial distance
plot(distgenEUCL_list[[i]],dist(locs_list[[i]][,c(2:3)]),xlab="genetic distance",ylab="spatial distance",
main=paste(spe[i],"genetic distance vs geographic distance"),cex.main=0.5)
}
dev.off()
save.image("mantel.Rdata")
###
# avg dist between individuals
###
par(mfrow=c(3,3))
avg<-vector()
stdev<-vector()
for(i in 1:length(locs_list)){
hist(dist(locs_list[[i]][,c(2:3)]),main=spe[i],xlab="pairwise geographic distance")
avg[i]<-mean(dist(locs_list[[i]][,c(2:3)]))
stdev[i]<-sd(dist(locs_list[[i]][,c(2:3)]))
}
data.frame(avg,stdev,row.names = spe)
|
21a1845b0b1f5b1a05c2430219ca55d7a69757ed
|
5fede19ec35761f9a42f1dedee2bc208121c6813
|
/LoadMetricFiles.r
|
52ff6b315a6f456583b6cbc26355d7ab58b6053a
|
[] |
no_license
|
spolavar/Statistical-Mining-of-NeuroMorpho.Org
|
4ab2607b86a8da61c2695eb320180160d7b1e615
|
c10fddc4e17db5baac6cd0067f33103a1fb49e01
|
refs/heads/master
| 2021-01-10T03:42:30.751368
| 2016-03-05T16:56:50
| 2016-03-05T16:56:50
| 53,211,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,219
|
r
|
LoadMetricFiles.r
|
# Load NeuroMorpho.Org metadata and L-Measure metric reports (v5.4-v5.6):
# read the metadata CSV, then the whole-arbor, soma-only and per-arbor-type
# metric tables; clean each (rename key column, drop helper columns,
# deduplicate rows, NA-out Depth_total_sum < 1 um for effectively flat
# reconstructions) and merge the two whole-arbor parts on neuron_name.
metadataFilename <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/NMO_DB_metadata.csv"
masterMetaData <- read.csv(metadataFilename, header = TRUE, sep = "}", na.strings = c("NA", ""), quote = "\"", stringsAsFactors = FALSE)
print("finished reading...")
colnames(masterMetaData)
unique(masterMetaData$order)
# removing duplicate rows
dpos <- duplicated(masterMetaData)
length(dpos[dpos == TRUE])
masterMetaData <- masterMetaData[, !(colnames(masterMetaData) %in% c("X"))]

# Read one per-arbor-type L-Measure report: tab-separated; first column is
# the neuron name; drops the row-index "X" and soma-surface columns,
# removes duplicate rows and NA-outs Depth_total_sum for flat (< 1 um)
# reconstructions.
loadMetricReport <- function(path) {
  dat <- read.delim(path, header = TRUE, sep = "\t", dec = ".", stringsAsFactors = FALSE)
  colnames(dat)[1] <- 'neuron_name'
  dat <- dat[, !(colnames(dat) %in% c("X", "Soma_Surface_total_sum"))]
  dat <- subset(dat, !duplicated(dat))
  # files that are flat (< 1 um) should have depth as NA
  depthpos <- which(dat$Depth_total_sum < 1)
  dat[depthpos, "Depth_total_sum"] <- NA
  dat
}

# Whole-arbor metrics come in two parts: arbor metrics (part 1) and
# soma-only metrics (part 2), later merged on neuron_name.
metricWholeFile_1 <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_wholeArbor_v5.6.txt"
metricWholeFile_2 <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_somaOnly_v5.6.txt"
metricDataWhole_1 <- read.delim(metricWholeFile_1, header = TRUE, sep = "\t", dec = ".", stringsAsFactors = FALSE)
colnames(metricDataWhole_1)[1] <- 'neuron_name'
# rmvExtnsn() is defined elsewhere in this project -- presumably strips
# file extensions from neuron names (TODO confirm)
metricDataWhole_1 <- rmvExtnsn(metricDataWhole_1)
# part 1 must not carry the row index or a soma-surface column
metricDataWhole_1 <- metricDataWhole_1[, !(colnames(metricDataWhole_1) %in% c("X", "Soma_Surface_total_sum"))]
metricDataWhole_2 <- read.delim(metricWholeFile_2, header = TRUE, sep = "\t", dec = ".", stringsAsFactors = FALSE)
colnames(metricDataWhole_2)[1] <- 'neuron_name'
metricDataWhole_2 <- rmvExtnsn(metricDataWhole_2)
metricDataWhole_2 <- metricDataWhole_2[, !(colnames(metricDataWhole_2) %in% c("X", "N_stems_total_sum"))]
# names present in part 1 but missing from part 2 (diagnostic)
t <- setdiff(metricDataWhole_1$neuron_name, metricDataWhole_2$neuron_name)
length(t)
# removing duplicate rows in both parts before merging
metricDataWhole_1 <- subset(metricDataWhole_1, !duplicated(metricDataWhole_1))
dim(metricDataWhole_1)
metricDataWhole_2 <- subset(metricDataWhole_2, !duplicated(metricDataWhole_2))
dim(metricDataWhole_2)
# BUGFIX: the original passed by = intersect(x$neuron_name, y$neuron_name),
# which references objects `x`/`y` that do not exist here (it only avoided
# an error because merge() never forces `by` when by.x/by.y are supplied).
# Merging on the shared key column is what was intended.
metricDataWhole <- merge(metricDataWhole_2, metricDataWhole_1, by = "neuron_name")
dim(metricDataWhole)
print("finished reading whole...")
# checking the neurons that are present in masterMetaData but not in metricDataWhole
setdiff(masterMetaData$neuron_name, metricDataWhole$neuron_name)
# files that are flat (< 1 um) should have depth as NA
depthpos <- with(metricDataWhole, which(Depth_total_sum < 1))
metricDataWhole[depthpos, "Depth_total_sum"] <- NA
colnames(metricDataWhole)
dim(metricDataWhole)
# v5.5 neurons without soma
somaNA <- subset(metricDataWhole, is.na(metricDataWhole$Soma_Surface_total_sum))
dim(somaNA)

# Per-arbor-type reports all share the same cleaning steps (see helper).
metricApicalFile <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_apicalOnly.txt"
metricDataApical <- loadMetricReport(metricApicalFile)
print("finished reading Apical...")
metricApiBasFile <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_apicalBasal_v5.6.txt"
metricDataApiBas <- loadMetricReport(metricApiBasFile)
print("finished reading apical & basal...")
metricDendFile <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_basalOnly.txt"
metricDataDend <- loadMetricReport(metricDendFile)
print("finished reading Dend...")
metricAxonFile <- "C:/Users/sridevi/thesismaterial/proposal/LMpaper/Rcode/Report_axonOnly.txt"
metricDataAxon <- loadMetricReport(metricAxonFile)
print("finished reading Axon...")
dim(metricDataAxon)
|
a6f867c22c72a852fd40ad7af8fb2cd434724d8f
|
319e87f3a5cbe4b81c2394dcf0ae693909ab6600
|
/2002_enrichmentAnalyses/200117_IPA_genePathwayEnrichGroupComparison.R
|
09a12ee49ea697a215329ca7c1ff75dce9fd2abd
|
[] |
no_license
|
cfbeuchel/imise_gxmetab
|
4d1f497059669b262711f7e489179c78b5da0db8
|
3dab9a25ca713358bb91a0f013302f3fcad3adfd
|
refs/heads/master
| 2023-03-05T01:11:12.806520
| 2021-02-16T15:15:47
| 2021-02-16T15:15:47
| 339,058,123
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,263
|
r
|
200117_IPA_genePathwayEnrichGroupComparison.R
|
#' ---
#' title: "IPA Group Comparison Summaries"
#' author: "Carl Beuchel"
#' date: "`r format(Sys.time(), '%d %B, %Y')`"
#' output:
#'    html_document:
#'     code_download: true
#'     fig_captions: yes
#'     theme: spacelab #sandstone #flatfly #spacelab
#'     highlight: pygments
#'     toc: TRUE
#'     toc_depth: 3
#'     code_folding: hide
#'     number_sections: TRUE
#'     toc_float:
#'        smooth_scroll: FALSE
#' ---
#+ include=F
#==========#
# Initiate #
#==========#
#+ set.LibPath, include=F
# define alternative package directory
r_on_server <- TRUE
if (r_on_server == TRUE) {
bp <- "/net/ifs1/san_projekte/projekte/genstat/"
computer <- "amanMRO"
.libPaths(
paste0(
bp,
"07_programme/rpackages/",
computer
)
)
# set number of cores
nc <- 10
} else {
if(grepl(x= getwd(),pattern = "mnt")){
bp <- "/mnt/mount1/genstat"
} else {
bp <- "/net/ifs1/san_projekte/projekte/genstat/"
}
# set number of cores
nc <- parallel::detectCores()-1
}
#+ set.proj.root, include=F
# set project root
root.dir <- paste0(bp, "02_projekte/1703_ge_metab_a1_b3_sorbs/1805_gxMetaboliteAssociation/")
setwd(root.dir)
#+ load.packages, include=F
for (i in c(
"data.table",
"CarlHelpR",
"toolboxH",
"here",
"ComplexHeatmap"
)){
suppressPackageStartupMessages(
library(i, character.only=TRUE
))}
#+ script.setup, include=F
library("knitr", lib.loc = .libPaths()[1])
library("evaluate", lib.loc = .libPaths()[1])
opts_chunk$set(
cache = F,
results = "markup",
echo = T,
include = T,
message = T,
warning = T
)
# set root directory not to file path
opts_knit$set(root.dir=root.dir)
source(here("../functions/all_annotation_tables.R"))
source(here("../functions/option_setup.R"))
an <- all_annotation_tables(mount.point = "/net/ifs2/san_projekte/projekte/genstat/")
option_setup()
#+ include=F
#==========#
# DONE #
#==========#
# Load data ----
# (review) reads the txt exports of an IPA comparison analysis; file names
# encode the table type and a shortened super-pathway name
# afs = all files
afs <- list.files(path = "obj/IPA/200120_ipa_comparisonResults/")
# no csv
afs <- afs[!grepl(pattern = "csv$", afs)]
# read reference data
nf <- newest_file(look_for = "metabOverview",subfolder = "obj",print_full = TRUE)
st1 <- fread(nf)
# Read single files ----
# spi = super path identifier
# spi <- st1[,.(rid, metab.super.path)]
# (review) `short` must stay aligned element-wise with the unique
# super-pathway names found in st1 -- order dependency, confirm on new data
spi <- data.table(
long = st1[,unique(metab.super.path)]
)
spi[,short:=c(
"CarnitineTransport",
"FattyAcidMetabolism",
"EnergyMetabolism",
"BCAAMetabolism",
"AminoAcidMetabolismOther",
"Other",
"ureaCycle",
"AmmoniaRecycling",
"CollagenSynthesis",
"FolicAcidCycle"
)]
# IPA table types
# tbi = table identifier
tbi <- lapply(strsplit(afs, split = "_",fixed = TRUE), function(x){
x[2]
})
tbi <- unique(unlist(tbi))
# order the files for each group into a list
# Annotate files ----
# get all the analyses of each type
afs <- sapply(tbi, function(x){
files <- grep(pattern = paste0("_", x, "_"), afs, value = T, fixed = T)
# extract and clean file name
tbl.type <- lapply(strsplit(files, split = "_", fixed = TRUE), `[[`, 3)
tbl.type <- gsub(tbl.type, pattern = ".txt", replacement = "", fixed = TRUE)
tbl.type <- gsub(tbl.type, pattern = "[1-2]", replacement = "")
# tbl.type <- gsub(tbl.type, pattern = "\\d", replacement = "")
# assign correct name of superpathway
m1 <- match(tbl.type, spi$short)
names(files) <- spi[m1, long]
# try to read the files
files2 <- lapply(files, function(y){
tbl <- try(read.delim(paste0("obj/IPA/200120_ipa_comparisonResults/", y), sep = "\t", skip = 2))
return(tbl)
})
# return this including the name of the table type
res <- list(files2)
return(res)
}, USE.NAMES = TRUE)
# check
names(all.files)
lapply(all.files, names)
# create tables for each pathway-type
all.files$NetworksTable %>% str
# Canonical Pathways ----
# get new file for the first element
all.files[[names(all.files)[1]]]$`Amino acid metabolism, other` %>% str
can.path <- all.files[[names(all.files)[1]]]
# create molten long table of canonical enrichment
can.path.long <- lapply(seq_along(can.path), function(x){
# format and melt
dat <- can.path[[x]]
setDT(dat)
res <- melt.data.table(data = dat,
id.vars = "Canonical.Pathways",
variable.name = "metabolite",
value.name = "enrichment",
variable.factor = FALSE)
res$enrichment <- as.numeric(res$enrichment)
res[, super.path := names(can.path)[[x]]]
})
# acp = all canonical pathway info
acp <- rbindlist(can.path.long)
# remove non-enriched Pathways/metabolites
na.paths <- acp[, all(is.na(enrichment)), by = Canonical.Pathways][V1==TRUE,Canonical.Pathways]
na.metab <- acp[, all(is.na(enrichment)), by = metabolite][V1==TRUE,metabolite]
acp <- acp[Canonical.Pathways %nin% na.paths & metabolite %nin% na.metab, ]
# dcast to display as heatmap
acp2 <- dcast(acp, formula = super.path + metabolite ~ Canonical.Pathways, value.var = "enrichment")
acp3 <- copy(acp2)
acp.super.paths <- acp3$super.path
acp3$super.path <- NULL
setDF(acp3, rownames = acp3$metabolite)
acp3$metabolite <- NULL
acp3 <- as.matrix(acp3)
# looks uninteresting
Heatmap(acp3,
cluster_rows = FALSE,
cluster_columns = FALSE,
row_names_gp = gpar(fontsize = 6),
column_names_gp = gpar(fontsize = 6)
)
# Regulator Effects ----
# extract info
which.table <- names(all.files)[6]
reg.eff.tbl <- all.files[[which.table]]
# quick look into the data
names(reg.eff.tbl[[1]])
str(reg.eff.tbl)
# annotate single sets with super.pathway
reg.eff.tbl2 <- lapply(seq_along(reg.eff.tbl), function(x){
# format and melt
dat <- reg.eff.tbl[[x]]
setDT(dat)
dat[, Consistency.Score := as.numeric(sub(pattern = ",",
replacement = ".",
x = Consistency.Score,
fixed = TRUE))]
dat[, super.path := names(reg.eff.tbl)[[x]]]
return(dat)
})
# combine
reg.eff.tbl3 <- rbindlist(reg.eff.tbl2)
# make the column with the overall connection info machine readable
# (review) parses strings like "25% (4 of 16)" into a fraction and a count
known.relationships.perc <- lapply(strsplit(reg.eff.tbl3$Known.Regulator.Disease.Function.Relationship,split = " ", fixed = T),`[[`, 1)
known.relationships.perc <- as.numeric(sub(unlist(known.relationships.perc), pattern = "%",replacement = "",fixed = TRUE))/100
known.relationships.num <- lapply(strsplit(reg.eff.tbl3$Known.Regulator.Disease.Function.Relationship,split = " ", fixed = T),`[[`, 2)
known.relationships.num <- unlist(known.relationships.num)
known.relationships.num <- as.integer(regmatches(known.relationships.num,regexpr("[0-9]+",known.relationships.num)))
# enter variables
reg.eff.tbl3[,`:=`(
known.regulator.rel.perc = known.relationships.perc,
known.regulator.rel.num = known.relationships.num
)]
# preview
reg.eff.tbl3[order(Consistency.Score,decreasing = TRUE),]
reg.eff.tbl3[order(known.regulator.rel.num,decreasing = TRUE),] %>% head()
# Upstream Regulators ----
which.table <- names(all.files)[10]
ups.reg.tbl <- all.files[[which.table]]
# quick look into the data
names(ups.reg.tbl[[1]])
str(ups.reg.tbl[[1]])
# annotate single sets with super.pathway
ups.reg.tbl2 <- lapply(seq_along(ups.reg.tbl), function(x){
# format
dat <- ups.reg.tbl[[x]]
setDT(dat)
dat[, super.path := names(ups.reg.tbl)[[x]]]
return(dat)
})
# flags col is not present in every instance
lapply(ups.reg.tbl2,names)
# bind together, fill the flags col with NAs if necessary
ups.reg.tbl3 <- rbindlist(ups.reg.tbl2,fill = TRUE)
# several comma-separated cols -> turn numeric
num.cols <- c("Expr.False.Discovery.Rate..q.value.",
"Expr.Log.Ratio",
"Expr.p.value",
"Activation.z.score",
"p.value.of.overlap")
# turn numeric (decimal comma -> decimal point)
ups.reg.tbl3[, (num.cols) := lapply(.SD, function(x){
as.numeric(sub(pattern = ",",
replacement = ".",
x = x,
fixed = TRUE))
}),.SDcols=(num.cols)]
setorder(ups.reg.tbl3, Expr.False.Discovery.Rate..q.value.,-Activation.z.score, -Expr.Log.Ratio, na.last = TRUE)
# number of regulators
ups.reg.tbl3$Upstream.Regulator %>% uniqueN
# Tox Lists ----
|
2645a0b281e42d5b683943895cbb5e6733ab8679
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2020/Assignment-2020/Individual/FE8828-Zhang Yuejiao/Assignment 2/Assignment 2.R
|
23adcd114d13eba13a465eebe11350edd6abe7a0
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,717
|
r
|
Assignment 2.R
|
#Secretary Problem
# Estimate the success probability of the classic secretary strategy by
# Monte Carlo simulation. A random permutation of ranks 1..N is drawn; the
# first `split_number` candidates are only observed ("evaluation group"),
# and the first later candidate at least as good as the best observed one
# is chosen (falling back to the last candidate when the global best was
# already burned in the evaluation group). Returns the fraction of
# simulations in which the overall best candidate (rank N) was chosen.
#
# Args:
#   N            total number of candidates (N >= 2)
#   split_number size of the evaluation group (1 <= split_number < N)
#   times        number of Monte Carlo repetitions; the default matches
#                the originally hard-coded 10000 (backward compatible)
make_choice <- function(N, split_number, times = 10000) {
  hits <- 0L
  for (i in seq_len(times)) {
    # random interview order; ranks are distinct integers 1..N
    candidates <- sample(1:N, N, replace = FALSE)
    eval_grp <- candidates[1:split_number]
    # by construction we assume split_number < N
    select_grp <- candidates[(split_number + 1):N]
    best_seen <- max(eval_grp)
    if (best_seen == N) {
      # the global best was in the evaluation group: no later candidate
      # beats the benchmark, so we interview through to the last one
      choice <- select_grp[length(select_grp)]
    } else {
      # first later candidate at least as good as the benchmark
      choice <- select_grp[min(which(select_grp >= best_seen))]
    }
    if (choice == N) {
      hits <- hits + 1L
    }
  }
  hits / times
}
# Find the evaluation-group size that maximises the estimated probability
# of hiring the best of N candidates, trying splits 1 .. floor(N/2).
#
# Returns list(split = best split size, p = its estimated probability).
# Fixes vs. original: returns the result visibly (the original ended on an
# assignment, so the value was returned invisibly), no longer shadows
# base::ls, and preallocates via vapply instead of growing a vector.
find_optimal <- function(N) {
  splits <- seq_len(floor(N / 2))
  probs <- vapply(splits, function(j) make_choice(N, j), numeric(1))
  # which.max() gives the first index of the maximum, equivalent to the
  # original match(max(probs), probs)
  list("split" = which.max(probs), "p" = max(probs))
}
# find the best split and its probability when N=3
lsResult <- find_optimal(3)
cat("The best split and its probability when N=3: ", paste0(lsResult,collapse = ", "),".\n")
# find the best split and its probability when N=10
lsResult <- find_optimal(10)
cat("The best split and its probability when N=10: ",paste0(lsResult,collapse = ", "),".\n")
# find the best split and its probability when N=100
# BUGFIX: the printed label wrongly said "N=3" for the N=100 run
lsResult <- find_optimal(100)
cat("The best split and its probability when N=100: ",paste0(lsResult,collapse = ", "),".\n")
|
1b24c16c325cfa6460e0cf32b31c40350e80e95d
|
354d44b5921074077ea2f97120acdb1d01fc7301
|
/AGG Script.R
|
780c304685add4d46f46cfcf68cb966f45163022
|
[] |
no_license
|
dpalmer9/Weston_R_Script
|
3c3f411137bd7804573fd61bb5970abc269e5bf8
|
a005b2d093f634854c205aab8ad0f2e533111a84
|
refs/heads/master
| 2021-01-02T08:36:42.876714
| 2018-04-09T18:27:11
| 2018-04-09T18:27:11
| 99,031,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
AGG Script.R
|
## Aggregate Weston 5CSRTT (2 s stimulus) training data per animal:
## mean of the measure columns and sum of training days, grouped by
## animal / site / strain / genotype / sex / age / schedule.
## Library ##
library(tidyverse)
library(reshape2)
library(car)
library(mice)
library(lme4)
library(nlme)
## Parameters ##
## Acquire data ##
options(scipen = 50)
options(contrasts = c('contr.sum', 'contr.poly'))
raw.data <- read.csv('C:\\Users\\dpalmer\\Documents\\WestonANOVAProcedure\\FiveChoiceDatabase2Second.csv')
raw.data$Age..months. <- as.factor(raw.data$Age..months.)
# grouping variables for the mean aggregation below
agg.list <- list(raw.data$Animal.ID, raw.data$Site, raw.data$Mouse.strain, raw.data$Genotype, raw.data$Gender, raw.data$Age..months., raw.data$Schedule.name)
# drop bookkeeping columns that should not be averaged
raw.data$Max.number.of.trials <- NULL
raw.data$Max.schedule.time <- NULL
raw.data$Schedule.run.date <- NULL
# BUGFIX: the original aggregated `raw.data.2`, an object never defined in
# this script; `raw.data` is the only candidate input here.
agg.data.2 <- aggregate(raw.data, by = agg.list, FUN = mean, na.rm = TRUE)
agg.data.1 <- aggregate(Day ~ Animal.ID + Site + Mouse.strain + Genotype + Gender + Age..months. + Schedule.name, FUN = sum, na.rm = TRUE, data = raw.data)
# NOTE(review): this copies the seven grouping columns (1:7) over columns
# 8:14 before dropping 1:7 -- if columns 8:14 hold aggregated measures they
# are overwritten. Kept as in the original; confirm the intended layout.
agg.data.2[ ,8:14] <- agg.data.2[ ,1:7]
agg.data.2[ ,1:7] <- NULL
write.csv(agg.data.1, "Weston 5CSRTT 2 Second Training AGG.csv")
|
2a095e412bac7286a585224c71069c499cae8048
|
ac4378367a0adba98736df1c8ff7f3a0d44dd15e
|
/codigo/ex7.R
|
4179c154a088ff97a75a453eae5eeecc4c9119f4
|
[] |
no_license
|
paulo-manzone/Data-Science-R
|
307892b83031d56b345b91d5b77a82c05960e230
|
6a08dee1da70a4c06081fe49ad6f3cfe46243f53
|
refs/heads/master
| 2020-03-31T11:02:02.113849
| 2018-10-09T20:33:46
| 2018-10-09T20:33:46
| 152,160,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
ex7.R
|
# Install (once) and load the xlsx package.
# BUGFIX: the original called package.install("xlsx"), which does not
# exist; the base function is install.packages(). Left commented so the
# script does not reinstall on every run.
# install.packages("xlsx")
library(xlsx)
# Read the data
df <- read.xlsx("./dados/exercicio7.xls", sheetName = "Plan1")
# Bar chart of attendances per area, written to PNG
png("./graficos/ex7.png")
barplot(df$Atendimento, names.arg = df$Areas, ylim=c(0,400), cex.names=0.7, col=rainbow(3), main="Atendimentos")
dev.off()
|
e8d21cfc2427dd199bc316caf6ee4d4d968d496a
|
57bfec18bfc943ea128b1fe17a0af86544962447
|
/plot1.R
|
81285a8cb8dcf3837b81008fbcc9353102310550
|
[] |
no_license
|
athuroe/ExData_Plotting1
|
ce7fb50f7b5369dd19a67efef7d8701d915144cd
|
e01d9becce707b46119237b4f9fcb0f7eba98c5f
|
refs/heads/master
| 2021-01-17T21:44:43.179953
| 2015-09-13T22:05:59
| 2015-09-13T22:05:59
| 42,397,201
| 0
| 0
| null | 2015-09-13T13:40:11
| 2015-09-13T13:40:11
| null |
UTF-8
|
R
| false
| false
| 596
|
r
|
plot1.R
|
## Load the household power data, keep only 1-2 Feb 2007, and save a
## histogram of global active power as plot1.png.
pwr <- read.table("household_power_consumption.txt",
                  header = TRUE, sep = ";", stringsAsFactors = FALSE)
## Combine Date + Time into a single datetime column, then drop Date
pwr$Time <- paste(pwr$Date, pwr$Time)
pwr$Date <- NULL
pwr$Time <- strptime(pwr$Time, format = "%d/%m/%Y %H:%M:%S")
## Restrict to the two target days and drop rows with unparseable times
in_window <- pwr$Time >= "2007-02-01" & pwr$Time < "2007-02-03"
pwr <- pwr[in_window, ]
pwr <- pwr[!is.na(pwr$Time), ]
## Draw the histogram into a PNG device
pwr$Global_active_power <- as.numeric(pwr$Global_active_power)
png(file = "plot1.png")
hist(pwr$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
dev.off()
|
c7d2bc0390cc0b7ed40190227efa408b3a2552f4
|
f0c9e167c8ceae9388986d22e89eb7293c664343
|
/data/FGClimatology/man/calc_pmv.Rd
|
da98b57a541f95658471aafa67c1e9e0fc90e7b4
|
[] |
no_license
|
gavin971/r_dev_messkonzept
|
1a2c91e51d45f18df65476bf04b7918c809f0503
|
af6073c2b18f1f036a528bf8df6143bf1b51a561
|
refs/heads/master
| 2020-03-19T03:13:34.235435
| 2015-05-22T08:07:06
| 2015-05-22T08:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,547
|
rd
|
calc_pmv.Rd
|
\name{calc_pmv}
\alias{calc_pmv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Calculation of the Predicted Mean Vote (PMV) Value
}
\description{
This function calculates the PMV value for the given input data.
}
\usage{
calc_pmv(x, clo, met)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The input table must contain all relevant biometeorological data for the calculation, such as all radiation fluxes from all directions, relative humidity (RH), air temperature (Ta), and wind speed. }
\item{clo}{clo factor for the insulation of clothing. See the VDI guidelines (VDI-Richtlinien) for further information. Optional argument. Default is 0.6 for light summer clothes}
\item{met}{met factor for the metabolic rate depending on the person's activity. See the VDI guidelines (VDI-Richtlinien) for further information. Optional argument. Default is 2 for a slowly walking person.}
}
\details{
For fields with NA values (missing data) the PMV cannot be calculated, and an error is raised. Please remove NA values from your input before calling this function.}
\value{
Predicted Mean Vote (PMV) according to Fanger, 1970
}
\references{
VDI 3787 Blatt 2
}
\author{
AlK akrug@mailbox.tu-berlin.de
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Biomet}
\keyword{pmv}% __ONLY ONE__ keyword per line
|
580f6c696119865830e3b1d10d25fea0d9e943bc
|
9c11bee77dee2c81d38af86a48a270914ef06fdb
|
/age_cleaning.R
|
a0a4890bd6ede39c230439231d48b075a57644a5
|
[] |
no_license
|
EllenBrandell/wolf_parasitology
|
415a99ac92b915acbfdbef60bf9a631816e30523
|
060887d488318bf341b1f0429d5a71dad0a8a1ed
|
refs/heads/main
| 2023-07-29T00:15:41.858368
| 2021-09-19T19:08:11
| 2021-09-19T19:08:11
| 367,455,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,066
|
r
|
age_cleaning.R
|
## Ellen Brandell
## May 2021
## assess the importance of the diameter used to delineate pups/adults:
## classify scats as Pup/Adult under two diameter cutoffs and compare the
## classifications against wolves of known identity.
library(tidyverse)
library(cowplot)
theme_set(theme_minimal(base_size=11))
setwd("~/Desktop/scat_project/DATA/data_summaries")
# individual scat data
diameter <- read.csv("scat_diameter.csv")
geno <- read.csv("all_geno_sex_results.csv")
# environmental data
meta <- read.csv("scat_metadata.csv")
location <- read.csv("locations.csv")
######################################## COMBINE DATA
data1 <- merge(geno, diameter, by="scatID", all.x=T)
# data1 <- merge(data1, meta[,c(1,4)], by="scatID", all.x=T) # I want site
######################################## DIAMETER PATTERNS
ggplot(data=data1, aes(x=scat_diameter)) + geom_histogram(fill='gray', color='black')
## look at different diameter measurements by unique wolves
## look at different diameter measurements by collared wolves (known age)
######################################## EXPLORING CUTOFFS
cut1 <- 2.0 # less than OR equal to this is a pup
cut2 <- 2.5 # less than OR equal to this is a pup
data1$age <- ifelse(data1$scat_diameter<=cut1, "Pup", "Adult")
data1$age2 <- ifelse(data1$scat_diameter<=cut2, "Pup", "Adult")
# make sure age isn't changing unless the wolf ages from Juv -> Adult
ggplot(data=data1, aes(x=scat_diameter)) + geom_histogram(fill='gray', color='black') +
geom_vline(xintercept=cut1, col='red', size=1.3)
table(data1$age)
table(data1$age)/nrow(data1)
# "questionable"/"in between" scat diameters based on our cutoff
table(data1$scat_diameter>cut1-0.3 & data1$scat_diameter<cut1+0.3)
# there are quite a few of these "in between" sizes
###### make a data set of JUST KNOWN WOLVES
wolves1 <- data1[!is.na(data1$wolfID),]
# one row per known wolf, with worst-case parasite flags and sample counts
wolf.sum <- wolves1 %>% group_by(wolfID) %>% summarize(sex=sex[1], ages=length(unique(age)), age=age[1],
age2=age2[1], pack=pack[1],
e.c.=max(e.c.),e.m.=max(e.m.),t.m.d.=max(t.m.d.),
sites=length(unique(indiv.site)), nsamples=n())
table(wolf.sum$ages)
table(wolf.sum$age)
table(wolf.sum$age)/nrow(wolf.sum)
table(wolf.sum$pack, wolf.sum$age)
# solid line = cut1, dashed line = cut2 in all panels below
a <- ggplot(data=wolves1, aes(x=scat_diameter)) + geom_histogram(fill='gray', color='black') +
geom_vline(xintercept=cut1, col='red', size=1.3) + geom_vline(xintercept=cut2, col='red', size=1.3, linetype="dashed") +
xlab("scat diameter") + ggtitle("Scat Diameter Known Wolves")
a
b <- ggplot(data=wolves1, aes(y=scat_diameter, x=wolfID, color=wolfID)) + geom_point() +
ylab("scat diameter") + ggtitle("Scat Diameter Known Wolves") +
theme(axis.text.x=element_text(angle=90), legend.position="none") +
geom_hline(yintercept=cut1, col='red', size=0.5) + geom_hline(yintercept=cut2, col='red', size=0.5, linetype="dashed")
b
c <- ggplot(data=wolves1, aes(y=scat_diameter, x=age, color=age)) + geom_boxplot() + geom_jitter(width=0.1) +
ylab("scat diameter") + ggtitle("Scat Diameter Known Wolves") +
geom_hline(yintercept=cut1, col='red', size=0.7)
c
d <- ggplot(data=wolves1, aes(y=scat_diameter, x=age2, color=age2)) + geom_boxplot() + geom_jitter(width=0.1) +
ylab("scat diameter") + ggtitle("Scat Diameter Known Wolves") +
geom_hline(yintercept=cut2, col='red', size=0.7, linetype="dashed")
d
# BUGFIX: the original referenced `cut` (the base R function) instead of
# the cutoff variable `cut1`, which made this plot error out
ggplot(data=wolves1, aes(y=scat_diameter, x=sex, color=sex)) + geom_boxplot() + geom_jitter(width=0.2) +
ylab("scat diameter") + ggtitle("Scat Diameter Known Wolves") +
geom_hline(yintercept=cut1, col='red', size=0.7) + geom_hline(yintercept=cut2, col='red', size=0.7, linetype="dashed")
plot_grid(a,b,c,d, nrow=2, ncol=2)
ggsave("scat_diameter.png", height=11, width=11, units="in", dpi=250)
###### HOW MANY MISMATCHES DO WE HAVE BASED ON CUTOFF VALUE?
# can only use resampled wolves for this
## and importantly, we want to make sure the collared wolves are correctly classified:
# pup = 1211F, 1228F, 1229F
# adult = 969F, 907F, 996M, 1005F, 1155M, 1156M
######
# change scat 145 wolf 1228F to Pup
|
7e34903d199bec96ad58d102626231b4584c8a3d
|
54619814c48f9f41c427ced3da8159c47316e601
|
/DESeq_backbone.R
|
dcd65b83a45ce77ac33d288b609b266882383493
|
[] |
no_license
|
amarseg/RNA_seq
|
f40f11d97c3a64a18ba9e413bbdaec3453214c53
|
3d07eb8b77e0f03a6c8ef610798f3af3a434c03e
|
refs/heads/master
| 2021-05-04T10:56:34.146765
| 2017-08-14T09:39:11
| 2017-08-14T09:39:11
| 36,794,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 791
|
r
|
DESeq_backbone.R
|
## DESeq2 analysis: differential expression across 12 time points using the
## counts stored in a saved SummarizedExperiment.
library("DESeq2")
### Read and prepare data
se.path <- "~/rna_seq0206/seq_0206/Transcripts_1.rda"
# BUGFIX: the .rda path was assigned but never loaded, so `se` did not
# exist when used below. load() restores the object(s) saved in the file
# (assumed to include one named `se` -- confirm against the .rda contents).
load(se.path)
# keep every other column (one library per time point)
counts <- assays(se)$counts[, seq(1, 24, 2)]
row.names(counts) <- elementMetadata(se)$ID
# sample annotation: one factor level per time point; the odd ordering
# (0,1,10,11,2,...) mirrors the lexicographic column order of `counts`
colData <- as.data.frame(factor(rep(c(0, 1, 10, 11, 2, 3, 4, 5, 6, 7, 8, 9))))
rownames(colData) <- colnames(counts)
colnames(colData) <- "timepoint"
test <- DESeqDataSetFromMatrix(countData = counts,
                               colData = colData,
                               design = ~ timepoint)
## Test for differential expression
test <- DESeq(test)
res <- results(test)
# Get significant hits using the adjusted p-value
alpha <- 0.05
resSig <- subset(res, padj < alpha)
# Save diagnostic plots as pdf
pdf("Diagnostics.pdf")
plotDispEsts(test)
plotMA(res)
dev.off()
|
f49204806a6e986e39ca76fc317bb336f9f7781e
|
b1f9bdce25564f024a077b481e573829e089a407
|
/R/package.R
|
83ddbb6b1a0ed57f10ba4199d0e561fa380c9beb
|
[
"MIT",
"ISC"
] |
permissive
|
jimsforks/cordes
|
3e7c253ac408e99e7532dc7bf4672f3549416e4d
|
bda2b9b75396d935ccd201c357ecfeb4a7aec9ac
|
refs/heads/master
| 2022-12-13T04:56:17.299360
| 2020-09-15T08:16:04
| 2020-09-15T08:16:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 130
|
r
|
package.R
|
# Modify a field of inst/cordes/package.json.
#
# BUG FIX: the original read the file but never used `field`/`content`,
# so calling it had no effect. It now sets (or overwrites) the requested
# field and returns the modified structure.
#
# @param field   Name of the top-level field to set.
# @param content New value for that field.
# @return The modified list, invisibly.
# NOTE(review): writing the change back to disk as JSON would need
# jsonlite::write_json (yaml::write_yaml would emit YAML, not JSON);
# not added here to avoid a new dependency -- persist in the caller.
modify_package_json <- function(
  field,
  content
){
  yml <- yaml::read_yaml(
    here::here("inst/cordes/package.json")
  )
  yml[[field]] <- content
  invisible(yml)
}
|
07f69a3a300aef48f5460b0fa64af537e8ce17fd
|
2a5dc31cfb3ff562c9ffcb47776fa0fe542428a6
|
/R/files.R
|
35aa99a396cafb38003771b30b1dc6fa07c421c3
|
[
"MIT"
] |
permissive
|
hairizuanbinnoorazman/googledrive
|
835171842771aa97fa3dcda31c9517cc2cad8bfc
|
91d1655a72d3e5850745efdb08c9300b32cb0f32
|
refs/heads/master
| 2021-01-11T17:53:34.796340
| 2017-05-05T17:38:50
| 2017-05-05T17:38:50
| 79,862,995
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,067
|
r
|
files.R
|
#' Get the list of files from Googledrive
#' @description Utilizes the list_files Google Drive API endpoint to retrieves files and folder details.
#'
#' This function provides the raw functionality of full access to the list_files API endpoint but is not
#' the recommended way of accessing the API. It would be better to use the higher level functions provided
#' within this package to access the files/folders/data that you need.
#' @param q Optional. A query for filtering the file results. See the "Search for Files" guide for supported syntax.
#' @param page_size Optional. The maximum number of files to return per page. Acceptable values are 1 to 1000,
#' inclusive. (Default: 100)
#' @param page_token Optional. The token for continuing a previous list request on the next page. This should be
#' set to the value of 'nextPageToken' from the previous response.
#' @param order_by Optional. A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder',
#' 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime',
#' 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the
#' 'desc' modifier. Example usage: ?order_by=folder,modifiedTime desc,name.
#' @param spaces Optional. A comma-separated list of spaces to query within the corpus. Supported
#' values are 'drive', 'appDataFolder' and 'photos'.
#' @param corpus Optional. The source of files to list. Acceptable values are domain and user
#' @importFrom httr config accept_json content GET
#' @importFrom jsonlite fromJSON
#' @export
base_list_files <- function(q = NULL, page_size = NULL, page_token = NULL,
                            order_by = NULL, spaces = NULL, corpus = NULL){
  # Resolve the endpoint and build an authenticated request configuration.
  endpoint_url <- get_endpoint("drive.endpoint.files.list")
  auth_config <- httr::config(token = get_token())
  # Collect the query parameters, dropping every argument left as NULL so
  # unset options are simply not sent to the API.
  params <- list(q = q, pageSize = page_size, pageToken = page_token,
                 orderBy = order_by, spaces = spaces, corpus = corpus)
  params <- Filter(Negate(is.null), params)
  # Issue the GET request and parse the JSON payload.
  response <- httr::GET(endpoint_url, config = auth_config, accept_json(),
                        query = params, encode = "json")
  parsed <- fromJSON(content(response, "text"))
  # Surface the API's own error message if the call did not succeed.
  if (httr::status_code(response) != 200) {
    stop(parsed$error$message)
  }
  parsed
}
#' Get list of files from a folder
#' @param id ID of the drive folder
#' @inheritParams base_list_files
#' @export
#' @examples
#' \dontrun{
#' library(googledrive)
#' authorize()
#'
#' # Folder id is 0XXXXXXXX
#' list_files_in_folder('0XXXXXXXX')
#'
#' # If id is not specified, list of files would be obtained from root Google drive folder
#' list_files_in_folder()
#' }
list_files_in_folder <- function(id = 'root', page_size = NULL, page_token = NULL,
                                 order_by = NULL, spaces = NULL, corpus = NULL){
  # Query for non-folder, non-trashed children of the given folder id.
  query <- sprintf(
    "'%s' in parents and mimeType != 'application/vnd.google-apps.folder' and trashed = false",
    id)
  base_list_files(query, page_size, page_token, order_by, spaces, corpus)
}
#' Get list of folders from a folder
#' @param id ID of the drive folder
#' @inheritParams base_list_files
#' @export
#' @examples
#' \dontrun{
#' library(googledrive)
#' authorize()
#'
#' # Folder id is 0XXXXXXXX
#' list_folders_in_folder('0XXXXXXXX')
#'
#' # If id is not specified, list of files would be obtained from root Google drive folder
#' list_folders_in_folder()
#' }
list_folders_in_folder <- function(id = 'root', page_size = NULL, page_token = NULL,
                                   order_by = NULL, spaces = NULL, corpus = NULL){
  # Query for non-trashed sub-folders of the given folder id.
  query <- sprintf(
    "'%s' in parents and mimeType = 'application/vnd.google-apps.folder' and trashed = false",
    id)
  base_list_files(query, page_size, page_token, order_by, spaces, corpus)
}
#' Get file via name
#' @description Allows you to pull the a list of file names that match the names that you provide to the file.
#' Due to Google Drive's nature to allow multiple files with the same name to coexist in the same folder, you may
#' be able to obtain a list of files even if you put match_type exact
#' @param file_name Name of the file in the Drive folder
#' @param match_type One of "exact", "contains" or "not_equal"
#' @param id FolderID to search in. 'all' is also accepted which would mean to search the whole of user's drive
#' @inheritParams base_list_files
#' @export
#' @examples
#' \dontrun{
#' library(googledrive)
#' authorize()
#' # Folder id is 0XXXXXXXX
#' get_file_by_name('some_file_name', 'exact', '0XXXXXXXX')
#' }
get_file_by_name <- function(file_name, match_type, id = 'root', page_size = NULL, page_token = NULL,
                             order_by = NULL, spaces = NULL, corpus = NULL){
  # BUG FIX: an unrecognized match_type previously fell through all the
  # if-branches and silently returned NULL; match.arg() now raises an
  # informative error instead. (Roxygen param names also corrected to
  # match the actual arguments.)
  match_type <- match.arg(match_type, c("exact", "contains", "not_equal"))
  # Common part of the query: non-folder, non-trashed children of `id`.
  base_q <- paste0("'", id, "' in parents and mimeType != 'application/vnd.google-apps.folder' and trashed = false")
  # Name clause depends on the requested match semantics.
  name_clause <- switch(match_type,
    exact     = paste0(" and name = '", file_name, "'"),
    contains  = paste0(" and name contains '", file_name, "'"),
    not_equal = paste0(" and not name contains '", file_name, "'"))
  base_list_files(paste0(base_q, name_clause), page_size, page_token, order_by, spaces, corpus)
}
#' Copy a file in Google Drive
#' @param file_id ID of the file in Google Drive
#' @param folder_id ID of the folder to store the copied file
#' @param file_name The name to be given to the copied file. This does not need to be unique within a folder.
#' @importFrom httr config accept_json content POST
#' @importFrom jsonlite fromJSON
#' @export
copy_file <- function(file_id, folder_id = 'root', file_name = NULL){
  # Get endpoint url
  # BUG FIX: the original passed `fileID`, which is undefined anywhere in
  # scope; the function parameter is `file_id`, so every call errored.
  url <- get_endpoint("drive.endpoint.files.copy", file_id)
  # Get token
  token <- get_token()
  config <- httr::config(token=token)
  # Request body: optional new name plus the destination folder.
  # Assigning NULL into a list via [ leaves the element out, so an unset
  # file_name is simply not sent.
  body_params = list()
  body_params['name'] = file_name
  body_params['parents'] = list(list(folder_id))
  # POST Request
  result <- httr::POST(url, config = config, accept_json(), body = body_params, encode = "json")
  # Process results
  result_content <- content(result, "text")
  result_list <- fromJSON(result_content)
  # If endpoint returns a status other than 200, surface the API error message
  if(httr::status_code(result) != 200){
    stop(result_list$error$message)
  }
  return(result_list)
}
#' Delete a file in Google Drive
#' @description Permanently deletes a file owned by the user without moving it to the trash.
#' @param file_id ID of the file in Google Drive
#' @importFrom assertthat assert_that
#' @importFrom httr config accept_json content DELETE
#' @importFrom jsonlite fromJSON
#' @export
delete_file <- function(file_id){
  assert_that(is.character(file_id))
  # Get endpoint url
  url <- get_endpoint("drive.endpoint.files.delete", file_id)
  # Get token
  token <- get_token()
  config <- httr::config(token=token)
  # DELETE Request
  result <- httr::DELETE(url, config = config, accept_json(), encode = "json")
  result_content <- content(result, "text")
  # BUG FIX: per the Drive API, a successful files.delete returns HTTP 204
  # with an EMPTY body; the original called fromJSON("") before checking the
  # status, which errored even on success. Check the status first and only
  # parse the body when there is one.
  if (!(httr::status_code(result) %in% c(200L, 204L))) {
    stop(fromJSON(result_content)$error$message)
  }
  if (!nzchar(result_content)) {
    # Nothing to parse on a successful delete.
    return(invisible(NULL))
  }
  return(fromJSON(result_content))
}
#' Download non-Google docs resources from Google Drive
#' @description The function will acknowledge that the file is safe to download. Do be careful when
#' downloading files from the web (even if its on your own Google Drive folder)
#'
#' If you are downloading images, it would be recommended for you to download the imager package
#' for quick image manipulation and saving. You would need to convert the image from a 3 dimensional array
#' to a 4 dimensional array in that case
#'
#' This function is temporarily disabled as there are issues to be resolved
#' @param file_id ID of the file in Google Drive
#' @examples
#' \dontrun{
#' library(googledrive)
#' authorize()
#' file <- get_file('0XXXXXXXXXXXXXXXXc')
#'
#' # Example with image
#' library(imager)
#' dim(file) # Check dimensions of the file dataset
#' dim(file) <- c(400, 320, 1, 3) # Example dimensions for color image (x, y, z, c)
#' save.image(file, "file.jpg")
#' }
get_file <- function(file_id){
  # Get endpoint url
  url <- get_endpoint("drive.endpoint.files.get", file_id)
  # Get token
  token <- get_token()
  config <- httr::config(token=token)
  # Query parameters: alt=media requests the file content itself rather
  # than its metadata; acknowledgeAbuse is deliberately FALSE so flagged
  # files are not downloaded.
  query_params = list()
  query_params['alt'] = 'media'
  query_params['acknowledgeAbuse'] = FALSE
  # GET Request
  result <- httr::GET(url, config = config, query = query_params, encode = "json")
  # Return the raw bytes of the response; no HTTP status check is done here
  # -- NOTE(review): callers receive the error body as raw bytes on failure.
  result_content <- content(result, type = "raw")
  return(result_content)
}
#' Upload file to Google Drive
#' @description Allows you to uploads files into Google Drive. During the uploading process, you would
#' not be able to define other metadata that concerns the file. Utilize other functions to edit the
#' file metadata
#' @param file_name Name of the file that is to be uploaded into Google Drive. The file will be named as
#' 'untitled'
#' @importFrom assertthat assert_that
#' @importFrom httr config accept_json content POST upload_file
#' @importFrom jsonlite fromJSON
#' @export
upload_file <- function(file_name){
  # The path must be a character string.
  assert_that(is.character(file_name))
  # Resolve the upload endpoint and an authenticated configuration.
  endpoint_url <- get_endpoint("drive.endpoint.upload.files.create")
  auth_config <- httr::config(token = get_token())
  # uploadType=media sends the raw file bytes as the request body;
  # httr::upload_file() streams the local file.
  response <- httr::POST(
    endpoint_url,
    config = auth_config,
    query = list(uploadType = 'media'),
    body = httr::upload_file(file_name)
  )
  # Return the parsed response (the new file's metadata on success).
  content(response)
}
#' Add or remove file from folders
#' @description Allows you to move file around. Due to the nature of Google drive which allows multiple
#' files with the same names to coexist in the same folder, we can technically "hook" files into the
#' folders. At the same time, there is a convenient feature of being able to "hook" a file into
#' multiple folders at the same time which basically means a file can be in 2 folders at once.
#' @param file_id The ID of the file.
#' @param add_folders A character vector of folder Ids
#' @param remove_folders A character vector of folder Ids
#' @importFrom assertthat assert_that
#' @importFrom httr config accept_json content PATCH
#' @importFrom jsonlite fromJSON
#' @export
move_file <- function(file_id, add_folders = NULL, remove_folders = NULL){
  # Validate that the file id is a character string.
  assert_that(is.character(file_id))
  # Get endpoint url
  url <- get_endpoint("drive.endpoint.files.update", file_id)
  # Get token
  token <- get_token()
  config <- httr::config(token=token)
  # Folder ids are sent as comma-separated lists.
  # NOTE(review): when add_folders/remove_folders is NULL,
  # paste0(NULL, collapse = ",") yields "" and an empty addParents /
  # removeParents parameter is still sent -- confirm the API treats an
  # empty value as "no change".
  query_params = list()
  query_params['addParents'] = paste0(add_folders, collapse = ",")
  query_params['removeParents'] = paste0(remove_folders, collapse = ",")
  # PATCH Request
  result <- httr::PATCH(url, config = config, query = query_params)
  # Return the parsed response; no HTTP status check is performed here.
  result_content <- content(result)
  return(result_content)
}
#' Update file metadata properties
#' @description Allows you to update the name and description of the file
#' @param file_id The ID of the file.
#' @param name Name of the file. Overwrites the file name on Google Drive
#' @param description Description of the file. Overwrites the description on Google Drive
#' @param starred A boolean property to determine whether the file is to be starred or not
#' @param trashed A boolean property to determine whether the file is to be put into the trash or not
#' @importFrom assertthat assert_that
#' @importFrom httr config accept_json content PATCH upload_file content_type_json
#' @importFrom jsonlite fromJSON toJSON
#' @export
update_file_metadata <- function(file_id, name = NULL, description = NULL, starred = NULL, trashed = NULL){
  # Validate that the file id is a character string.
  assert_that(is.character(file_id))
  # Get endpoint url
  url <- get_endpoint("drive.endpoint.files.update", file_id)
  # Get token
  token <- get_token()
  config <- httr::config(token=token)
  # Request body: assigning NULL into a list via [ leaves the element out,
  # so only the properties the caller supplied are sent.
  body_params = list()
  body_params['name'] = name
  body_params['description'] = description
  body_params['starred'] = starred
  body_params['trashed'] = trashed
  # PATCH Request; auto_unbox = TRUE serialises length-1 values as JSON
  # scalars rather than one-element arrays.
  result <- httr::PATCH(url, config = config, content_type_json(), body = as.character(toJSON(body_params, auto_unbox=TRUE)))
  # Return the parsed response; no HTTP status check is performed here.
  result_content <- content(result)
  return(result_content)
}
|
3e6785e70cde1bb8a7e7c820e1acb136b00db3be
|
592931441d6f1d500e628ba63aa01164cd7d6163
|
/R/nest.R
|
cda0fdc8e7f3dd16fa35c9d93a58c4d03cdfb05b
|
[] |
no_license
|
dgrtwo/tidyr
|
689887ca65ccd41430d37bbd28e629a46db04e67
|
889251bc9999389878c6523c16bdf8fea7965dcc
|
refs/heads/master
| 2021-01-14T08:46:07.295810
| 2016-08-30T19:45:45
| 2016-08-30T19:45:45
| 66,972,183
| 1
| 0
| null | 2016-08-30T19:43:08
| 2016-08-30T19:43:08
| null |
UTF-8
|
R
| false
| false
| 2,656
|
r
|
nest.R
|
#' Nest repeated values in a list-variable.
#'
#' There are many possible ways one could choose to nest columns inside a
#' data frame. \code{nest()} creates a list of data frames containing all
#' the nested variables: this seems to be the most useful form in practice.
#'
#' @seealso \code{\link{unnest}} for the inverse operation.
#' @seealso \code{\link{nest_}} for a version that uses regular evaluation
#' and is suitable for programming with.
#' @param .key The name of the new column.
#' @inheritParams nest_
#' @param ... Specification of columns to nest. Use bare variable names.
#' Select all variables between x and z with \code{x:z}, exclude y with
#' \code{-y}. For more options, see the \link[dplyr]{select} documentation.
#' @export
#' @examples
#' library(dplyr)
#' iris %>% nest(-Species)
#' chickwts %>% nest(weight)
#'
#' if (require("gapminder")) {
#' gapminder %>%
#' group_by(country, continent) %>%
#' nest()
#'
#' gapminder %>%
#' nest(-country, -continent)
#' }
nest <- function(data, ..., .key = data) {
  # Capture the unevaluated `.key` expression and convert it to a single
  # column name (default: a column literally called "data").
  key_col <- col_name(substitute(.key))
  # Resolve the ... selection (dplyr::select semantics) against the
  # data's column names; unname() drops the selection's names.
  nest_cols <- unname(dplyr::select_vars(colnames(data), ...))
  # Delegate to the standard-evaluation version.
  nest_(data, key_col, nest_cols)
}
#' Standard-evaluation version of \code{nest}.
#'
#' This is a S3 generic.
#'
#' @param data A data frame.
#' @param key_col Name of the column that will contain the nested data frames.
#' @param nest_cols Character vector of columns to nest.
#' @keywords internal
#' @export
nest_ <- function(data, key_col, nest_cols = character()) {
  # S3 generic: dispatch on the class of `data`
  # (data.frame / tbl_df / grouped_df methods below).
  UseMethod("nest_")
}
#' @export
nest_.data.frame <- function(data, key_col, nest_cols = character()) {
  # Every column not selected for nesting becomes a grouping column.
  group_cols <- setdiff(names(data), nest_cols)
  nest_impl(as_data_frame(data), key_col, group_cols, nest_cols)
}
#' @export
nest_.tbl_df <- function(data, key_col, nest_cols = character()) {
  # Delegate to the next (data.frame) method, then coerce the result back
  # to a tibble.
  as_data_frame(NextMethod())
}
#' @export
nest_.grouped_df <- function(data, key_col, nest_cols = character()) {
  # With no explicit selection, nest every column; the grouping columns
  # are removed from the nest set inside nest_impl().
  if (length(nest_cols) == 0) {
    nest_cols <- names(data)
  }
  # Use the data's existing dplyr groups as the grouping columns.
  group_cols <- vapply(dplyr::groups(data), as.character, character(1))
  nest_impl(data, key_col, group_cols, nest_cols)
}
#' @importFrom tibble data_frame
# Shared worker: nest `nest_cols` of `data` into a list-column named
# `key_col`, one row per distinct combination of `group_cols`.
nest_impl <- function(data, key_col, group_cols, nest_cols) {
  data <- dplyr::ungroup(data)
  # No grouping columns: the whole data frame becomes a single nested cell.
  if (length(group_cols) == 0) {
    df <- data_frame(list(data))
    names(df) <- enc2utf8(key_col)
    return(df)
  }
  # A column cannot be both a group key and nested.
  nest_cols <- setdiff(nest_cols, group_cols)
  # One output row per distinct combination of the grouping columns,
  # in order of first appearance.
  out <- dplyr::distinct_(dplyr::select_(data, .dots = group_cols))
  # Group index of each input row; split() orders groups by index value,
  # so reorder by unique(idx) to line up with the distinct() rows above.
  idx <- dplyr::group_indices_(data, .dots = group_cols)
  out[[key_col]] <- unname(split(data[nest_cols], idx))[unique(idx)]
  out
}
# Declare `.` as a global to silence R CMD check notes about pipelines.
globalVariables(".")
|
abbc17502507ccb725ab7f84c8672b6478be3a55
|
83ae358d90cb1c54c8be380bc7bd628a2f6ed530
|
/R/stats2.R
|
6f62b73830c81219bca1c7f34e9421b574597418
|
[] |
no_license
|
cran/Rlab
|
c7963e1210e2140fc6d397ff6a2cf289f0dd3bd2
|
c72e630626f6df15cf75ffd8b9ee7c85322aeda8
|
refs/heads/master
| 2022-05-28T16:35:40.306539
| 2022-05-04T22:10:02
| 2022-05-04T22:10:02
| 17,693,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 918
|
r
|
stats2.R
|
"stats2"<-function (x, by,digits=8){
if (!missing(by)) {
x <- cat.to.list(c(x), by)
}
if (!is.list(x) & !is.matrix(x))
x <- matrix(x, ncol = 1)
if (is.list(x)) {
ncol <- length(names(x))
out <- matrix(NA, ncol = ncol, nrow = length(describe2()))
dimnames(out) <- list(describe2(), names(x))
for (j in (1:ncol)) {
if (is.numeric(x[[j]])) {
out[, j] <- describe2(x[[j]])
}
}
return(round(out,digits=digits))
}
if (is.matrix(x)) {
nc <- ncol(x)
ex.skew<-rep(NA,nc)
ex.kurt<-rep(NA,nc)
out <- matrix(NA, ncol = nc, nrow = length(describe2()))
dimnames(out) <- list(describe2(), dimnames(x)[[2]])
for (j in (1:nc)) {
out[, j] <- describe2(x[, j])
}
return(round(out,digits=digits))
}
}
|
d99ff6c1956c2736b65457fb26667b82a7708973
|
29dc4ad394faa3163b6b8a42a9fc60ee01c41d15
|
/code/02_RhoneExploration.R
|
5a8951eedc3e70810c50a008050348e05d08b6bd
|
[] |
no_license
|
MaximilianPi/Snails-JSDM
|
fd4400abfc46df7d530289fccb37d3047e73f3df
|
f172f1bd7a025ddb5b92fa68b92df3ad32fb5bd1
|
refs/heads/main
| 2023-08-13T22:16:42.712183
| 2021-10-06T11:44:17
| 2021-10-06T11:44:17
| 394,691,369
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,336
|
r
|
02_RhoneExploration.R
|
################################################################################
#### Rhone River
################################################################################
# Description: Exploring the Rhone River data
# Load required packages
library(tidyverse)
library(parzer)
# Clear R's brain
# NOTE(review): rm(list = ls()) wipes the user's workspace; acceptable for an
# interactive script, but do not source this file from other code.
rm(list = ls())
################################################################################
#### Prepare Data
################################################################################
# Load keys (semicolon-separated CSVs hosted on GitHub)
url <- "https://raw.githubusercontent.com/pennekampster/Bayesian_thinking_UZH/main/Group_projects/SDM_Rhone_snails/data/"
fauna_key <- read_csv2(file.path(url, "gastero_fauna_key.csv"))
envir_key <- read_csv2(file.path(url, "gastero_environment_key.csv"))
# Make proper column names
names(fauna_key) <- c("Family", "Genus", "Species", "Short")
# Make proper species names (binomial: Genus + species)
fauna_key$Name <- paste(fauna_key$Genus, fauna_key$Species)
# Load datasets
envir <- read.csv2(file.path(url, "gastero_environment.csv"), row.names = 1, stringsAsFactors = F)
fauna <- read.csv2(file.path(url, "gastero_fauna.csv"), row.names = 1, stringsAsFactors = F)
sampl <- read_csv2(file.path(url, "gastero_samples.csv"))
# We also need the restoration information
resto <- read_csv2(file.path(url, "restoration_info.csv"))
# Parse the latitude and longitudes (degree-minute-second strings -> decimal)
resto$Latitude <- parse_lat(resto$Latitude)
resto$Longitude <- parse_lon(resto$Longitude)
# Transpose so rows = samples, columns = variables/species
envir <- as.data.frame(t(envir))
fauna <- as.data.frame(t(fauna))
# Replace NAs with 0s (THIS IS A BIG ASSUMPTION: NA is treated as absence)
fauna[is.na(fauna)] <- 0
fauna <- as.data.frame(fauna)
# Bind data together (columns assumed to be in the same sample order --
# TODO confirm the three tables align row-wise)
dat <- cbind(fauna, envir, sampl)
print(names(dat))
# Correct year (stored as 2-digit offsets)
dat$year <- dat$year + 2000
# Recode French site labels: AV (aval) -> DO (downstream), AM (amont) -> UP (upstream)
dat$site <- gsub("AV", "DO", dat$site)
dat$site <- gsub("AM", "UP", dat$site)
# Remove double downlstream sites (why though?)
table(dat$site)
dat <- subset(dat, !grepl("N", dat$site))
# Add information on restoration (joined by channel + site)
info <- left_join(dat, resto, by = c("channel" = "Channel", "site" = "Site")) %>%
  select(RestorationYear = rest_year, RestorationType = Type, Latitude, Longitude)
dat <- cbind(dat, info)
# Indicate if something is pre or post restoration
# NOTE(review): the restoration year itself counts as "pre" (<=) -- confirm.
dat$restoration <- ifelse(dat$year <= dat$RestorationYear, "pre", "post")
dat$restoration[is.na(dat$restoration)] <- "not_restored"
# Let's recode the seasons a bit nicer (P/E/A are French initials)
dat <- mutate(dat, season = case_when(
  season == "P" ~ "Spring"
  , season == "E" ~ "Summer"
  , season == "A" ~ "Autumn"
))
# Make some columns factorial
dat$channel <- as.factor(dat$channel)
dat$site <- as.factor(dat$site)
dat$season <- as.factor(dat$season)
dat$restoration <- as.factor(dat$restoration)
################################################################################
#### Explore Data
################################################################################
# Let's visualize the different locations
locs <- as.matrix(unique(dat[, c("Longitude", "Latitude")]))
plot(locs, pch = 20)
# Are there any NA's
sum(is.na(envir))
sum(is.na(fauna))
sum(is.na(sampl))
# Let's see in which columns they are (TRUE = column contains at least one NA)
print(apply(envir, 2, function(x){!all(!is.na(x))}))
print(apply(fauna, 2, function(x){!all(!is.na(x))}))
print(apply(sampl, 2, function(x){!all(!is.na(x))}))
# Check dimensions
dim(dat)   # Whole dataset
dim(envir) # 19 Covariates
dim(fauna) # 26 Species
dim(sampl) # 6 Variables describing the sampling site
# What kind of data is there?
summary(dat$number)        # Unique ID for each sampling occasion
length(unique(dat$number)) # Not all samples remained in the dataset
count(dat, channel)         # Different channels (19 in total)
count(dat, site)            # Different sites (3 in total) -> Up Down Center?
count(dat, year)            # Year of sampling
count(dat, season)          # Season of sampling -> Spring (P), Summer (E), Autumn (A)
count(dat, restoration)     # Most samples collected after the restoration
count(dat, RestorationType) # Different restoration methods
# Are all species present before and after the restoration?
dat %>%
  select(restoration, Acr_la:Viv_sp) %>%
  pivot_longer(2:ncol(.), names_to = "Species", values_to = "Count") %>%
  group_by(restoration, Species) %>%
  summarize(Present = ifelse(max(Count) > 0, T, F), .groups = "drop") %>%
  ggplot(aes(x = Species, y = restoration, fill = Present)) +
    geom_tile(col = "black") +
    scale_fill_manual(values = c("lightgray", "cornflowerblue")) +
    theme_minimal() +
    coord_equal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 0.8)) +
    ylab("Restoration")
# How does the sampling intensity vary over time?
dat %>%
  group_by(year, .drop = F) %>%
  summarize(n = n()) %>%
  ggplot(aes(x = year, y = n)) +
    geom_point() +
    geom_line() +
    ylab("Number of Samples")
dat %>%
  group_by(year, season, .drop = F) %>%
  summarize(n = n()) %>%
  ggplot(aes(x = year, y = n, col = season)) +
    geom_point() +
    geom_line() +
    ylab("Number of Samples by Season")
dat %>%
  group_by(year, channel, .drop = F) %>%
  summarize(n = n()) %>%
  ggplot(aes(x = year, y = n)) +
    geom_point() +
    geom_line() +
    facet_wrap(~ channel) +
    ylab("Number of Samples by Site")
# Generate groups to separate channels from the upstream and downstream
dat$grp <- paste0(dat$channel, "_", dat$site)
# How many samples are there per group?
table(dat$grp, dat$year)
# Visualize timing of restoration
ggplot(dat, aes(x = year, y = grp, col = factor(restoration))) +
  geom_jitter(size = 0.5, width = 0.1) +
  theme_minimal() +
  scale_color_manual(values = c("gray30", "orange", "cornflowerblue"), name = "Restoration") +
  ylab("Group") +
  xlab("Year")
# Visualize with tiles (one tile per channel-year with at least one sample)
dat %>%
  dplyr::select(channel, year) %>%
  distinct() %>%
  ggplot(aes(x = year, y = channel)) +
    geom_tile() +
    coord_equal() +
    theme_minimal()
# Let's check the number of observations at different seasons
table(dat$channel, dat$season) # Season "A" only rarely sampled
table(dat$channel, dat$year)   # Up to 16 samples per year
table(dat$year, dat$season)    # Season "A" only sampled in year 2
# Which species are present the most?
# NOTE(review): as.tibble() is deprecated in favour of as_tibble().
dat %>%
  dplyr::select(Acr_la:Viv_sp) %>%
  colSums() %>%
  as.tibble(rownames = "Species") %>%
  arrange(desc(value)) %>%
  left_join(fauna_key, by = c("Species" = "Short"))
|
555eedb6681e04d4cc6f4a40cead8f20faf71642
|
48b3ca27ab2270abbfd91abadd9d97314c76aab6
|
/data/uk_lower/Bills/UK UK Bills.R
|
9f97ed2ce4e49077719393f17a3449d2cd88956f
|
[] |
no_license
|
yichang-yang/CompLegFall2019
|
43e11c5cf9995ba727ff4b2c1f2feaeae6f0fe64
|
a8a897f2240502dced49b7561bfb1b9ef393201f
|
refs/heads/master
| 2020-07-21T11:15:28.180654
| 2020-02-20T01:14:42
| 2020-02-20T01:14:42
| 206,845,599
| 0
| 0
| null | 2019-09-06T17:44:15
| 2019-09-06T17:44:15
| null |
UTF-8
|
R
| false
| false
| 4,481
|
r
|
UK UK Bills.R
|
# remove objects
rm(list=ls())
# detach all libraries
detachAllPackages <- function() {
  # Base packages that must never be detached.
  core <- c("package:stats", "package:graphics", "package:grDevices",
            "package:utils", "package:datasets", "package:methods",
            "package:base")
  # Search-path entries that are attached packages (name starts "package:").
  attached <- grep("^package:", search(), value = TRUE)
  # Detach everything that is not one of the core packages.
  for (pkg in setdiff(attached, core)) {
    detach(pkg, character.only = TRUE)
  }
}
detachAllPackages()
# load libraries
pkgTest <- function(pkg){
  # Packages requested in `pkg` that are not yet installed on this machine.
  missing_pkgs <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  # Install anything missing (with dependencies) before attaching.
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  # Attach every requested package; returns a named logical vector of successes.
  sapply(pkg, require, character.only = TRUE)
}
# Install (if needed) and attach all required packages via pkgTest().
lapply(c("stringr", "dplyr", "plyr", "tidyverse", "rvest", "zoo", "lubridate"), pkgTest)
library(dplyr)
# Working directory (machine-specific path -- adjust when running elsewhere)
setwd("~/Documents/GitHub/CompLegFall2019/data/uk_lower/")
# Download paginated CSV exports (500 rows per page) from the UK Parliament
# linked-data API into the given sub-directory.
download_csv <- function(type, maxPages, fileName){
  for (page in 0:maxPages) {
    # Build the API URL and the local destination for this page.
    page_url <- str_c("http://lda.data.parliament.uk/", type,
                      ".csv?_pageSize=500&_page=", page, collapse = "")
    dest <- str_c(getwd(), "/", fileName, "/", type, "_page_", page,
                  ".csv", collapse = "")
    # Download; report (but do not stop on) failures.
    tryCatch(download.file(page_url, dest, quiet = TRUE),
             error = function(e) print(paste(dest, 'questions missing')))
    # Random short delay between requests to be polite to the server.
    Sys.sleep(runif(1, 0, 0.15))
  }
}
#download_csv("bills", 4, "bills")
setwd("~/Documents/GitHub/CompLegFall2019/data/uk_lower/Bills")

# Stack the five downloaded pages (bills_page_0.csv .. bills_page_4.csv)
# into one data frame.  The `identifier` column is dropped from every page
# so the columns line up for rbind().
result <- read.csv("bills_page_0.csv")
result$identifier <- NULL
for (i in 1:4) {
  # FIX: "collpase" was a misspelling of "collapse"; the stray named
  # argument was silently absorbed into str_c()'s `...`.
  file <- str_c("bills_page_", i, ".csv", collapse = "")
  interm <- read.csv(file)
  # FIX: the handler was named "warnings" (not a condition class R ever
  # signals), so it could never fire; "warning" is the correct name.
  tryCatch({interm$identifier <- NULL},
           warning = function(e) print(paste(file, 'no identifier')))
  result <- rbind(result, interm)
}

#### Keep and rename the columns of interest.
# rename() carries an explicit dplyr:: prefix because plyr (attached after
# dplyr in the preamble) masks it.
names(result)
result <- result %>%
  select(date, bill.type, title, sponsors...sponsor.printed, session...display.name)
result <- dplyr::rename(result,
                        date_introduced = date,
                        bill_type = bill.type,
                        bill_title = title,
                        member_name = sponsors...sponsor.printed,
                        session = session...display.name)
names(result)
levels(result$session)

#### Session and House: map each session label onto a parliament number.
# NOTE(review): the mapping is hard-coded; confirm the intended boundaries
# (e.g. "2009-2010" is assigned parliament 55 while "2008-2009" gets 54).
result$session <- as.character(result$session)
result$parliament <- NA
result[result$session == "2005-2006", ]$parliament <- 54
result[result$session == "2006-2007", ]$parliament <- 54
result[result$session == "2007-2008", ]$parliament <- 54
result[result$session == "2008-2009", ]$parliament <- 54
result[result$session == "2009-2010", ]$parliament <- 55
result[result$session == "2010-2012", ]$parliament <- 55
result[result$session == "2012-2013", ]$parliament <- 55
result[result$session == "2013-2014", ]$parliament <- 55
result[result$session == "2014-2015", ]$parliament <- 56
result[result$session == "2015-2016", ]$parliament <- 56
result[result$session == "2016-2017", ]$parliament <- 56
result[result$session == "2017-2019", ]$parliament <- 57
result[result$session == "", ]$parliament <- NA
result$session <- as.factor(result$session)
levels(result$session)

# Bills whose title contains "HL" started in the Lords (chamber 2);
# everything else is treated as a Commons bill (chamber 1).
result$chamber_number <- NA
result$chamber_number[str_detect(result$bill_title, "HL")] <- 2
result$chamber_number[is.na(result$chamber_number)] <- 1

#### Name, constituency
# result$member_name <- str_replace_all(result$member_name, "Mr ", "")
# result$member_name <- str_replace_all(result$member_name, "Ms ", "")
# result$member_name <- str_replace_all(result$member_name, "Mrs ", "")
# result$member_name <- str_replace_all(result$member_name, "Sir ", "")
# result$member_name <- str_replace_all(result$member_name, "Dr ", "")
members <- read.csv("uk_lower_members.csv")
# Inner join on sponsor name: bills whose sponsor is absent from the
# members table are silently dropped here.
final <- merge(result, members, by.x = "member_name", by.y = "full_name")
final <- final %>%
  select(date_introduced, bill_type, bill_title, member_name,
         member_id = member_number, constituency_name, constituency_ID,
         chamber_number = chamber_number.y, parliament)
final <- final[order(final$date_introduced), ]
# FIX: seq_len(n()) instead of 1:length(...) so an empty result does not
# produce the degenerate c(1, 0) sequence.
final <- final %>%
  mutate(observation_number = seq_len(n()))
final <- mutate(final, parliament_path = paste0("/parliament-", parliament))
final <- mutate(final, chamber_path = paste0(final$parliament_path, "/chamber-", chamber_number))
final <- mutate(final, observation_path = paste0(final$chamber_path, "/bill-", observation_number))
final <- mutate(final, bill_path = final$observation_path)
write_csv(final, "bill.csv")
hey <- read_csv("bill.csv")
|
502b5cd4aad93809adcc4d451f290ef4193249f3
|
b0ed6387572b52cec1b784d54f53355564027fc5
|
/run_analysis.R
|
b6eea685721ae33fc6a191555bd53914bae5b53c
|
[] |
no_license
|
csubrata/Getting-and-Cleaning-Data-Project
|
15e99d60bf0cef13cfeb455103001274b5ba6a6d
|
f261e5856c66329683b871b24e29b47d31710be6
|
refs/heads/master
| 2021-01-20T01:03:35.238678
| 2014-07-27T14:51:07
| 2014-07-27T14:51:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,289
|
r
|
run_analysis.R
|
######################################################################################################################
#
# Human Activity Recognition Using Smartphones Data Set
#
# Project Goal:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#
######################################################################################################################
###########################
# Initial setup
###########################
library(reshape2)
setwd("Z:/Coursera/Data Science/Getting and Cleaning Data/Project")

###########################
# Read Features: keep only the mean() and std() measurements, restoring
# the original column order afterwards.
###########################
features <- read.table("./UCI HAR Dataset/features.txt")
names(features) <- c("No","Name")
features <- rbind(features[grep("mean\\(\\)",features$Name),],features[grep("std\\(\\)",features$Name),])
features <- features[order(features$No),]

###########################
# Read Activities (activity number -> activity label)
###########################
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
names(activities) <- c("No","Name")

###########################
# Combine Test data
###########################
# FIX: X_test.txt has default column names (V1, V2, ...), so columns must
# be selected by position (features$No), not by measurement name.  Indexing
# with features$Name either errors (character names) or silently selects
# the wrong columns via factor codes.
features.test <- read.table("./UCI HAR Dataset/test/X_test.txt")[features$No]
names(features.test) <- features$Name
activities.test <- read.table("./UCI HAR Dataset/test/y_test.txt")
names(activities.test) <- c("Activity")
activities.test$Activity <- factor(activities.test$Activity, levels = activities$No, labels = activities$Name)
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(subject.test) <- c("Subject")
test <- cbind(features.test, Activity = activities.test$Activity, Subject = subject.test$Subject)

###########################
# Combine Train data (same fix as for the test set above)
###########################
features.train <- read.table("./UCI HAR Dataset/train/X_train.txt")[features$No]
names(features.train) <- features$Name
activities.train <- read.table("./UCI HAR Dataset/train/y_train.txt")
names(activities.train) <- c("Activity")
activities.train$Activity <- factor(activities.train$Activity, levels = activities$No, labels = activities$Name)
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(subject.train) <- c("Subject")
train <- cbind(features.train, Activity = activities.train$Activity, Subject = subject.train$Subject)

###########################
# Joining test and train data
###########################
data <- rbind(test, train)

###########################
# Creating independent tidy data set with the average of each variable
# for each activity and each subject (long form, then wide with means).
###########################
data <- melt(data, id = c("Subject", "Activity"))
data <- dcast(data, Subject + Activity ~ variable, mean)

###########################
# Save the tidy data.
###########################
write.table(data, "tidydata.txt", row.names = FALSE, quote = FALSE)

###########################
# Clean up (intentional: the script leaves nothing in the workspace)
###########################
remove(list=ls())
|
64055afcb22838c4eb56cbb7e4b3011e25a2b181
|
f9f70e201678f3e4d3d87beef4b35d35ef624c50
|
/R/read_and_combine_raw.R
|
9bf92f86929f116a40fa9c1ef597cced60c9be91
|
[] |
no_license
|
TikshiKE/rpackassigment
|
a7611b38dd9db847577ce619344e3706dcc449f6
|
2d6a55e184f0ed85bf3102e557b87a3735041e76
|
refs/heads/master
| 2023-02-23T00:25:21.357565
| 2020-12-17T15:23:16
| 2020-12-17T15:23:16
| 322,322,544
| 0
| 1
| null | 2021-01-11T22:21:01
| 2020-12-17T14:39:22
|
R
|
UTF-8
|
R
| false
| false
| 492
|
r
|
read_and_combine_raw.R
|
#' Read and combine raw .rds files
#'
#' Reads every file listed in \code{dir} with \code{readRDS()} and stacks
#' the results into one \code{data.table}, recording the source file path
#' in an \code{SID} column.
#'
#' @param dir Character vector of paths to .rds files, each holding a
#'   data.frame/data.table with compatible columns.
#' @param na.rm If \code{TRUE}, rows containing missing values are dropped
#'   from the combined result. \code{FALSE} by default.
#'
#' @return A single combined \code{data.table} with filename (\code{SID})
#'   and data.
#' @export
#'
#' @examples
#'
read_and_combine_raw <- function(dir, na.rm = FALSE) {
  # Read each file exactly once.  (The previous implementation re-read and
  # re-combined the full file list on every loop iteration, duplicating the
  # data length(dir) times in the result.)
  dta <- lapply(dir, readRDS)
  names(dta) <- dir
  combined <- rbindlist(dta, idcol = 'SID')
  if (na.rm) {
    # Honour na.rm explicitly; previously it was passed to as.data.table(),
    # which silently ignored it.
    combined <- na.omit(combined)
  }
  combined
}
|
1efad4257486052f980e969eb88613509c6c76c6
|
600a3941d3b09bd41bfecd3f128c25abc1beb651
|
/stream_agregation_shiny.R
|
d7708f05608d1a6c0940173f4362374756f518ca
|
[] |
no_license
|
ifranl00/BigData_Spark
|
bf4ed1700102bf2cb7592df8ccc75bc2d1310c9f
|
fb68d9eb29c1f07d9c4bb2bc40c95ba177eddc19
|
refs/heads/main
| 2023-04-03T03:10:35.711695
| 2021-03-30T14:51:10
| 2021-03-30T14:51:10
| 353,037,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
stream_agregation_shiny.R
|
# Streaming-aggregation demo: generate a test CSV stream with sparklyr,
# aggregate it with watermarking, and display live results in a Shiny table.
library(future)
library(sparklyr)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)

# Local Spark session; structured streaming features used below require
# the pinned Spark version.
sc <- spark_connect(master = "local", spark_version = "2.3.0")

# Start from clean input ("source") and output ("source-out") folders.
if (file.exists("source")) unlink("source", TRUE)
if (file.exists("source-out")) unlink("source-out", TRUE)

# Seed the source folder with one batch so the stream can be defined.
stream_generate_test(iterations = 1)
read_folder <- stream_read_csv(sc, "source")

# Per-timestamp summary (max/min/count) of the streamed column `x`.
process_stream <- read_folder %>%
  stream_watermark() %>%
  group_by(timestamp) %>%
  summarise(
    max_x = max(x, na.rm = TRUE),
    min_x = min(x, na.rm = TRUE),
    count = n()
  )

# Keep feeding the stream in the background while the gadget runs.
invisible(future(stream_generate_test(interval = 0.2, iterations = 100)))

library(shiny)
ui <- function(){
  tableOutput("table")
}
server <- function(input, output, session){
  # reactiveSpark re-renders the table whenever a new stream batch lands.
  ps <- reactiveSpark(process_stream)
  output$table <- renderTable({
    ps() %>%
      mutate(timestamp = as.character(timestamp))
  })
}
runGadget(ui, server)
|
785e5223036d4baed8605826f6a5610e10edede1
|
ab8f34c224fbe2a8a8e4dd1f3dc6c28bd0781e11
|
/Sample1.R
|
13890979fcf351fd7a18f5cb6df7bbf8bb300170
|
[] |
no_license
|
ghousiamk/RSample
|
819e0e956c8796b9fe57863febdb6778f86d0b85
|
e93e48191b5f50ef0b45ba7e645e5976ec03d2d7
|
refs/heads/master
| 2022-11-30T08:19:19.716404
| 2020-08-11T13:22:25
| 2020-08-11T13:22:25
| 286,749,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,438
|
r
|
Sample1.R
|
# Lab script: similarity / covariance / correlation / chi-square analysis
# of the Olympic athlete_events.csv dataset.
install.packages('nutshell')
install.packages('lsa')
install.packages('lattice')
install.packages('MASS')
install.packages('OneR')
install.packages('proxy')
library('nutshell')
library('lattice')
library('MASS')
library('OneR')
library('proxy')
library('lsa')
Lab3 <- read.csv("athlete_events.csv")
# NOTE(review): this creates a global variable named `na.rm`; it is never
# passed to cosine()/cov() below, which is why those calls return NA when
# the columns contain missing values.
na.rm = TRUE
#1. How did you decide which two or three attributes to pick for your analysis?
##After viewing the datset I thought based on Height the weigh get changes so I picked up Height and Weight
# NOTE(review): `c` and `d` shadow base::c(); prefer different names.
c <- c(Lab3$Height)
d <- c(Lab3$Weight)
res <- lsa::cosine(c,d)
res
#As i got NA in my cosine i found there is no similarities between Height and Weight
# NOTE(review): the NA is caused by missing Height/Weight values, not by a
# lack of similarity; consider cov(c, d, use = "complete.obs").
Covariance_result = cov(c,d)
Covariance_result
#2.What connections between the attributes do the results of your analysis reveal?
# Result shows They do not have any relation between these attributes
#3. Were there any surprises? Did you have to go back and pick different attributes and re-run your exercise?
#After viewing the datset I thought Games and seasons have similarities
a <- c(Lab3$Games)
b <- c(Lab3$Season)
res <- lsa::cosine(a,b)
res
#Here we found the similarities the cosine value is 0.8951453
# I am finiding covarience between Games and Season
Covariance_result = cov(a,b)
Covariance_result
# the covarience result is > Covariance_result = cov(a,b)
#> Covariance_result
#[1] 0.8275204
# A positive covariance means that the two variables at hand are positively related, and they move in the same direction.
# NOTE(review): as.numeric() on these columns yields factor codes, so the
# correlation below measures association between codes, not raw values.
attr1 <- as.numeric(Lab3$Season)
attr2 <- as.numeric(Lab3$Games)
attr1
attr2
#Correlation between Games and Season
cor(attr1,attr2,use="all.obs",method=c("pearson"))
plot(attr2, attr1, xlab = "Games", ylab = "Season")
#Chi Square between Games and Season
summary(Lab3$Games)
summary(Lab3$Season)
#Converting Games column to categorical field
Lab3$Gamesbins<- bin(Lab3$Games, nbins = 4, labels = c("low","Average","Above Average", "high"))
bins<-data.frame(Lab3$Gamesbins, Lab3$Games)
str(bins)
summary(bins)
bins_sorted<-bins[order(Lab3$Gamesbins),]
plot(Lab3$Gamesbins, main="Automatically generated bins with equal value thresholds ",xlab="Lab3$Gamesbins",ylab="Lab3$Games")
#Converting Season column to categorical field
Lab3$Seasonbins<- bin(Lab3$Season, nbins = 4, labels = c("low","Average","Above Average", "high"))
bins2<-data.frame(Lab3$Seasonbins, Lab3$Season)
str(bins2)
summary(bins2)
# NOTE(review): likely copy-paste bug -- this sorts `bins` (Games), not
# `bins2` (Season); confirm intent.
bins_sorted2<-bins[order(Lab3$Seasonbins),]
plot(Lab3$Seasonbins, main="Automatically generated bins with equal value thresholds ",xlab="Lab3$longitudebins",ylab="Lab3$longitude")
# Making use of bins in Chi Square
Chi_Square=table(Lab3$Gamesbins, Lab3$Seasonbins)
Chi_Square
chisq.test(Chi_Square)
# The p value is < 2.2e-16 which is very less it means they are dependent.
#4.After calculating the ChiSquare I got the p value < 2.2e-16 which is very much less than 0.05. So I can say Games and Season are dependent to each other.
#The correlation between Games and Season is 0.1575068 which is a positive value it means the both attributes are directly related.
#They relation between them we can define as Games and Season is if one attribute is increases then the other will also get increases
#Also the covariance is 0.8275204 whisch is a positive number that indicate a positive linear relationship between the variables
#With all of the above points I can say I can able to predict future values of Games and Season attribute
|
9db8069bed8820558d1896845fa6bcdb7d1b13c0
|
de425ac8ff4158a50530aaf4f64d7247cd1aa911
|
/plot1.R
|
4303154374afd8434c882ccb473655b471688023
|
[] |
no_license
|
stahengik/ExData_Plotting1
|
a2120f96d8ba3b069143db06078be44cbf4ec0a6
|
b195a6b89f5c3d9ed687e5279cd432cb7f39d95c
|
refs/heads/master
| 2020-04-09T06:31:29.941003
| 2018-12-03T01:39:50
| 2018-12-03T01:39:50
| 160,116,569
| 0
| 0
| null | 2018-12-03T01:41:53
| 2018-12-03T01:41:52
| null |
UTF-8
|
R
| false
| false
| 704
|
r
|
plot1.R
|
# Plot 1: histogram of household global active power for 2007-02-01/02.

# Interactively pick the raw power-consumption file; fields are
# semicolon-separated and "?" marks missing values.
power <- read.table(file.choose(), header = TRUE, sep = ";", na.strings = "?")

# Parse the date and time columns into proper classes.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Time <- strptime(power$Time, format = "%H:%M:%S")

# Restrict to the two days of interest.
target_days <- as.Date(c("2007-02-01", "2007-02-02"))
two_days <- subset(power, Date %in% target_days)
head(two_days)

# Draw the histogram on screen, then copy it to a 480x480 PNG.
hist(two_days$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.copy(png, file = "Plot1.png", width = 480, height = 480)
dev.off()
|
6ced21a7845b701b420afd1680e429b71c079318
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/GDINA/man/cjoint.Rd
|
ddaf3f71ab05302516a7543fd5bbde07e15bea0a
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 693
|
rd
|
cjoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExportedFuncs.R
\name{cjoint}
\alias{cjoint}
\title{Combine R Objects by Columns}
\usage{
cjoint(..., fill = NA)
}
\arguments{
\item{...}{vectors or matrices}
\item{fill}{a scalar used when these objects have different number of rows.}
}
\value{
a data frame
}
\description{
Combine a sequence of vector, matrix or data-frame arguments by columns. Vector is treated as a column matrix.
}
\examples{
cjoint(2,c(1,2,3,4),matrix(1:6,2,3))
cjoint(v1 = 2, v2 = c(3,2), v3 = matrix(1:6,3,2),
v4 = data.frame(c(3,4,5,6,7),rep("x",5)),fill = 99)
}
\seealso{
\code{\link[base]{cbind}}
}
|
a725ced1fc240bffe25e835df84c724602cc2e73
|
53addc1b00d28e56971a216215011549b066d08b
|
/cachematrix.R
|
ddadfdcba5485c92f0d553400fa0935e527b86c5
|
[] |
no_license
|
jac0byterebel/ProgrammingAssignment2
|
f0d5898f74406ec450ad58aee804fcccedb19ab2
|
bd1624cfff249989f86c07ae9657ff07b09f2c18
|
refs/heads/master
| 2020-03-21T05:01:52.677281
| 2018-06-21T10:23:57
| 2018-06-21T10:23:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,699
|
r
|
cachematrix.R
|
## R-programming Assignment 2 - Gavin Ewan
## The functions makeCacheMatrix and cacheSolve allow us to calculate the inverse of a square matrix.
## This inverse is stored in a special 'matrix' object that can be called whenever the inverse is required
## again, rather than carrying out the computationally slow process of calculating the inverse from scratch
## each time that it is required.
## For the purposes of this assignment, we assume that the matrix is always a square invertible matrix.
## The makeCacheMatrix function creates the special 'matrix' object that can cache its inverse so that the
## cacheSolve function does not need to perform the whole process of inverting the matrix from scratch every
## time.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of accessors: set()/get() for the matrix itself and
  ## setinverse()/getinverse() for the cached inverse.  Replacing the
  ## matrix via set() invalidates (clears) the cached inverse.
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The cacheSolve function will either calculate the inverse of the matrix from scratch, or will call
## the special 'matrix' object created by the makeCacheMatrix function to provide the pre-calculated
## inverse again.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped in 'x' (an object created by
  ## makeCacheMatrix).  The inverse is computed with solve() on first use
  ## and stored back into 'x'; later calls reuse the cached copy.
  cached <- x$getinverse()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data .... ")
  cached
}
|
2c062aa922e7cd5f12a8fa904d56a362c27e18e6
|
c89b84296140e73f9ae3d25af5b1dddb7abba1ad
|
/R/congress-ids.R
|
0d310e8855c7ab2717348aeb1eba20c43799f255
|
[] |
no_license
|
SlavicaJ/congress_tweets
|
0cbeb9e7531d0c3694e390cd6ac1d4bcb87a1b30
|
d46e22e8919c05599efb1638fee01c633190411a
|
refs/heads/master
| 2022-01-24T06:11:36.845746
| 2019-09-13T20:13:38
| 2019-09-13T20:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
congress-ids.R
|
## Convert a character vector of "key: value" strings into a one-row data
## frame (columns named by key, character values).  Entries that do not
## split into exactly one key and one value are ignored; returns NULL when
## nothing usable remains.
keypairs2df <- function(x) {
  pairs <- strsplit(x, ": ")
  pairs <- pairs[lengths(pairs) == 2]
  if (length(pairs) == 0) return(NULL)
  keys <- vapply(pairs, function(p) p[1], character(1))
  vals <- lapply(pairs, function(p) p[2])
  names(vals) <- keys
  as.data.frame(vals, stringsAsFactors = FALSE, row.names = NULL)
}
## link to data base
## NOTE(review): this fetches the legislators-social-media YAML from GitHub
## and hand-parses it as flat "key: value" text rather than using a YAML
## parser; the parse depends on the file's exact formatting.
legs <- httr::GET("https://github.com/unitedstates/congress-legislators/raw/master/legislators-social-media.yaml")
## read, parse, merge into single data frame, and save
## Pipeline: split into lines, drop full-line comments, re-join, split on
## "- id: " record markers, strip the "social: " prefix, split each record
## into "key: value" tokens, convert with keypairs2df(), and row-bind.
cngids <- httr::content(legs, as = "text", encoding = "UTF-8") %>%
  strsplit("\n") %>% .[[1]] %>%
  grep("^#", ., value = TRUE, invert = TRUE) %>%
  paste0(collapse = " ") %>%
  gsub("\\s{2,}", " ", .) %>%
  strsplit("- id: ") %>%
  .[[1]] %>%
  map(~ sub("social: ", "", .x)) %>%
  map(~ strsplit(.x, "(?<!:) ", perl = TRUE)[[1]]) %>%
  map(keypairs2df) %>%
  bind_rows() %>%
  as_tbl() %>%  # NOTE(review): `as_tbl` is not a tidyverse export -- presumably a local alias for as_tibble(); confirm it is defined elsewhere.
  mutate(thomas = gsub("'", "", thomas)) %>%
  select(thomas, govtrack, screen_name = twitter, user_id = twitter_id)
# Manual corrections for accounts whose upstream YAML entries are stale or
# wrongly cased.
cngids$screen_name[cngids$govtrack == "412438" & !is.na(cngids$govtrack)] <- "justinamash"
cngids$user_id[cngids$govtrack == "412438" & !is.na(cngids$govtrack)] <- "233842454"
cngids$screen_name[cngids$screen_name == "reppeteaguilar" & !is.na(cngids$screen_name)] <- "RepPeteAguilar"
cngids$screen_name[cngids$screen_name == "repdinatitus" & !is.na(cngids$screen_name)] <- "RepDinaTitus"
# NOTE(review): `second_accounts` and `cngall` are not defined in this
# file; they must come from a sibling script -- confirm load order.
second_accounts$sn1 <- cngall$screen_name[match(second_accounts$sn1, tolower(cngall$screen_name))]
second_accounts$sn2 <- cngall$screen_name[match(second_accounts$sn2, tolower(cngall$screen_name))]
second_accounts$sn2[second_accounts$sn2 == "repdinatitus"] <- "RepDinaTitus"
saveRDS(cngids, "data/congress-ids.rds")
|
81022f0080518a7ab8489f2bb76cb3598c5bf2e9
|
7659496f0f1a5e8632dd853c10480874bab7dab9
|
/R/plotSignal.R
|
9c4cce186aecfeb2b05750d555a8073fe97566b4
|
[] |
no_license
|
isglobal-brge/CNVassoc
|
77db8ae797b6e7b26d51aecee6ef1ae22294d44c
|
fc6806fb9bf583cf5321c3efd1ed6df92e290c89
|
refs/heads/master
| 2021-01-22T05:47:14.323741
| 2019-04-09T12:59:39
| 2019-04-09T12:59:39
| 92,494,611
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,368
|
r
|
plotSignal.R
|
# Plot a signal-intensity figure: the right panel is a rotated density of x,
# the left panel plots the individual intensities, optionally coloured by
# cut-off interval and split by case-control status.  When n > 0 the user
# places n cut-off points interactively with locator() and the figure is
# redrawn with them.  Returns the chosen cutoffs invisibly (NULL if n == 0).
#
# x            numeric vector of peak intensities, one per individual.
# my.colors    colours for the intervals defined by the cutoffs.
# ylab/xlab    axis labels (xlab[1] without case.control, xlab[2] with a
#              multi-level case.control).
# case.control optional phenotype vector; 2 levels -> point-shape split,
#              >2 levels -> x plotted against the phenotype.
# cex.leg      legend text size; dens.bw/dens.adjust are passed to density().
# n            number of cut-off points to place interactively.
plotSignal <- function(x, my.colors = c("black",
    "red", "blue"), ylab = "Peak Intensity",
    xlab = c("individuals", "Phenotype"),
    case.control = NULL, cex.leg = 0.6, dens.bw = "nrd0",
    dens.adjust = 1, n = 0, ...) {
    # foo() draws the whole figure; it closes over `cutoffs` in the
    # enclosing frame, so redrawing after locator() picks up the new values.
    foo <- function(...) {
        # Save and restore graphical state so the caller's device settings
        # are untouched.
        old.mfrow <- par("mfrow")
        old.mar <- par("mar")
        on.exit(par(mfrow = old.mfrow, mar = old.mar))
        mm <- matrix(c(2:1), nrow = 1, ncol = 2,
            byrow = TRUE)
        layout(mm, widths = c(1, 0.4))
        # Rotated density of x in the narrow right panel.
        den <- density(x, bw = dens.bw, adjust = dens.adjust)
        par(mar = c(5.1, 0, 4.1, 2.1))
        plot(den$y, den$x, type = "l", axes = FALSE,
            xlab = "density", ylab = "",
            col = "red")
        polygon(den$y, den$x, col = "red1")
        # Reuse the density panel's y-range so both panels align.
        ll <- par("usr")[3:4]
        par(mar = c(5.1, 4.1, 4.1, 2.1))
        if (!is.null(cutoffs)) {
            if (all(x <= max(cutoffs)))
                cat("WARNING! No data above maximum cutoff point\n")
            if (all(x >= min(cutoffs)))
                cat("WARNING! No data below minimum cutoff point\n")
            # Interval index of each point; drives the point colours.
            x.ord <- as.integer(cut(x, c(-Inf,
                cutoffs, Inf)))
        } else x.ord <- rep(1, length(x))
        if (is.null(case.control)) {
            plot(x, ylim = ll, yaxs = "i",
                xlab = xlab[1], type = "n",
                ylab = ylab, ...)
            points(x, col = my.colors[x.ord])
        } else {
            tt <- unique(case.control)
            if (length(tt) == 1) {
                stop("case.control must have 2 differents values at least")
            }
            if (length(tt) > 2) {
                # Quantitative / multi-level phenotype: plot x against it.
                plot(case.control, x, col = my.colors[x.ord],
                    ylim = ll, yaxs = "i",
                    xlab = xlab[2], ylab = ylab,
                    ...)
            }
            if (length(tt) == 2) {
                # Binary phenotype: first group plotted as filled circles,
                # second group as crosses, groups laid out side by side.
                plot(x, ylim = ll, yaxs = "i",
                    xlab = xlab[1], type = "n",
                    ylab = ylab, ...)
                o <- case.control == tt[1]
                # NOTE: this local `n` (group size) shadows the `n`
                # argument inside foo(); harmless here.
                n <- sum(o)
                points(seq_len(n), x[o], col = my.colors[x.ord[o]],
                    pch = 16)
                o <- case.control == tt[2]
                points(seq(n + 1, n + sum(o)),
                    x[o], col = my.colors[x.ord[o]],
                    pch = 4)
                legend("bottomright", as.character(tt),
                    pch = c(16, 4), title = "Case-control status",
                    bty = "n", horiz = TRUE,
                    cex = cex.leg)
            }
        }
        if (!is.null(cutoffs)) {
            # Show the cut-off lines and a legend of their values.
            cutoffs <- sort(cutoffs)
            legend("bottomleft", legend = round(rev(cutoffs),
                4), bty = "n", lty = rev(seq_along(cutoffs)),
                title = "Cut off points:",
                cex = cex.leg)
            abline(h = cutoffs, lty = seq_along(cutoffs))
        }
    }
    # First draw without cutoffs; optionally let the user place them and
    # redraw.
    cutoffs = NULL
    foo()
    if (n > 0) {
        cat("Place cut off point(s) using locator...\n")
        cutoffs <- locator(n)$y
        foo()
    }
    invisible(cutoffs)
}
|
4d4a420921336adeabc3d6637527b64c794e5c76
|
5e42a668e417fd55fe28ecee719c759016f963b9
|
/tests/testthat/test-nonportable_path_linter.R
|
9ba27f1c2ca2de950be41b75c95f720a7672a9de
|
[
"MIT"
] |
permissive
|
cordis-dev/lintr
|
2120e22820e8499ca3066fa911572fd89c49d300
|
cb694d5e4da927f56c88fa5d8972594a907be59a
|
refs/heads/main
| 2023-08-05T08:50:42.679421
| 2023-07-25T13:21:29
| 2023-07-25T13:21:29
| 225,583,354
| 0
| 0
|
NOASSERTION
| 2019-12-03T09:41:30
| 2019-12-03T09:41:30
| null |
UTF-8
|
R
| false
| false
| 1,682
|
r
|
test-nonportable_path_linter.R
|
test_that("nonportable_path_linter skips allowed usages", {
  linter <- nonportable_path_linter(lax = FALSE)

  # Ordinary strings that merely look path-ish must not lint, in either
  # quoting style.
  non_path_strings <- c(
    "foo",
    "https://cran.r-project.org/web/packages/lintr/",
    encodeString("hello\nthere!")
  )
  for (string in non_path_strings) {
    for (quoter in list(single_quote, double_quote)) {
      expect_lint(quoter(string), NULL, linter)
    }
  }

  # nested quotes
  expect_lint("\"'/foo'\"", NULL, linter)

  # Bare system roots are allowed too.
  for (string in c("/", "~", "c:", ".")) {
    for (quoter in list(single_quote, double_quote)) {
      expect_lint(quoter(string), NULL, linter)
    }
  }
})
test_that("nonportable_path_linter blocks disallowed usages", {
  linter <- nonportable_path_linter(lax = FALSE)
  lint_msg <- rex::escape("Use file.path() to construct portable file paths.")

  # Anything containing a slash or backslash separator must lint, in
  # either quoting style.
  slash_paths <- c(
    "~/",
    "c:/",
    encodeString("D:\\"),
    "../",
    "/foo",
    "foo/",
    "foo/bar",
    encodeString("foo\\bar"),
    "/as:df",
    encodeString("/a\nsdf")
  )
  for (slash_path in slash_paths) {
    for (quoter in list(single_quote, double_quote)) {
      expect_lint(quoter(slash_path), lint_msg, linter)
    }
  }
})
test_that("nonportable_path_linter's lax argument works", {
  # lax mode: no check for strings that are likely not paths (too short or with special characters)
  linter <- nonportable_path_linter(lax = TRUE)

  improbable_paths <- c(
    "/foo", encodeString("/a\nsdf/bar"), "/as:df/bar"
  )
  for (improbable_path in improbable_paths) {
    for (quoter in list(single_quote, double_quote)) {
      expect_lint(quoter(improbable_path), NULL, linter)
    }
  }
})
|
8f56115a407978cbf03f5caba1daad41bb215fa0
|
2f98d7200eb35cfff5a2060f1c1df3124630775d
|
/Coursera../HW3../rankall.R
|
39c4c2274d7fe363c04eea51530dd95f83f6ab18
|
[] |
no_license
|
yukiecho/R-Prog-Practice
|
d1e57e4d36dbcd7b3c878178adc96a1d6bea0a61
|
0796e830e7a9b51c3bdaa90aff9eea3c11171bb3
|
refs/heads/master
| 2016-09-05T23:45:30.009857
| 2015-04-16T20:03:14
| 2015-04-16T20:03:14
| 30,261,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,607
|
r
|
rankall.R
|
rankall <- function(outcome, num = "best") {
  ## Rank hospitals in every state by 30-day mortality for `outcome`.
  ##
  ## outcome: one of "heart attack", "heart failure", "pneumonia".
  ## num:     "best", "worst", or a numeric rank.  Hospitals are ranked by
  ##          ascending mortality rate, ties broken alphabetically by name.
  ## Returns a data frame with columns `hospital` and `state`, one row per
  ## state (hospital is NA when the state has fewer than `num` ranked
  ## hospitals).  Prints "invalid outcome" and returns NULL for an unknown
  ## outcome (original behaviour preserved).

  ## Read outcome data; "Not Available" becomes NA.
  outcomefile <- read.csv("outcome-of-care-measures.csv",
                          colClasses = "character",
                          na.strings = "Not Available")

  ## Validate the outcome and map it to its mortality-rate column.
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) {
    print("invalid outcome")
    return(invisible(NULL))
  }
  rate_col <- outcome_cols[[outcome]]

  ## One output row per state, ordered alphabetically.
  states <- sort(unique(outcomefile[, 7]))
  finaltable <- data.frame(hospital = rep(NA_character_, length(states)),
                           state = states,
                           stringsAsFactors = FALSE)

  ## Hospital name, state and numeric rate; drop rows with missing rates.
  ## (The original added a constant 10 to every rate; that offset never
  ## affected the ranking and has been removed.)
  newtable <- data.frame(Name = outcomefile[, 2],
                         State = outcomefile[, 7],
                         Rate = as.numeric(outcomefile[, rate_col]),
                         stringsAsFactors = FALSE)
  newtable <- newtable[complete.cases(newtable), ]

  for (n in seq_along(states)) {
    state_rows <- newtable[newtable$State == states[n], ]
    state_rows <- state_rows[order(state_rows$Rate, state_rows$Name), ]
    ## FIX: a numeric `num` previously left the rank index undefined and
    ## crashed; it is now used directly.  Out-of-range ranks select an
    ## all-NA row, yielding NA for that state.
    rank_index <- if (identical(num, "best")) {
      1
    } else if (identical(num, "worst")) {
      nrow(state_rows)
    } else {
      num
    }
    finaltable[n, "hospital"] <- as.character(state_rows[rank_index, "Name"])
  }
  finaltable
}
|
f899866fb5c378cb922a40aa4de7e9851d8fe76b
|
8c0969a8aba7988ece1c4b9c20ba1d1fd2f3a0d2
|
/R/sharks_with_obs_sim_EKF_1d_interp_joint.r
|
5b4a42575e7f3ef653428d663202d3e42375cee2
|
[] |
no_license
|
cran/animalEKF
|
300e36ebc91fa8143100dfe3179b76f71f5c0fbb
|
acf5cdd8c0d92e1000d03987afb66acc0c8925fb
|
refs/heads/master
| 2022-12-27T07:30:47.740285
| 2020-10-05T10:50:06
| 2020-10-05T10:50:06
| 301,801,402
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,254
|
r
|
sharks_with_obs_sim_EKF_1d_interp_joint.r
|
# One EKF propagation step for every shark with observations at regular
# step env_obj$i: per particle and behavioural state, draw variance /
# covariance parameters from their posteriors, propagate the particle mean
# (mk_prev) and covariance (Pk_prev) one regular interval ahead, then
# pre-compute the predicted observation mean (MuY), variance (SigY) and a
# Kalman-gain container (Kgain) at each interpolated observation time.
# All state is read from and written back into `env_obj` (an environment),
# so the function mutates it in place and returns NULL invisibly.
sharks_with_obs_sim_EKF_1d_interp_joint <- function(env_obj) {

    for (s in env_obj$sharks_with_obs) {

        #print("tis values")
        #print(s)
        #print(Xpart_history[1:i,c("time_in_state","lambda"),1,s])
        #print(shark_intervals[[ s ]])

        #sequence of regular steps for which we will update values
        #should be different for each shark
        #sequence of step since last observation
        env_obj$steps_to_resamp[[ s ]] <- min((max(env_obj$steps_to_resamp[[ s ]]) + 1), env_obj$i-1):(env_obj$i-1)

        # Draw per-particle process variances for each behavioural state
        # (inverse-gamma posterior, floored away from zero).
        for (k in 1:env_obj$nstates) {
            env_obj$sigma_draw[,k,s] <- keep_finite(pmax(1e-15, MCMCpack::rinvgamma(n=env_obj$npart, shape=env_obj$sigma_pars[, 2*k-1, s], scale=env_obj$sigma_pars[, 2*k, s])))
        }

        env_obj$Qt[2,2,,,s] <- t(env_obj$sigma_draw[,,s])

        for (p in 1:env_obj$npart) {

            #particle covariance, one for each state. only second part depends on state though
            env_obj$Qt[1, 1,, p,s] <- keep_finite(MCMCpack::riwish(v=env_obj$Particle_errvar[[ s ]][[ p ]]$dof, S=env_obj$Particle_errvar[[ s ]][[ p ]]$sig))

            #draw values for mu_alpha, mu_beta
            #here beta is the mean of the log-transformed angle, which we have to change to

            for (k in 1:env_obj$nstates) {

                #this is the error for xt| x_{t-1}: first part is the same for each state
                #Qt[2,2,k,p] <- sigma_draw[p,k]

                #draw block covariance matrices, same as D before, depends on the state of xt ->yt
                #this is R_k
                env_obj$XY_errvar_draw[1, 1, k, p, s] <- MCMCpack::riwish(v=env_obj$XY_errvar[[ s ]][[ p ]][[ k ]]$dof, S=env_obj$XY_errvar[[ s ]][[ p ]][[ k ]]$sig)

                #mu has diagonal matrix
                #on first step dont have any turn, assume you know it
                #draw logV and normalized turn for each state from current (or prior estimates)
                env_obj$logv_angle_mu_draw[p,,k,s] <- mvtnorm::rmvnorm(n=1, mean=env_obj$mu[k, "mu", p, s], sigma=as.matrix(env_obj$Qt[2, 2,k,p,s] * env_obj$mu[k, "V", p, s]))

            }

        }#loop over part and k

        env_obj$XY_errvar_draw[,,,, s] <- keep_finite(env_obj$XY_errvar_draw[,,,, s])

        #multiply by gradient since later will be variance of theta
        #print( logv_angle_mu_draw[,"turn",])
        #logv_angle_draw[,"logv",] <- normalize_logv(logv_angle_draw[,"logv",])

        # EKF time update: propagate each particle's mean and covariance one
        # regular interval ahead for every behavioural state.
        for (k in 1:env_obj$nstates) {
            for (p in 1:env_obj$npart) {

                env_obj$mk_prev[,k,p,s] <- keep_finite(env_obj$f(mk=env_obj$mk_actual[,p,s], new_logv=env_obj$logv_angle_mu_draw[p,"velocity",k,s], dtprev=env_obj$reg_dt)) #a_{t+1}
                Fx_tmp <- keep_finite(env_obj$Fx(mk=env_obj$mk_actual[,p,s], dtprev=env_obj$reg_dt))

                # nearPD() guards against numerically non-PD covariances.
                env_obj$Pk_prev[,,k,p,s] <- keep_finite(as.matrix(Matrix::nearPD(keep_finite(keep_finite(keep_finite(Fx_tmp %*% env_obj$Pk_actual[,,p,s]) %*% t(Fx_tmp)) + env_obj$Qt[,,k,p,s]), ensureSymmetry=TRUE)$mat)) #R_{t+1}
            }
        }

        #interpolation fraction for each shark
        shark_rows <- rownames(env_obj$ynext) == s

        env_obj$j_list[[ s ]][[ env_obj$i ]] <- pmax((env_obj$ynext[shark_rows, "date_as_sec"] - env_obj$t_reg[env_obj$i]) / env_obj$reg_dt, 1e-10)
        print(paste("j:", paste(round(env_obj$j_list[[ s ]][[ env_obj$i ]], digits=4), collapse=", ")))

        # Allocate per-observation containers for this shark: predicted
        # observation mean, variance, and Kalman gain.
        env_obj$MuY[[ s ]] <- array(NA, dim=c(env_obj$yobs_sharks[ s ], env_obj$nstates, env_obj$npart), dimnames=list(1:env_obj$yobs_sharks[ s ], env_obj$state_names, env_obj$pnames))
        #rep(list(rep(list(rep(list(matrix(NA, ncol=1, nrow=2)), yobs_sharks[ s ])), nstates)), npart)
        env_obj$SigY[[ s ]] <- array(NA, dim=c(env_obj$yobs_sharks[ s ], env_obj$nstates, env_obj$npart), dimnames=list(1:env_obj$yobs_sharks[ s ], env_obj$state_names, env_obj$pnames))
        #rep(list(rep(list(rep(list(diag(2)), yobs_sharks[ s ])), nstates)), npart)
        #Pk_prev_interp[[ s ]] <- array(NA, dim=c(2,2,yobs_sharks[ s ], nstates, npart), dimnames=list(1:2,1:2,1:yobs_sharks[ s ], state_names, pnames))
        #rep(list(rep(list(rep(list(diag(4)), yobs_sharks[ s ])), nstates)), npart)
        env_obj$Kgain[[ s ]] <- array(NA, dim=c(2, env_obj$yobs_sharks[ s ], env_obj$nstates, env_obj$npart), dimnames=list(1:2, 1:env_obj$yobs_sharks[ s ], env_obj$state_names, env_obj$pnames))
        #rep(list(rep(list(rep(list(matrix(0, ncol=2, nrow=4)), yobs_sharks[ s ])), nstates)), npart)
        #print(yobs_sharks)

        #prediction of y is the direct interpolation
        #print("Pk")
        #print(Pk_actual[,,,s])

        # j_tmp holds the successive interpolation gaps between observation
        # times within the regular interval.
        j_tmp <- diff(c(0, env_obj$j_list[[ s ]][[ env_obj$i ]]))

        for (p in 1:env_obj$npart) {
            for (k in 1:env_obj$nstates) {

                mk_tmp <- c(env_obj$mk_prev[,k,p,s])
                Pk_tmp <- env_obj$Pk_prev[,,k,p,s]

                for (y in 1:env_obj$yobs_sharks[ s ]) {

                    if (y > 1) {
                        #take previous x values and starting logv
                        #mk_tmp[1] <- env_obj$MuY[[ s ]][y-1,k,p]
                        #take previous x-y values and starting logv and bearing
                        # Chain from the previous interpolated prediction.
                        mk_tmp[1] <- env_obj$MuY[[ s ]][y-1,k,p]
                        Pk_tmp[1,1] <- env_obj$SigY[[ s ]][y-1,k,p]
                    }

                    env_obj$MuY[[ s ]][y,k,p] <- keep_finite(env_obj$h(mk=mk_tmp, dtprev=j_tmp[ y ] * env_obj$reg_dt))
                    Hx_tmp <- keep_finite(env_obj$Hx(mk=env_obj$mk_prev[,k,p,s], dtprev=j_tmp[ y ] * env_obj$reg_dt))

                    # env_obj$SigY[[ s ]][y,k,p] <- keep_finite(as.matrix(Matrix::nearPD(keep_finite(Hx_tmp %*% env_obj$Pk_prev[,,k,p,s] %*% t(Hx_tmp) + (j_tmp[ y ] * env_obj$XY_errvar_draw[,,k,p,s])), ensureSymmetry=TRUE)$mat))
                    env_obj$SigY[[ s ]][y,k,p] <- keep_finite(as.matrix(Matrix::nearPD(keep_finite(keep_finite(keep_finite(Hx_tmp %*% Pk_tmp) %*% t(Hx_tmp)) + (j_tmp[ y ] * env_obj$XY_errvar_draw[,,k,p,s])), ensureSymmetry=TRUE)$mat))

                    # Fx_tmp <- Fx(mk=mk_prev[,k,p,s], dtprev=j_list[[ s ]][[ i ]][ y ]*reg_dt)
                    # Pk_prev_interp[[ s ]][,,y,k,p] <- as.matrix(Matrix::nearPD(Fx_tmp%*%Pk_actual[,,p,s]%*%t(Fx_tmp) + Qt[,,k,p], ensureSymmetry=TRUE)$mat) #R_{t+1}

                    # MuY[[ s ]][y,k,p] <- keep_finite(h(mk=Xpart[p,,k,"curr",s], dtprev=j_list[[ s ]][[ i ]][ y ]*reg_dt))
                    # Hx_tmp <- Hx(mk=Xpart[p,,k,"curr",s], dtprev=j_list[[ s ]][[ i ]][ y ]*reg_dt)
                    # SigY[[ s ]][y,k,p] <- keep_finite(as.matrix(Matrix::nearPD(Hx_tmp%*%Pk_prev_interp[[ s ]][,,y,k,p]%*%t(Hx_tmp) + (j_list[[ s ]][[ i ]][ y ]^2)*XY_errvar_draw[,,k,p,s], ensureSymmetry=TRUE)$mat))

                }
            }
        }
    }
    invisible(NULL)
}
|
7b23a85f76f10c9777bf9431963c7c704d326039
|
68f63ff26f6ba800eaf445f844cee713a7e3cf7d
|
/R/model/estimate-policy-effect.R
|
f1d305416b1e4ecb05374eb072d881a90c5bdb31
|
[
"MIT"
] |
permissive
|
zackbh/covid19-vsl
|
343fb8c7cd6a4501cb7743d08d78e99b5847ecaa
|
e6d9feeaad1fff5253a9f45dbe0c101329ddaf04
|
refs/heads/master
| 2021-05-21T18:18:14.448798
| 2020-09-03T13:39:11
| 2020-09-03T13:39:11
| 252,749,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,125
|
r
|
estimate-policy-effect.R
|
# Relationship between policies and mobility
#
# Exploratory analysis: joins the Oxford stringency index to Google mobility
# reports (via covdata) plus World Bank country metadata and population, then
# visualizes and models how policy stringency relates to mobility changes.
library(dplyr)
library(data.table)
library(dtplyr)
library(covdata)
library(ggplot2)  # ggplot()/aes()/... are used below but were never attached

# Policy stringency index, keyed for fast data.table joins.
stringency <- data.table::setDT(readRDS(here::here("data/stringency-index.RDS")))
setkey(stringency, country_name, date)

# World Bank metadata (income group, region) and country populations.
wb <- readRDS(here::here("data/wb-data.RDS"))
pop <- readr::read_csv(here::here("data/population.csv"))

# Google mobility reports: drop the US-only FIPS column and add ISO3 codes so
# we can join on the same key as the World Bank tables.
gm <- data.table::setDT(covdata::google_mobility)
gm[, census_fips_code := NULL]  # dropped the no-op chained `[, ]` that followed
gm[, iso3c := countrycode::countrycode(country_region_code, origin = "iso2c", destination = "iso3c")]
setkey(gm, country_region, date)

df <- gm %>%
  left_join(., stringency, by = c("country_region" = "country_name", "date")) %>%
  left_join(., wb, by = c("iso3c")) %>%
  left_join(., pop, by = c("iso3c"))

cc <- c("United States", "India", "Bangladesh")

# NOTE(review): `country` is not a gm column (gm uses `country_region`);
# presumably a `country` column arrives via the wb join -- verify, or use
# `country_region` / the `cc` vector defined above.
ggplot(df[type == "workplaces" & country %in% c("United States", "India")],
       aes(x = date)) +
  geom_point(aes(y = stringency_index)) +
  geom_point(aes(y = pct_diff))

# Removed stray line `ggplot[df[]]`: subsetting a function with `[` always
# errors ("object of type 'closure' is not subsettable") -- it was dead code.

ggplot(df[!is.na(income_group) & type == "workplaces" & pct_diff < 10],
       aes(y = pct_diff, x = stringency_index, color = income_group, group = income_group)) +
  stat_summary() +
  # was a dangling statement with no `+`, so this layer was silently dropped
  geom_point(position = "jitter", alpha = .3)

# GAM: smooth effect of stringency on workplace mobility, by income group.
mod <- mgcv::gam(pct_diff ~ s(stringency_index, by = income_group, id = 1) + income_group,
                 data = df[type == "workplaces"])
plot(mod, shade = TRUE, pages = 1, scale = 0)

# Country-by-day averages for a small set of mobility types.
country_level <- df[!is.na(stringency_index)][
  !is.na(income_group)][
  type %in% c("residential", "transit", "workplaces")][
  , .(pct_diff = mean(pct_diff, na.rm = TRUE),
      stringency_index = mean(stringency_index, na.rm = TRUE),
      income_group = unique(income_group)), by = .(country_region, date, type)]

ggplot(country_level[country_region %in% cc],
       aes(x = date, color = type)) +
  geom_point(aes(y = pct_diff/100), alpha = .5) +
  geom_step(aes(y = stringency_index/100), color = "black") +
  stat_smooth(aes(y = pct_diff/100), se = FALSE) +
  scale_y_continuous(labels = scales::percent_format()) +
  scale_color_viridis_d() +
  facet_wrap(~country_region) +
  theme(axis.title.x = element_blank(), legend.position = "bottom", legend.title = element_blank())

# What was the effect of policies on mobility?
# NOTE(review): `pop` here is the whole data frame read above, not a numeric
# weight vector -- wtd.mean() likely needs the joined population column.
df %>%
  #filter(type == "workplaces") %>%
  group_by(country_region, date, type) %>%
  summarize(pct_diff = mean(pct_diff, na.rm = TRUE),
            stringency_index = mean(stringency_index, na.rm = TRUE),
            income_group = unique(income_group)) %>% ungroup() %>%
  group_by(income_group, date, type) %>%
  summarize(pct_diff = Hmisc::wtd.mean(pct_diff, pop, na.rm = TRUE),
            stringency_index = Hmisc::wtd.mean(stringency_index, pop, na.rm = TRUE)) %>%
  ggplot(., aes(x = date, color = income_group)) +
  geom_line(aes(y = pct_diff/100)) +
  geom_line(aes(y = stringency_index/100)) +
  facet_wrap(~type)

# NOTE(review): `foo` is never defined in this script -- it is presumably the
# per-country summary above saved under another name; confirm before running.
ggplot(foo) +
  geom_histogram(aes(x = stringency_index, y = ..density..)) +
  facet_wrap(~income_group)

# Mixed model: stringency slope varying by income group, intercept by region.
# NOTE(review): `mod` is reassigned here, discarding the GAM fit above, and
# depends on the undefined `foo` -- verify intent.
mod <- lme4::lmer(pct_diff ~ date + stringency_index + (stringency_index | income_group) + (1|region), data = foo[type == "transit"])
|
0f90e0beb4e5b59a8af8e5a6c54db074f0d7311b
|
ed46eb7778524102331ea2ee4a3165eacaf01b5e
|
/man/plot_bb_fits.Rd
|
fbc98aac642bd2eae5329389129ea518d62ea6ac
|
[
"Apache-2.0"
] |
permissive
|
Gibbons-Lab/mbtools
|
199e2ff4c87a2c3c194372b6b56ab09c3212f312
|
3b7ae9f124c9b9bb7579688243b0b264a040e35f
|
refs/heads/master
| 2023-06-08T03:26:25.971849
| 2023-05-25T19:10:13
| 2023-05-25T19:10:13
| 146,012,432
| 20
| 9
|
Apache-2.0
| 2018-11-05T22:48:09
| 2018-08-24T16:04:15
|
R
|
UTF-8
|
R
| false
| true
| 633
|
rd
|
plot_bb_fits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_analysis.R
\name{plot_bb_fits}
\alias{plot_bb_fits}
\title{Plot the Betabinomial fits for all taxa in a phyloseq object}
\usage{
plot_bb_fits(ps, bins = NULL)
}
\arguments{
\item{ps}{A phyloseq object containing data for a reference experiment.
This should assume absence of any differential effect (all variation is
random).}
\item{bins}{number of bins to use for histograms. Defaults to Sturges rule.}
}
\value{
A ggplot2 plot object showing the fits for each taxon.
}
\description{
Plot the Betabinomial fits for all taxa in a phyloseq object
}
|
8fc86e602ca27a5434b644067266e3e68046bcc3
|
0073cb8d74a83eba9a621ca158d914867435e6ff
|
/hitting_and_pitching_app/app.R
|
976bd6e1aa2971263c8c4bb78865c6fcde11d5a2
|
[] |
no_license
|
MikeCalabro/MLB-hitting-and-pitching
|
baf712d1153c94c56922627267e4b3aed66314db
|
89617caf95981368f23b4e6c7af1476b50a5e028
|
refs/heads/master
| 2022-12-03T21:22:42.445130
| 2020-08-29T15:43:41
| 2020-08-29T15:43:41
| 289,092,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72,019
|
r
|
app.R
|
library(shiny) # Allows for the interactive elements
library(tidyverse) # Data manipulation and visualization
library(baseballr) # For obtaining the statcast data
library(png) # To put that plate image on the screen
library(shinythemes) # For theme selection
library(tidybayes) # Distribution Visualizations
# data read in from popular_players rScript
popular_players <- read_rds("popular")
# Everything encased in this UI defines the layout of the app
# UI definition: a fluidPage with a navbar of five tabs (Main, Batted Balls,
# All Pitches, Pitch Selector, and two visualization tabs). All widget inputs
# referenced in the server (input$...) are declared here.
ui <- fluidPage(

  # App theme(color/text scheme) and App Title
  theme = shinytheme("yeti"),
  titlePanel("Hitting Statistics And Pitching Strategies: A Shiny App by Michael Calabro"),

  # navbarPage creates tabs at the top of the app to switch between
  navbarPage("Navbar",

    # Everything within this tabPanel function is shown when the "Main" tab is clicked
    # (landing page: player selection, ID lookup, and usage instructions)
    tabPanel("Main",
      # Every column() function creates a column on the screen, and the number associated column(3,) is its width
      # An entire screen fits 12 sections worth of column, so this column takes up 3/12 (.25) of the screen
      column(3,
        column(12,
          # style.. allows me to set the background color for the column
          style = "background-color:#E3E3E3;",
          column(12,
            h3(strong("Data Selection"))
          ),
          column(6,
            textInput("last",
                      "Last Name",
                      value = "Devers",
                      placeholder = "ex: Devers" )
          ),
          column(6,
            numericInput("mlbamid",
                         "MLBAM ID:",
                         value = 646240
            )
          ),
          column(12,
            actionButton("go", "Select!"),
            br(),
            h6("Note: The data begins to load when you"),
            h6("navigate out of the main tab."),
            h6("Loading may take up to 10 seconds")
          ),
          br()
        ),
        column(12,
          h3(strong("Player ID Lookup")),
          tabsetPanel(
            tabPanel("Popular Players",
              br(),
              tableOutput("popTab")
            ),
            tabPanel("Search",
              column(6,
                textInput("first_name",
                          "First Name",
                          value = "Rafael",
                          placeholder = "ex: Rafael")
              ),
              column(6,
                textInput("last_name",
                          "Last Name",
                          value = "Devers",
                          placeholder = "ex: Devers")
              ),
              tableOutput("batterTable"),
              br()
            )
          )
        )
      ),
      column(1),
      # Static instructions column -- no inputs/outputs here except links.
      column(6,
        h4(strong("Welcome to Hitting Stats And Pitching Strats!")),
        br(),
        tags$u(h4("What sort of data can I view?")),
        h5("Each tab provides a unique visualization of MLB pitch-by-pitch data for a selected batter:"),
        tags$ul(h5("Batted Balls - displays Strike Zone and Launch Chart data for every ball hit in play")),
        tags$ul(h5("All Pitches - Displays a Heat Map of which zone pitchers throw to with selected pitches")),
        tags$ul(h5("Pitch Selector - displays the most effective pitches to throw when facing this batter")),
        br(),
        tags$u(h4("How do I select my Batter?")),
        h5("To select your batter, first search for a CURRENT PLAYER's name in the sidebar."),
        h5("When you find your player, input his Last Name & MLBAM ID into the 'Data Selection' Input."),
        h5(strong("Press 'Select!' and switch tabs to begin you visualization adventure!")),
        br(),
        tags$u(h4("Where is this data from?")),
        h5("All data is downloaded from the MLB's Statcast Search using Bill Petti's baseballr package."),
        a("Check out the Statcast Search Website!", href="https://baseballsavant.mlb.com/statcast_search"),
        br(),
        a("Check out the Statcast CSV documentation!", href="https://baseballsavant.mlb.com/csv-docs"),
        br(),
        a("And check out the baseballr GitHub repository!", href="https://github.com/BillPetti/baseballr"),
        br(),
        br(),
        tags$u(h4("Anything Else I Need to Know?")),
        h5("This app only works for active players, and is best viewed in full screen on a computer or laptop."),
        h5("And that is all! Enjoy the charts, and GO RED SOX!")
      )
    ),

    # Batted Balls tab: filters + strike-zone plot of balls hit in play.
    tabPanel("Batted Balls",
      column(3,
        style = "background-color:#E3E3E3;",
        textOutput("bbTitle"),
        # Inputs like selectInput, sliderInput.. create the widgets which affect the plots/tables
        # The first argument is a label that can be referenced in the server
        # The second argument is the label that will be shown in the App
        # The third argument is a list of all the possible options for the user to pick from
        # selected.. defines what is originaly chosen when the user opens the app
        selectInput("bbgeom",
                    "Select Geom Display",
                    c("Point: Color = Event" = "Pe",
                      "Point: Color = Ball Flight" = "Pbf",
                      "Point: Color = Pitch Type" = "Ppt",
                      "Point: Color = Launch Speed" = "Pls",
                      "Point: Color = Launch Angle" = "Pla"
                    ),
                    selected = "Pe"),
        # Each Input widget works in similar ways, with slightly different features
        # With this select input, for example, I can make multiple selections
        selectInput("bbpitches",
                    "Pitches to include:",
                    c("2-Seam Fastball",
                      "4-Seam Fastball",
                      "Changeup",
                      "Curveball",
                      "Cutter",
                      "Sinker",
                      "Slider"),
                    selected = c("2-Seam Fastball",
                                 "4-Seam Fastball",
                                 "Changeup",
                                 "Curveball",
                                 "Cutter",
                                 "Sinker",
                                 "Slider"),
                    multiple = TRUE),
        selectInput("bbflights",
                    "Ball Flights to include:",
                    c("Pop-up" = "popup",
                      "Grounder" = "ground_ball",
                      "Fly Ball" = "fly_ball",
                      "Line Drive" = "line_drive"),
                    selected = c("popup",
                                 "ground_ball",
                                 "fly_ball",
                                 "line_drive"),
                    multiple = TRUE),
        selectInput("bbevents",
                    "Events to include:",
                    c("Single" = "single",
                      "Double" = "double",
                      "Triple" = "triple",
                      "Home Run" = "home_run",
                      "Field Out" = "field_out"),
                    selected = c("single",
                                 "double",
                                 "home_run"),
                    multiple = TRUE),
        # This column is nested within the column above
        # Every nested column takes up x/12 of the space it is nested in
        # So this column(6,) takes up 6/12 (.5) of the width of the column it is nested in
        column(6,
          sliderInput("bbballs",
                      "Balls in the Count",
                      min = 0,
                      max = 3,
                      value = c(0, 3)),
          sliderInput("bbstrikes",
                      "Strikes in the Count",
                      min = 0,
                      max = 2,
                      value = c(0, 2))
        ),
        column(6,
          sliderInput("bbAngle",
                      "Lauch Angle Range",
                      min = -40,
                      max = 70,
                      value = c(-40, 70)),
          sliderInput("bbSpeed",
                      "Launch Speed Range",
                      min = 0,
                      max = 120,
                      value = c(0, 120),
                      step = 0.1)
        )
      ),
      # This entirely new column(4,) makes up the middle 4/12 of the screen (essentially the 'middle')
      column(4,
        # "bbplot" is defined in the server below as output$bbplot
        # if it were a table, it would be called using tableOutput
        plotOutput("bbPlot", height = "530px"),
        # plate.png is an image i screenshotted on my comp from the internet.. a home plate
        img(src="plate.png", width = "68%", height = "50px")
      ),
      # This is the final column on my first screen (Takes up the final 5/12 of the screen)
      column(5,
        # tabsetPanel() does the same thing as navbar, except it only cchanges the column on the screen it's in
        tabsetPanel(
          # Here, each tabPanel switches what you see in this column when you press the corresponding tab
          tabPanel("Launch Chart",
            column(2,
              # Probably a better way to get this image lower, but this works for now
              br(), br(), br(), br(), br(), br(), br(), br(), br(),
              # An image of a batter swinging, sneaky very important for the app
              img(src="swing.png", height = "80px", width = "80px")),
            column(10,
              plotOutput("launchPlot", height = "350px")
            ),
            # Since the columns above filled up the column they're encompassed in, this goes underneath
            tableOutput("bbLaunchTable")
          ),
          # When the tab 'Tables' is pressed, the screen defined below appears
          tabPanel("Tables",
            column(5,
              br(),
              tableOutput("bbZoneTable")
            ),
            column(7,
              br(),
              tableOutput("bbPitchTable"),
              tableOutput("bbFlightTable"),
              tableOutput("bbEventTable")
            )
          )
        )
      )
    ),

    # New tabPanel results in an entirely new screen when "All Pitches" is clicked
    tabPanel("All Pitches",
      column(3,
        style = "background-color:#E3E3E3;",
        textOutput("apTitle"),
        selectInput("geom",
                    "Select Geom Display",
                    c("Point: Color = Pitch Type" = "Ppt",
                      "Point: Color = Pitch Result" = "Ppr",
                      "Bin: Color = Pitch Count" = "Bpc"),
                    selected = "Bpc"),
        selectInput("pitches",
                    "Pitches to include:",
                    c("2-Seam Fastball",
                      "4-Seam Fastball",
                      "Changeup",
                      "Curveball",
                      "Cutter",
                      "Sinker",
                      "Slider"),
                    selected = c("4-Seam Fastball",
                                 "Changeup",
                                 "Curveball",
                                 "Slider"),
                    multiple = TRUE),
        selectInput("results",
                    "Pitch Results to include:",
                    c("Ball" = "ball",
                      "Called Strike" = "called_strike",
                      "Foul" = "foul",
                      "Hit In Play, Out" = "in_play_out",
                      "Hit" = "hit",
                      "Swinging Strike" = "swinging_strike"),
                    selected = c("ball",
                                 "called_strike"),
                    multiple = TRUE),
        sliderInput("balls",
                    "Balls in the Count",
                    min = 0,
                    max = 3,
                    value = c(0, 3)),
        sliderInput("strikes",
                    "Strikes in the Count",
                    min = 0,
                    max = 2,
                    value = c(0, 2)
        ),
        br(),
        checkboxInput("nums",
                      "Show Zone Numbers:",
                      value = TRUE)
      ),
      column(4,
        plotOutput("allPlot", height = "530px"),
        img(src="plate.png", width = "70%", height = "50px")
      ),
      column(5,
        tabsetPanel(
          tabPanel("Pie Charts",
            column(6,
              br(), br(), br(), br(),
              plotOutput("Pie1", height = "280px")
            ),
            column(6,
              plotOutput("Pie2", height = "240px"),
              plotOutput("Pie3", height = "240px")
            )
          ),
          tabPanel("Tables",
            column(5,
              br(),
              tableOutput("zoneTable")
            ),
            column(7,
              br(),
              tableOutput("typeTable"),
              br(),
              tableOutput("resultTable")
            )
          )
        )
      )
    ),

    # Pitch Selector tab: pitcher-perspective inputs feeding the psTable output.
    tabPanel("Pitch Selector",
      column(3,
        style = "background-color:#E3E3E3;",
        numericInput("psSpeed",
                     "What Is Your Max Fastball Speed?",
                     value = 98,
                     min = 88,
                     max = 105,
                     step = 1
        ),
        selectInput("psPitch",
                    "What Pitches Can You Throw?",
                    c("2-Seam Fastball",
                      "4-Seam Fastball",
                      "Changeup",
                      "Curveball",
                      "Cutter",
                      "Sinker",
                      "Slider"),
                    selected = c("4-Seam Fastball",
                                 "Changeup",
                                 "Curveball",
                                 "Slider"),
                    multiple = TRUE),
        column(6,
          sliderInput("psBalls",
                      "Balls In The Count:",
                      value = c(0,3),
                      min = 0,
                      max = 3,
                      step = 1)
        ),
        column(6,
          sliderInput("psStrikes",
                      "Strikes In The Count:",
                      value = c(0,2),
                      min = 0,
                      max = 2,
                      step = 1)
        ),
        selectInput("psBallStrike",
                    "Do You Want To Throw A Strike?",
                    c("Yes",
                      "I Don't Care"),
                    selected = "Yes"),
        selectInput("psGoal",
                    "Your Goal - Minimize the chance of The Batter...",
                    c("Swinging at the Pitch" = "swing",
                      "Hitting the Ball In Play" = "hit_in_play",
                      "Hitting a Home Run" = "homerun",
                      "Hitting the Ball Hard" = "hit_hard",
                      "Hitting a Line Drive" = "line_drive"),
                    selected = "hit_hard"),
        numericInput("psOptions",
                     "How Many Pitch Options Do You Want To Consider?",
                     value = 8,
                     min = 1,
                     max = 20,
                     step = 1),
        numericInput("psObs",
                     "Minimum Number Of Observations To Be Included",
                     value = 6,
                     min = 1,
                     max = 50,
                     step = 1)
      ),
      column(1),
      column(8,
        column(3),
        column(6,
          tags$u(strong(textOutput("psTitle")))),
        column(3),
        br(),
        br(),
        tableOutput("psTable")
      )
    ),

    # Launch Speed visualization tab: strike zone, distributions, regression.
    tabPanel("Launch Speed Viz",
      tabsetPanel(
        tabPanel("Strike Zone",
          column(8, align = "right",
            h4(strong(textOutput("lsTabTitle")))
          ),
          plotOutput("lsTabPlot", height = "580px")
        ),
        tabPanel("Distribution Comparison",
          column(8,
            plotOutput("lsTabDist", height = "550px")
          ),
          column(4,
            plotOutput("blankZone", height = "490px"),
            img(src="plate.png", width = "83%", height = "50px")
          )
        ),
        tabPanel("Regression",
          br(),
          h4("Linear Regression of Launch Speed on Strike Zone Location"),
          br(),
          tableOutput("lsReg"),
          br(),
          textOutput("lsRSQ")
        )
      )
    ),

    # Expected wOBA visualization tab: mirrors the Launch Speed Viz layout.
    tabPanel("Expected wOBA Viz",
      tabsetPanel(
        tabPanel("Strike Zone",
          column(8, align = "right",
            h4(strong(textOutput("xwOBATabTitle")))
          ),
          plotOutput("xwOBATabPlot", height = "580px")
        ),
        tabPanel("Distribution Comparison",
          column(8,
            plotOutput("xwOBATabDist", height = "550px")
          ),
          column(4,
            plotOutput("blankZone2", height = "490px"),
            img(src="plate.png", width = "83%", height = "50px")
          )
        ),
        tabPanel("Regression",
          br(),
          h4("Linear Regression of Expected wOBA on Strike Zone Location"),
          br(),
          tableOutput("xwOBAReg"),
          br(),
          textOutput("xwOBARSQ")
        )
      )
    )
  )
)
# The server is where all the data manipulation and plot making takes place
# In here I create different plots and tables which can react to inputs from the UI's widgets
server <- function(input, output) {
# First season to pull Statcast data for the selected player.
# Statcast pitch-by-pitch data only exists from 2015 onward, so earlier
# debut years are clamped up to 2015.
start_year <- reactive({
  year <- deframe(playerid_lookup(input$last) %>%
                    filter(mlbam_id == input$mlbamid) %>%
                    select(mlb_played_first))
  # Guard: the lookup can return zero rows (mismatched name/ID combo) or an
  # NA debut year; either would make `year < 2015` error out and crash the
  # app. Fall back to 2015 in those cases as well.
  if (length(year) == 0 || is.na(year) || year < 2015) {
    year <- 2015
  }
  return(year)
})
# Download pitch-by-pitch Statcast data for the selected batter; every plot
# and table in the app reads from this one reactive.
# eventReactive(input$go, ...) means nothing is fetched until "Select!" is
# pressed, and the scrape itself is lazy: it only fires the first time a
# consumer (plot/table) calls batter_data() -- hence the "switch tabs" note
# shown in the UI.
batter_data <- eventReactive(input$go, {
  withProgress(
    # One Statcast scrape per season (April-October), stacked into a single
    # data frame. NOTE(review): the end season is hard-coded to 2020;
    # consider deriving it from Sys.Date() so new seasons are picked up.
    purrr::map_df((start_year():2020), function(x){
      scrape_statcast_savant_batter(start_date = glue::glue("{x}-04-01"),
                                    end_date = glue::glue("{x}-10-30"),
                                    batterid = input$mlbamid)
    }),
    message = sprintf('Data for %s is loading...', input$last),
    detail = 'Trust the process... and Go Sox', value = 0.98)
})
# Immediate user feedback when "Select!" is pressed (the download itself
# does not start until another tab requests the data -- see above).
observeEvent(input$go, {
  showNotification("Great Choice! When you switch tabs, the data will begin to load!")
})
# Each output$... creates an item (plot/table/text) that can be called in the UI
# When you see plotOutput("allPlot") in the UI, it calls everything encased in this renderPlot() function
# This particular plot is the strike-zone chart on the "All Pitches" page:
# every pitch (after filtering) plotted at its plate location, either as
# points or as a 2-D heat map, with the batter's personal strike zone drawn
# from his average top/bottom zone boundaries.
output$allPlot <- renderPlot({
  # Batter-specific strike-zone boundaries (averaged over all pitches).
  bot <- deframe(batter_data() %>%
                   summarise(mean(sz_bot, na.rm = TRUE)))
  top <- deframe(batter_data() %>%
                   summarise(mean(sz_top, na.rm = TRUE)))
  # This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
  # (the 1-9 in-zone cells plus the 11-14 out-of-zone quadrants Statcast uses).
  num_x <- c(-.66, 0, .66,
             -.66, 0, .66,
             -.66, 0, .66,
             -1.3, 1.3, -1.3, 1.3)
  num_y <- c(top - 0.3, top - 0.3, top - 0.3,
             top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
             bot + 0.3, bot + 0.3, bot + 0.3,
             top + 0.2, top + 0.2, bot - 0.2, bot - 0.2)
  val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14)
  num_tab <- data.frame(num_x, num_y, val)
  # Using the data that I downloaded from Statcast using the baseballr package
  batter_data() %>%
    # Renaming some descriptions so they aren't as big on screen (Long names in the key squish the strikezone)
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    # All of this filtering means that only the data chosen from the widgets gets displayed on screen
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2])) %>%
    # Now the plot can be defined
    # The x and y arguments within ggplot define the location of all the points that will be plotted
    ggplot(aes(x = plate_x, y = plate_z)) +
    # Each if statement in this renderPlot defines how the plot should react if certain inputs are altered
    # This one says that the heat map bins should be shown on screen if that selection is made
    # (braces let a conditional layer be spliced into the + chain; a FALSE
    # branch yields NULL, which ggplot silently ignores)
    {
      if(input$geom == "Bpc"){
        geom_bin2d(binwidth = c(0.33, 0.2323))} # These numbers make the bins fit nicely in the strikezone
    } +
    # Each of these geom_segments define the strike zone outline that is shown on screen
    # (drawn after the bins so the grid stays visible on top of the heat map)
    geom_segment(aes(x = -0.333, y = mean(sz_top), xend = -0.333, yend = mean(sz_bot)), color = "gray") +
    geom_segment(aes(x = 0.333, y = mean(sz_top), xend = 0.333, yend = mean(sz_bot)), color = "gray") +
    geom_segment(aes(x = -1, y = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot),
                     xend = 1, yend = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot)), color = "gray") +
    geom_segment(aes(x = -1, y = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3),
                     xend = 1, yend = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3)), color = "gray") +
    geom_segment(aes(x = -1, y = mean(sz_top), xend = 1, yend = mean(sz_top)), size = 1.5) +
    geom_segment(aes(x = -1, y = mean(sz_bot), xend = 1, yend = mean(sz_bot)), size = 1.5) +
    geom_segment(aes(x = -1, y = mean(sz_top), xend = -1, yend = mean(sz_bot)), size = 1.5) +
    geom_segment(aes(x = 1, y = mean(sz_top), xend = 1, yend = mean(sz_bot)), size = 1.5) +
    # These ifs are used to switch between the color of the points shown on screen (Could be simpler, should fix)
    {
      if(input$geom == "Ppt"){
        geom_point(aes(fill = pitch_name), shape = 21, size = 3, color = "black", stroke = 0.5)
      }else if(input$geom == "Ppr"){
        geom_point(aes(fill = description), shape = 21, size = 3, color = "black", stroke = 0.5)
      }
    } +
    # If the checkbox for showing numbers is pressed, this geom_text creates thos numbers
    {
      if(input$nums){
        geom_text(data = num_tab, aes(x = num_x, y = num_y, label = val), size = 8.5,color = "black")
      }
    } +
    # xlim and ylim define the size of the plot
    ylim(bot - 0.6, top + 0.6) +
    xlim(-1.67, 1.67) +
    # All of these element_blank()'s make the canvas blank, unlike base ggplot which has axis/grid defaults
    theme(axis.ticks = element_blank(),
          axis.text = element_blank(),
          axis.title = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank())
})
# Frequency table of pitch types for the currently-filtered pitch set:
# one row per pitch_name with its count and share of all filtered pitches,
# sorted by count descending. (The next two tables differ only in the
# column they tally.)
output$typeTable <- renderTable({
  # Apply the same relabeling and widget filters as the strike-zone plot.
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Denominator for the share column, hoisted out of the pipeline.
  n_total <- nrow(filtered)
  filtered %>%
    count(pitch_name, name = "count") %>%
    mutate(share = count / n_total) %>%
    arrange(desc(count))
},
striped = TRUE,   # zebra striping
bordered = TRUE,  # table border
spacing = "s")    # compact row spacing
# Same shape as typeTable, but tallied by pitch result (the relabeled
# `description` column): count and share per result, sorted descending.
output$resultTable <- renderTable({
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Denominator for the share column.
  n_total <- nrow(filtered)
  filtered %>%
    count(description, name = "count") %>%
    mutate(share = count / n_total) %>%
    arrange(desc(count))
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Same shape as resultTable, but tallied by Statcast strike-zone location
# (`zone`, coerced to integer so renderTable prints it without decimals).
output$zoneTable <- renderTable({
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Denominator for the share column.
  n_total <- nrow(filtered)
  filtered %>%
    count(zone, name = "count") %>%
    mutate(share = count / n_total,
           zone = as.integer(zone)) %>%
    arrange(desc(count))
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Pie chart closest to the strike zone. Pie1 always mirrors whatever
# dimension the strike-zone plot is colored by: heat-map bins -> zone,
# point-by-pitch-type -> pitch_name, point-by-result -> description.
output$Pie1 <- renderPlot({
  # Same relabeling and widget filters as the other All Pitches outputs.
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Pick the column whose shares this pie displays, keyed on the geom choice.
  fill_col <- switch(input$geom,
                     Bpc = "zone",
                     Ppt = "pitch_name",
                     Ppr = "description")
  pie_data <- filtered %>%
    mutate(total = nrow(filtered)) %>%
    group_by(across(all_of(fill_col)), total) %>%
    summarise(count = n()) %>%
    mutate(share = count / total)
  if (fill_col == "zone") {
    # Integer zones get a continuous fill scale, matching the heat map.
    pie_data <- pie_data %>% mutate(zone = as.integer(zone))
  }
  pie_data %>%
    ggplot(aes(x = "", y = share, fill = .data[[fill_col]])) +
    geom_bar(width = 1, stat = "identity") +  # one stacked bar...
    coord_polar("y", start = 0) +             # ...wrapped into a pie
    labs(fill = fill_col) +                   # keep the original legend title
    theme(axis.ticks = element_blank(),
          axis.text = element_blank(),
          axis.title = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank())
})
# Top-right pie chart: shows a dimension complementary to the strike-zone
# coloring (zone when points are colored by pitch type, otherwise pitch type).
output$Pie2 <- renderPlot({
  # Same relabeling and widget filters as the other All Pitches outputs.
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Column displayed by this pie for each geom choice (Bpc and Ppr both
  # show pitch type here, matching the original branch logic).
  fill_col <- switch(input$geom,
                     Ppt = "zone",
                     Bpc = "pitch_name",
                     Ppr = "pitch_name")
  pie_data <- filtered %>%
    mutate(total = nrow(filtered)) %>%
    group_by(across(all_of(fill_col)), total) %>%
    summarise(count = n()) %>%
    mutate(share = count / total)
  if (fill_col == "zone") {
    # Integer zones get a continuous fill scale, matching the heat map.
    pie_data <- pie_data %>% mutate(zone = as.integer(zone))
  }
  pie_data %>%
    ggplot(aes(x = "", y = share, fill = .data[[fill_col]])) +
    geom_bar(width = 1, stat = "identity") +
    coord_polar("y", start = 0) +
    labs(fill = fill_col) +
    theme(axis.ticks = element_blank(),
          axis.text = element_blank(),
          axis.title = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank())
})
# Bottom-right pie chart: the remaining dimension not shown by Pie1/Pie2
# (pitch result unless points are colored by result, in which case zone).
output$Pie3 <- renderPlot({
  # Same relabeling and widget filters as the other All Pitches outputs.
  filtered <- batter_data() %>%
    mutate(description = ifelse(description == "hit_into_play_no_out", "hit",
                                ifelse(description == "hit_into_play", "in_play_out", description))) %>%
    filter(pitch_name %in% input$pitches,
           description %in% input$results,
           balls %in% (input$balls[1]:input$balls[2]),
           strikes %in% (input$strikes[1]:input$strikes[2]))
  # Column displayed by this pie for each geom choice (Ppt and Bpc both
  # show the pitch result here, matching the original branch logic).
  fill_col <- switch(input$geom,
                     Ppt = "description",
                     Bpc = "description",
                     Ppr = "zone")
  pie_data <- filtered %>%
    mutate(total = nrow(filtered)) %>%
    group_by(across(all_of(fill_col)), total) %>%
    summarise(count = n()) %>%
    mutate(share = count / total)
  if (fill_col == "zone") {
    # NOTE: the original Ppr branch kept `zone` numeric (no as.integer), so
    # no coercion is applied here either; the fill scale stays continuous.
    pie_data <- pie_data
  }
  pie_data %>%
    ggplot(aes(x = "", y = share, fill = .data[[fill_col]])) +
    geom_bar(width = 1, stat = "identity") +
    coord_polar("y", start = 0) +
    labs(fill = fill_col) +
    theme(axis.ticks = element_blank(),
          axis.text = element_blank(),
          axis.title = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank())
})
#
# ONTO NAV_TAB 2 (Now NAV_TAB 1) - BATTED BALLS
#
# Creates the strike zone plot in the batted balls tab - similar to other strike zone
output$bbPlot <- renderPlot({
bot <- deframe(batter_data() %>%
summarise(mean(sz_bot, na.rm = TRUE)))
top <- deframe(batter_data() %>%
summarise(mean(sz_top, na.rm = TRUE)))
# This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
num_x <- c(-.66, 0, .66,
-.66, 0, .66,
-.66, 0, .66,
-1.3, 1.3, -1.3, 1.3)
num_y <- c(top - 0.3, top - 0.3, top - 0.3,
top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
bot + 0.3, bot + 0.3, bot + 0.3,
top + 0.2, top + 0.2, bot - 0.2, bot - 0.2)
val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14)
num_tab <- data.frame(num_x, num_y, val)
# Using the data that I downloaded from Statcast using the baseballr package
batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2])) %>%
# Now the plot can be defined
# The x and y arguments within ggplot define the location of all the points that will be plotted
ggplot(aes(x = plate_x, y = plate_z)) +
geom_segment(aes(x = -0.333, y = mean(sz_top), xend = -0.333, yend = mean(sz_bot)), color = "gray") +
geom_segment(aes(x = 0.333, y = mean(sz_top), xend = 0.333, yend = mean(sz_bot)), color = "gray") +
geom_segment(aes(x = -1, y = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot),
xend = 1, yend = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot)), color = "gray") +
geom_segment(aes(x = -1, y = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3),
xend = 1, yend = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3)), color = "gray") +
geom_segment(aes(x = -1, y = mean(sz_top), xend = 1, yend = mean(sz_top)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_bot), xend = 1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_top), xend = -1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = 1, y = mean(sz_top), xend = 1, yend = mean(sz_bot)), size = 1.5) +
# These ifs are used to switch between the color of the points shown on screen (Could be simpler, should fix)
{
if(input$bbgeom == "Pe"){
geom_point(aes(fill = events), shape = 21, size = 3, color = "black", stroke = 0.5)
}else if(input$bbgeom == "Pbf"){
geom_point(aes(fill = bb_type), shape = 21, size = 3, color = "black", stroke = 0.5)
}else if(input$bbgeom == "Ppt"){
geom_point(aes(fill = pitch_name), shape = 21, size = 3, color = "black", stroke = 0.5)
}else if(input$bbgeom == "Pla"){
geom_point(aes(fill = launch_angle), shape = 21, size = 3, color = "black", stroke = 0.5)
}else if(input$bbgeom == "Pls"){
geom_point(aes(fill = launch_speed), shape = 21, size = 3, color = "black", stroke = 0.5)
}
} +
geom_text(data = num_tab, aes(x = num_x, y = num_y, label = val), size = 8.5,color = "black") +
# xlim and ylim define the size of the plot
ylim(bot - 0.6, top + 0.6) +
xlim(-1.67, 1.67) +
# All of these element_blank()'s make the canvas blank, unlike base ggplot which has axis/grid defaults
theme(axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank())
})
# Creates the zone table under the Tables Tab - similar to tables is "All Pitches" tab
output$bbZoneTable <- renderTable({
table_data <- batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2]))
table_data <- table_data %>%
mutate(total = nrow(table_data))
table_data %>%
group_by(zone, total) %>%
summarise(count = n()) %>%
mutate(share = count/total) %>%
mutate(zone = as.integer(zone)) %>%
select(zone, count, share) %>%
arrange(desc(count))
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Creates the pitch type table under the Tables Tab
output$bbPitchTable <- renderTable({
table_data <- batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2]))
table_data <- table_data %>%
mutate(total = nrow(table_data))
table_data %>%
group_by(pitch_name, total) %>%
summarise(count = n()) %>%
mutate(share = count/total) %>%
select(pitch_name, count, share) %>%
arrange(desc(count)) %>%
head(n = 6L)
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Creates the ball flight table under the Tables Tab
output$bbFlightTable <- renderTable({
table_data <- batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2]))
table_data <- table_data %>%
mutate(total = nrow(table_data))
table_data %>%
group_by(bb_type, total) %>%
summarise(count = n()) %>%
mutate(share = count/total) %>%
select(bb_type, count, share) %>%
arrange(desc(count))
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Creates the event table under the Tables Tab
output$bbEventTable <- renderTable({
table_data <- batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2]))
table_data <- table_data %>%
mutate(total = nrow(table_data))
table_data %>%
group_by(events, total) %>%
summarise(count = n()) %>%
mutate(share = count/total) %>%
select(events, count, share) %>%
arrange(desc(count)) %>%
head(n = 4L)
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
# Creates the launch angle/speed graphic
output$launchPlot <- renderPlot({
batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2])) %>%
ggplot() +
{
# Using some trigonometry allows me to visualize launch angle and velocity in this plot
if(input$bbgeom == "Pbf"){
geom_segment(aes(x = 0, y = 0, xend = launch_speed*cos(launch_angle*pi/180),
yend = launch_speed*sin(launch_angle*pi/180), color = bb_type))
}else if(input$bbgeom == "Pls"){
geom_segment(aes(x = 0, y = 0, xend = launch_speed*cos(launch_angle*pi/180),
yend = launch_speed*sin(launch_angle*pi/180), color = launch_speed))
}else if(input$bbgeom == "Pe"){
geom_segment(aes(x = 0, y = 0, xend = launch_speed*cos(launch_angle*pi/180),
yend = launch_speed*sin(launch_angle*pi/180), color = events))
}else if(input$bbgeom == "Ppt"){
geom_segment(aes(x = 0, y = 0, xend = launch_speed*cos(launch_angle*pi/180),
yend = launch_speed*sin(launch_angle*pi/180), color = pitch_name))
}else if(input$bbgeom == "Pla"){
geom_segment(aes(x = 0, y = 0, xend = launch_speed*cos(launch_angle*pi/180),
yend = launch_speed*sin(launch_angle*pi/180), color = launch_angle))
}
} +
# hline represents the ground
geom_hline(yintercept = -10) +
ylim(-35, 80) +
# Removes all the background grid lines and gray color and axes
theme(axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
legend.position = "none")
})
# Creates the table that is shown under the launch plot
output$bbLaunchTable <- renderTable({
table_data <- batter_data() %>%
mutate(events = ifelse(events %in% c("double_play",
"field_error",
"fielders_choice",
"fielders_choice_out",
"force_out",
"grounded_into_double_play",
"sac_fly"), "field_out", events)) %>%
filter(
launch_angle %in% (input$bbAngle[1]:input$bbAngle[2]),
launch_speed > input$bbSpeed[1],
launch_speed < input$bbSpeed[2],
pitch_name %in% input$bbpitches,
events %in% input$bbevents,
bb_type %in% input$bbflights,
balls %in% (input$bbballs[1]:input$bbballs[2]),
strikes %in% (input$bbstrikes[1]:input$bbstrikes[2]))
table_data <- table_data %>%
mutate(total = nrow(table_data))
if(input$bbgeom == "Pe"){
table_data %>%
group_by(events, total) %>%
summarise(count = n(), average_launch_speed = mean(launch_speed), average_launch_angle = mean(launch_angle)) %>%
mutate(share = count/total) %>%
select(events, count, share, average_launch_speed, average_launch_angle) %>%
arrange(desc(count)) %>%
head(n = 4L)
}else if(input$bbgeom == "Pbf"){
table_data %>%
group_by(bb_type, total) %>%
summarise(count = n(), average_launch_speed = mean(launch_speed), average_launch_angle = mean(launch_angle)) %>%
mutate(share = count/total) %>%
select(bb_type, count, share, average_launch_speed, average_launch_angle) %>%
arrange(desc(count)) %>%
head(n = 4L)
}else if(input$bbgeom == "Ppt"){
table_data %>%
group_by(pitch_name, total) %>%
summarise(count = n(), average_launch_speed = mean(launch_speed), average_launch_angle = mean(launch_angle)) %>%
mutate(share = count/total) %>%
select(pitch_name, count, share, average_launch_speed, average_launch_angle) %>%
arrange(desc(count)) %>%
head(n = 5L)
}
},
striped = TRUE,
bordered = TRUE,
spacing = "s")
#
# ONTO NAV_TAB 3 - PITCH SELECTOR!
#
# Creates the Main Pitch Selector Table
output$psTable <- renderTable({
table_data <- batter_data() %>%
filter(release_speed < input$psSpeed + 1,
pitch_name %in% input$psPitch,
balls %in% (input$psBalls[1]:input$psBalls[2]),
strikes %in% (input$psStrikes[1]:input$psStrikes[2]),
{
if(input$psBallStrike == "Yes"){
zone %in% (1:9)
}else{
zone %in% (1:14)
}})
table_data <- table_data %>%
mutate(total = nrow(table_data)) %>%
select(pitch_name, zone, total, launch_speed, description, events, bb_type, type)
# Turns the selected columns into their own individual dummy variable columns
# Makes life easier when adding up certain observations
table_data <- fastDummies::dummy_cols(table_data, select_columns = c("description", "events", "bb_type", "type"))
final_data <- table_data %>%
group_by(pitch_name, zone, total) %>%
summarise(observations = n(),
no_swing = sum(description_ball) + sum(description_called_strike),
homerun = sum(events_home_run),
line_drive = sum(bb_type_line_drive),
hit_in_play = sum(type_X),
average_launch_speed = mean(launch_speed, na.rm = TRUE))
# For some reason I had to separate this from the creation of final_data above
final_table <- final_data %>%
filter(observations >= input$psObs) %>%
mutate(swing_p = 1-(no_swing/observations),
homerun_p = homerun/observations,
line_drive_p = line_drive/observations,
hit_in_play_p = hit_in_play/observations,
zone = as.integer(zone)) %>%
select(pitch_name, zone, observations, swing_p, hit_in_play_p,
homerun_p, average_launch_speed, line_drive_p) %>%
arrange(desc(observations)) %>%
arrange(case_when(
input$psGoal == "swing" ~ swing_p,
input$psGoal == "hit_in_play" ~ hit_in_play_p,
input$psGoal == "homerun" ~ homerun_p,
input$psGoal == "hit_hard" ~ average_launch_speed,
input$psGoal == "line_drive" ~ line_drive_p
)) %>%
rename(
"Pitch Type" = pitch_name,
"Zone" = zone,
"Observations" = observations,
"Swing %" = swing_p,
"Ball Hit In Play %" = hit_in_play_p,
"Homerun %" = homerun_p,
"Line Drive %" = line_drive_p,
"Average Launch Speed" = average_launch_speed
) %>%
head(input$psOptions)
},
striped = TRUE,
bordered = TRUE)
#
# NAV_TAB 4 - BATTER SELECTOR
#
output$batterTable <- renderTable({
playerid_lookup(input$last_name) %>%
mutate(MLBAM_ID = as.integer(mlbam_id),
First_Season = as.integer(mlb_played_first),
First = first_name,
Last = last_name) %>%
filter(First == input$first_name) %>%
filter(!is.na(First_Season)) %>%
select(First, Last, MLBAM_ID, First_Season) %>%
head(10)
},
hover = TRUE,
bordered = TRUE,
striped = TRUE)
# Text outputs are pretty basic, allow me to use input for a text output
output$apTitle <- renderText({
sprintf("%s All Pitches %i-2020", input$last, start_year())
})
output$bbTitle <- renderText({
sprintf("%s Batted Balls %i-2020", input$last, start_year())
})
output$psTitle <- renderText({
sprintf("What Pitch Should You Throw To %s?", input$last)
})
  # Static reference table of popular players (the `popular_players` object,
  # defined elsewhere in the app) so users can find a valid name without searching
  output$popTab <- renderTable({
    popular_players
  },
  striped = TRUE,
  bordered = TRUE,
  hover = TRUE)
output$lsTabPlot <- renderPlot({
bot <- deframe(batter_data() %>%
summarise(mean(sz_bot, na.rm = TRUE)))
top <- deframe(batter_data() %>%
summarise(mean(sz_top, na.rm = TRUE)))
# This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
num_x <- c(-.66, 0, .66,
-.66, 0, .66,
-.66, 0, .66,
-1.3, 1.3, -1.3, 1.3)
num_y <- c(top - 0.3, top - 0.3, top - 0.3,
top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
bot + 0.3, bot + 0.3, bot + 0.3,
top + 0.2, top + 0.2, bot - 0.2, bot - 0.2)
val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14)
num_tab <- data.frame(num_x, num_y, val)
batter_data() %>%
filter(!pitch_name %in% c("null", "Knuckleball")) %>%
ggplot(aes(x = plate_x, y = plate_z, z = as.double(launch_speed))) +
stat_summary_2d(binwidth = c(.35, .35)) +
geom_segment(aes(x = -0.333, y = mean(sz_top), xend = -0.333, yend = mean(sz_bot)), color = "black") +
geom_segment(aes(x = 0.333, y = mean(sz_top), xend = 0.333, yend = mean(sz_bot)), color = "black") +
geom_segment(aes(x = -1, y = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot),
xend = 1, yend = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot)), color = "black") +
geom_segment(aes(x = -1, y = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3),
xend = 1, yend = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3)), color = "black") +
geom_segment(aes(x = -1, y = mean(sz_top), xend = 1, yend = mean(sz_top)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_bot), xend = 1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_top), xend = -1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = 1, y = mean(sz_top), xend = 1, yend = mean(sz_bot)), size = 1.5) +
ylim(0.7, 4) +
xlim(-5.5, 5.5) +
theme(axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank()) +
labs(fill = "Launch Speed") +
scale_fill_gradient2(low = "blue", mid = "gray", high = "red", midpoint = 85)
})
output$xwOBATabPlot <- renderPlot({
bot <- deframe(batter_data() %>%
summarise(mean(sz_bot, na.rm = TRUE)))
top <- deframe(batter_data() %>%
summarise(mean(sz_top, na.rm = TRUE)))
# This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
num_x <- c(-.66, 0, .66,
-.66, 0, .66,
-.66, 0, .66,
-1.3, 1.3, -1.3, 1.3)
num_y <- c(top - 0.3, top - 0.3, top - 0.3,
top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
bot + 0.3, bot + 0.3, bot + 0.3,
top + 0.2, top + 0.2, bot - 0.2, bot - 0.2)
val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14)
num_tab <- data.frame(num_x, num_y, val)
batter_data() %>%
filter(!pitch_name %in% c("null", "Knuckleball")) %>%
ggplot(aes(x = plate_x, y = plate_z, z = as.double(estimated_woba_using_speedangle))) +
stat_summary_2d(binwidth = c(.35, .35)) +
geom_segment(aes(x = -0.333, y = mean(sz_top), xend = -0.333, yend = mean(sz_bot)), color = "black") +
geom_segment(aes(x = 0.333, y = mean(sz_top), xend = 0.333, yend = mean(sz_bot)), color = "black") +
geom_segment(aes(x = -1, y = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot),
xend = 1, yend = ((mean(sz_top) - mean(sz_bot))/3) + mean(sz_bot)), color = "black") +
geom_segment(aes(x = -1, y = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3),
xend = 1, yend = mean(sz_top) - ((mean(sz_top) - mean(sz_bot))/3)), color = "black") +
geom_segment(aes(x = -1, y = mean(sz_top), xend = 1, yend = mean(sz_top)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_bot), xend = 1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = -1, y = mean(sz_top), xend = -1, yend = mean(sz_bot)), size = 1.5) +
geom_segment(aes(x = 1, y = mean(sz_top), xend = 1, yend = mean(sz_bot)), size = 1.5) +
ylim(0.7, 4) +
xlim(-5.5, 5.5) +
theme(axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank()) +
labs(fill = "Expected wOBA") +
scale_fill_gradient2(low = "blue", mid = "gray", high = "red", midpoint = 0.5)
})
output$xwOBATabDist <- renderPlot({
batter_data() %>%
filter(type == "X") %>%
filter(!is.na(zone)) %>%
mutate(zone = as.character(zone)) %>%
filter(!zone %in% c("11", "12", "13", "14")) %>%
filter(!estimated_woba_using_speedangle == "null") %>%
mutate(estimated_woba_using_speedangle = as.double(estimated_woba_using_speedangle)) %>%
ggplot(aes(x = estimated_woba_using_speedangle, y = reorder(zone, estimated_woba_using_speedangle), fill = stat(x) < 0.5)) +
stat_halfeye() +
geom_vline(xintercept = 0.5, linetype = "dashed") +
xlab("Expected Weighted On-Base Average") +
ylab("Zone Location") +
scale_fill_manual(values = c("indianred1", "skyblue")) +
ggtitle(sprintf("%s Expected wOBA Based On Strike Zone Location", input$last)) +
theme(axis.text = element_text(size = 17),
axis.title = element_text(size = 17))
})
output$lsTabDist <- renderPlot({
batter_data() %>%
filter(type == "X") %>%
filter(!is.na(zone)) %>%
mutate(zone = as.character(zone)) %>%
filter(!is.na(launch_speed)) %>%
filter(!zone %in% c("11", "12", "13", "14")) %>%
ggplot(aes(x = launch_speed, y = reorder(zone, launch_speed), fill = stat(x) < 90)) +
stat_halfeye() +
geom_vline(xintercept = 90, linetype = "dashed") +
xlab("Launch Speed") +
ylab("Zone Location") +
scale_fill_manual(values = c("indianred1", "skyblue")) +
ggtitle(sprintf("%s Launch Speed Based On Strike Zone Location", input$last)) +
theme(axis.text = element_text(size = 17),
axis.title = element_text(size = 17))
})
output$lsTabTitle <- renderText({
sprintf("%s Launch Speed Based On Strike Zone Location", input$last)
})
output$xwOBATabTitle <- renderText({
sprintf("%s Expected wOBA Based On Strike Zone Location", input$last)
})
output$blankZone <- renderPlot({
bot <- 1.4
top <- 3.2
# This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
num_x <- c(-.66, 0, .66,
-.66, 0, .66,
-.66, 0, .66)
num_y <- c(top - 0.3, top - 0.3, top - 0.3,
top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
bot + 0.3, bot + 0.3, bot + 0.3)
val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9)
num_tab <- data.frame(num_x, num_y, val)
ggplot() +
geom_segment(aes(x = -1, y = 1.4, xend = 1, yend = 1.4)) +
geom_segment(aes(x = -1, y = 3.2, xend = 1, yend = 3.2)) +
geom_segment(aes(x = 1, y = 1.4, xend = 1, yend = 3.2)) +
geom_segment(aes(x = -1, y = 1.4, xend = -1, yend = 3.2)) +
geom_segment(aes(x = -.33, y = 1.4, xend = -.33, yend = 3.2)) +
geom_segment(aes(x = .33, y = 1.4, xend = .33, yend = 3.2)) +
geom_segment(aes(x = -1, y = 2.0, xend = 1, yend = 2.0)) +
geom_segment(aes(x = -1, y = 2.6, xend = 1, yend = 2.6)) +
geom_text(data = num_tab, aes(x = num_x, y = num_y, label = val), size = 8.5,color = "black") +
xlim(-2, 2) +
ylim(0.6, 3.4) +
theme(axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank())
})
  # Second copy of the empty numbered strike-zone graphic (identical to
  # output$blankZone in the original file); Shiny requires a distinct output
  # ID for each place the image appears, hence the duplicate.
  output$blankZone2 <- renderPlot({
    bot <- 1.4
    top <- 3.2
    # This could have been done more efficiently, but it makes a table so I can display the strike zone numbers
    num_x <- c(-.66, 0, .66,
               -.66, 0, .66,
               -.66, 0, .66)
    num_y <- c(top - 0.3, top - 0.3, top - 0.3,
               top - ((top-bot)/2), top - ((top-bot)/2), top - ((top-bot)/2),
               bot + 0.3, bot + 0.3, bot + 0.3)
    val <- c(1, 2, 3, 4, 5, 6, 7, 8, 9)
    num_tab <- data.frame(num_x, num_y, val)
    # Eight segments: outer border, two vertical and two horizontal grid lines
    ggplot() +
      geom_segment(aes(x = -1, y = 1.4, xend = 1, yend = 1.4)) +
      geom_segment(aes(x = -1, y = 3.2, xend = 1, yend = 3.2)) +
      geom_segment(aes(x = 1, y = 1.4, xend = 1, yend = 3.2)) +
      geom_segment(aes(x = -1, y = 1.4, xend = -1, yend = 3.2)) +
      geom_segment(aes(x = -.33, y = 1.4, xend = -.33, yend = 3.2)) +
      geom_segment(aes(x = .33, y = 1.4, xend = .33, yend = 3.2)) +
      geom_segment(aes(x = -1, y = 2.0, xend = 1, yend = 2.0)) +
      geom_segment(aes(x = -1, y = 2.6, xend = 1, yend = 2.6)) +
      geom_text(data = num_tab, aes(x = num_x, y = num_y, label = val), size = 8.5,color = "black") +
      xlim(-2, 2) +
      ylim(0.6, 3.4) +
      # Blank canvas: strip default axes, grid, and background
      theme(axis.ticks = element_blank(),
            axis.text = element_blank(),
            axis.title = element_blank(),
            panel.grid = element_blank(),
            panel.background = element_blank())
  })
output$lsReg <- renderTable({
zone <- c("Zone 1 (Intercept)", "Zone 2", "Zone 3", "Zone 4", "Zone 5", "Zone 6", "Zone 7", "Zone 8", "Zone 9")
reg_table <- summary(lm(launch_speed ~ zone, data = batter_data() %>%
filter(type == "X") %>%
mutate(zone = as.character(zone)) %>%
filter(!zone %in% c("11", "12", "13", "14", "null"))))$coefficients
zone %>% cbind(reg_table)
},
striped = TRUE,
bordered = TRUE,
spacing = "m")
output$lsRSQ <- renderText({
sprintf("R-Squared: %f", summary(lm(launch_speed ~ zone, data = batter_data() %>%
filter(type == "X") %>%
mutate(zone = as.character(zone)) %>%
filter(!zone %in% c("11", "12", "13", "14", "null"))))$r.squared)
})
output$xwOBAReg <- renderTable({
zone <- c("Zone 1 (Intercept)", "Zone 2", "Zone 3", "Zone 4", "Zone 5", "Zone 6", "Zone 7", "Zone 8", "Zone 9")
reg_table <- summary(lm(estimated_woba_using_speedangle ~ zone, data = batter_data() %>%
filter(type == "X") %>%
filter(!estimated_woba_using_speedangle == "null") %>%
mutate(estimated_woba_using_speedangle = as.double(estimated_woba_using_speedangle)) %>%
mutate(zone = as.character(zone)) %>%
filter(!zone %in% c("11", "12", "13", "14", "null"))))$coefficients
zone %>% cbind(reg_table)
},
striped = TRUE,
bordered = TRUE,
spacing = "m")
output$xwOBARSQ <- renderText({
sprintf("R-Squared: %f", summary(lm(estimated_woba_using_speedangle ~ zone, data = batter_data() %>%
filter(type == "X") %>%
filter(!estimated_woba_using_speedangle == "null") %>%
mutate(estimated_woba_using_speedangle = as.double(estimated_woba_using_speedangle)) %>%
mutate(zone = as.character(zone)) %>%
filter(!zone %in% c("11", "12", "13", "14", "null"))))$r.squared)
})
}
# Glues it all together and runs the application: `ui` supplies the layout and
# `server` the reactive logic defined above
shinyApp(ui = ui, server = server)
|
d1fdf15920c67c53ed3701e5d0a8e65ac493073c
|
4344aa4529953e5261e834af33fdf17d229cc844
|
/input/gcamdata/R/zgcamusa_L203.water_td.R
|
e1dfccbc5eb93ed42c29d5db165fc5c082b9c31c
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
JGCRI/gcam-core
|
a20c01106fd40847ed0a803969633861795c00b7
|
912f1b00086be6c18224e2777f1b4bf1c8a1dc5d
|
refs/heads/master
| 2023-08-07T18:28:19.251044
| 2023-06-05T20:22:04
| 2023-06-05T20:22:04
| 50,672,978
| 238
| 145
|
NOASSERTION
| 2023-07-31T16:39:21
| 2016-01-29T15:57:28
|
R
|
UTF-8
|
R
| false
| false
| 27,865
|
r
|
zgcamusa_L203.water_td.R
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_gcamusa_L203.water_td
#'
#' Mapping of water consumption/withdrawal to sectoral demands at the state level.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#'   the generated outputs: \code{L203.DeleteSupplysector_USA}, \code{L203.DeleteResTechInput}, \code{L203.DeleteSubsector_USA},
#'   \code{L203.Supplysector_USA}, \code{L203.SubsectorLogit_USA}, \code{L203.SubsectorShrwt_USA}, \code{L203.TechShrwt_USA},
#'   \code{L203.TechCoef_USA}, \code{L203.TechPmult_USA}, \code{L203.TechDesalCoef_USA}, \code{L203.TechDesalShrwt_USA},
#'   \code{L203.TechDesalCost_USA}.
#' The corresponding file in the original data system was \code{L203.water.mapping.R} (water level2).
#' @details Generates water mapping sector input files to group demands by sectors.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author NTG May 2020
module_gcamusa_L203.water_td <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "water/basin_to_country_mapping",
FILE = "water/water_td_sectors",
FILE = "water/A71.sector",
FILE = "water/A72.sector",
FILE = "water/A73.sector",
FILE = "water/A74.sector",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_PRI_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
FILE = "gcam-usa/states_subregions",
FILE = "gcam-usa/state_and_basin",
FILE = "gcam-usa/usa_seawater_states_basins",
FILE = "water/water_td_sectors",
FILE = "water/A03.sector",
"L201.RsrcTechCoef",
"L203.Supplysector_desal_basin"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L203.DeleteSupplysector_USA",
"L203.DeleteResTechInput",
"L203.DeleteSubsector_USA",
"L203.Supplysector_USA",
"L203.SubsectorLogit_USA",
"L203.SubsectorShrwt_USA",
"L203.TechShrwt_USA",
"L203.TechCoef_USA",
"L203.TechPmult_USA",
"L203.TechDesalCoef_USA",
"L203.TechDesalShrwt_USA",
"L203.TechDesalCost_USA"))
} else if(command == driver.MAKE) {
all_data <- list(...)[[1]]
# Load required inputs
basin_to_country_mapping <- get_data(all_data, "water/basin_to_country_mapping")
water_td_sectors <- get_data(all_data, "water/water_td_sectors")
A71.sector <- get_data(all_data, "water/A71.sector")
A72.sector <- get_data(all_data, "water/A72.sector")
A73.sector <- get_data(all_data, "water/A73.sector")
A74.sector <- get_data(all_data, "water/A74.sector")
L103.water_mapping_USA_R_LS_W_Ws_share <- get_data(all_data, "L103.water_mapping_USA_R_LS_W_Ws_share", strip_attributes = TRUE)
L103.water_mapping_USA_R_PRI_W_Ws_share <- get_data(all_data, "L103.water_mapping_USA_R_PRI_W_Ws_share", strip_attributes = TRUE)
L103.water_mapping_USA_R_GLU_W_Ws_share <- get_data(all_data,"L103.water_mapping_USA_R_GLU_W_Ws_share", strip_attributes = TRUE)
L103.water_mapping_USA_R_B_W_Ws_share <- get_data(all_data,"L103.water_mapping_USA_R_B_W_Ws_share", strip_attributes = TRUE)
GCAM_state_names <- get_data(all_data, "gcam-usa/states_subregions")
state_and_basin <- get_data(all_data, "gcam-usa/state_and_basin")
usa_seawater_states_basins <- get_data(all_data, "gcam-usa/usa_seawater_states_basins")
water_td_sectors <- get_data(all_data, "water/water_td_sectors")
A03.sector <- get_data(all_data, "water/A03.sector")
L201.RsrcTechCoef <- get_data(all_data, "L201.RsrcTechCoef", strip_attributes = TRUE)
L203.Supplysector_desal_basin <- get_data(all_data, "L203.Supplysector_desal_basin", strip_attributes = TRUE)
GLU <- GLU_code <- GLU_name <- water.sector <-
water_type <- supplysector <- field.eff <- conveyance.eff <-
coefficient <- region <- state <- share <- basin_name <- Basin_name <-
GCAM_basin_ID <- state_abbr <- water_sector <- year <- wt_short <- value <-
state.to.country.share <- subsector <- technology <- share.weight <-
price.unit <- input.unit <- output.unit <- logit.exponent <- logit.type <-
logit.year.fillout <- resource <- minicam.energy.input <- subresource <- NULL # silence package check notes
# Define unique states and basins that have access to seawater that will
    # allow for seawater cooling
seawater_states_basins <- unique(usa_seawater_states_basins$seawater_region)
# Define in which states GCAM water basins exist by using data from R package created by Chris Vernon
state_and_basin %>%
left_join_error_no_match(basin_to_country_mapping, by = "GCAM_basin_ID") %>%
select(GCAM_basin_ID, GLU_name, basin_name, state_abbr) %>%
rename(region = state_abbr) ->
state_and_basin_mapping
# Create mappings for the sectors that have production at the state level already.
# These sectors: Industrial, Municipal, and Electricity will not need to be shared
# from the USA region to the states, and thus will not have separate market names by region
L103.water_mapping_USA_R_B_W_Ws_share %>%
mutate(water_sector = gsub("Domestic", "Municipal", water_sector)) %>%
left_join_error_no_match(water_td_sectors, by = c("water_sector" = "water.sector")) %>%
left_join_error_no_match(A03.sector, by = "supplysector", ignore_columns = c("logit.type")) %>%
mutate(supplysector = set_water_input_name(water_sector, water_type, water_td_sectors)) ->
L203.mapping_nonirr
# Using irrigation shares, define water sector and add demand categories
L103.water_mapping_USA_R_GLU_W_Ws_share %>%
rename(state = region) %>%
mutate(region = gcam.USA_REGION,
water.sector = water.IRRIGATION) %>%
left_join_error_no_match(water_td_sectors, by = "water.sector") %>%
left_join_error_no_match(A03.sector, by = "supplysector", ignore_columns = "logit.type") %>%
mutate(supplysector = set_water_input_name(water.sector, water_type, water_td_sectors, GLU_name)) ->
L203.mapping_irr
# Isolate the USA region which will share basin level demands in the USA region to
# States which are defined as subsectors
L203.mapping_irr %>%
mutate(subsector = state,
technology = supplysector,
coefficient = if_else(water.sector == water.IRRIGATION & water_type == "water withdrawals",
1 / gcamusa.CONVEYANCE_LOSSES, 1),
## ^^ conveyance losses for irrigation--applied to withdrawals only
# Note: Conveyance losses are taken out of agriculture withdrawals and...
# ... instead applied to water distribution sectors (water_td_irr). This means that to get total...
# ... ag withdrawals for reporting (i.e., when querying GCAM results)...
# ... it is necessary to include the conveyance loss.
share.weight = share,
market.name = state,
share.weight.year = year,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
select(-share, -state) %>%
arrange(region) ->
L203.mapping_irr_region
# We must now set all subsectors in USA from gcam-core and water_mapping.xml to 0 so that we do not double count
# demands
L203.mapping_irr_region %>%
bind_rows(L203.mapping_irr_region %>%
mutate(subsector = basin_name,
technology = basin_name,
share.weight = 0,
market.name = gcam.USA_REGION)) ->
L203.mapping_irr_region
    # Isolate the states and define the basins which contribute water supplies to each one.
L203.mapping_irr %>%
select(-region) %>%
mutate(region = state,
subsector = basin_name,
technology = basin_name,
coefficient = gcamusa.DEFAULT_COEFFICIENT,
share.weight = gcamusa.DEFAULT_SHAREWEIGHT,
market.name = gcam.USA_REGION,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
select(-share, -state) %>%
arrange(region) ->
L203.mapping_irr_state
# Combine state and USA region irrigation mappings
bind_rows(
L203.mapping_irr_region %>%
## filter out basin name subsectors
filter(subsector %in% gcamusa.STATES),
L203.mapping_irr_state
) ->
L203.mapping_irr
# Livestock sector:
# This done slightly different as production of livestock is not modeled at the state level.
# Here we take the regional (i.e. USA) water demands of livestock and map them to the state level based on
# the amount of water for livestock that each state requires compared to the USA as a whole, computed in
# L103.water_mapping_USA
L103.water_mapping_USA_R_LS_W_Ws_share %>%
mutate(region=gcam.USA_REGION,
water.sector = water.LIVESTOCK) %>%
left_join_error_no_match(water_td_sectors, by = "water.sector") %>%
left_join_error_no_match(A03.sector, by = "supplysector", ignore_columns = "logit.type") %>%
mutate(wt_short = water.MAPPED_WATER_TYPES_SHORT[water_type],
supplysector = paste(supplysector, wt_short, sep = "_"),
coefficient = gcamusa.DEFAULT_COEFFICIENT,
subsector = state,
technology = supplysector,
share.weight = value,
market.name = state,
share.weight.year = year,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
select(-wt_short, -value, -state) %>%
arrange(region) ->
L203.mapping_livestock
L203.mapping_livestock %>%
bind_rows(L203.mapping_livestock %>%
# LJENM returns error because number of rows in data changes.
# The join is intended to duplicate rows because some states
# are mapped to multiple basisn. Thus, left_join() is used.
left_join(state_and_basin_mapping, by = c("subsector" = "region")) %>%
mutate(share.weight = 0,
subsector = basin_name,
technology = basin_name,
market.name = gcam.USA_REGION) %>%
unique()
) ->
L203.mapping_livestock
# (d) primary energy sector
# We use USGS withdrawal data for primary energy mining and ratios of fresh to saline water withdrawals to
# map the demands from USA values to state level. This is done in 2 parts in order to specify differences in
# subsectors at the state and national levels, as well as differences in share weights (i.e. mapping to states,
# mapping of fresh to desal within a state)
L103.water_mapping_USA_R_PRI_W_Ws_share %>%
mutate(region = gcam.USA_REGION,
water.sector = water.PRIMARY_ENERGY) %>%
left_join_error_no_match(water_td_sectors, by = "water.sector") %>%
left_join_error_no_match(A03.sector, by = "supplysector", ignore_columns = "logit.type") %>%
mutate(wt_short = water.MAPPED_WATER_TYPES_SHORT[water_type],
supplysector = paste(supplysector, wt_short, sep = "_"),
coefficient = gcamusa.DEFAULT_COEFFICIENT,
subsector = state,
technology = supplysector,
share.weight = state.to.country.share,
market.name = state,
share.weight.year = year,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
select(-wt_short, -state.to.country.share, -state) %>%
arrange(region) ->
L203.mapping_primary_region
L203.mapping_primary_region %>%
bind_rows(L203.mapping_primary_region %>%
# LJENM returns error because number of rows in data changes.
# The join is intended to duplicate rows because some states
# are mapped to multiple basisn. Thus, left_join() is used.
left_join(state_and_basin_mapping, by = c("subsector" = "region")) %>%
mutate(share.weight = 0,
subsector = basin_name,
technology = basin_name,
market.name = gcam.USA_REGION) %>%
unique()
) ->
L203.mapping_primary_region
# No values are present for DC, therefore NAs are created. These are replaced with
# zero shareweights
L203.mapping_primary_region %>%
replace_na(list(share.weight = 0)) %>%
replace_na(list(fresh.share = 0)) ->
L203.mapping_primary
# combine all sectors and add additional required columns. Long format is used for
# subsector share weights, additional mapping is used for all other final outputs
L203.mapping_nonirr %>%
mutate(coefficient = gcamusa.DEFAULT_COEFFICIENT,
subsector = basin_name,
technology = basin_name,
share.weight = share,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
arrange(region) %>%
bind_rows(L203.mapping_nonirr %>%
filter(year == gcamusa.FINAL_MAPPING_YEAR) %>%
mutate(year=max(MODEL_BASE_YEARS),
coefficient = gcamusa.DEFAULT_COEFFICIENT,
subsector = basin_name,
technology = basin_name,
share.weight = share,
logit.year.fillout = first(MODEL_BASE_YEARS)) %>%
arrange(region)) %>%
complete(nesting(region, supplysector, subsector, technology, water.sector, basin_name, water_type, coefficient, share,
share.weight, price.unit, input.unit, output.unit, logit.exponent, logit.type, logit.year.fillout),
year = c(year, MODEL_BASE_YEARS, MODEL_FUTURE_YEARS)) %>%
dplyr::filter(!is.na(year)) %>%
bind_rows(L203.mapping_livestock %>%
## Filter out basin names in subsectors as these are deleted later
filter(subsector %in% gcamusa.STATES),
L203.mapping_primary %>%
filter(subsector %in% gcamusa.STATES),
L203.mapping_irr) %>%
mutate(pMult = if_else(water.sector == water.IRRIGATION & water_type == "water withdrawals" & region != gcam.USA_REGION,
water.IRR_PRICE_SUBSIDY_MULT, water.MAPPING_PMULT)) ->
L203.mapping_all
L203.EFW_delete_supplysectors <- bind_rows(A71.sector, A72.sector, A73.sector, A74.sector) %>%
pull(supplysector)
L203.delete_desal_basin_sectors <- L203.Supplysector_desal_basin %>%
filter(region == gcam.USA_REGION) %>%
pull(supplysector)
tibble(region = gcam.USA_REGION,
supplysector = c(water.DELETE_DEMAND_TYPES,
L203.EFW_delete_supplysectors,
L203.delete_desal_basin_sectors)) ->
L203.DeleteSupplysector_USA
## Also need to delete the "elect_td_ind" input to the groundwater grades in future periods
L201.RsrcTechCoef %>%
filter(region == gcam.USA_REGION) %>%
select(region, resource, subresource, technology, year, minicam.energy.input) ->
L203.DeleteResTechInput
## We delete the basin level subsectors in the USA region
## to eliminate double counting of irrigation, livestock,
## and primary energy. This overrides the mappings from
## water_mapping.XML and maps directly to the states.
L203.mapping_irr_region %>%
filter(!subsector %in% gcamusa.STATES) %>%
select(region,supplysector,subsector) %>%
bind_rows(
L203.mapping_primary_region %>%
filter(!subsector %in% gcamusa.STATES) %>%
select(region,supplysector,subsector),
L203.mapping_livestock%>%
filter(!subsector %in% gcamusa.STATES) %>%
select(region,supplysector,subsector)
) %>%
unique()->
L203.DeleteSubsector_USA
# Sector information
L203.mapping_all %>%
select(LEVEL2_DATA_NAMES[["Supplysector"]], LOGIT_TYPE_COLNAME) ->
L203.Supplysector_USA
# Subsector logit exponents for mapping sector
L203.mapping_all %>%
mutate(logit.exponent = if_else(region != gcam.USA_REGION, water.LOGIT_EXP, 0)) %>%
select(LEVEL2_DATA_NAMES[["SubsectorLogit"]], LOGIT_TYPE_COLNAME) ->
L203.SubsectorLogit_USA
# Subsector share weights to 1 (no competition) in all states. Sharing happens at USA level. Water prices
# will drive competition between the basins at the state level
L203.mapping_all %>%
select(LEVEL2_DATA_NAMES[["SubsectorShrwt"]]) ->
L203.SubsectorShrwt_USA
# Technology share weights, defined by state and sector
# Zero out technology shareweights in the USA region to make sure values are not counted multiple times
L203.mapping_all %>%
complete(nesting(region, supplysector, subsector, technology, water.sector, basin_name, water_type, coefficient),
year = c(year, MODEL_BASE_YEARS,MODEL_FUTURE_YEARS)) %>%
mutate(share.weight = if_else(region == gcam.USA_REGION & !(subsector %in% gcamusa.STATES) & !grepl("irr", supplysector), 0, 1)) %>%
dplyr::filter(!is.na(year)) %>%
select(LEVEL2_DATA_NAMES[["TechShrwt"]]) ->
L203.TechShrwt_USA
# Define market name and minicam energy input dependent upon whether the sector is
# produced at the state level or is we map from USA region to state
L203.mapping_all %>%
complete(nesting(region, supplysector, subsector, technology, water.sector, basin_name, water_type, coefficient),
year = c(year, MODEL_BASE_YEARS, MODEL_FUTURE_YEARS)) %>%
mutate(minicam.energy.input = if_else((region == gcam.USA_REGION & grepl("water_td", technology)),
supplysector,
paste0(basin_name, "_", water_type)),
market.name = if_else((region == gcam.USA_REGION & grepl("water_td", technology)), subsector, gcam.USA_REGION)) %>%
dplyr::filter(!is.na(year)) %>%
select(LEVEL2_DATA_NAMES[["TechCoef"]]) ->
L203.TechCoef_USA
# Pass-through technology water price adjust if there one
L203.mapping_all %>%
complete(nesting(region, supplysector, subsector, technology, water.sector, basin_name, water_type, coefficient),
year = c(year, MODEL_BASE_YEARS, MODEL_FUTURE_YEARS)) %>%
replace_na(list(pMult=1)) %>%
select(LEVEL2_DATA_NAMES[["TechPmult"]]) ->
L203.TechPmult_USA
L203.TechCoef_USA %>%
filter(region!=gcam.USA_REGION) %>%
mutate(technology = "desalination",
minicam.energy.input = gcamusa.WATER_TYPE_SEAWATER,
market.name = gcam.USA_REGION) %>%
dplyr::filter(!is.na(year))->
L203.TechDesalCoef_USA
# Set shareweight of desalination technologies to 0 in all non-coastal states
# and basins that do not come in contact with the ocean. This removes the possibility
# of having desalination required in Texas, but coming from the Rio Grande which does not
# have access to seawater without inland transportation.
#
# Additionally, desalination is now allowed for all sectors, including irrigation.
# Given the price subsidy on agricultural water, desalination should never come
# for irrigated agriculture as the price required would exceed the limits defined in
# water_supply_constrained.xml
L203.TechShrwt_USA %>%
filter(region != gcam.USA_REGION) %>%
mutate(technology = "desalination",
share.weight = if_else(!(region %in% seawater_states_basins), 0, 1)) %>%
dplyr::filter(!is.na(year)) ->
L203.TechDesalShrwt_USA
L203.TechDesalShrwt_USA %>%
rename(minicam.non.energy.input = share.weight) %>%
mutate(minicam.non.energy.input = "final cost",
input.cost = gcamusa.DESALINATION_PRICE) %>%
dplyr::filter(!is.na(year)) ->
L203.TechDesalCost_USA
# ===================================================
# Produce outputs
L203.DeleteSupplysector_USA %>%
add_title("Remove the water sectors from the USA region that are produced at the state level") %>%
add_units("Unitless") %>%
add_comments("Remove the USA electricity, municipal, and industrial water_td's") %>%
add_comments("Also remove all energy-for-water (EFW) sectors") %>%
add_precursors("L203.Supplysector_desal_basin",
"water/A71.sector",
"water/A72.sector",
"water/A73.sector",
"water/A74.sector") ->
L203.DeleteSupplysector_USA
L203.DeleteResTechInput %>%
add_title("Remove the electricity inputs to groundwater supply curves") %>%
add_units("Unitless") %>%
add_comments("These would be pulling from a USA electricity market that does not exist in GCAM-USA") %>%
add_precursors("L201.RsrcTechCoef") ->
L203.DeleteResTechInput
L203.DeleteSubsector_USA %>%
add_title("Remove the three sectors that are produced at the state level") %>%
add_units("Unitless") %>%
add_comments("Remove the USA electricity, municipal, and industrial water_td's") %>%
add_legacy_name("L2232.DeleteSubsector_USA") ->
L203.DeleteSubsector_USA
L203.Supplysector_USA %>%
add_title("Water sector information") %>%
add_units("Unitless") %>%
add_comments("Supply sector info expanded to USA and state regions for water demand sectors") %>%
add_legacy_name("L203.Supplysector") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_PRI_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.Supplysector_USA
L203.SubsectorLogit_USA %>%
add_title("Water subsector logit exponents for mapping sector") %>%
add_units("Unitless") %>%
add_comments("Subsector info expanded to USA and state regions for water demand sectors") %>%
add_legacy_name("L203.SubsectorLogit") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_PRI_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.SubsectorLogit_USA
L203.SubsectorShrwt_USA %>%
add_title("Water subsector share weights") %>%
add_units("Unitless") %>%
add_comments("Subsector shareweights expanded to USA and state regions for water demand sectors") %>%
add_legacy_name("L203.SubsectorShrwtFllt") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.SubsectorShrwt_USA
L203.TechShrwt_USA %>%
add_title("Water technology shareweights") %>%
add_units("Unitless") %>%
add_comments("Technology shareweights expanded to USA and state regions for water demand sectors") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L203.TechShrwt") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.TechShrwt_USA
L203.TechCoef_USA%>%
add_title("Water technology coefficients") %>%
add_units("Unitless") %>%
add_comments("Technology coefficients expanded to USA and state regions for water demand sectors") %>%
add_legacy_name("L203.TechCoef") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_GLU_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.TechCoef_USA
L203.TechDesalCoef_USA %>%
add_title("Water technology desal coefficients") %>%
add_units("Unitless") %>%
add_comments("Desalination Coefficients for USA region and states. Available only for coastal states and basins") %>%
add_legacy_name("L203.TechCoef") %>%
same_precursors_as(L203.TechCoef_USA) ->
L203.TechDesalCoef_USA
L203.TechPmult_USA %>%
add_title("Water technology price multipliers") %>%
add_units("Unitless") %>%
add_comments("Water price subsidy applied at USA and state level") %>%
add_legacy_name("L203.TechCoef") %>%
add_precursors("water/basin_to_country_mapping",
"L103.water_mapping_USA_R_LS_W_Ws_share",
"L103.water_mapping_USA_R_PRI_W_Ws_share",
"L103.water_mapping_USA_R_B_W_Ws_share",
"gcam-usa/states_subregions",
"gcam-usa/state_and_basin",
"water/water_td_sectors",
"water/A03.sector") ->
L203.TechPmult_USA
L203.TechDesalShrwt_USA %>%
add_title("Water technology desal shareweights") %>%
add_units("Unitless") %>%
add_comments("Desalination Shareweights for USA region and states. Available only for coastal states and basins") %>%
add_legacy_name("L203.TechCoef") %>%
same_precursors_as(L203.TechShrwt_USA) %>%
add_precursors("gcam-usa/usa_seawater_states_basins") ->
L203.TechDesalShrwt_USA
L203.TechDesalCost_USA %>%
add_title("Water technology desal costs") %>%
add_units("Unitless") %>%
add_comments("Desalination fixed costs") %>%
add_legacy_name("L203.TechCoef") %>%
same_precursors_as(L203.TechShrwt_USA) ->
L203.TechDesalCost_USA
return_data(L203.DeleteSupplysector_USA,
L203.DeleteResTechInput,
L203.DeleteSubsector_USA,
L203.Supplysector_USA,
L203.SubsectorLogit_USA,
L203.SubsectorShrwt_USA,
L203.TechShrwt_USA,
L203.TechCoef_USA,
L203.TechDesalCoef_USA,
L203.TechDesalShrwt_USA,
L203.TechDesalCost_USA,
L203.TechPmult_USA)
} else {
stop("Unknown command")
}
}
|
a1de49e65d89275a13274ddc854713a6167ed0cf
|
82775aacbd58b84f49b9d6a21b433dcf10a0fb79
|
/R/convert_usd_to_eur.R
|
4d340c5b8afae23945c2b86266d7dee9af5a375d
|
[] |
no_license
|
karlofiguerres/masteringRskills
|
4528cb929143154323728c31c15b8ad08cd00fbf
|
4c185fde12d460f25b252f4138a40eedd07eee06
|
refs/heads/master
| 2020-06-02T03:14:50.240356
| 2019-06-09T20:26:37
| 2019-06-09T20:26:37
| 191,016,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 362
|
r
|
convert_usd_to_eur.R
|
#' Convert an amount in US dollars to euros
#'
#' Looks up the most recent USD to EUR exchange rate via \code{eurusd()}
#' (defined elsewhere in this package) and converts the provided USD amount.
#'
#' @param usd Numeric. Amount in USD to be converted to EUR.
#' @return Numeric. The equivalent amount in EUR, same length as \code{usd}.
#' @export
#' @importFrom httr GET content
#' @importFrom logger log_info
#' @examples
#' convert_usd_to_eur(10)
#' convert_usd_to_eur(25)
convert_usd_to_eur <- function(usd) {
  # eurusd() returns the current EUR-per-USD rate; scale the USD amount by it.
  # NOTE(review): the httr/logger imports above are presumably consumed by
  # eurusd() rather than this function -- confirm before removing.
  eurusd() * usd
}
|
d860319b19d990befbbd3863a129dcf2304a17a4
|
26d5c692d5272c549bd4e77d6b34cef52ee4b132
|
/testscript.R
|
3fd7a5ba4655cd9f47486bb9efb5f04d860c545e
|
[
"MIT"
] |
permissive
|
LenaAckermann/my_first_rproj
|
d75b99a54fc43a358f2ae627d36765cd0dbaf366
|
d3546690e91e265b89c4b1178e556a5f901c908b
|
refs/heads/main
| 2023-04-09T12:15:40.824938
| 2021-04-21T23:17:51
| 2021-04-21T23:17:51
| 360,329,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28
|
r
|
testscript.R
|
# This is just a test script
|
e4d5dc6b8a81ffa256410e1de6e8c013e6a7bb9b
|
cedd4a491cba1747c6874fc9e1bdf03e9790fb77
|
/man/apollo_validateInputs.Rd
|
996d8b0da4b6f65a8ea2afbcb442054450cc266d
|
[] |
no_license
|
cran/apollo
|
eee0e11d7be26f3225e4f024c7d1c4042e627ade
|
e390dbad0a9041a610456f1a54c2e548c470356c
|
refs/heads/master
| 2023-08-19T16:03:28.519668
| 2023-08-10T12:40:02
| 2023-08-10T13:36:07
| 166,079,906
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 8,527
|
rd
|
apollo_validateInputs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apollo_validateInputs.R
\name{apollo_validateInputs}
\alias{apollo_validateInputs}
\title{Prepares input for \code{apollo_estimate}}
\usage{
apollo_validateInputs(
apollo_beta = NA,
apollo_fixed = NA,
database = NA,
apollo_control = NA,
apollo_HB = NA,
apollo_draws = NA,
apollo_randCoeff = NA,
apollo_lcPars = NA,
recycle = FALSE,
silent = FALSE
)
}
\arguments{
\item{apollo_beta}{Named numeric vector. Names and values for parameters.}
\item{apollo_fixed}{Character vector. Names (as defined in \code{apollo_beta}) of parameters whose value should not change during estimation.}
\item{database}{data.frame. Data used by model.}
\item{apollo_control}{List. Options controlling the running of the code. User input is required for all settings except those with a default or marked as optional.
\itemize{
\item \strong{\code{analyticGrad}}: Boolean. TRUE to use analytical gradients during parameter estimation, if they are available. FALSE to use numerical gradients. - TRUE by default.
\item \strong{\code{calculateLLC}}: Boolean. TRUE if user wants to calculate LL at constants (if applicable). - TRUE by default.
\item \strong{\code{HB}}: Boolean. TRUE if using RSGHB for Bayesian estimation of model.
\item \strong{\code{indivID}}: Character. Name of column in the database with each decision maker's ID.
\item \strong{\code{mixing}}: Boolean. TRUE for models that include random parameters.
\item \strong{\code{modelDescr}}: Character. Description of the model. Used in output files.
\item \strong{\code{modelName}}: Character. Name of the model. Used when saving the output to files.
\item \strong{\code{nCores}}: Numeric>0. Number of cores to use in calculations of the model likelihood.
\item \strong{\code{noDiagnostics}}: Boolean. TRUE if user does not wish model diagnostics to be printed - FALSE by default.
\item \strong{\code{noValidation}}: Boolean. TRUE if user does not wish model input to be validated before estimation - FALSE by default.
\item \strong{\code{outputDirectory}}: Character. Optional directory for outputs if different from working director - empty by default
\item \strong{\code{panelData}}: Boolean. TRUE if there are multiple observations (i.e. rows) for each decision maker - Automatically set based on \code{indivID} by default.
\item \strong{\code{seed}}: Numeric. Seed for random number generation.
\item \strong{\code{weights}}: Character. Name of column in database containing weights for estimation.
\item \strong{\code{workInLogs}}: Boolean. TRUE for increased numeric precision in models with panel data - FALSE by default.
}}
\item{apollo_HB}{List. Contains options for Bayesian estimation. See \code{?RSGHB::doHB} for details.
Parameters \code{modelname}, \code{gVarNamesFixed}, \code{gVarNamesNormal},
\code{gDIST}, \code{svN} and \code{FC} are automatically set based on the
other arguments of this function.
Other settings to include are the following.
\itemize{
\item \strong{\code{constraintNorm}}: Character vector. Constraints for \emph{random} coefficients
in bayesian estimation. Constraints can be written as
"b1>b2", "b1<b2", "b1>0", or "b1<0".
\item \strong{\code{fixedA}}: Named numeric vector. Contains the names and fixed mean values of
random parameters. For example, c(b1=0) fixes the mean of b1 to zero.
  \item \strong{\code{fixedD}}: Named numeric vector. Contains the names and fixed variance of
                                random parameters. For example, c(b1=1) fixes the variance of b1 to one.
\item \strong{\code{gNCREP}}: Numeric. Number of burn-in iterations to use prior to convergence (default=10^5).
\item \strong{\code{gNEREP}}: Numeric. Number of iterations to keep for averaging after convergence has been reached (default=10^5).
\item \strong{\code{gINFOSKIP}}: Numeric. Number of iterations between printing/plotting information about the iteration process (default=250).
\item \strong{\code{hbDist}}: \emph{Mandatory} setting. A named character vector determining
the distribution of each parameter to be estimated. Possible
values are as follows.
\itemize{
\item \strong{\code{"CN+"}}: Positive censored normal.
\item \strong{\code{"CN-"}}: Negative censored normal.
\item \strong{\code{"DNE"}}: Parameter kept at its starting value (not estimated).
\item \strong{\code{"JSB"}}: Johnson SB.
\item \strong{\code{"LN+"}}: Positive log-normal.
\item \strong{\code{"LN-"}}: Negative log-normal.
\item \strong{\code{"N"}}: Normal.
\item \strong{\code{"NR"}}: Fixed (as in non-random) parameter.
}
}}
\item{apollo_draws}{List of arguments describing the inter and intra individual draws. Required only if \code{apollo_control$mixing = TRUE}. Unused elements can be omitted.
\itemize{
\item \strong{\code{interDrawsType}}: Character. Type of inter-individual draws ('halton','mlhs','pmc','sobol','sobolOwen',
'sobolFaureTezuka', 'sobolOwenFaureTezuka' or the name of an object loaded in memory,
see manual in www.ApolloChoiceModelling.com for details).
\item \strong{\code{interNDraws}}: Numeric scalar (>=0). Number of inter-individual draws per individual. Should be set to 0 if not using them.
                             \item \strong{\code{interNormDraws}}: Character vector. Names of normally distributed inter-individual draws.
\item \strong{\code{interUnifDraws}}: Character vector. Names of uniform-distributed inter-individual draws.
\item \strong{\code{intraDrawsType}}: Character. Type of intra-individual draws ('halton','mlhs','pmc','sobol','sobolOwen','sobolFaureTezuka', 'sobolOwenFaureTezuka' or the name of an object loaded in memory).
\item \strong{\code{intraNDraws}}: Numeric scalar (>=0). Number of intra-individual draws per individual. Should be set to 0 if not using them.
\item \strong{\code{intraUnifDraws}}: Character vector. Names of uniform-distributed intra-individual draws.
                             \item \strong{\code{intraNormDraws}}: Character vector. Names of normally distributed intra-individual draws.
}}
\item{apollo_randCoeff}{Function. Used with mixing models. Constructs the random parameters of a mixing model. Receives two arguments:
\itemize{
\item \strong{\code{apollo_beta}}: Named numeric vector. Names and values of model parameters.
\item \strong{\code{apollo_inputs}}: The output of this function (\code{apollo_validateInputs}).
}}
\item{apollo_lcPars}{Function. Used with latent class models. Constructs a list of parameters for each latent class. Receives two arguments:
\itemize{
\item \strong{\code{apollo_beta}}: Named numeric vector. Names and values of model parameters.
\item \strong{\code{apollo_inputs}}: The output of this function (\code{apollo_validateInputs}).
}}
\item{recycle}{Logical. If TRUE, an older version of \code{apollo_inputs} is looked for in the calling environment (parent frame), and any
element in that old version created by the user is copied into the new \code{apollo_inputs} returned by this function.
For \code{recycle=TRUE} to work, the old version of \code{apollo_inputs} \strong{must} be named "apollo_inputs".
If FALSE, nothing is copied from any older version of apollo_inputs. FALSE is the default.}
\item{silent}{Logical. TRUE to keep the function from printing to the console. Default is FALSE.}
}
\value{
List grouping several required input for model estimation.
}
\description{
Searches the user work space (.GlobalEnv) for all necessary input to run \code{apollo_estimate}, and packs it in a single list.
}
\details{
All arguments to this function are optional. If the function is called without arguments, then it will look in
the user workspace (i.e. the global environment) for variables with the same name as its omitted arguments.
We strongly recommend users to visit \url{http://www.apollochoicemodelling.com/} for examples on how to use Apollo.
In the website, users will also find a detailed manual and a user-group for help and further reference.
}
|
f22f7e5c87d123fb1a5fa9e22b6fabf963b3dda3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dse/examples/shockDecomposition.Rd.R
|
c7201824bee5c711ce85a5a042007b447a075088
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
shockDecomposition.Rd.R
|
# Auto-extracted example for dse::shockDecomposition (from the package Rd file).
library(dse)
### Name: shockDecomposition
### Title: Shock Decomposition
### Aliases: shockDecomposition
### Keywords: ts
### ** Examples
# Load the differenced example data set shipped with the dse package.
data("eg1.DSE.data.diff", package="dse")
# Fit a VARX model by least squares, then plot the shock decomposition.
model <- estVARXls(eg1.DSE.data.diff)
shockDecomposition(model)
|
fad877c6eac84ce1e58c9733d666d0afcb3f033b
|
54d0e0b1cfb9935174e0f9907f176e721d6d3bf3
|
/16. CH8 - Tree-Based Methods Bagging, Random Forests/Section2BaggingAndRandomForests/LabCustomerChurnRandomForest.R
|
51a22a0f286681c5ef40cf9284d8504796fe39a8
|
[] |
no_license
|
clairehu9/R_ML_ISLR
|
29f16ddcb02d654ae272f06510d85243ea30c68e
|
26bce2a45a1037cfbbc64eef4dca0d93ea56f461
|
refs/heads/master
| 2020-09-12T06:11:09.600859
| 2019-11-18T01:38:55
| 2019-11-18T01:38:55
| 222,336,639
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,395
|
r
|
LabCustomerChurnRandomForest.R
|
rm(list=ls())
installIfAbsentAndLoad <- function(neededVector) {
  # Install (if missing) and attach every package named in neededVector.
  #
  # Args:
  #   neededVector: character vector of package names.
  #
  # require() is used deliberately: unlike library(), it returns FALSE when a
  # package is unavailable, which is the signal that it must be installed first.
  # Note: a `for` over an empty vector is a no-op, so the original
  # `length(neededVector) > 0` guard was redundant and has been dropped;
  # T/F were replaced with TRUE/FALSE (T and F are reassignable).
  for (thispackage in neededVector) {
    if (!require(thispackage, character.only = TRUE)) {
      install.packages(thispackage)
    }
    require(thispackage, character.only = TRUE)
  }
}
# Package roles:
#   pROC contains roc.plot
#   verification contains roc.area
#   rpart for building a single tree
needed <- c('randomForest', 'pROC', 'verification', 'rpart')
installIfAbsentAndLoad(needed)
### Get the data (expects churndata.csv in the working directory)
churndata<-read.table("churndata.csv",sep=",",header=T)
### Clean up: count missing values, drop incomplete rows, change area code to a factor
sum(is.na(churndata))
data <- na.omit(churndata)
str(data)
data$area<-factor(data$area)
### Create training and test sets (70% of rows go to training)
nobs <- nrow(data)
set.seed(5082)  # fixed seed so the train/test split is reproducible
train <- sample(nobs, 0.7*nobs)
### Grow a 500-tree forest (mtry = 4 predictors tried at each split;
### na.roughfix imputes any remaining NAs by column medians/modes)
rf <- randomForest(formula=churn ~ .,data=data[train,],ntree=500, mtry=4,
                   importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=FALSE)
summary(rf)
# Out-of-bag (OOB) predictions and how often each row was out-of-bag
head(rf$predicted,25)
rf$oob.times
### Display Variable Importance, sorted by each importance measure
importance(rf)[order(importance(rf)[,"MeanDecreaseAccuracy"], decreasing=T),]
importance(rf)[order(importance(rf)[,"MeanDecreaseGini"], decreasing=T),]
### Display a chart of Variable Importance
varImpPlot(rf, main="Variable Importance in the Random Forest")
### Examine Error Rates for the Trees (OOB plus one column per class)
head(rf$err.rate)
plot(rf, main="Error Rates for Random Forest")
legend("topright", c("OOB", "No", "Yes"), text.col=1:6, lty=1:3, col=1:3)
rf$confusion
# Find the number of trees that minimizes the OOB error rate
min.err <- min(rf$err.rate[,"OOB"])
min.err.idx <- which(rf$err.rate[,"OOB"]== min.err)
min.err.idx
rf$err.rate[min.err.idx[1],]
### Rebuild the forest with the number of trees that minimizes the OOB error rate
### - use the first one if there are more than one
set.seed(5082)
rf <- randomForest(formula=churn ~ .,data=data[train,],ntree= min.err.idx[1], mtry=4,
                   importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=FALSE)
### Look at voting info for each observation (OOB vote fractions per class)
head(rf$votes)
### Plot the OOB ROC curve and calculate AUC.
### (as.integer(as.factor(...)) - 1 recodes the churn factor to 0/1)
aucc <- roc.area(as.integer(as.factor(data[train, "churn"]))-1,rf$votes[,2])
aucc$A
aucc$p.value #null hypothesis: aucc=0.5
roc.plot(as.integer(as.factor(data[train,"churn"]))-1,rf$votes[,2], main="")
legend("bottomright", bty="n",
       sprintf("Area Under the Curve (AUC) = %1.3f", aucc$A))
title(main="OOB ROC Curve Random Forest churndata.csv",
      sub=paste("David Murray", format(Sys.time(), "%Y-%b-%d %H:%M:%S"), Sys.info()["user"]))
### Evaluate by scoring the training set (counts, then row percentages)
prtrain <- predict(rf, newdata=data[train,])
table(data[train,"churn"], prtrain,dnn=c("Actual", "Predicted"))
round(100* table(data[train,"churn"], prtrain,dnn=c("% Actual", "% Predicted"))/length(prtrain))
### Evaluate by scoring the test set (rows not used for training)
test <- setdiff(1:nobs, train)
prtest <- predict(rf, newdata=na.omit(data[test,]))
table(data[test,"churn"], prtest,dnn=c("Actual", "Predicted"))
round(100* table(data[test,"churn"], prtest,dnn=c("% Actual", "% Predicted"))/length(prtest),1)
### Change vote cutoff to reduce Type II Error Rate (at the expense of the Type I Error Rate):
### cutoff=c(0.8, 0.2) means only 20% of votes are needed to predict the second class
set.seed(527)
rfLowerT2Error <- randomForest(formula=churn ~ .,data=data[train,],ntree=500, mtry=4,
                               importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=FALSE,cutoff=c(0.8,0.2))
rfLowerT2Error
### Evaluate by scoring the test set
prtestLowerT2Error <- predict(rfLowerT2Error, newdata=na.omit(data[test,]))
table(data[test,"churn"], prtestLowerT2Error ,dnn=c("Actual", "Predicted"))
round(100* table(data[test,"churn"], prtestLowerT2Error ,dnn=c("% Actual", "% Predicted"))/length(prtestLowerT2Error))
### Build a single tree using rpart, prune it, and evaluate it using the same test set
# NOTE(review): the tree below is fit on data[test,]; fitting on data[train,] seems
# intended for a fair comparison with the forest -- confirm with the author.
rpart<-rpart(churn ~ .,data=data[test,], method="class",parms=list(split="information"),control=rpart.control(usesurrogate=0, maxsurrogate=0))
# Pick the complexity parameter (CP) with the lowest cross-validated error
xerr<-rpart$cptable[,"xerror"]
minxerr<-which.min(xerr)
mincp<-rpart$cptable[minxerr,"CP"]
### prune classification tree ###
rpart.prune<-prune(rpart,cp=mincp)
predictonetree <- predict(rpart.prune, newdata=data[test,], type="class")
mytable<-table(data[test,"churn"], predictonetree ,dnn=c("Actual", "Predicted"))
mytable
round(100*mytable/sum(mytable))
|
b2d4019ac14923868a2cb549ca763ee5871ab6c5
|
3b2da3a6f650cd7b39fef745aaa3fdd358008518
|
/Cousera Software dev in R/2.Advanced R programming/Quiz.R
|
49f4e7eb476e4f229811073d4bf5d7463d82b7c8
|
[] |
no_license
|
fukeyouko/TestPackage
|
d21af339da4610e72953978ecf5af4ba2681ba4a
|
a2ee7a8d0a132b8b27f9020810e6861d0cd4726f
|
refs/heads/master
| 2020-03-14T05:19:37.371713
| 2018-04-29T08:40:37
| 2018-04-29T08:40:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,397
|
r
|
Quiz.R
|
library(readr)
library(dplyr)
library(ggplot2)
# Daily EPA PM2.5 speciation data for 2014, read straight from the bz2-compressed CSV
DailySpec<-read.csv("data/daily_SPEC_2014.csv.bz2")
object.size(DailySpec)
#Q1: What is average Arithmetic.Mean for "Bromine PM2.5 LC" in the state of Wisconsin in this dataset?
DailySpec %>% filter(State.Name=="Wisconsin",
                     Parameter.Name=="Bromine PM2.5 LC") %>%
  summarise(mean=mean(Arithmetic.Mean,na.rm=TRUE))
#Q2: Which constituent Parameter.Name has the highest average level?
# (average per parameter, sorted descending so the answer is the first row)
q2<-DailySpec %>% select(Parameter.Name,Arithmetic.Mean) %>%
  group_by(Parameter.Name)%>%
  summarise(mean=mean(Arithmetic.Mean,na.rm=TRUE))%>%
  arrange(desc(mean))
q2
#Q3: Which monitoring site has the highest average level of "Sulfate PM2.5 LC" across all time?
# A site is identified by the (State.Code, County.Code, Site.Num) triple.
q3<-DailySpec %>% filter(Parameter.Name=="Sulfate PM2.5 LC")%>%
  group_by(State.Code,County.Code,Site.Num)%>%
  summarize(mean=mean(Arithmetic.Mean))%>%
  arrange(desc(mean))
#Q4: What is the absolute difference in the average levels of "EC PM2.5 LC TOR" between the
# states California and Arizona, across all time and all monitoring sites?
# Fixes relative to the original:
#   (1) the filter used "Sulfate PM2.5 LC", which does not match the question ("EC PM2.5 LC TOR");
#   (2) q4 <- data.frame() was dead code, immediately overwritten;
#   (3) Ca - Ar subtracted whole one-row data frames (errors on the character
#       State.Name column) and never took the absolute value -- subtract the
#       numeric means and wrap in abs() instead.
q4 <- DailySpec %>%
  filter(Parameter.Name == "EC PM2.5 LC TOR",
         State.Name %in% c("California", "Arizona")) %>%
  group_by(State.Name) %>%
  summarize(mean = mean(Arithmetic.Mean, na.rm = TRUE))
Ca <- q4 %>% filter(State.Name == "California")
Ar <- q4 %>% filter(State.Name == "Arizona")
abs(Ca$mean - Ar$mean)
#Q5: What is the median level of "OC PM2.5 LC TOR" in the western United States, across all time?
#Define western as any monitoring location that has a Longitude LESS THAN -100.
q5<-DailySpec %>% filter(Parameter.Name=="OC PM2.5 LC TOR",
                         Longitude < -100)%>%
  summarize(median=median(Arithmetic.Mean,na.rm=TRUE))
#Q6: How many monitoring sites are labeled RESIDENTIAL land use in a SUBURBAN setting?
library(readxl)
AqsSite<-read_excel("data/aqs_sites.xlsx")
AqsSite%>%filter(`Land Use`=="RESIDENTIAL",`Location Setting`=="SUBURBAN")%>%
  summarise(count=n())
#Q7: combine the two tables (daily measurements + site metadata) on the site identifiers,
# keeping only eastern (Longitude >= -100) "EC PM2.5 LC TOR" records
Sub_DailySpec<-DailySpec%>%select(State.Code,County.Code,Site.Num,Longitude,Parameter.Name,CBSA.Name,Arithmetic.Mean)%>%
  filter(Parameter.Name=="EC PM2.5 LC TOR")%>%
  filter(Longitude >= -100)
# Rename the site-table key columns so they match the daily-data column names for the join
Sub_AqsSite<- AqsSite %>% select(`State Code`,`County Code` ,`Site Number`,`Land Use`, `Location Setting`)
colnames(Sub_AqsSite)<-c("State.Code","County.Code","Site.Num","LandUse", "LocationSetting")
Sub_DailySpec<-Sub_DailySpec %>%
  left_join(Sub_AqsSite,by=c("State.Code" ,"County.Code","Site.Num"))
Sub_DailySpec %>% filter(LandUse=="RESIDENTIAL",LocationSetting=="SUBURBAN")%>%
  summarise(median=median(Arithmetic.Mean))
#Q8: Amongst monitoring sites that are labeled as COMMERCIAL for "Land Use",
#which month of the year has the highest average levels of "Sulfate PM2.5 LC"?
library(lubridate)
Sub_DailySpec<-DailySpec%>%
  select(State.Code,County.Code,Site.Num,Date.Local,Parameter.Name,Arithmetic.Mean)%>%
  filter(Parameter.Name=="Sulfate PM2.5 LC")
# Parse the date column and derive the month name used for grouping
Sub_DailySpec$Date.Local=as.Date(Sub_DailySpec$Date.Local)
Sub_DailySpec<-Sub_DailySpec %>%
  mutate(month=months(Date.Local))
# Attach the site metadata (Land Use / Location Setting) prepared in Q7
Sub_DailySpec<-Sub_DailySpec %>%
  left_join(Sub_AqsSite,by=c("State.Code" ,"County.Code","Site.Num"))
Sub_DailySpec %>% filter(LandUse=="COMMERCIAL")%>%
  group_by(month) %>%
  summarise(mean=mean(Arithmetic.Mean))%>%
  arrange(desc(mean))
#Q9 Take a look at the data for the monitoring site identified by State Code 6,
#County Code 65, and Site Number 8001 (this monitor is in California). At this monitor,
#for how many days is the sum of "Sulfate PM2.5 LC" and "Total Nitrate PM2.5 LC" greater than 10?
Sub_DailySpec<-DailySpec %>% filter(State.Code==6,County.Code==65,Site.Num==8001)%>%
filter(Parameter.Name %in% c("Sulfate PM2.5 LC","Total Nitrate PM2.5 LC"))%>%
select(Date.Local,Parameter.Name,Arithmetic.Mean)
Sub_DailySpec$Date.Local=as.Date(Sub_DailySpec$Date.Local)
Sub_DailySpec$Parameter.Name=as.character(Sub_DailySpec$Parameter.Name)
total <- Sub_DailySpec%>%
group_by(Date.Local)%>%
summarise(sum=sum(Arithmetic.Mean))%>%
filter(sum>10)%>%
summarise(count=n())
#Q10 Which monitoring site in the dataset has the highest correlation
#between "Sulfate PM2.5 LC" and "Total Nitrate PM2.5 LC" across all dates?
Sub_DailySpec<-DailySpec %>% filter(State.Code==6,County.Code==65,Site.Num==8001)%>%
filter(Parameter.Name==c("Sulfate PM2.5 LC","Total Nitrate PM2.5 LC"))%>%
select(Date.Local,Parameter.Name,Arithmetic.Mean)
|
001d35c155331d760020727a4f7ff6edbd1ad8f1
|
a7dce817823c6f4543a401af89aa7980ee962af7
|
/cachematrix.R
|
432f1c8457cb5f14c8b3773229036f2d87031ef9
|
[] |
no_license
|
zboniecki/ProgrammingAssignment2
|
8c9032f07e6fbfa94eae2c152559feed03e7f2c9
|
effe7de28c26e15894121d711b404c19d280403f
|
refs/heads/master
| 2021-07-18T05:01:17.161680
| 2017-10-22T10:05:31
| 2017-10-22T10:05:31
| 107,846,719
| 0
| 0
| null | 2017-10-22T07:41:21
| 2017-10-22T07:41:21
| null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
cachematrix.R
|
## MakeCacheMatrix creates a matrix
## cacheSolve calculates an inverse of that matrix and caches the result
## if run a second time it just returns the cached result
## makeCacheMatrix creates a matrix; default is a 2 by 2 matrix with 4, 2, 7, 6. Then sets inv_cache as NULL
## Build a cache-enabled wrapper around a matrix.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)       -- replace the stored matrix and invalidate the cache
##   get()        -- return the stored matrix
##   setsolve(s)  -- store a computed inverse in the cache
##   getsolve()   -- return the cached inverse, or NULL if not yet computed
## The default is a 2x2 matrix (4, 2, 7, 6).
makeCacheMatrix <- function(x = matrix(c(4, 2, 7, 6), 2, 2)) {
  inv_cache <- NULL
  list(
    set = function(y) {
      # New matrix: any previously cached inverse is now stale.
      x <<- y
      inv_cache <<- NULL
    },
    get = function() {
      x
    },
    setsolve = function(solve) {
      inv_cache <<- solve
    },
    getsolve = function() {
      inv_cache
    }
  )
}
## cacheSolve returns inverse of matrix created by makeCacheMatrix then stores value in inv_cache, when run a second time it
## will just retrieve the already calculated value from inv_cache
##Note: save makeCacheMatrix to a value first eg: x <- makeCacheMatrix((matrix(c(8,3,2,4,5,6,7,8,5),3,3))) then run cacheSolve(x)
## Return the inverse of the matrix held in a makeCacheMatrix() object.
##
## On the first call the inverse is computed with solve() (extra arguments
## are forwarded) and stored via x$setsolve(); later calls emit a
## "getting cached data" message and return the cached inverse directly.
## Note: x should be created with e.g.
##   x <- makeCacheMatrix(matrix(c(8,3,2,4,5,6,7,8,5),3,3))
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and return the fresh inverse.
    fresh <- solve(x$get(), ...)
    x$setsolve(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
ac3b3e472ffff43a68b9d4c88fac66a7b00bdad1
|
3ba0e7569ae04208f34c3f5798120513b22be416
|
/man/getAddLine.Rd
|
a6733f1ae3fef0b764abf61381c1fa42d7e45d4d
|
[] |
no_license
|
bfanson/hormLong
|
edf5cc9e5c0dd93bdf0129e246671195d325138a
|
63fb9b760f2c9335a28d4b7549bda3394939610c
|
refs/heads/master
| 2016-09-05T18:08:06.750089
| 2015-10-10T10:39:34
| 2015-10-10T10:39:34
| 26,885,167
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 455
|
rd
|
getAddLine.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/hormHelpers.R
\name{getAddLine}
\alias{getAddLine}
\title{Helper function for getting Baseline for plotting}
\usage{
getAddLine(d_s, crit, conc, a_l)
}
\arguments{
\item{d_s}{ds_sub}
\item{crit}{x$criteria}
\item{conc}{conc_var}
\item{a_l}{add_line (which line type to return)}
}
\value{
baseline
}
\description{
Helper function for getting Baseline for plotting
}
|
8700fbc9eddbe954137c93e1936e636e58a79fb0
|
6ba493ca9129518a3a9d52826beb6d3404b140da
|
/R/CAARiseTransitSet_Calculate.R
|
596361b5ba1cf722bd71ad5b7172fa0ef919caed
|
[] |
no_license
|
helixcn/skycalc
|
a298e7e87a46a19ba2ef6826d611bd9db18e8ee2
|
2d338b461e44f872ceee13525ba19e17926b2a82
|
refs/heads/master
| 2021-06-16T08:54:46.457982
| 2021-03-25T02:15:38
| 2021-03-25T02:15:38
| 35,885,876
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
CAARiseTransitSet_Calculate.R
|
## Compute rise/transit/set circumstances via the package's compiled routine.
## Thin FFI wrapper: all arguments are passed straight through to the native
## C/C++ routine registered as "CAARiseTransitSet_Calculate".
##
## JD                  -- Julian Day of interest.
## Alpha1..3, Delta1..3 -- right ascension / declination of the body at three
##                         consecutive epochs (units/epoch conventions are
##                         defined by the native routine -- TODO confirm
##                         against the AA+ library documentation).
## Longitude, Latitude  -- observer's geographic position.
## h0                   -- standard altitude of the body at rise/set.
## Returns whatever the native routine returns (not verifiable from here).
CAARiseTransitSet_Calculate <-
function(JD, Alpha1, Delta1, Alpha2, Delta2, Alpha3, Delta3, Longitude, Latitude, h0){
	.Call("CAARiseTransitSet_Calculate", JD, Alpha1, Delta1, Alpha2, Delta2, Alpha3, Delta3, Longitude, Latitude, h0)
}
|
6a44b83bd609bb5c3ffc60d481ab7bcb1566ca47
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/2001.03.12.editorial.38894.0568.r
|
844408a2a2182bd2e098e347a8b97aaeaf28efe1
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,068
|
r
|
2001.03.12.editorial.38894.0568.r
|
geaba am scris de mai multe ori in " Evenimentul zilei " ca lucrarea " Supliciu " de Constantin Brancusi nu valoreaza o jumatate de milion de dolari .
pentru a arata ce mare patriot si harnic ministru al culturii este , Ion Caramitru a facut tot ce a putut ca guvernul sa plateasca enorma suma .
pentru aceasta a infiintat o comisie de evaluare si , folosindu - se de numele personalitatilor incluse , a crezut ca poate da greutate pretului , in ciuda faptului ca la o cercetare mai atenta nu se ajungea nicicum la atitia bani .
s - au facut hirtiile , s - au creat aparentele unei mari lovituri , dar achizitia aceasta continua sa nu miroasa a bine .
nu pentru ca am aduce in tara inca o piesa de Constantin Brancusi . Doamne fereste !
si o fotografie in plus e pretioasa . Dar nu cumparata cu sute sau cu milioane de dolari .
poate sa aiba dosarul achizitiei statuii " Supliciu " toate hirtiile necesare , de la Gaesti pina la Papa . Noi tot ne vom stradui sa - i demonstram maruntului ministru ca isprava sa e dubioasa si ca ramine tot un copan cultural de conjunctura .
ba , chiar continuam sa raminem banuitori ca pretul a fost supraevaluat din cine stie ce ratiuni si aducator de bucurie pe capul cine stie cui .
ca sa ne arate ca politicianul roman nu sare de o anume conditie , indiferent ca vine din teatru sau din istoria artei , Razvan Theodorescu ( alaturi de Ion Iliescu si Adrian Nastase ) s - a asezat si el in fruntea " Anului Brancusi " ( de parca l - ar fi inscris in PDSR si pe marele sculptor ) si a anuntat ca va cumpara orice alta lucrare .
si , ca sa - si arate straduinta si dragostea fata de opera lui Brancusi , a si anuntat achizitionarea unui " Cap de copil " la un pret pe care romanii , din patriotism , il pot accepta .
dar suma aceasta nu poate decit sa stirneasca zimbetul colectionarilor de arta si al expertilor .
e vorba iarasi despre un pret umflat .
umflat doar din dragoste de Brancusi , umflat doar pentru ca guvernul Nastase are bani si n - are pe ce - i cheltui ?
sau pretul anuntat de Razvan Theodorescu pentru " Cap de copil " e mai mare din aceleasi ratiuni ca si in cazul lucrarii " Supliciu " ?
faptul ca aceste opere au fost oferite si altor amatori de arta , la sume mult mai mici , ne duce si de asta data cu gindul la posibilitatea unor aranjamente de culise .
e Razvan Theodorescu expert in materie de preturi la sculpturi ?
sint specialisti cei care si - au pus semnatura pe procesul - verbal al Comisiei de achizitionare constituita de Ion Caramitru ?
nici vorba !
membrii comisiei sint muzeografi , critici de arta , dar novici in materie de tranzactii pe piata de arta .
ce ne roade pe noi ?
nu " Anul Brancusi " , ci folosirea acestei ocazii pentru ca banii publici sa o ia pe diferite drumuri fara nici un control si fara o acoperire temeinica .
miine , poimiine se va cumpara si o scrisoare de Mihai Eminescu cu un milion de dolari , sau o palarie a lui Ion Luca Caragiale cu cinci sute de mii de franci elvetieni si asa mai departe .
romanii , din respect pentru clasicii lor , nu vor zice nimic .
dar in spatele acestui comert cu sentimente si valori de patrimoniu se ascunde de fapt altceva .
si daca Mugur Isarescu si Decebal Traian Remes au facut o prostie si s - au lasat imbroboditi de Ion Caramitru ( daca nu cumva stiau si ei ceva in plus despre conditiile achizitiei ) , de ce Adrian Nastase si Ion Iliescu l - ar lasa pe Razvan Theodorescu sa faca acelasi lucru ?
( oare nu stiu si ei ceva in plus ? ) " Anul Brancusi " e un bun prilej de cunoastere a unui titan din cultura romaneasca si universala .
poate asa nu vom mai avea compatrioti care sa fure chivotul bisericii ctitorite de maestru .
dar , in acelasi timp , nu poate fi pretext pentru cheltuieli nejustificate . Doar daca Brancusi a fost inscris in PDSR .
si noi nu stim . Atunci da , guvernul Nastase poate oferi si un milion de dolari pe o tesla din atelierul lui Brancusi , din Paris , de linga Centrul Georges Pompidou .
atunci da , merita banii !
si ar trebui inscris si Caragiale in acelasi partid !
|
073406f597a88eecd0a59f48fcd83cd2a2b04de1
|
5debecde26fc6cf6a5796a86d768f994b39741cd
|
/DemographieAmericaine/Scripts.R
|
becb06601a4c196985ab9ff42becf0b420b00a8b
|
[] |
no_license
|
rcourivaud/FouilleDeDonneesAvecR
|
154754fa63fcd74d880f061966ecbff28a5e7b30
|
096b442beb47085686c0e1f391dcc7a4e7d9b891
|
refs/heads/master
| 2021-01-12T08:00:31.924620
| 2016-12-21T21:22:41
| 2016-12-21T21:22:41
| 77,086,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,846
|
r
|
Scripts.R
|
# install.packages("maps")
# install.packages("FactoMineR")
library(FactoMineR)
library(maps)
library(RColorBrewer)
library(ade4)
library(corrplot)
source('ACP_TOOLS.R')
# U.S. demography data, one row per state; semicolon-separated file.
df <- read.csv('DataImi.csv', sep=';', header=TRUE)
summary(df)
head(df)
# =========================================================
# Cleaning Data
# =========================================================
# Lower-case the state names so they match map()'s region names.
df$State <- tolower(df$State)
png("HistoIm_loc.png")
hist(df$IM_Loc_01, breaks=100)
dev.off()
hist(df$IM_Loc_00, breaks=100)
# Keep only the 2001 columns (odd positions by file layout; presumably
# State plus the seven 2001 indicators -- TODO confirm against the CSV).
df.2001 <- df[,c(1, 3, 5, 7, 9, 11, 13, 15)]
# Discretise each 2001 indicator into 4 ordered categories; the cut points
# were picked from the histograms above.
# Domestic migration: VHD/HD = strong/net departures, HA/VHA = net/strong arrivals.
IM_Loc_01_cat <- as.factor(ifelse(df.2001$IM_Loc_01<= -6000, 'VHD',
                 ifelse(df.2001$IM_Loc_01>=-6000 & df.2001$IM_Loc_01<0 , 'HD',
                 ifelse(df.2001$IM_Loc_01>=0 & df.2001$IM_Loc_01<6000 , 'HA',
                 ifelse(df.2001$IM_Loc_01>=6000 , 'VHA', 0)))))
# Total population: LP/MP/HP/VHP = low/medium/high/very-high population.
POP_01_cat <- as.factor(ifelse(df.2001$Pop_Tot_01< 1000000, 'LP',
              ifelse(df.2001$Pop_Tot_01>=1000000 & df.2001$Pop_Tot_01<3500000 , 'MP',
              ifelse(df.2001$Pop_Tot_01>=3500000 & df.2001$Pop_Tot_01<8000000 , 'HP',
              ifelse(df.2001$Pop_Tot_01>=8000000 , 'VHP', 0)))))
hist(df.2001$IM_Int_01, breaks=100)
# International immigration: LD/MD/HD/VHD.
IM_INT_01_cat <- as.factor(ifelse(df.2001$IM_Int_01< 10000, 'LD',
                 ifelse(df.2001$IM_Int_01>=10000 & df.2001$IM_Int_01<25000 , 'MD',
                 ifelse(df.2001$IM_Int_01>=25000 & df.2001$IM_Int_01<50000 , 'HD',
                 ifelse(df.2001$IM_Int_01>=50000 , 'VHD', 0)))))
hist(df.2001$Nb_Nai_01, breaks=100)
# Number of births: LN/MN/HN/VHN.
Nb_Nai_01_cat <- as.factor(ifelse(df.2001$Nb_Nai_01< 25000, 'LN',
                 ifelse(df.2001$Nb_Nai_01>=25000 & df.2001$Nb_Nai_01<50000 , 'MN',
                 ifelse(df.2001$Nb_Nai_01>=50000 & df.2001$Nb_Nai_01<110000 , 'HN',
                 ifelse(df.2001$Nb_Nai_01>=110000 , 'VHN', 0)))))
hist(df.2001$Nb_DC_01, breaks=100)
# Number of deaths: LDC/MDC/HDC/VHDC.
Nb_DC_01_cat <- as.factor(ifelse(df.2001$Nb_DC_01< 15000, 'LDC',
                ifelse(df.2001$Nb_DC_01>=15000 & df.2001$Nb_DC_01<30000 , 'MDC',
                ifelse(df.2001$Nb_DC_01>=30000 & df.2001$Nb_DC_01<60000 , 'HDC',
                ifelse(df.2001$Nb_DC_01>=60000 , 'VHDC', 0)))))
hist(df.2001$Pop_inf_65_01, breaks=100)
# Population under 65: LP/MP/HP/VHP (same cut points as total population).
Pop_inf_65_01_cat <- as.factor(ifelse(df.2001$Pop_inf_65_01< 1000000, 'LP',
                     ifelse(df.2001$Pop_inf_65_01>=1000000 & df.2001$Pop_inf_65_01<3500000 , 'MP',
                     ifelse(df.2001$Pop_inf_65_01>=3500000 & df.2001$Pop_inf_65_01<8000000 , 'HP',
                     ifelse(df.2001$Pop_inf_65_01>=8000000 , 'VHP', 0)))))
hist(df.2001$Pop_Sup_65_01, breaks=100)
# Population 65 and over: LP/MP/HP/VHP with smaller cut points.
Pop_Sup_65_01_cat <- as.factor(ifelse(df.2001$Pop_Sup_65_01< 400000, 'LP',
                     ifelse(df.2001$Pop_Sup_65_01>=400000 & df.2001$Pop_Sup_65_01<800000 , 'MP',
                     ifelse(df.2001$Pop_Sup_65_01>=800000 & df.2001$Pop_Sup_65_01<1100000 , 'HP',
                     ifelse(df.2001$Pop_Sup_65_01>=1100000 , 'VHP', 0)))))
# Append the 7 categorical versions as new columns (positions 9..15).
df.2001['IM_LOC'] <- IM_Loc_01_cat
df.2001['POP'] <- POP_01_cat
df.2001['IM_INT'] <- IM_INT_01_cat
df.2001['NB_NAI'] <- Nb_Nai_01_cat
df.2001['NB_DC'] <- Nb_DC_01_cat
df.2001['POP_INF'] <- Pop_inf_65_01_cat
df.2001['POP_SUP'] <- Pop_Sup_65_01_cat
# MCA (AFCM) on the 7 categorical columns only.
df.2001.AFCM <- df.2001[,c(9:15)]
res.mca = MCA(df.2001.AFCM)
dimdesc(res.mca)
# Confidence ellipses around category points, pair by pair.
png("EllispseDC_NAI.png")
plotellipses(res.mca,keepvar=c(5,4))
dev.off()
png("EllispseINF_SUP.png")
plotellipses(res.mca,keepvar=c(6,7))
dev.off()
png("EllispsePop_IM.png")
plotellipses(res.mca,keepvar=c(1,2,3))
dev.off()
# Reduce df to the columns of interest for the numeric analyses below.
colones_interet <- c(1,3, 9, 11, 13, 15,16)
df<- df[,colones_interet]
stat.graph <- 1
# Split numeric vs. categorical columns.
va.num<-which(sapply(df,is.numeric))
va.cat<-which(sapply(df,is.factor))
df.num<- df[,va.num]
df.cat<- df[,va.cat]
# Number of individuals (states) used later by the bootstrap.
nb.ind <- dim(df.2001.AFCM)[1]
if (stat.graph)
{
  # Standardise and box-plot all numeric variables.
  # NOTE(review): PROC_BOXPLOTALL comes from ACP_TOOLS.R (not visible here).
  df.num.scale <-apply(df.num,2,scale)
  PROC_BOXPLOTALL(df.num.scale, p = c(1,1), main.name = 'donn?es standardis?es')
}
# =========================================================
# ---------------  POPULATION  -----------------------
# =========================================================
# Choropleth maps of total population, 2001 and 2000: each state's colour
# encodes its rank in ascending population order via a 100-step Blues ramp.
states.latlon <- read.csv("state_latlon.csv", header=TRUE, sep=',')
US = map("state", fill = TRUE, plot = FALSE)
# Rank the states by population for each year.
Pop01 <- df[order(df$Pop_Tot_01),]
Pop00 <- df[order(df$Pop_Tot_00),]
dpt01 <- Pop01$State
dpt00 <- Pop00$State
# For each map polygon, match.map() returns the index of its state in the
# sorted vector, i.e. its population rank; that rank indexes the ramp.
match01 <- match.map(US, dpt01, exact=FALSE)
match00 <- match.map(US, dpt00, exact=FALSE)
blues <- colorRampPalette(brewer.pal(9,"Blues"))(100)
colors01 <- blues[match01]
colors00 <- blues[match00]
png("pop01.png")
map("state", fill=TRUE, col=colors01, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
dev.off()
png("pop00.png")
map("state", fill=TRUE, col=colors00, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
dev.off()
# =======================================================
# --------------- Local Immigration -----------------------
# =========================================================
# Choropleth maps of domestic ("local") net migration, 2001 and 2000,
# colouring each state by its rank via a 100-step Reds ramp.
US = map("state", fill = TRUE, plot = FALSE)
# BUGFIX: the two orderings were swapped in the original (Im.Loc.01 was
# sorted by IM_Loc_00 and Im.Loc.00 by IM_Loc_01), which corrupted both the
# map colours and the legend bounds read off the sorted columns below.
Im.Loc.01 <- df[order(df$IM_Loc_01),]
Im.Loc.00 <- df[order(df$IM_Loc_00),]
dpt01.Im <- Im.Loc.01$State
dpt00.Im <- Im.Loc.00$State
# Map polygon -> rank of its state in the sorted vector.
match01.Im <- match.map(US, dpt01.Im, exact=FALSE)
match00.Im <- match.map(US, dpt00.Im, exact=FALSE)
Reds <- colorRampPalette(brewer.pal(9,"Reds"))(100)
colors01.Im <- Reds[match01.Im]
colors00.Im <- Reds[match00.Im]
png("Map_IM_Loc_01.png")
map("state", fill=TRUE, col=colors01.Im, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
# First/last entries of the sorted column give the legend's min/max.
min <- Im.Loc.01$IM_Loc_01[1]
max <- Im.Loc.01$IM_Loc_01[length(Im.Loc.01$IM_Loc_01)]
legend("bottomright", legend = trunc(seq(min, max, abs(max-min)/10)), pch = 20, col = Reds[seq(0,63, 63/11)])
dev.off()
png("Map_IM_Loc_00.png")
map("state", fill=TRUE, col=colors00.Im, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
min <- Im.Loc.00$IM_Loc_00[1]
max <- Im.Loc.00$IM_Loc_00[length(Im.Loc.00$IM_Loc_00)]
legend("bottomright", legend = trunc(seq(min, max, abs(max-min)/10)), pch = 20, col = Reds[seq(0,63, 63/11)])
dev.off()
# =======================================================
# --------------- International Immigration -----------------------
# =========================================================
# Choropleth maps of international immigration, 2001 and 2000, colouring
# each state by its rank via a 100-step Purples ramp.
US = map("state", fill = TRUE, plot = FALSE)
# BUGFIX: as in the local-immigration section, the orderings were swapped
# (Im.Int.01 sorted by IM_Int_00 and vice versa); the legend below reads
# min/max from the sorted IM_Int_01 column, so the sort key must match.
Im.Int.01 <- df[order(df$IM_Int_01),]
Im.Int.00 <- df[order(df$IM_Int_00),]
dpt01.Im.Int <- Im.Int.01$State
dpt00.Im.Int <- Im.Int.00$State
# Map polygon -> rank of its state in the sorted vector.
match01.Im.Int <- match.map(US, dpt01.Im.Int, exact=FALSE)
match00.Im.Int <- match.map(US, dpt00.Im.Int, exact=FALSE)
Purples <- colorRampPalette(brewer.pal(9,"Purples"))(100)
colors01.Im.Int <- Purples[match01.Im.Int]
colors00.Im.Int <- Purples[match00.Im.Int]
png("Map_IM_Int_01.png")
map("state", fill=TRUE, col=colors01.Im.Int, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
# Legend bounds come from the first/last entries of the sorted column.
min <- Im.Int.01$IM_Int_01[1]
max <- Im.Int.01$IM_Int_01[length(Im.Int.01$IM_Int_01)]
legend("bottomright", legend = trunc(seq(min, max, abs(max-min)/10)), pch = 20, col = Purples[seq(0,63, 63/11)])
dev.off()
png("Map_IM_Int_00.png")
map("state", fill=TRUE, col=colors00.Im.Int, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
min <- Im.Int.00$IM_Int_00[1]
max <- Im.Int.00$IM_Int_00[length(Im.Int.00$IM_Int_00)]
legend("bottomright", legend = trunc(seq(min, max, abs(max-min)/10)), pch = 20, col = Purples[seq(0,63, 63/11)])
dev.off()
# =========================================================
# ---------------  POPULATION  -----------------------
# =========================================================
# Choropleth maps of the 65-and-over population, 2001 and 2000, with a
# 100-step Greens ramp indexed by each state's rank.
US = map("state", fill = TRUE, plot = FALSE)
# NOTE(review): the 2000 column is spelled Pop_sup_65_00 (lower-case "s")
# while 2001 uses Pop_Sup_65_01 -- confirm this matches the CSV header and
# is not a typo yielding NULL.
Sup_65_01 <- df[order(df$Pop_Sup_65_01),]
Sup_65_00 <- df[order(df$Pop_sup_65_00),]
dpt01.sup <- Sup_65_01$State
dpt00.sup <- Sup_65_00$State
# Map polygon -> rank of its state in the sorted vector.
match01.sup <- match.map(US, dpt01.sup, exact=FALSE)
match00.sup <- match.map(US, dpt00.sup, exact=FALSE)
Greens <- colorRampPalette(brewer.pal(9,"Greens"))(100)
colors01.sup <- Greens[match01.sup]
colors00.sup <- Greens[match00.sup]
png("Map_Pop_sup_01.png")
map("state", fill=TRUE, col=colors01.sup, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
# min/max computed but the legend call is left commented out below.
min <- Sup_65_01$Pop_Sup_65_01[1]
max <- Sup_65_01$Pop_Sup_65_01[length(Sup_65_01$Pop_Sup_65_01)]
# legend("bottomright", legend = trunc(seq(min, max, abs(max-min)/10)), pch = 20, col = Reds[seq(0,63, 63/11)])
dev.off()
png("Map_Pop_sup_00.png")
map("state", fill=TRUE, col=colors00.sup, resolution=0)
text(states.latlon$state, x=states.latlon$longitude, y=states.latlon$latitude)
dev.off()
# =========================================================
#
# =========================================================
#-> Univariate statistics.
# NOTE(review): df.2001.AFCM holds factor columns, so sd() on them is
# suspect -- these summaries were probably intended for df.num; verify.
stat.sum <-apply(df.2001.AFCM,2,summary) ;
stat.sd <-apply(df.2001.AFCM,2,sd)
pca.stat <-rbind(stat.sum,stat.sd)
#-> Correlation matrix of the numeric 2001 indicators.
# NOTE(review): df was reduced to 7 columns earlier (colones_interet), so
# column indices up to 15 look stale -- confirm which data frame is meant.
df01 <- df[,c(3,5,7,9,11,13,15)]
df01.cor<-cor(df01)
# Diverging red/white/blue palette for the correlogram.
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
png("Corrélation.png")
corrplot(df01.cor, method="shade", shade.col=NA, tl.col="black", tl.srt=45,col=col(200), addCoef.col="black", addcolorlabel="no", order="AOE")
dev.off()
#-> Pairwise scatter plots of the numeric variables.
pairs(df.num)
#=====================================================================================
#                             AFCM (multiple correspondence analysis)
#=====================================================================================
# Re-fit the MCA on the 7 categorical indicators and decide how many axes
# to keep: scree/inertia plot, Kaiser rule, Anderson intervals, bootstrap.
res.mca = MCA(df.2001.AFCM)
#--------------------  NUMBER OF AXES AND INERTIA -----------------------
#-> cumulative inertia per axis (7 variables x 4 levels => 21 eigenvalues)
inertie<-matrix(c(seq(1,21,1),res.mca$eig[,3]),ncol = 2) ;
colnames(inertie)<-c('axe','% inertie cumul?e')
png("AxeKaiser.png")
plot(inertie[,2]~inertie[,1],type = 'b',xlab='axe',ylab='% inertie cumul?e')
dev.off()
#-> Kaiser criterion: keep axes whose eigenvalue is >= 1
axe.kaiser<-which(res.mca$eig[,1] >= 1)
#-> Anderson confidence interval on the eigenvalues
# BUGFIX: the original referenced `ca$eig`, but no object `ca` exists in
# this script; the eigenvalues of res.mca are clearly intended (the
# axe.anderson line below already uses res.mca$eig).
Icm<-res.mca$eig[,1]*exp(-1.96*sqrt(2/(nb.ind-1)))
Icp<-res.mca$eig[,1]*exp(+1.96*sqrt(2/(nb.ind-1)))
axe.anderson<-as.matrix(cbind(Icm,res.mca$eig[,1],Icp),ncol = 3)
#-> full bootstrap over the axes: resample the individuals (states) with
#   replacement, refit the MCA, collect the first nb.axe eigenvalues.
B = 2000 ; alpha = 0.1 ; nb.axe <-4
lam.star <-matrix(0,ncol = nb.axe,nrow = B)
for (i in 1 : B)
{ # Bootstrap row indices. (Cleaned from `sample(size<-seq(1,nb.ind), ...)`,
  # which leaked a stray `size` variable as a side effect.)
  boot.lam<- sample(seq_len(nb.ind), replace = TRUE)
  df.num.star <- df.2001.AFCM[boot.lam,]
  mca.star<-MCA(df.num.star,graph = FALSE)
  for (j in 1 : nb.axe)
  { lam.star[i,j]<-mca.star$eig[j,1]}
}
# Bootstrap mean/sd and empirical (1 - alpha) CI for the first eigenvalue.
lam.star.mean <-mean(lam.star[,1])
lam.star.sd <- sd(lam.star[,1])
qt<-quantile(lam.star[,1], c(alpha/2, 1 - alpha/2))
ICm<-qt[[1]] ;
ICp<-qt[[2]]
# Histogram of the bootstrapped first eigenvalue.
png("Bootstrap.png")
hist(lam.star[,1],nclass = 50,cex.main = 0.8,freq = FALSE, cex.lab = 0.7,proba=TRUE, main = paste("f2 boostrap : nb = ", B,sep = "" ))
s<-seq(min(lam.star[,1]),max(lam.star[,1]),le=50)
# Overlay the fitted normal density (red) and a kernel density (blue).
prov<- dnorm(s,lam.star.mean,lam.star.sd)
lines(prov~s,col = 'red')
lines(density(lam.star[,1]),col = 'blue',lty = 2)
# CI bounds (dashed red) plus mean (red) and median (blue).
abline(v=mean(lam.star[,1]),col = 'red')
abline(v=median(lam.star[,1]),col = 'blue')
abline(v=ICm,col = 'red',lty = 2)
abline(v=ICp,col = 'red',lty = 2)
dev.off()
# Density plot of the bootstrapped eigenvalues of the selected axes.
png("AxesDensity.png")
plot(density(lam.star[,1]),col = 'blue',lty = 1,type ='l',xlim=c(0,1), ylim =c(0,20),
     main = 'densit? axes',cex.main = 0.6,xlab = '',ylab = '')
text(x=mean(lam.star[,1]),y = 1,label = paste('Axe ',1,sep = ''),cex =0.7,col = 'red')
for (i in 2: nb.axe)
{
  lines(density(lam.star[,i]),col = 'blue' ,lty = 1)
  text(x=mean(lam.star[,i]),y = 1,label = paste('Axe ',i,sep = ''),cex =0.7,col = 'red')
}
dev.off()
#--------------------  VARIABLES AND QUALITY OF REPRESENTATION -----------------------
# Category coordinates on the first 5 MCA axes.
mca.var<- res.mca$var$coord ;
colnames(mca.var)<-paste('axes ',seq(1,5,1),sep ='')
# Quality of representation (cos2) on axes 1-2, plus their sum.
mca.var.qlt<-res.mca$var$cos2[,c(1,2)]
mca.var.qlt<-cbind(mca.var.qlt,(apply(mca.var.qlt,1,sum))) ;
colnames(mca.var.qlt)[3]<-'Sum qtl'
#--------------------  INDIVIDUALS AND RELATIVE CONTRIBUTION -----------------------
# Individual (state) coordinates on the first 5 axes.
mca.ind <- res.mca$ind$coord ;
colnames(mca.ind)<-paste('axes ',seq(1,5,1),sep ='')
# BUGFIX: the original read `ca$row$contrib`, but `ca` is never defined in
# this script (the expression looks copied from a CA analysis); for an MCA
# the individuals' contributions live in res.mca$ind$contrib.
ca.ind.ctr <- res.mca$ind$contrib[,c(1,2)]
#--------------------  MCA GRAPHICS ----------------------- -------------------------
png("MCAPlot.png")
plot(res.mca, axe=c(1,2))
dev.off()
png("VariableMCA.png")
plot.MCA(res.mca, invisible=c("ind"))
dev.off()
png("IndividusMCA.png")
plot.MCA(res.mca, invisible=c("var"))
dev.off()
# BUGFIX: ggplot2 is used below but was never attached in this script.
library(ggplot2)
# Bar chart: one unit per state, stacked by IM_LOC category.
df.2001.AFCM$num <- 1
ggplot(df.2001.AFCM, aes(x=df.2001.AFCM$IM_LOC, y=df.2001.AFCM$num)) +
  geom_bar(stat="identity", fill="lightblue")
ggsave("HistoPopu.png")
# Dot plot: total 2001 population by state.
ggplot(df, aes(y=df$State, x=df$Pop_Tot_01)) +
  geom_point(size=4) +
  scale_colour_brewer(palette="Set1")
ggsave("StatePopu.png")
# Scatter: local vs. international immigration (2001), labelled by state.
ggplot(df, aes(x=df$IM_Loc_01, y=df$IM_Int_01)) +
  geom_text(aes(label=df$State), vjust=-1) +
  geom_point() +
  scale_colour_brewer(palette="Set1")
ggsave("ImiLocInt.png")
# Dot plot: local immigration in 2000 by state.
ggplot(df, aes(y=df$State, x=df$IM_Loc_00)) +
  geom_point(size=4) +
  scale_colour_brewer(palette="Set1")
ggsave("ImLOC00.png")
# Population change 2000 -> 2001 and its distribution, with the mean marked.
df$DiffPop <- df$Pop_Tot_01 - df$Pop_Tot_00
mean.diff.pop <- mean(df$DiffPop)
hist(df$DiffPop, breaks=100)
ggplot(df, aes(x=df$DiffPop)) +
  geom_vline(xintercept = mean.diff.pop, color='red') +
  geom_bar()
ggsave("DiffPop.png")
|
4bb3af56c2d6fe26baf5b2280f7693df0d17aeb8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qdap/examples/NAer.Rd.R
|
247637d1c5b2c55b3455e0342d992ff919f71757
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
NAer.Rd.R
|
# Auto-extracted example code for qdap::NAer ("Replace Missing Values (NA)").
# Everything below the library() call is inside a "Not run" block created by
# the example extractor, so sourcing this file only attaches qdap.
library(qdap)
### Name: NAer
### Title: Replace Missing Values (NA)
### Aliases: NAer
### Keywords: missing-value
### ** Examples
## Not run:
##D set.seed(10)
##D (x <- sample(c(rep(NA, 4), 1:10), 20, rep=T))
##D NAer(x)
##D
##D set.seed(10)
##D (y <- data.frame(matrix(x, 5, 4)) )
##D NAer(y)
##D NAer(y, "MISSING")
## End(Not run)
|
8c77643a711b389d6d3e69b40ae14a8e3475b3a0
|
0bd4d5ee50ebfb5a5325ae0284087ee886be4f37
|
/inst/simulations/ExactPredictionInterval/EPI_simulations00.R
|
b4300daa3c4023b33ffa08ecf34c846b7fb0f2c7
|
[] |
no_license
|
stla/SLutils
|
91f53e3ef48b22154642b7425a1be94c0c48053e
|
5c5ef7dbb5d172c0a7788b3975a1363a47c4bf67
|
refs/heads/master
| 2020-04-10T06:21:30.476088
| 2019-09-10T10:00:57
| 2019-09-10T10:00:57
| 160,851,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
EPI_simulations00.R
|
# Simulation: empirical coverage of exact prediction intervals (SLutils).
# For every (confidence level, sample size n, number of future obs k)
# combination: draw `nsims` normal samples, form two-sided prediction
# intervals mean +/- K*sd using the exact factor K, then check how often k
# fresh draws all fall inside. The final coverage should be close to `conf`.
# NOTE(review): machine-specific path; adjust when running elsewhere.
setwd("~/MyPackages/SLutils/inst/simulations/ExactPredictionInterval")
library(SLutils)
# Simulation grid.
conf <- c(80,95)/100
k <- c(2, 3)
n <- c(5, 10)
# (Benchmark of three ways to build the grid -- CJ() won; kept for reference.)
# f1 <- function(){
#   DT <- data.table(conf)
#   DT <- DT[, list(n), by=names(DT)]
#   DT <- DT[, list(k), by=names(DT)]
# }
# f2 <- function(){
#   DT <- data.table(expand.grid(conf=conf, n=n, k=k))
# }
# f3 <- function(){
#   DT <- CJ(conf=conf, n=n, k=k)
# }
# library(microbenchmark)
# microbenchmark(
#   chaining = f1(),
#   expand = f2(),
#   CJ = f3(), # the best !
#   times=100
# )
# Cross-join of all parameter combinations, keyed on every column.
dat <- CJ(conf=conf, n=n, k=k)
setkey(dat)
# (Earlier variant using predictionFactor with rownames as an id -- kept.)
# dat <- cbind(dat,
#              dat[, .(K=predictionFactor(n=n, df=n-1, k=k, method="exact", conf.level=conf)),
#                  keyby=.(id=rownames(dat))][,id:=NULL])
# Exact prediction factor K for each (conf, n, k) combination.
dat <- dat[, .(K=predictionFactor_SL(n=`n`, k=`k`, method="exact", conf.level=`conf`)),
           keyby=.(conf,n,k)]
dim(dat)
#
# One matrix of nsims x max(n) standard-normal draws, rounded to 1 decimal;
# each row is a simulated sample, trimmed to n columns per grid row below.
nsims <- 150000
sims1 <- round(matrix(rnorm(nsims*max(n)), nrow=nsims), 1)
# # Alternative: build a separate data.table of simulations instead:
# DTsims <- dat[, .(sims=list(sims1[,1:`n`])), by=.(conf,n,k)]
# DTsims$sims
# Or merge into dat in place:
dat[, `:=`(sims=list(sims1[,1:`n`])), by=.(conf,n,k)]
# dat$sims is now a list column of matrices (one simulation matrix per row).
dim(dat)
# Row-wise mean and sd of each simulated sample.
dat <- dat[, .(K=K,
               mean =apply(sims[[1]], 1, mean),
               sd = apply(sims[[1]], 1, sd)),
           by=.(conf,n,k)]
dim(dat)
# Prediction bounds mean +/- K*sd; `index` tracks the simulation number.
dat <- dat[, .(lwr=`mean`-K*`sd`, upr=`mean`+K*`sd`, index=1:.N), by=.(conf,n,k)]
dim(dat)
# Fresh draws: the k "future" observations for each simulation.
sims2 <- round(matrix(rnorm(nsims*max(k)), nrow=nsims), 1)
# Coverage test: TRUE when all k future observations fall inside the interval.
dat[, `:=`(test=all(sims2[index, 1:`k`]<upr & sims2[index, 1:`k`]>lwr)),
    by=.(index,conf,n,k)]
dim(dat)
# Empirical coverage per (conf, n, k) -- should approximate `conf`.
dat[, .(coverage=mean(test)),
    by=.(conf,n,k)]
# # Only mean and sd are actually needed (kept for reference):
# dat[, .(lwr=apply(sims[[1]], 1,
#                   function(sample){
#                     mean(sample)-K*sd(sample)
#                   }),
#         upr=apply(sims[[1]], 1,
#                   function(sample){
#                     mean(sample)+K*sd(sample)
#                   })),
#     by=.(conf,n,k)]
#
|
efd75a895a5d6650774e91263541b0f0c5126b64
|
1a83ac47bb1ffe39b416dfce1964051fa77d5b7c
|
/man/MonteCarloSamplingIZ-class.Rd
|
51a58b3180779a889b6fe1379776c30ccf6d86ce
|
[] |
no_license
|
cran/sampSurf
|
9052ab60378e3295ecade04e573e6770c145cf74
|
9388a099e8cef6109c544bcc95770bc9a60670e6
|
refs/heads/master
| 2021-06-05T21:01:53.864724
| 2021-03-05T14:50:02
| 2021-03-05T14:50:02
| 17,699,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,754
|
rd
|
MonteCarloSamplingIZ-class.Rd
|
\name{MonteCarloSamplingIZ-class}
\Rdversion{1.1}
\docType{class}
\alias{MonteCarloSamplingIZ-class}
\title{Class \code{"MonteCarloSamplingIZ"}}
\description{ This virtual class exists to be combined with an areal
sampling class of choice. This class facilitates creation of a new
areal sampling method that employs one of the Monte Carlo subsampling
methods supported through subclasses of
\code{"MonteCarloSampling"}. Generally it will only be of interest to
someone desiring to write extensions to \pkg{sampSurf} in the form of
Monte Carlo subsampling on down logs or standing trees within areal
methods. Please see the class definition for
\code{\linkS4class{horizontalPointCMCIZ}} for an example of how this
can be combined with an areal method.}
\section{Objects from the Class}{A virtual Class: No objects may be
created from it.}
\section{Slots}{
\describe{
\item{\code{mcsObj}:}{Object of class \code{"MonteCarloSampling"} A
subclass objects of \code{\linkS4class{MonteCarloSampling}};
please see that class for extant subclasses/sampling methods. }
\item{\code{antithetic}:}{Object of class \code{"logical"}
\code{TRUE}: if antithetic sampling variant has been used for the
object in the \code{mcsObj} slot; \code{FALSE}: if not. }
\item{\code{proxy}:}{Object of class \code{"character"} The name of
the proxy function used in Monte Carlo sampling.}
}
}
\section{Methods}{
No methods defined with class "MonteCarloSamplingIZ" in the signature.
}
\author{
Jeffrey H. Gove %, \email{jhgove@unh.edu}
}
\seealso{
\code{\linkS4class{MonteCarloSampling}}, \code{\linkS4class{horizontalPointCMCIZ}}
}
\examples{
showClass("MonteCarloSamplingIZ")
}
\keyword{classes}
|
7faf244df7cad0e19aed2ae645d52d4a60596e3f
|
909635ddbeecedcbafb005e69f01c0d3ddae8289
|
/man/Transaction.Rd
|
af92a074f404d27d06e3c9f599361619ba655e7a
|
[] |
no_license
|
hitfuture/Neo4jDriveR
|
6d45900707d3a93e4567916c3d634ef8ae45c492
|
ef8f1226f98dd40de3bf2a89ca8d249715a0eeef
|
refs/heads/master
| 2020-03-18T07:18:39.319158
| 2018-07-19T18:33:36
| 2018-07-19T18:33:36
| 134,444,172
| 0
| 0
| null | 2018-07-19T18:33:37
| 2018-05-22T16:31:54
|
R
|
UTF-8
|
R
| false
| true
| 354
|
rd
|
Transaction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph_database.R
\docType{data}
\name{Transaction}
\alias{Transaction}
\title{Transaction}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
Transaction
}
\description{
Transaction is the class that allows execution in the Neo4jDB
}
\keyword{datasets}
|
73828853ec8db46d6a3e5b9e04c9924b68d97224
|
a5b757b279966b9e86cef3e70092fd5b791b11f9
|
/man/med_runtime.Rd
|
c0c5e21e9887651da8ff76acfe37821e5b5ddf13
|
[] |
no_license
|
bgulbis/mbohelpr
|
62c15326680e91a2d394614287770e101b13daf6
|
e362dd59db37edb2c870a46d3389e7f95d9a745b
|
refs/heads/master
| 2023-01-24T11:42:03.100949
| 2023-01-11T20:30:08
| 2023-01-11T20:30:08
| 197,245,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,638
|
rd
|
med_runtime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_runtime.R
\name{med_runtime}
\alias{med_runtime}
\title{Calculate the running time for intermittent medication data}
\usage{
med_runtime(
df,
...,
.id = encntr_id,
.med = medication,
.dt_tm = med_datetime,
.dose = dose,
.med_off = 36,
.no_doc = 24,
.units = "hours"
)
}
\arguments{
\item{df}{A data frame}
\item{...}{Optional columns to group by}
\item{.id}{Patient identifier column, defaults to encntr_id}
\item{.med}{Medication column, defaults to medication}
\item{.dt_tm}{Date/time column, defaults to med_datetime}
\item{.dose}{Dose column, defaults to dose}
\item{.med_off}{Number of hours between medication doses which will be
counted as a new course, defaults to 36 hours}
\item{.no_doc}{Number of hours without documentation which will be used to
indicate a course has ended, defaults to 24 hours}
\item{.units}{A string specifying the time units to use in calculations,
default is "hours"}
}
\value{
A data frame
}
\description{
\code{med_runtime} calculates the duration of time at current value and
total run time
}
\details{
This function takes a data frame with intermittent medication data and produces a
data frame with the duration of time at each rate and cumulative run
time.
This could be used to then calculate the AUC or to summarize the medication course data.
The data will be grouped into distinct courses of medications, for patients who
may have been restarted on the medication one or more times. Use the
\code{med_off} argument to modify the criteria for determining distinct
courses.
}
|
08836a6d4ec1203d32f781704043b88b6bd98322
|
9f80efc84afd9501fa8c3b762399e4fcea8a42ec
|
/plot3.R
|
0163ef74e034234130030281ad33a09310f2a553
|
[] |
no_license
|
saoconnell/ExData_Plotting1
|
5b5bbdbda589905215f53d4b5455ca9c62310ce3
|
97d51db67a65da562ae91a49f7f9a202524d7211
|
refs/heads/master
| 2021-01-14T09:29:33.545945
| 2014-05-09T05:48:18
| 2014-05-09T05:48:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
plot3.R
|
###--------------------------------------------------------------
### plot3.R:
###   Creates a plot of house hold Sub_Metering 1-3
###
###   Date         By             Notes
###   2014-05-07   saoconnell    Initial development
###--------------------------------------------------------------
## CLEANUP THE WORK SPACE
rm(list=ls())
### SET THE WORKING DIRECTORY
## NOTE(review): machine-specific path; adjust or remove when running elsewhere.
setwd("~/Documents/workspace/coursera/Data_Science/Exploratory_Data_Analysis/assignment_one/ExData_Plotting1")
### READ IN DATA
in_data <- read.csv("household_power_consumption.txt", sep=';', header=TRUE, stringsAsFactors=FALSE)
## CONVERT COLUMNS TO NUMERIC (WARNING IS '?' ARE COERCED TO NA)
## NOTE(review): Sub_metering_3 is not coerced here -- presumably it is
## already numeric after read.csv; confirm against the raw file.
in_data$Global_active_power <- as.numeric(in_data$Global_active_power)
in_data$Sub_metering_1 <- as.numeric(in_data$Sub_metering_1)
in_data$Sub_metering_2 <- as.numeric(in_data$Sub_metering_2)
## CREATE A POSIX DATE FOR SUBSETTING
in_data$POSIX <- as.POSIXct(strptime(paste(in_data$Date, in_data$Time), format="%d/%m/%Y %H:%M:%S"))
### SUBSET DATA
## Keep 2007-02-01 and 2007-02-02 (two full days, end exclusive).
plot_data <- subset(in_data, POSIX >= as.POSIXct("2007-02-01") & POSIX < as.POSIXct("2007-02-03") )
summary(plot_data)
### PLOT DATA
## Draw an empty frame first, then one line per sub-meter, then the legend.
png("plot3.png", width = 480, height = 480)
plot(plot_data$POSIX, plot_data$Sub_metering_1,
     type='n',
     ylab="Energy sub metering",
     xlab=NA
     )
lines(plot_data$POSIX, plot_data$Sub_metering_1, col='black')
lines(plot_data$POSIX, plot_data$Sub_metering_2, col='red')
lines(plot_data$POSIX, plot_data$Sub_metering_3, col='blue')
legend("topright",
       lty=c(1,1,1),
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
806855fba7f0b692f0970a20d6e0243ca4e0b5c6
|
55d830d823cf4578c10d2c55ba1d1682819ea0ab
|
/stat331-project-master/get_imputed_income.R
|
a055ef05e70f0133580393cffe2b2b233cefcfd7
|
[
"MIT"
] |
permissive
|
ShreyaPrasad31/Birth-Weight-Prediction-Model-
|
d8af0f80cb53cb2330214c4d6ab2368faa6acfc6
|
31c979124a6d9ef77fdfeda205b7776698402da5
|
refs/heads/master
| 2020-05-22T02:03:08.972446
| 2019-05-11T23:22:42
| 2019-05-11T23:22:42
| 186,190,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
get_imputed_income.R
|
# This file only contains the code necessary to get the imputed income covariate
# (does not include the analysis code required to select the imputed dataset).
# Pipeline: inspect missingness -> drop fht/fwt -> impute with mice (pmm) ->
# take the income column from the 4th completed dataset.
# Reading in the data
fdata <- read.csv("chds_births.csv")
head(fdata)
# Calculate the number of missing values in each column
# NOTE(review): sum(length(which(is.na(y)))) is just length(which(is.na(y)));
# sum(is.na(y)) would be the simpler equivalent.
na_count <- sapply(fdata, function(y) sum(length(which(is.na(y)))))
na.count <- data.frame(na_count)
na.count
# Check the columns that have more than 10% of the data missing
count <- sapply(fdata, function(y) length(y))
na_percent <- (na_count/count)*100
na_percent <- data.frame(na_percent)
na_percent
sapply(fdata, function(x) sum(is.na(x)))
# Visualizing the missing data (optional, requires VIM)
#library(VIM)
#miss_plot <- aggr(fdata, col=c('navyblue','yellow'),
#                 numbers=TRUE, sortVars=TRUE,
#                 labels=names(fdata), cex.axis=.7,
#                 gap=3, ylab=c("Missing data","Pattern"))
# Drop father's height/weight (the columns with heavy missingness)
fdata$fht <- NULL
fdata$fwt <- NULL
colnames(fdata)
head(fdata)
library(mice)
md.pattern(fdata)
#pbox(fdata, pos = 1, int = FALSE, cex = 0.7)
# Imputing the data with the "pmm" (predictive mean matching) method as
# referenced from "https://stefvanbuuren.name/mice/"
imp <- mice(fdata, m = 5, maxit = 50, meth = 'pmm', seed = 500)
#summary(imp)
# Diagnostic plots: imputed vs observed distributions
densityplot(imp)
stripplot(imp, pch = 20, cex = 1.2)
# Fit the model of interest on each imputed dataset
modelFit1 <- with(data = imp, lm(wt ~ gestation + parity + meth + mage + income))
#summary(modelFit1)
# NOTE(review): `combine` (pooled estimates) is computed but never used below.
combine <- pool(modelFit1)
imp_1 <- data.frame(complete(imp,1))
imp_2 <- data.frame(complete(imp,2))
imp_3 <- data.frame(complete(imp,3))
imp_4 <- data.frame(complete(imp,4))
imp_5 <- data.frame(complete(imp,5))
# NOTE(review): `mp_list` (likely meant imp_list) is also unused afterwards.
mp_list <- list(imp_1, imp_2, imp_3, imp_4, imp_5)
# Get the income values from imputed sample 4
imputed_income <- imp_4$income
# Final dataset is fdata
fdata$income <- imputed_income
|
4c15fd2d48c4fa75c2796a2a9e3c8d928cae1e31
|
b0127d0af88c0ba22e6206a4722ffb69e450d69c
|
/man/filterSQMASSdb.Rd
|
bdf41d6140de660e8440651c079371eaf3473a1d
|
[] |
no_license
|
Roestlab/mstools
|
0e4ff24e265b885a761f129dbe40433076edf6f1
|
d3bdfb5a714a4c103df96a8c7d92b050f67326c8
|
refs/heads/master
| 2020-12-13T13:09:57.023082
| 2020-02-04T18:42:05
| 2020-02-04T18:42:05
| 234,427,121
| 1
| 0
| null | 2020-02-04T17:46:16
| 2020-01-16T22:51:44
|
R
|
UTF-8
|
R
| false
| true
| 776
|
rd
|
filterSQMASSdb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterSQMASSdb.R
\name{filterSQMASSdb}
\alias{filterSQMASSdb}
\title{Filter an sqMass db file}
\usage{
filterSQMASSdb(sqmass_file, unmodified_sequence_filter)
}
\arguments{
\item{sqmass_file}{A character vector of the absolute path and filename of the sqMass file. (Must be .osw format)}
\item{unmodified_sequence_filter}{A character vector for extraction of specific peptide(s). I.e. c('ANSSPTTNIDHLK', 'ESTAEPDSLSR', 'NLSPTKQNGKATHPR', 'KDSNTNIVLLK', 'NKESPTKAIVR')}
}
\value{
A data.table containing spectral library information
}
\description{
This function can be used to filter an sqMass db file given a list of unmodified sequences
}
\author{
Justin Sing \url{https://github.com/singjc}
}
|
42e8444ffd8e5ae11d79ba99422ef90d57a61f9c
|
e7df3662ef856d11842c338fc02545eddd9066c5
|
/ggPlotShortcuts.R
|
3e0c25e5ca21abd5a5b566f34bafd2b705a3a8e4
|
[
"MIT"
] |
permissive
|
SchwartzLab/SchwartzLab_ToolBox
|
2025172d9d6314dedf9ceaa792e24dae4270c83e
|
1ec9494f210a680bc4816573985f3e1df280fc4c
|
refs/heads/master
| 2022-07-22T05:35:48.495118
| 2022-06-24T22:24:00
| 2022-06-24T22:24:00
| 99,702,434
| 0
| 2
|
MIT
| 2020-12-08T14:33:47
| 2017-08-08T14:33:06
|
HTML
|
UTF-8
|
R
| false
| false
| 9,125
|
r
|
ggPlotShortcuts.R
|
# GGPLOT functions #############################################################
# Multiplot for ggplot2 by winston@stdout.org from Cookbook for R
# Arranges several ggplot objects on a single page using grid viewports.
#   ...       ggplot objects passed individually
#   plotlist  additional ggplot objects supplied as a list
#   file      unused in this body (kept for compatibility with the Cookbook code)
#   cols      number of columns when no explicit `layout` is given
#   layout    optional matrix of plot indices: cell (i, j) displays plot layout[i, j]
# Draws to the current device as a side effect; the return value is not useful.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    # NOTE(review): if no plots are supplied, 1:numPlots is 1:0 and this loop
    # runs with bogus indices -- callers must pass at least one plot.
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Boxplot shortcut (ggplot2): one box per column of `matrix`, with per-box n
# labels (EnvStats::stat_n_text) and rotated x-axis text.
ggBoxplot <- function(matrix, title = "", xlab = "x", ylab = "y", outLCol = NA){
  long <- melt(as.data.frame(matrix))
  boxPlot <- ggplot(data = long, aes(variable, value))
  boxPlot <- boxPlot + geom_boxplot(outlier.colour = outLCol, outlier.size = 1)
  boxPlot <- boxPlot + xlab(xlab) + ylab(ylab) + ggtitle(title)
  boxPlot <- boxPlot + theme_classic()
  boxPlot <- boxPlot + stat_n_text(size = 3, angle = 90)
  boxPlot + theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# Pie chart of nucleotide frequencies: keeps the 5 most frequent categories and
# lumps the rest into "All Others"; slice labels show the rounded proportions.
# NOTE(review): assumes `Freq` is a named vector with at least 6 entries
# (tmpDF[1:5,] plus the remainder) -- TODO confirm for shorter inputs.
ggPieFreq <- function(Freq, labSize = 5){
  # NOTE(review): palette() changes the *global* graphics palette and is never
  # restored; it also does not affect the ggplot fill colours below.
  palette(brewer.pal(9, "Set1")) #RColorBrewer
  tmpDF <- data.frame(nucs = names(Freq), Percent = Freq, stringsAsFactors = F)
  tmpDF <- tmpDF[order(tmpDF[,2], decreasing = T),]
  # rbind with the character vector coerces Percent to character; it is turned
  # back into a numeric proportion on the next line.
  tmpDF <- data.frame(rbind(tmpDF[1:5,], c("All Others", sum(tmpDF[-1:-5,2]))))
  tmpDF[,2] <- as.numeric(tmpDF[,2]) / sum(as.numeric(tmpDF[,2]))
  # Fix the factor levels so slices keep the decreasing-frequency order.
  tmpDF[,1] <- factor(tmpDF[,1], levels = tmpDF[,1])
  ggPie <- ggplot(tmpDF, aes(x="", y=Percent, fill=nucs)) +
    geom_bar(width = 1, stat = "identity") +
    coord_polar(theta = "y",start = 0,direction = 1) +
    geom_text(aes(label = round(Percent,2)), size= labSize, position = position_stack(vjust = 0.5)) +
    theme(axis.text.x =element_blank()) + theme_classic()
  return(ggPie)
}
# GGplot alternative to pairs function (additionally it fits linear models to
# all pair-wise comparisons).  Lower triangle: scatter plots with fitted lm
# line, identity line, and Pearson/Spearman correlations; diagonal: variable
# name labels.  Drawn via multiplot() as a side effect.
ggPairs <- function(DF, alpha = 1){
  iCol <- colnames(DF)
  # All unordered column pairs; row x of matD holds the (i, j) column indices.
  matD <- combinations(n = length(iCol), r = 2, v = 1:length(iCol))
  # One scatter plot per column pair.
  ggSC <- lapply(1:nrow(matD), function(x){
    tmpL <- lm(DF[,matD[x,2]] ~ DF[,matD[x,1]])
    # Format the fitted line as "y = x * slope +/- |intercept|" for the subtitle.
    if(tmpL$coefficients[1]>=0){
      linModEq = paste("y = x *",tmpL$coefficients[2] %>% signif(2),
                       "+", tmpL$coefficients[1] %>% signif(2))
    }else if(tmpL$coefficients[1]<0){linModEq = paste("y = x *",
                                                      signif(tmpL$coefficients[2],2), "-", tmpL$coefficients[1] %>%
                                                        signif(2) %>% abs)}
    tmpC <- cor(DF[,matD[x,1]], DF[,matD[x,2]], use = "p") %>% round(4)
    tmpP <- cor.test(DF[,matD[x,1]], DF[,matD[x,2]], use = "p")$p.value %>% signif(4)
    tmpC2 <- cor(DF[,matD[x,1]], DF[,matD[x,2]], use = "p", method = "spearman") %>% round(4)
    tmpP2 <- cor.test(DF[,matD[x,1]], DF[,matD[x,2]], use = "p", method = "spearman")$p.value %>% signif(4)
    # All panels share axis limits = overall range of DF, so they are comparable.
    ggplot(DF, aes(x= DF[,matD[x,1]], y= DF[,matD[x,2]])) +
      geom_point(alpha = alpha, shape = 16) +
      geom_smooth(method = lm, se=FALSE, fullrange= TRUE, aes(group=1)) +
      geom_abline(intercept = 0, slope = 1, colour = "gray") +
      theme_classic() + xlab(iCol[matD[x,1]]) + ylab(iCol[matD[x,2]]) +
      ggtitle(paste("R =", tmpC, "p = ", tmpP, "\nrho =", tmpC2, "p =", tmpP2),
              subtitle = linModEq) +
      coord_cartesian(ylim = range(DF, na.rm = T), xlim = range(DF, na.rm = T))
  })
  # One text-only panel per variable for the diagonal.
  ggLabs <- lapply(iCol, function(x){
    df <- data.frame(x = 1, y = 1, text = x)
    ggO <- ggplot(df, aes(x, y)) +
      geom_text(aes(label = text), size = 5) + theme_classic() +
      theme(panel.border = element_rect(colour = 1, fill = NA), axis.line = element_line())+
      theme(axis.title.x=element_blank(),
            axis.text.x=element_blank(),
            axis.ticks.x=element_blank()) +
      theme(axis.title.y=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks.y=element_blank())
    return(ggO)
  })
  # NOTE(review): ggCf is dead code -- each element is NULL and it is never used.
  ggCf <- lapply(1:nrow(matD), function(x){
    return()
  })
  # Layout matrix: lower triangle -> scatter plots, diagonal -> labels.
  lOut <- matrix(NA, ncol = ncol(DF), nrow = ncol(DF))
  for(i in 1:nrow(matD)){lOut[matD[i,2], matD[i,1]] <- i}
  for(i in 1:length(iCol)){lOut[i, i] <- length(ggSC) + i}
  # NOTE(review): these upper-triangle indices exceed the number of plots
  # passed to multiplot(), so those cells are simply left blank.
  for(i in 1:nrow(matD)){lOut[matD[i,1], matD[i,2]] <- length(ggSC) + length(iCol) + i}
  multiplot(plotlist = c(ggSC, ggLabs), layout = lOut)
}
# Simple barplot of a (named) numeric vector with optional +/- CI error bars.
#   x      numeric vector; names become bar labels (unnamed -> 1..n)
#   ci     half-width of the error bar per bar (NA -> no visible bars)
#   title  plot title; subt subtitle; xLab/yLab axis labels
# Returns the ggplot object.
ggBarplot <- function(x, ci = NA, title = NULL, subt = NULL, xLab = "Names", yLab = "Values"){
  # seq_along() instead of 1:length(x): the latter yields c(1, 0) for an empty
  # vector and would error when assigned as names.
  if(is.null(names(x))){names(x) <- seq_along(x)}
  df <- data.frame(names = names(x), value=x, CI = ci)
  outGG <- ggplot(data=df, aes(x=names, y=value)) +
    geom_bar(stat="identity") + theme_classic() +
    geom_errorbar(aes(ymin=value-CI, ymax=value+CI), width=.2, position=position_dodge(.9)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) + ggtitle(title, subt) +
    ylab(yLab) + xlab(xLab)
  return(outGG)
}
# Scatter plot with a fitted least-squares line; Pearson and Spearman
# correlations (with p-values) are reported in the subtitle.
ggScattLinePlot <- function(x, y, title = "", xLab = "", yLab = "", alpha = 1){
  pearson_r <- round(cor(x, y, use = "p"), 4)
  pearson_p <- signif(cor.test(x, y, use = "p")$p.value, 3)
  spearman_r <- round(cor(x, y, use = "p", method = "spearman"), 4)
  spearman_p <- signif(cor.test(x, y, use = "p", method = "spearman")$p.value, 3)
  corLabel <- paste("R =", pearson_r, "p = ", pearson_p,
                    "\nrho =", spearman_r, "p =", spearman_p)
  plotDF <- data.frame(var1 = x, var2 = y)
  ggplot(plotDF, aes(x = var1, y = var2)) + geom_point(alpha = alpha) +
    geom_smooth(method = lm, se=FALSE, fullrange= TRUE, aes(group=1)) +
    ggtitle(title, corLabel) +
    ylab(yLab) + xlab(xLab) + theme_classic()
}
# 2-D binned density heatmap of y vs x with a fitted regression line;
# Pearson/Spearman correlations are shown in the subtitle.
#   logTransform  log-transform the fill scale (useful for skewed bin counts)
#   nBins         number of bins per axis for geom_bin2d
# Returns the ggplot object.
ggHeatmap <- function(x, y, logTransform = TRUE, nBins = 100){
  tmpC <- cor(x, y, use = "p") %>% round(4)
  tmpP <- cor.test(x, y, use = "p")$p.value %>% signif(3)
  tmpC2 <- cor(x, y, use = "p", method = "spearman") %>% round(4)
  tmpP2 <- cor.test(x, y, use = "p", method = "spearman")$p.value %>% signif(3)
  tmpDF <- data.frame(var1 = x, var2 = y)
  # The original duplicated the entire plot definition in both branches; only
  # the fill scale differs, so build that once and reuse the rest.
  fillScale <- if (isTRUE(logTransform)) {
    scale_fill_gradientn(trans = "log", colours = rev(brewer.pal(9, "Spectral")))
  } else {
    scale_fill_gradientn(colours = rev(brewer.pal(9, "Spectral")))
  }
  ggplot(tmpDF, aes(x = var1, y = var2)) + geom_bin2d(bins = nBins) +
    fillScale +
    theme_classic() + ggtitle("", paste("R =", tmpC, "p = ", tmpP, "\nrho =", tmpC2, "p =", tmpP2)) +
    geom_smooth(method = lm, se=FALSE, fullrange= TRUE, aes(group=1)) + theme(legend.position = "bottom")
}
# Boxplots of y within equal-width bins of x (ggplot2::cut_interval), with
# per-box n labels and rotated x-axis text.
ggBoxPlot_cutInterval <- function(x, y, nCut_x){
  binned <- data.frame(var1 = x, var2 = y)
  binned$cut_x <- cut_interval(binned$var1, nCut_x)
  basePlot <- ggplot(binned, aes(y= var2, x = cut_x))
  basePlot + geom_boxplot(outlier.colour = NA) + theme_classic() +
    stat_n_text(size = 3, angle = 90) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# Boxplots of y within equal-count bins of x (ggplot2::cut_number), with
# per-box n labels and rotated x-axis text.
ggBoxPlot_cutNumber <- function(x, y, nCut_x){
  binned <- data.frame(var1 = x, var2 = y)
  binned$cut_x <- cut_number(binned$var1, nCut_x)
  basePlot <- ggplot(binned, aes(y= var2, x = cut_x))
  basePlot + geom_boxplot(outlier.colour = NA) + theme_classic() +
    stat_n_text(size = 3, angle = 90) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# Load/install dependencies ####################################################
# Install `package` from CRAN if it is not already available, then attach it.
# requireNamespace() is the recommended availability check; require() attaches
# the package as a side effect of the test and returns FALSE instead of
# erroring, which can silently mask a failed install.
installLoad_CRAN <- function(package){
  if (!requireNamespace(package, quietly = TRUE)) {
    install.packages(package, dependencies = TRUE)
  }
  library(package, character.only = TRUE, quietly = TRUE)
  invisible(NULL)  # called for its side effect (attaching), returns nothing
}
CRAN_packs <- c("magrittr", "plyr", "ggplot2", "grid", "gtools", "reshape2", "EnvStats")
# The loop runs purely for its side effect (attaching packages), so use
# lapply() wrapped in invisible() rather than sapply(), whose simplified
# return value is meaningless here and would be auto-printed at top level.
invisible(lapply(CRAN_packs, installLoad_CRAN))
|
1974c5889f088622455eec18f8322bfc3eda544b
|
e29977bff3e7a6271286059797b67c08b695944d
|
/man/formatfilFormateringsfunktioner.Rd
|
c889da96aa3824344f1ec035f7d639e1c06b0215
|
[] |
no_license
|
KubranurSahan/Formateringsfunktioner
|
fcf607927f3a5a98cedb2aeb3474150ad5fd9612
|
0e68c5292523022fe0a6f28a3f122928f215c005
|
refs/heads/master
| 2021-04-17T11:17:12.086766
| 2020-04-24T13:33:50
| 2020-04-24T13:33:50
| 249,440,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,166
|
rd
|
formatfilFormateringsfunktioner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data dokument.R
\docType{data}
\encoding{UTF-8}
\name{formatfilFormateringsfunktioner}
\alias{formatfilFormateringsfunktioner}
\title{Et eksempel på en formatfil}
\format{Formatfil til en datasæt indeholder fem variable og x antal observationer svarende til de variable man ønsker at beholde i datasættet som formatfilen tilhører.
Hver række i formatFilen svarer til en variabel, man ønsker at beholde og formatere i datasættet.\cr
formatFilen består af følgende fem variable:\cr
\itemize{
\item 'variabelNavn' navnet på variablen man ønsker at formatere og beholde i datasættet
\item 'type' den ønskede variabeltype for variablen
\item 'datoFormat' datoformatet som variablen skal konverteres fra (eks. \%d-\%m-\%Y eller '\%d-\%m-\%Y \%H:\%M')
\item 'nytVarNavn' navnet på variablen fremover
\item 'origin' origin for dato (dato og tid) variablen, når denne er numerisk. Default for SQL er '1960-01-01' og '1970-01-01' for R.
}}
\usage{
formatfilFormateringsfunktioner
}
\description{
Et eksempel på en formatfil
}
\keyword{datasets}
|
ca8eee036abc6ae852e57f30d25b074c95263039
|
f89d8437bf557d258cd113e87fed55fe6eb37e61
|
/analyses/traits/height_cleaning.R
|
525d01393bb1ac1716dcfec1410d2eb4bb06687d
|
[] |
no_license
|
lizzieinvancouver/wildhellgarden
|
decfef79b0cf63c7a34e554cd59ee13e5ade707d
|
a8c09abe9736a29336d1d821e40f6f8038e8dd34
|
refs/heads/master
| 2023-08-07T14:19:11.048287
| 2023-07-19T14:12:13
| 2023-07-19T14:12:13
| 55,004,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,061
|
r
|
height_cleaning.R
|
# code written by Tolu A., summer of 2022
# edited by Deirdre on Sept 1, 2022
# Cleans and combines the 2019/2021/2022 common-garden height files into one
# data frame (ID, Plot, Height_m, Year) and makes a quick check plot.
rm(list=ls())
options(stringsAsFactors = FALSE)
library(tidyr)
library(ggplot2)
library(readr)
library(dplyr)
# Pick a working directory per user/machine.
# NOTE(review): the parenthesis is misplaced -- length(grep(...) > 0) happens
# to equal the match count, but length(grep(...)) > 0 was surely intended.
if(length(grep("deirdreloughnan", getwd())>0)) {
  setwd("~/Documents/github/wildhellgarden")
} else if(length(grep("Lizzie", getwd())>0)) {
  setwd("~/Documents/git/projects/others/deirdre/synchrony")
} else{
  setwd("/home/deirdre/wildhellgarden") # for midge
}
#Could you clean and combine all the different height data files into one?
# 1. There is height data from 2019 which is clean and in m: analyses/2019_data/2019_heightanddbh.csv
# Could you convert this file to just be ind, plot, and height columns without the blank rows?
# 2. There is also height data in the 2020_data that has not been cleaned and the values will need to be
# corrected for the measuring rod issues and converted to m
# 3. And of course the 2022 data needs to be cleaned and corrected as well
# 4-7. Same instructions to look for outliers and make plots as Grace above.
#Notes on the measure stick can be found here: misc/Jun2021measuringstick/measuringstick.txt
# Conversion used below: metres = feet / 3.2787.
# NOTE(review): the exact factor is 3.28084 ft per metre; 3.2787 is presumably
# the rod-specific correction from measuringstick.txt -- TODO confirm there.
# 2019 Cleaning (heights recorded in cm, converted to m)
# NOTE(review): these read paths are absolute-ish and do not match the setwd()
# block above -- confirm which layout is current.
wildhell_2019 <- read_csv("Documents/WL2022/WildHell/wildhellgarden/analyses/2019_data/2019_heightanddbh.csv")
wildhell_2019 <- wildhell_2019 %>%
  select(IND, Plot, Height)
wildhell_2019 <- wildhell_2019[complete.cases(wildhell_2019),]
colnames(wildhell_2019)[which(names(wildhell_2019) == "IND")] <- "ID"
wildhell_2019$Year <- "2019"
wildhell_2019$Height <- as.numeric(as.character(wildhell_2019$Height)) / 100
colnames(wildhell_2019)[which(names(wildhell_2019) == "Height")] <- "Height_m"
# 2021 Cleaning: height stored as "Feet.Inches2" string concatenation.
# NOTE(review): this assumes Inches2 already holds a decimal-feet fraction
# (so "3" + "." + "05" reads as 3.05 ft); if Inches2 were literal inches the
# concatenation would not be a valid unit conversion -- TODO confirm against
# the measuring-stick notes.
wildhell_2021 <- read_csv("Documents/WL2022/WildHell/wildhellgarden/analyses/2021_data/heights2021.csv")
wildhell_2021$Height <- paste0(wildhell_2021$Feet, ".", wildhell_2021$Inches2)
wildhell_2021$Height[which(wildhell_2021$Height == "NA.NA")] <- NA
# cleaning the 0.5 inches (strings like "3.0.5" are not parseable as numbers)
temp2021 <- wildhell_2021 %>% dplyr::filter(nchar(Height) >= 5)
wildhell_2021$Height[which(wildhell_2021$Height == "3.0.5")] <- 3.05
wildhell_2021$Height[which(wildhell_2021$Height == "12.0.5")] <- 12.05
wildhell_2021$Height[which(wildhell_2021$Height == "6.0.5")] <- 6.05
wildhell_2021$Height[which(wildhell_2021$Height == "13.0.5")] <- 13.05
wildhell_2021$Height[which(wildhell_2021$Height == "4.0.5")] <- 4.05
# NOTE(review): duplicate of the "3.0.5" replacement above (harmless no-op).
wildhell_2021$Height[which(wildhell_2021$Height == "3.0.5")] <- 3.05
wildhell_2021$Height[which(wildhell_2021$Height == "3.NA")] <- 3.0
wildhell_2021$Height[which(wildhell_2021$Height == "4.NA")] <- 4.0
# converting heights (decimal feet -> metres)
wildhell_2021$Height_m <- as.numeric(as.character(wildhell_2021$Height)) / 3.2787
cleaned_2021 <- wildhell_2021 %>%
  select(ID, Plot, Height_m)
cleaned_2021[,'Height_m']=round(cleaned_2021[,'Height_m'],2)
cleaned_2021 <- cleaned_2021[!is.na(cleaned_2021$ID),]
cleaned_2021$Year <- "2021"
# 2022 Cleaning (same Feet.Inches2 encoding as 2021)
wildhell_2022 <- read_csv("Documents/WL2022/WildHell/wildhellgarden/analyses/2022_data/height2022.csv")
wildhell_2022$Height <- paste0(wildhell_2022$Feet, ".", wildhell_2022$Inches2)
wildhell_2022$Height[which(wildhell_2022$Height == "NA.NA")] <- NA
temp2022 <- wildhell_2022 %>% dplyr::filter(nchar(Height) >= 5)
wildhell_2022$Height[which(wildhell_2022$Height == "3.0.5")] <- 3.05
wildhell_2022$Height[which(wildhell_2022$Height == "4.0.5")] <- 4.05
wildhell_2022$Height[which(wildhell_2022$Height == "5.0.5")] <- 5.05
# converting heights (decimal feet -> metres)
wildhell_2022$Height_m <- as.numeric(as.character(wildhell_2022$Height)) / 3.2787
cleaned_2022 <- wildhell_2022 %>%
  select(id, Plot, Height_m)
cleaned_2022[,'Height_m']=round(cleaned_2022[,'Height_m'],2)
colnames(cleaned_2022)[which(names(cleaned_2022) == "id")] <- "ID"
cleaned_2022$Year <- "2022"
# combining all three data frames
wildhell_height <- rbind(wildhell_2019, cleaned_2021, cleaned_2022)
# NOTE(review): `plot` is unused below and shadows base::plot in this session.
plot <- wildhell_height[which(wildhell_height$Plot == "3"),]
ggplot(wildhell_height, aes(x = Plot, y = Height_m)) + geom_point()
|
5bb147207a4994b2a0f642757e6a9c7df4190712
|
91c8414b0f8b8e8e307e5339cc772b49ce042825
|
/R/eigen.analysis.R
|
60ea3966ee6aa367dc92e1fc3d2b87f945d9913e
|
[] |
no_license
|
cstubben/popbio
|
49c5fc70781a080fead3162914acc5a5e8f6c50f
|
6b1c070f3d7fd8909269c0dc37b3616e7526ab56
|
refs/heads/master
| 2021-01-16T18:43:57.806548
| 2019-09-25T23:53:05
| 2019-09-25T23:53:05
| 3,156,368
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,902
|
r
|
eigen.analysis.R
|
#' Eigenvalue and eigenvector analysis of a projection matrix
#'
#' Calculate population growth rate and other demographic parameters from a
#' projection matrix model using matrix algebra
#'
#' The calculation of eigenvalues and eigenvectors partly follows Matlab code in
#' section 4.8.1 (p. 107) in Caswell (2001). Since \code{popbio} version 2.0,
#' each part returned by \code{eigen.analysis} is now inlcuded as a separate
#' function.
#'
#' @param A A projection matrix
#' @param zero Set sensitivities for unobserved transitions to zero, default is FALSE
#'
#' @return A list with 6 items
#' \item{lambda1}{dominant eigenvalue with largest real part }
#' \item{stable.stage}{proportional stable stage distribution}
#' \item{sensitivities }{matrix of eigenvalue sensitivities}
#' \item{elasticities}{matrix of eigenvalue elasticities}
#' \item{repro.value}{reproductive value scaled so v[1]=1}
#' \item{damping.ratio}{damping ratio }
#'
#' @note If matrix A is singular, then \code{eigen.analysis} will return
#' elasticities, sensitivities, and reproductive values with NAs.
#'
#' @seealso \code{\link{eigen}} and \code{\link{pop.projection}}
#'
#' @references Caswell, H. 2001. Matrix population models: construction,
#' analysis and interpretation, Second edition. Sinauer, Sunderland,
#' Massachusetts, USA.
#'
#' @author Original code by James Holland Jones, Stanford University, August 2005
#'
#' @examples
#' ## Imprimitive matrix
#' A <- matrix(c(0,0,2,.3,0,0,0,.6,0), nrow=3,byrow=TRUE)
#' A
#' ev <- eigen(A)
#' ev$values
#' Mod(ev$values)
#' lmax <- which.max(Re(ev$values))
#' lmax
#' Re(ev$values)[lmax]
#' ## damping ratio is NA
#' eigen.analysis(A)
#' ## cycles every 3 years
#' stage.vector.plot(pop.projection(A, c(1,1,1), 10)$stage.vectors)
#' ### Teasel
#' a <- eigen.analysis(teasel)
#' a
#' barplot(a$stable.stage, col="green", ylim=c(0,1),
#' ylab="Stable stage proportion", xlab="Stage class", main="Teasel")
#' box()
#' op <- par(mfrow=c(2,2))
#' image2(teasel, cex=.8, mar=c(0.5,3,4,1) )
#' title("Teasel projection matrix", line=3)
#' image2(a$elasticities, cex=.8, mar=c(0.5,3,4,1) )
#' title("Elasticity matrix", line=3)
#' ## default is sensitivity for non-zero elements in matrix
#' image2(a$sensitivities, cex=.8, mar=c(0.5,3,4,1) )
#' title("Sensitivity matrix 1", line=3)
#' ## use zero=FALSE to get sensitivities of all elements
#' image2(eigen.analysis(teasel, zero=FALSE)$sensitivities, cex=.8, mar=c(0.5,3,4,1) )
#' title("Sensitivity matrix 2", line=3)
#' par(op)
#'
#' @export
eigen.analysis <- function(A, zero = FALSE) {
  # Eigen-decomposition of the projection matrix A.  R sorts eigenvalues in
  # decreasing order of Mod(values), so the dominant eigenvalue is usually
  # first -- except for imprimitive matrices with d eigenvalues of equal
  # modulus.  Select the eigenvalue with the largest real part explicitly.
  ev <- eigen(A)
  lmax <- which.max(Re(ev$values))
  lambda <- Re(ev$values[lmax])
  # Damping ratio: dominant modulus over the second-largest *distinct*
  # modulus.  rle() collapses ties (the rounding is needed for imprimitive
  # matrices); when all moduli tie, dr[2] is NA and the ratio is NA.
  dr <- rle(round(Mod(ev$values), 5))$values
  dr <- dr[1] / dr[2]
  W <- ev$vectors
  w <- abs(Re(W[, lmax]))  # right eigenvector -> stable stage distribution
  # If A (hence W) is singular, solve(W) fails; return NAs for the quantities
  # that need the left eigenvectors rather than stopping (better for loops
  # and bootstrapping).  inherits() is the robust condition-class test;
  # comparing class() with == is fragile because class() can be a vector.
  V <- try(Conj(solve(W)), silent = TRUE)
  if (inherits(V, "try-error")) {
    eigen.analysis <- list(
      lambda1 = lambda, stable.stage = w / sum(w),
      sensitivities = A * NA, elasticities = A * NA, repro.value = w * NA,
      damping.ratio = dr
    )
  }
  else {
    v <- abs(Re(V[lmax, ]))  # left eigenvector -> reproductive values
    s <- v %o% w             # sensitivity matrix d(lambda)/d(a_ij)
    if (zero) {
      # Zero out sensitivities for transitions absent from A.
      s[A == 0] <- 0
    }
    e <- s * A / lambda      # elasticities (proportional sensitivities)
    x <- dimnames(A)
    dimnames(s) <- x
    names(w) <- x[[1]]
    names(v) <- x[[1]]
    eigen.analysis <- list(
      lambda1 = lambda, stable.stage = w / sum(w),
      sensitivities = s, elasticities = e, repro.value = v / v[1],
      damping.ratio = dr
    )
  }
  eigen.analysis
}
|
2a9c26edf9c53d30b76840182ef445d6d11a05f1
|
05a2342858956ba4922a8c0a15a9eb70c28cae7b
|
/r-essentials-gists/ch1410.R
|
cba9350e528c044ab87d85f26684728f1a476631
|
[] |
no_license
|
jtlai0921/AEL023000_-
|
c0120f78d4c195a703263d57ae76de8544d5771a
|
d9a95c63bda951b0e2f8bbb338b63bb388ca8005
|
refs/heads/master
| 2020-12-19T02:26:47.710279
| 2020-01-22T14:37:24
| 2020-01-22T14:37:24
| 235,592,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 388
|
r
|
ch1410.R
|
# Install ggplot2, gapminder, magrittr (run once if needed)
# install.packages(c("ggplot2", "gapminder", "magrittr"))
# Load ggplot2, gapminder, magrittr
library(ggplot2)
library(gapminder)
library(magrittr)
# Histogram of GDP per capita with a title and axis labels.
# NOTE(review): a long right tail is conventionally called *right*-skewed;
# the title text may have the direction backwards -- confirm intent.
gapminder %>%
ggplot(aes(x = gdpPercap)) +
geom_histogram(bins = 30) +
ggtitle("GDP Per Capita is left-skewed") +
xlab("GDP Per Capita") +
ylab("Freq")
|
1a91d6bf9baf4cfcdfea256afec53d30f0b23f5d
|
e70aa6b0e12b96033c9d06b7f1ccdb973cd3a71a
|
/choropleth_map.R
|
4d5be24e68c6b717fe7bc88dbebfdd9e6b7231b9
|
[] |
no_license
|
aprilcs/Cholera-in-Ireland
|
37729f2f5256b6ef287187312909e9effdc31a07
|
f4d191e5265459909ab0e7d33279a1964c69e366
|
refs/heads/master
| 2021-01-16T22:27:57.532616
| 2016-09-28T18:36:05
| 2016-09-28T18:36:05
| 26,297,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,214
|
r
|
choropleth_map.R
|
setwd("C:/Users/April/Desktop/MSc project/github/spacetime-vis")
library(lattice)
library(ggplot2)
library(latticeExtra)
library(gridBase)
library(gridExtra)
myTheme <- custom.theme.2(pch=19, cex=0.7,
region=rev(brewer.pal(9, 'YlOrRd')),
symbol = brewer.pal(n=8, name = "Dark2"))
myTheme$strip.background$col='transparent'
myTheme$strip.shingle$col='transparent'
myTheme$strip.border$col='transparent'
xscale.components.custom <- function(...){
ans <- xscale.components.default(...)
ans$top=FALSE
ans}
yscale.components.custom <- function(...){
ans <- yscale.components.default(...)
ans$right=FALSE
ans}
myArgs <- list(as.table=TRUE,
between=list(x=0.5, y=0.2),
xscale.components = xscale.components.custom,
yscale.components = yscale.components.custom)
defaultArgs <- lattice.options()$default.args
lattice.options(default.theme = myTheme,
default.args = modifyList(defaultArgs, myArgs))
##################################################################
## Choropleth maps
##################################################################
##################################################################
## Administrative boundaries
##################################################################
library(sp)
library(maptools)
#old <- setwd(tempdir())
IreMap <- readShapePoly(fn="Ireland choropleth")
Encoding(levels(IreMap$NAME_1)) <- "latin1"
##################################################################
## Map
##################################################################
library(colorspace)
pdf(file="figs/population.pdf")
quantPal <- rev(heat_hcl(16))
spplot(IreMap["POP"], col='transparent', col.regions=quantPal)
dev.off()
pdf(file="figs/cases.pdf")
quantPal <- rev(heat_hcl(16))
spplot(IreMap["CASES"], col='transparent', col.regions=quantPal)
dev.off()
pdf(file="figs/cases per pop.pdf")
quantPal <- rev(heat_hcl(16))
spplot(IreMap["Cases.Pop"], col='transparent', col.regions=quantPal)
dev.off()
pdf(file="figs/case fatality.pdf")
quantPal <- rev(heat_hcl(16))
spplot(IreMap["Case.fatal"], col='transparent', col.regions=quantPal)
dev.off()
|
ea40f8b6842cff4d6526a8f5e701bb69b1145402
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/RNOmni/man/BAT.Rd
|
197d3642fb728b83a5caeeb403d6c24d073701dc
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,359
|
rd
|
BAT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BAT.R
\name{BAT}
\alias{BAT}
\title{Basic Association Test}
\usage{
BAT(y, G, X = NULL, test = "Score", simple = FALSE)
}
\arguments{
\item{y}{Numeric phenotype vector.}
\item{G}{Genotype matrix with observations as rows, SNPs as columns.}
\item{X}{Model matrix of covariates and structure adjustments. Should include
an intercept. Omit to perform marginal tests of association.}
\item{test}{Either Score or Wald.}
\item{simple}{Return the p-values only?}
}
\value{
If \code{simple = TRUE}, returns a vector of p-values, one for each column
of \code{G}. If \code{simple = FALSE}, returns a numeric matrix, including the
Wald or Score statistic, its standard error, the Z-score, and the p-value.
}
\description{
Conducts tests of association between the loci in \code{G} and the
untransformed phenotype \code{y}, adjusting for the model matrix \code{X}.
}
\examples{
set.seed(100)
# Design matrix
X <- cbind(1, rnorm(1e3))
# Genotypes
G <- replicate(1e3, rbinom(n = 1e3, size = 2, prob = 0.25))
storage.mode(G) <- "numeric"
# Phenotype
y <- as.numeric(X \%*\% c(1, 1)) + rnorm(1e3)
# Association test
p <- BAT(y = y, G = G, X = X)
}
\seealso{
\itemize{
\item Direct INT \code{\link{DINT}}
\item Indirect INT \code{\link{IINT}}
\item Omnibus INT \code{\link{OINT}}
}
}
|
e7de5d75310995cb1e631520a9efcf590e3ea2a2
|
86f9975c186bd4bb98ffd236891ed4b5ee55c348
|
/FINEMAP/FINEMAP-regional-plots.R
|
4f26299eecd4d854f9b267da88027185721d217a
|
[] |
no_license
|
ashwinas88/parra-lab
|
716d5b2292ac52e9abde51d867b5938fccaa7cee
|
aff2765f0ccfca85859eeda6f5009053db01734f
|
refs/heads/main
| 2023-06-03T07:46:54.219579
| 2021-06-22T13:47:35
| 2021-06-22T13:47:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,218
|
r
|
FINEMAP-regional-plots.R
|
library(tidyverse)
library(ggplot2)
setwd("C:/Users/etc-etc/")
# plot the log10(Bayes Factor) results for each region (Y axis; colored by LD).
# also works to plot the PIP instead, check out the comments on the script below to modify.
for (subset in 1:5) { # change according to the number of susbsets you are plotting
# read SNP file from FINEMAP results for each subset:
snpfile <- read.csv(paste("subset",subset,".snp",sep=""), sep = " ", header = TRUE)
snpfile <- filter(snpfile, prob >= 0)
# extract the chromosome number:
chr <- snpfile[1,3]
# read the local LD results computed on LDstore:
ld <- read.csv(paste("subset",subset,".ld",sep=""), sep = " ", header = TRUE)
# separate rsids and other columns to compute R-squared:
snps <- colnames(ld[2:ncol(ld)])
head(ld,n=2)
name_tmp <- data.frame(ld[,1])
ld_tmp <- data.frame(ld[,2:ncol(ld)])
ld_tmp <- data.frame(lapply(ld_tmp,"^",2))
# bind the tables back again:
ld <- bind_cols(name_tmp, ld_tmp)
# rename columns
colnames(ld) <- c("rsid", snps)
# join snp table and LD table:
snp_plot <- left_join(snpfile, ld, by = "rsid")
# get the label for the legend:
rsq=expression(paste(r^2))
# plot and save png:
for (x in 1:length(snps)) {
index <- snps[x]
top <- filter(snp_plot, rsid == !!index) # filter to retain only index SNP
#### CHANGE THE FOLLOWING LINE FOR THE Y-AXIS TO: < y = "PIP" > if you want to plot the PIP instead.
ggplot(snp_plot, aes_string(x = "position", colour = index, y = "log10bf")) +
scale_y_continuous() +
geom_point(colour = "black", size = 4, alpha = 7/10) +
geom_point(aes_string(colour = index), size = 3.5, shape = 19, alpha = 7/10) +
scale_colour_stepsn(colours=c("blue2","deepskyblue","limegreen","orange2","#CC0000"), name=rsq, breaks=c(0.2, 0.4, 0.6, 0.8), labels=c("0.2","0.4","0.6","0.8")) +
#### CHANGE THE FOLLOWING LINE FOR THE Y-AXIS TITLE TO < y = "PIP" > if you want to plot the PIP instead:
guides(fill=FALSE) + labs( x = paste("Chromosome",chr,sep=" "), y = "log10(Bayes Factor)") +
theme(axis.text.x = element_text(face="bold", size=12), axis.text.y = element_text(face="bold", size=14),
axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15),
legend.title = element_text(size=16, face="bold"), legend.text = element_text(size=12),
panel.background = element_blank(), axis.line = element_line(color = "black"), legend.key=element_blank()) +
geom_point(data = top, colour = "black", size = 7.4, shape = 18) +
geom_point(data=top, colour="#FFFF00", shape = 18, size = 6.5) +
#### COMMENT THE FOLLOWING LINE IF PLOTTING THE PIP:
geom_hline(yintercept=2, colour = "black", linetype="dashed")
#### COMMENT THE FOLLOWING LINE IF PLOTTING THE PIP:
ggsave(file=paste("path-to-save-plots/subset",subset,"-",snps[x],"-logBF.png", sep=""))
#### COMMENT THE FOLLOWING LINE IF PLOTTING THE log10BF:
# ggsave(file=paste("path-to-save-plots/subset",subset,"-",snps[x],"-PIP.png", sep=""))
}
}
|
c41815b4b8f527935ff2389aa5b6b4ee27deebdb
|
59456c5c0693c7a9412998567a26a703b80273ba
|
/regenerate.r
|
11ce8408adda70de76da59bd5133dc5fa89defed
|
[
"Artistic-2.0"
] |
permissive
|
perishky/dmrff
|
c97e84362e75cdf90317606693eba8cd9cb4cc33
|
cc1d19cb905bacd24510c0ee0c741053673380df
|
refs/heads/master
| 2023-06-22T20:51:39.654368
| 2023-06-19T10:45:58
| 2023-06-19T10:45:58
| 159,144,556
| 7
| 4
|
Artistic-2.0
| 2019-10-27T12:32:13
| 2018-11-26T09:35:51
|
R
|
UTF-8
|
R
| false
| false
| 336
|
r
|
regenerate.r
|
#' The steps below are needed to regenerate
#' the data objects and documentation files
#' included with the package and then
#' run all tests.
#' install.packages("devtools")
#' devtools::install_github("klutometis/roxygen")
library(devtools)
library(roxygen2)
document("dmrff")
system("R CMD INSTALL dmrff")
reload(inst("dmrff"))
|
b7858d4c6b03db5efddc06d178dd49cfd3f539e6
|
e2facadc1e8a9e1ccfca847d48515fc1fac5023f
|
/Attempts/Attempt9- Boosting Attempt.R
|
64024458b25530d0187d33ca69051bcfce07a817
|
[] |
no_license
|
pnkonge/Statistical-Learning
|
be85943c2769fd513206477588640a80b4f6bea6
|
357b5e7a7eb70b1be386ce95c1019a0a88191fa7
|
refs/heads/master
| 2022-01-20T02:23:24.536333
| 2019-06-18T19:18:17
| 2019-06-18T19:18:17
| 192,599,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,207
|
r
|
Attempt9- Boosting Attempt.R
|
# 1) Loading the data required for Competition
getwd()
setwd("./University of Toronto - Mississauga/4 - Fourth Year/STA314/Competition/all")
sample_submission <- read.csv("SampleSubmission.csv")
test_data <- read.csv("test_predictors.csv")
training_data <- read.csv("trainingdata.csv")
library(gbm)
set.seed(1)
train = sample(1:nrow(training_data), 400, replace = FALSE)
y.test = training_data[-train,]$y
boost.train = gbm(y~.,
data = training_data[train,],
distribution='gaussian',
n.trees = 5000,
bag.fraction = 1,
interaction.depth = 1,
shrinkage = 0.1,
cv.folds = 5)
# bi gives relative variable importance
#summary(boost.train)
# next line gives results from cross-validation
bi = gbm.perf(boost.train,method="cv")
bi
# predictions
# note: once we have a model with N trees
# can specify any number of trees for prediction
# as long as it does not exceed N
pr.boo = predict(boost.train,newdata=training_data[-train,],n.trees=bi)
error = sqrt(mean((pr.boo-y.test)^2))
error
#------FULL MODEL PREDICTION
boost.train = gbm(y~.,
data = training_data,
distribution='gaussian',
n.trees = 5000,
bag.fraction = 1,
interaction.depth = 1,
shrinkage = 0.1,
cv.folds = 5)
bi = gbm.perf(boost.train,method="cv")
#Submit Predictions
pr.boo = predict(boost.train,newdata=test_data,n.trees=bi)
solution <- data.frame(id = c(1:500), y = pr.boo)
names(solution)[2] <- "y"
names(solution)
nrow(solution)
write.csv(solution, file = "9_boosting.csv", row.names = FALSE)
#---------------
# random forest with m = 6
# on same split gives similar performance
# plot influence of rm on prediction
# this is avergaed over all other predictors
# see details on lecture slides
plot(boost.train,i='X12',n.trees = bi)
plot(boost.train,i='X25',n.trees = bi)
# heat plot of joint effect of rm and lstat
plot(boost.train,i=c('X12','X25'),n.trees = 500)
|
b3f5e9d31e4b87ef6716e209756770c24e573aa1
|
1fdd28ec9ec0c58bdf50db47da583ff52ff0cbde
|
/man/internal-functions.Rd
|
5472c9655b94a4b264d5eb636fe35d36162ee095
|
[] |
no_license
|
michaelfop/damda
|
e6253796b23868f9da47c81d8c7b2d98d4cea5af
|
f4988a11139de7b3179c6945428083a73ec38959
|
refs/heads/main
| 2023-08-24T23:16:25.804714
| 2021-10-19T15:09:28
| 2021-10-19T15:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 388
|
rd
|
internal-functions.Rd
|
\name{internal-functions}
\alias{em_damda}
\alias{init_em}
\alias{mstep_damda}
\alias{start_parallel}
\alias{bic_reg}
\alias{ice}
\alias{estepdamda}
\alias{icf}
\alias{bhat_dist}
\title{
Internal \code{damda} function
}
\description{
Internal function not to be called by the user.
}
\value{
No return value, these functions are not meant to be called by the user.
}
\keyword{internal}
|
58acf43dbfbb7fca5dd641d10fbea91db433ca0f
|
8e727b337656a075f196447372fd97f32f4773a4
|
/test_text2vec.R
|
0e0a924012d4017a6a2c94137b98b615907b4b9a
|
[] |
no_license
|
ngfrey/text-analysis-with-R
|
cfb9fbfc5574a93fa7f4559b46794c9262558d6f
|
cd80836982afb02203459db7bb481f71d406fd5f
|
refs/heads/master
| 2020-12-31T21:38:19.112875
| 2018-09-09T17:15:12
| 2018-09-09T17:15:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,918
|
r
|
test_text2vec.R
|
load('data/shakes_words_df_4text2vec.RData')
library(tidyverse); library(text2vec)
shakes_words_as_list = shakes_words %>%
anti_join(tidytext::stop_words) %>%
split(.$id) %>%
map(function(x) x %>% pull(word))
# tokens <- word_tokenizer(shakes_words_as_list) # you've essentially done this
# Create vocabulary. Terms will be unigrams (simple words).
it = itoken(shakes_words_as_list, progressbar = FALSE)
vocab <- create_vocabulary(it) %>%
prune_vocabulary(term_count_min = 5L)
# Use our filtered vocabulary
vectorizer <- vocab_vectorizer(vocab)
# use window of 5 for context words
tcm <- create_tcm(it, vectorizer, skip_grams_window = 3)
RcppParallel::setThreadOptions(numThreads = 6)
glove = GloVe$new(word_vectors_size = 10, vocabulary = vocab, x_max = 10)
word_vectors_main = glove$fit_transform(tcm, n_iter = 100)
# word_vectors <- glove$get_word_vectors() # deprecated
word_vectors_context <- glove$components
word_vectors = word_vectors_main + t(word_vectors_context)
test <- word_vectors["romeo", , drop = FALSE] +
word_vectors["juliet", , drop = FALSE]
cos_sim = sim2(x = word_vectors, y = test, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 5)
test <- word_vectors["romeo", , drop = FALSE] -
word_vectors["juliet", , drop = FALSE]
cos_sim = sim2(x = word_vectors, y = test, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 5)
test <- word_vectors["romeo", , drop = FALSE]
cos_sim = sim2(x = word_vectors, y = test, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 5)
test <- word_vectors["hamlet", , drop = FALSE]
cos_sim = sim2(x = word_vectors, y = test, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 10)
test <- word_vectors["juliet", , drop = FALSE]
cos_sim = sim2(x = word_vectors, y = test, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 5)
|
f09c4e95771db4e5b89be7d22b4125e0f9f6bdc6
|
96e92e1a8267457ef7c0c3d2424b028792b196ca
|
/data/ch6-1.R
|
ab1f0b34a7620fcdb63af9b7ee0d403d744dabc8
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Finfra/r-sem
|
23dd74ca1b143b35793cdb2d5a8390856bab4559
|
21ecf73be106bc6e652691a24a58791726409ade
|
refs/heads/master
| 2020-12-03T06:36:50.178279
| 2017-03-03T08:12:16
| 2017-03-03T08:12:16
| 52,897,327
| 0
| 0
| null | 2016-03-01T17:50:39
| 2016-03-01T17:50:39
| null |
UTF-8
|
R
| false
| false
| 1,114
|
r
|
ch6-1.R
|
require(lavaan)
data=read.csv("data/data.csv")
str(data)
model <- 'price =~ x1 + x2 + x3 + x4
service =~ x5 + x6 + x7 + x8
atm =~ x9 + x10 + x11 + x12
cs =~ y1 + y2 + y3 + y4
cl =~ y5 + y6 + y7 + y8'
fit <- cfa(model, data =data)
fit
getMethod("print","lavaan")
summary(fit)
summary(fit,header=FALSE,estimates=TRUE)
getMethod("summary","lavaan")
getMethod("short","lavaan")
short.summary(fit)
fitMeasures(fit,fit.measures = "all")
resid(fit)
fitMeasures(fit, fit.measures = "default")
print.fit.measures
PE<-parameterEstimates(fit,standardized=TRUE,add.attributes=TRUE)
PE
getMethod("print","lavaan.parameterEstimates")
getMethod("print","lavaan.data.frame")
print.lavaan.data.frame
str(PE)
PE$std.lv[46:55]
print(PE,nd=3L)
summary(fit)
summary(fit,header=FALSE,fit.measures=TRUE)
summary(fit, standardized = TRUE)
diagram<-semPlot::semPaths(fit,
whatLabels="std", intercepts=FALSE, style="lisrel",
nCharNodes=0,
nCharEdges=0,
curveAdjacent = TRUE,title=TRUE, layout="tree2",curvePivot=TRUE)
|
fa95c4113fb8ef88020aa7fca08d8462da72f289
|
0ee14073e3f6bf0b1fde7ed81c664597c6bb44c1
|
/R/unicoxph.R
|
ec50a0b3bfee78d9d4abd1f1515993a6461bfb13
|
[] |
no_license
|
tienan/lxctk-m1
|
f71a714783d32f52f8085c844ac9d30e3433a927
|
60538ffe7e3a1e3042b8c261acb9f3968c86e729
|
refs/heads/master
| 2020-03-29T18:59:52.571006
| 2018-09-25T09:47:03
| 2018-09-25T09:47:03
| 150,242,676
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,193
|
r
|
unicoxph.R
|
unicoxph <- function(cox.f,data,variable.display=NULL, plot=TRUE, ...)
{
variables <- all.vars(cox.f)
n <- length(variables)
lci <- rep(NA, n)
uci <- rep(NA, n)
OR <- rep(NA, n)
p.value <- rep(NA, n)
ci.l <- list()
OR.l <- list()
p.value.l <- list()
for (i in 3:n) {
e <- variables[i]
#surv.f <- eval( do.call('substitute', list(surv.f.template, list(marker=as.name(e)))) )
surv.f <- as.formula( sprintf("Surv(%s,%s) ~ %s", variables[1], variables[2], e) )
r <- coxph(surv.f, data=data)
ci <- exp(confint(r))
odd.ratio <- exp(r$coefficients)
p <- summary(r)$coefficients[,5]
ci.l[[e]] <- ci
OR.l[[e]] <- odd.ratio
p.value.l[[e]] <- p
}
ci.x <- do.call('rbind', ci.l)
OR.x <- do.call('c', OR.l)
p.value.x <- do.call('c', p.value.l)
p.value.x <- sprintf("%.3g", p.value.x)
lci <- ci.x[,1]
uci <- ci.x[,2]
CI <- sprintf('%.3g (%.3g ~ %.3g)', OR.x, lci,uci)
if (is.null(variable.display)) variable.display <- names(OR.x)
coxTable <- cbind(variable.display, CI, p.value.x, OR.x, lci, uci)
coxTable <- rbind(c('Variable','HR (95% CI)','P-value', 'OR', 'lci', 'uci'), coxTable)
if (plot)
plot.coxph(coxTable=coxTable, ...)
invisible(coxTable)
}
|
08a85d305b6662e8d5cf41bc270cd723f00bab4c
|
f2e879fe1e58c13596c2245188c3aed56ffc125b
|
/scripts/mash_screen.R
|
9e5bddaeb67325fad920b745e3cf20dc83cb5c8f
|
[
"BSD-3-Clause"
] |
permissive
|
hkaspersen/misc-scripts
|
738f4b0e9db5b1320f826daec4f69a5c6aa83c33
|
24bf988439ce61fbabba2b1691a1da0adac53d50
|
refs/heads/master
| 2021-06-25T20:10:03.047768
| 2020-10-22T13:04:39
| 2020-10-22T13:04:39
| 148,607,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,613
|
r
|
mash_screen.R
|
#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly = TRUE)
report_loc <- args[1]
pattern <- args[2]
filter_value <- args[3]
organism <- args[4]
output_dir <- args[5]
filter_value <- as.numeric(filter_value)
# Libraries
library(ggplot2)
library(dplyr)
library(tidyr)
library(stringr)
library(purrr)
library(viridis)
library(impoRt)
library(svglite)
library(R.devices)
# Functions
func_paste <- function(x) paste(unique(x[!is.na(x)]), collapse = ", ")
## Identifies the largest value in the cell and removes the other values
scan_max <- function(x) max(scan(text = x,
what = "",
sep = ",",
quiet = TRUE,
strip.white = TRUE))
'%not_in%' <- Negate('%in%')
# Run analyses
print("Reading data...")
## Import data
mash_raw <- get_data(report_loc,
pattern,
delim = "\t",
col_names = FALSE) %>%
rename("identity" = X1,
"shared_hashes" = X2,
"median_multiplicity" = X3,
"p_value" = X4,
"query_id" = X5,
"query_comment" = X6) %>%
mutate(p_value = round(as.numeric(p_value), 5),
ref = sub("_mash.out", "", ref))
## Wrangle data
mash_results <- mash_raw %>%
separate(shared_hashes, c("min","max"), sep = "/", remove = FALSE) %>%
mutate(test = as.numeric(min)/as.numeric(max)) %>%
select(-c(min, max)) %>%
filter(test >= filter_value) %>%
mutate(species = query_comment %>%
str_replace("\\[.*?\\] ", "") %>%
sub("^.+_.+\\.[0-9] (.*?), .+", "\\1", .) %>%
word(1,2))
## Match species name for correct query
species_id <- mash_results %>%
mutate(species_test = grepl(organism, species, ignore.case = TRUE)) %>%
filter(species_test == TRUE) %>%
select(species) %>%
mutate(query = organism) %>%
summarise_all(list(func_paste))
organism <- unlist(strsplit(species_id$species, ", ", fixed = TRUE))
print("Creating output files...")
# Plotting
mash_plot <- ggplot(mash_results, aes(ref, as.numeric(identity), fill = species))+
geom_point(pch = 21, size = 2)+
geom_hline(yintercept = 0.95, alpha = 0.5)+
scale_fill_brewer(type = "div", palette = 2)+
labs(y = "Identity",
x = "Samples")+
theme_classic()+
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size = 12),
axis.title = element_text(size = 16),
legend.title = element_blank(),
legend.text = element_text(size = 12),
axis.ticks.x = element_blank())
# Tables
contaminated_df <- mash_results %>%
filter(species %not_in% organism,
identity >= 0.95) %>%
select(ref)
contaminated_ids <- contaminated_df$ref
if (length(contaminated_ids) == 0) {
print("No contamined isolates were identified within set limits.")
} else {
contam_ids <- suppressWarnings(
mash_results %>%
select(
-c(
shared_hashes,
query_id,
query_comment,
median_multiplicity,
test,
p_value
)
) %>%
filter(ref %in% contaminated_ids) %>%
mutate(id2 = 1:n()) %>%
spread(species, identity, fill = NA) %>%
select(-id2) %>%
group_by(ref) %>%
summarise_all(list(func_paste)) %>%
mutate_at(vars(-ref), list(sapply(., scan_max))) %>%
mutate_all(list(gsub("^$", NA, .)))
)
write.table(contam_ids,
paste0(output_dir,
"/contaminated_samples_report.txt"),
sep = "\t",
row.names = FALSE)
}
## Create reports
mash_report <- suppressWarnings(
mash_results %>%
select(
-c(
shared_hashes,
query_id,
query_comment,
median_multiplicity,
test,
p_value
)
) %>%
mutate(id2 = 1:n()) %>%
spread(species, identity, fill = NA) %>%
select(-id2) %>%
group_by(ref) %>%
summarise_all(funs(func_paste)) %>%
mutate_at(vars(-ref), funs(sapply(., scan_max))) %>%
mutate_all(funs(gsub("^$", NA, .)))
)
cont_species <- paste0(names(mash_report)[-1], collapse = ", ")
print(paste0("Species identified: ", cont_species))
# Write to file
write.table(mash_report,
paste0(output_dir,
"/full_mash_report.txt"),
sep = "\t",
row.names = FALSE)
invisible(suppressGraphics(
ggsave(paste0(output_dir,
"/mash_plot.svg"),
mash_plot,
device = "svg",
dpi = 100,
height = 14,
width = 16)
)
)
print("Analysis complete!")
|
5ebc87be47ccab1bb02a853519577ab1695b0317
|
e201e192fc8ed87d7b92f141568bdf21b17d965d
|
/man/univariate.formula.Rd
|
090c50cf373a5286f23925db5bbc87dc63f66bd0
|
[] |
no_license
|
zeligdev/ZeligDVN
|
e66c0ee16dea4cf5faae9bfe35a986eaa0489885
|
091b46787249fe1362cdbfb7184873f29b2082e9
|
refs/heads/master
| 2020-12-24T14:18:47.566770
| 2011-12-13T15:21:06
| 2011-12-13T15:21:06
| 2,349,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
rd
|
univariate.formula.Rd
|
\name{univariate.formula}
\alias{univariate.formula}
\title{Create a Formula with a Univariate Outcome...}
\usage{univariate.formula(outcome, explanatory, intercept, envir)}
\description{Create a Formula with a Univariate Outcome}
\details{...}
\value{a formula with a single outcome variable}
\author{Matt Owen \email{mowen@iq.harvard.edu}}
\arguments{\item{outcome}{a character-string specifying the outcome variables}
\item{explanatory}{a character-vector specifying the explanatory variables}
\item{intercept}{an integer specifying whether an intercept exists}}
|
49c211e659bf091089787865b9a2ab79a2ed0668
|
e885d8de246fc8bd8a212356e83a8ca4020a0034
|
/plot1.R
|
d5a382aaf89a11c120a61778965cd7e7f6d2d48d
|
[] |
no_license
|
momo476/ExData_Plotting1
|
192177562d1e5a316348bc6dcf0d6baee5db486e
|
4a20d397e670c5fb62859cb5270bad50262b9b16
|
refs/heads/master
| 2020-04-29T09:15:30.925951
| 2016-01-10T00:48:25
| 2016-01-10T00:48:25
| 49,169,400
| 0
| 0
| null | 2016-01-07T00:02:39
| 2016-01-07T00:02:38
| null |
UTF-8
|
R
| false
| false
| 909
|
r
|
plot1.R
|
## This code loads dplyr libraries
## The data file is then imported and the column classes set
## The table is converted into tbl_df format and filtered so that the data is ready to be plotted
library(dplyr) ##load dplyr library
plotdata <- read.table("household_power_consumption.txt",sep=";", header = TRUE, colClasses = "character") ##Reads in data file
plotdata_2 <- mutate_each(plotdata,funs(as.numeric),-(1:2))
plotdata_2 <- mutate(plotdata_2, Date = as.Date(Date, "%d/%m/%Y"))
plotdatadf <- tbl_df(plotdata_2) ##converts data frame to tbl_df
plotdatadf2_filtered <- filter(plotdatadf,Date == "2007-02-01" | Date == "2007-02-02") ##Filter out relevant dates
##Save to png file format
png("plot1.png", width = 480, height = 480)
hist(plotdatadf2_filtered$Global_active_power, col= "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)") ##plots the graph
dev.off()
|
c6772d063136b002b05679bee8697a370e7f863a
|
53d7e351e21cc70ae0f2b746dbfbd8e2eec22566
|
/inst/developer/tests to finish/test_xmuValues.r
|
75742f1e225516fe536b0eb9579a2c5758626a4a
|
[] |
no_license
|
tbates/umx
|
eaa122285241fc00444846581225756be319299d
|
12b1d8a43c84cc810b24244fda1a681f7a3eb813
|
refs/heads/master
| 2023-08-31T14:58:18.941189
| 2023-08-31T09:52:02
| 2023-08-31T09:52:02
| 5,418,108
| 38
| 25
| null | 2023-09-12T21:09:45
| 2012-08-14T20:18:01
|
R
|
UTF-8
|
R
| false
| false
| 666
|
r
|
test_xmuValues.r
|
data(myFADataRaw, package = "OpenMx")
manifests = paste0("x", 1:3)
latents = c("A", "B")
df = myFADataRaw[, manifests]
m1 <- mxModel("m1", type = "RAM",
latentVars = latents,
manifestVars = manifests,
umxPath("B", to = manifests),
umxPath("A", with = "B", fixedAt = 1),
# umxPath(means = manifests),
umxPath(var = manifests),
umxPath(var = latents),
mxData(df, "raw")
)
m1 = umxRun(m1, setLabels = TRUE, setValues = TRUE)
m1 = umxRun(m1); AIC(m1)
umxSummary(m1, show = "std"); # plot(m1, showFixed = T)
# ================
# = Test working =
# ================
testthat::expect_error(
xmuValues(m1), "You do this with mxPath(from = 'one', to = 'var')"
)
|
1aadb437fa8db6598caf19a69760c2b20e0a8aa7
|
dc763fb2506daf7aa1138b694a4dc3c1317a4d7d
|
/Statistics_project.R
|
ba6ac0fdeb3c9c3463ddc772fa19ddf7d80991f5
|
[] |
no_license
|
GioNardi/DataScienceprojects
|
208cd3f33e8269082d8df01da2cf2aa4b937ebd6
|
c6c0de0cebc6e74ddc64b22f587670525d1bdf34
|
refs/heads/master
| 2023-03-09T11:54:25.953461
| 2021-02-21T13:17:41
| 2021-02-21T13:17:41
| 299,362,704
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 7,276
|
r
|
Statistics_project.R
|
tmp <- read.table("car.data", header = TRUE, sep = ";")
#nella prima fase si legge analizza il dataset e lo si rende utilizzabile per le prossime analisi
options(scipen = 999)
auto=car.data
library(funModeling)
df_status(auto)
attach(auto)
auto$Years=2018-auto$Year
auto$Price_difference= auto$Present_Price - auto$Selling_Price
auto$Owner=as.factor(auto$Owner)
levels(auto$Owner)=c('Prima','Seconda','Quarta')
which(auto$Kms_Driven==500000) #500000 è errore di digitazione, quindi si corregge con 50000
auto[197,]$Kms_Driven=50000
auto2=auto[,-c(1,2)]
desc=df_status(auto2)
detach(auto)
attach(auto2)
#ora si può iniziare ad effettuare qualche analisi statistica di tipo descrittivo
#qualitative nominali, Fuel_type e Transmission
t1=table(Fuel_Type)*100/length(Fuel_Type) #si evince un numero maggiore di macchine a benzina
table(Transmission)*100/length(Transmission) #numero maggiore nettamente di macchine a cambio manuale
table(Seller_Type)*100/length(Seller_Type) #maggior numero di commercianti
table(Owner)*100/length(Owner)#maggior parte di vetture di prima mano
#in questo modo ottengo le percentuali relative alle modalità delle factor presenti nel dataset
C=c("red","blue","green")
lbls=c("CNG","Diesel","Petrol")
pct=c(0.66,19.93,79.40)
lbls=paste(lbls,pct)
lbls=paste(lbls,"%",sep="")
pie(t1,labels=lbls,main="Fuel Type percentage",col=C)
#vogliamo fare l'incorcio con transmission per vedere se c'è connessione quindi ricodifico la variabile Years
auto2$Age[auto2$Years<=4] = "bassa"
auto2$Age[auto2$Years>4 & auto2$Years<=10] = "media"
auto2$Age[auto2$Years>10] = "alta"
auto2$Age=factor(auto2$Age)
auto2$Age=ordered(auto2$Age,levels=c("bassa","media","alta"))
df_status(auto2)
attach(auto2)
tabella=table(auto2$Age,auto2$Transmission)
tabella
chisq.test(tabella) # trova chi square
chi=chisq.test(tabella)# create an object
chi$statistic # invocalo dall' oggetto
chi_norm<-chi$statistic/(nrow(auto2)*min(nrow(tabella)-1,ncol(tabella)-1))
chi_norm # chi quadrato/chi quadrato max
# il test chi quadro ci induce a non rifiutare l'ipotesi nulla di assenza di connessione tra Transmission e Age
prop.table(tabella)#percentuale di ogni cella sul totale
prop.table(tabella,1)#frequenze percentuali di Y condizionato a X
prop.table(tabella,2)#viceversa
#facciamo un barplot condizionato
barplot(t(tabella),main="Age by Transmission",col=c("red","blue"),legend.text=c("Automatic","Manual"))
#il grafico conferma il chi test. Non sembra esserci una connessione.
#costruiamo uno scatterplot condizionato tra kms driven, price_difference condizionato alla variabile age
c1=c("red","black","green")
plot(Kms_Driven,Price_difference*1000,col=c1[Age],xlim=c(0,240000),ylim=c(0,25000),ylab="Price_difference")
legend("topright", legend = levels(Age),pch=21,col=c1)
quantile(Price_difference,probs=c(0.9))
abline(h=7350)
#sembra esserci una lieve correlazione positiva tra la differenza di prezzo e i kilometri percorsi. Inoltre si può
#notare che le auto che hanno percorso più kilometri hanno età media o alta. le differenze di prezzo maggiori si notano
#in auto di età media o alta.
#quantitative discrete, con pochi valori (Years), vediamo percentuali e grafici
table(Years)*100/length(Years)
library(car)
Boxplot(~ Years, id.method="Years",main='Anni delle vetture') #con questo boxplot vengono riportati gli outliers anche
#questo comando ci mostra la divisione percentale dei valori relativi agli anni delle auto
summary(Years)#media è maggiore di mediana dunque abbiamo distribuzione leggermente asimmetrica positivamente per la variabile Years
#quantitative continue (Kmdrivens,selling price)
max(Kms_Driven)
min(Kms_Driven)
table(cut(Kms_Driven, breaks=c(0,10000,25000,40000,75000,500000)))
hist(Kms_Driven, breaks=c(0,10000,25000,40000,75000,220000))
lines(density(Kms_Driven),col="darkred",lwd=2)
Boxplot(~ Kms_Driven, id.method="Kms_driven",main='Km percorsi dalle vetture',ylim=c(0,240000)) #errore di digitazione 500000 km?
max(Selling_Price)
min(Selling_Price)
table(cut(Selling_Price, breaks=c(0,2.5,5,15,25,35)))
Boxplot(~ Selling_Price, id.method="Selling_Price",main='Valore delle auto') #in migliaia di euro
hist(Selling_Price, breaks=c(0,2.5,5,15,25,35))
summary(Selling_Price) #asimmetria positiva
#compito: inserire grafici adeguati per le singole variabili e presentare altre sttistiche come media-mediana
#traendone informazioni sulla asimmetria o normalità
#effettuiamo un boxplot condizionato con una qualitativa e una quantitativa
boxplot(Present_Price~Transmission,col=rainbow(3),ylab="Present_Price",xlab="Transmission",main="Boxplot comparing Present_Price by Transmission")
boxplot(Present_Price~Fuel_Type,col=rainbow(3),ylab="Present_Price",xlab="Fuel_Type",main="Boxplot comparing Present_Price by Fuel_Type")
#vediamo quale delle variabili quantitative risulta la più variabile in termini percentuali, mediante il calcolo del cv
#years
cv1=round((mean(Years)/sd(Years)),3)
#selling price
cv2=round((mean(Selling_Price)/sd(Selling_Price)),3)
#present price
cv3=round((mean(Present_Price)/sd(Present_Price)),3)
#Kms drivem
cv4=round((mean(Kms_Driven)/sd(Kms_Driven)),3)
cv=c(cv1,cv2,cv3,cv4)
cv #dunque la più variabile tra le variabili quantitative risulta essere la variabile Years
#valutiamo un po' di medie condizionate
#present price by transmission
attach(auto2)
means <- aggregate(Present_Price, by=list(Transmission), mean)
means #medie condizionate differenti di prezzo, non si riscontra indipendenza in media
#present price by fuel type
means2 <- aggregate(Present_Price, by=list(Fuel_Type), mean)
means2 #no indipenenza in media
means3 <- aggregate(Kms_Driven, by=list(Age), mean)
means3 #no indipendenza in media, eta quadro=0 in quanto le medie relative ai km guidati, condizionate
#alla variabile eta, sono differenti tra loro
#effettuiamo la stima intervallare per la media della variabile target Selling_Price con un grado di fiducia del 95%
#i dati non provengono da una distribuzione normale e sigma è ignota, si utilizza quindi la varianza campionaria
#R utilizza di default la varianza campionaria
S=sqrt(var(Selling_Price))
xmedio=mean(Selling_Price)
q=qnorm(0.975,0,1)
l1=xmedio-(q*(S/sqrt(301)))
l2=xmedio+(q*(S/sqrt(301)))
confintmu=c(l1,l2)
confintmu
#effettuiamo la stima intervallare per la proporzione di auto con cambio automatico con un grado di fiducia di 0.95
table(auto2$Transmission=="Automatic")
prop=40/301
S2=sqrt(prop*(1-prop))
lp1=prop-(q*(S2/sqrt(301)))
lp2=prop+(q*(S2/sqrt(301)))
confintp=c(lp1,lp2)
confintp
#effettuiamo la standardizzazione delle variabili quantitiative in quanto esse possidono differenti unità di misura
#duqnue una volta che si andrà modellare, potrebbe essere utile avere dati standardizzati
#creiamo una copia del dataset auto2 per poi standardizzarlo
std=auto2
attach(std)
std$Present_Price=scale(std$Present_Price,center=T,scale=T)
std$Selling_Price=scale(std$Selling_Price,center=T,scale=T)
std$Kms_Driven=scale(std$Kms_Driven,center=T,scale=T)
std$Price_difference=scale(std$Price_difference,center=T,scale=T)
std$Years=scale(std$Years,center=T,scale=T)
|
65c241b236c615e41e81c01763b8caedda22ee2d
|
752a33afa8fab689738da3032b03bdb70f2cce3b
|
/k_fold_CV_MarkerDensity_plus_MultipleModels_Marcus_Adjusted.All.Models.noParentsSelected.R
|
62009cd471c6716cac6d87b53352d4ba32386f40
|
[] |
no_license
|
marcbios/Evaluation-of-Genomic-Selection-and-Marker-Assisted-Selection-in-Miscanthus-and-energycane
|
ee80dcc8531d4982a857198bd2fe963d1635c7f3
|
8adcd870c7b161d8156f371a34a471561f09f4f0
|
refs/heads/master
| 2020-04-11T09:10:51.454407
| 2019-08-13T16:48:18
| 2019-08-13T16:48:18
| 161,668,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,604
|
r
|
k_fold_CV_MarkerDensity_plus_MultipleModels_Marcus_Adjusted.All.Models.noParentsSelected.R
|
run.k.fold.CV.using.rrBLUP <- function(the.phenotypes = NULL, the.genotypes = NULL, the.output.dir = NULL, number.of.folds = NULL,
proportion.to.select = NULL, user.input.seed.number = FALSE, seed.number = NULL, geno.proportion=NULL, ncycles=NULL){
setwd(the.output.dir)
seed.number = seednum.specific
y <- as.matrix(the.phenotypes[,2])
G <- as.numeric(t(the.genotypes[,-c(1:5)]))
G <- matrix(G, nrow(y), nrow(the.genotypes)) #Note: nrow(the genotypes) is specifying the number of columsns for G, whcih we ant
#to be equal to the number of markers
#for(props in 1:length(props.vec)){
#geno.proportion <- props.vec[props]
for(ncycle in 1:ncycles){#
colnames(G) <- 1:ncol(G)
test_proptn <- round(ncol(G)*geno.proportion)
set.seed(seed.numer.cycle[ncycle])
train.G = as.matrix(sample(1:ncol(G), test_proptn, replace=F))
Gt <- G[,train.G]
G2 <- Gt
Gt <- Gt - 1
cv.for.rrBLUP <- (as.matrix(rep(1, length(y))))
#cv.for.sommer <- (as.matrix(rep(1, length(y))))
#Calculate the kinship matrix in rrBLUP
Gt <- as.matrix(Gt)
A1 <- A.mat(Gt,shrink=TRUE)
library(sommer)
A2 <- A.mat(Gt,shrink=TRUE) # additive relationship matrix
D1 <- D.mat(Gt,shrink=TRUE) # dominance relationship matrix
E1 <- E.mat(Gt,shrink=TRUE) # epistatic relationship matrix
M <-tcrossprod(G2)/ncol(G2)
X <- G2
#burn=10
#Iter=600
#Save all of the above work into an object
#save.image("Workspace_20170815.Rdata")
sample.size <- length(y)
#if(!user.input.seed.number) seed.number <- sample(-1000000:1000000,1, replace = FALSE)
seed.number = seednum.specific
set.seed(seed.number)
sequence.sample <- rep(1:sample.size)
random.sample <- sample(1:sample.size, replace = FALSE)
increment <- ceiling(length(random.sample)/number.of.folds)
write.table(paste("Here_is_seed_number:_",seed.number, sep = ""), paste("Seed.number.for.",number.of.folds,".fold.CV.txt", sep = ""), row.names = FALSE, col.names = FALSE, sep = "\t", quote = FALSE)
#have a "for" loop, start it at 0, and end it at 4
#I am setting up "k" to denote the nubmer of folds - 1. This is done
# so that the for loop will work correctly.
r.gy.m<-matrix(NA,nrow=1,ncol=(number.of.folds+4))
r.gy.m.E<-matrix(NA,nrow=1,ncol=(number.of.folds+4))
r.gy.m.BA<-matrix(NA,nrow=1,ncol=(number.of.folds+4))
r.gy.m.BC<-matrix(NA,nrow=1,ncol=(number.of.folds+4))
r.gy.m.RK<-matrix(NA,nrow=1,ncol=(number.of.folds+4))
#setwd(wd)
#Read in a phenotype
count <- 0
k <- number.of.folds-1
r.gy <- NULL
r.gy.E <- NULL
r.gy.BA <- NULL
#r.gy.BB <- NULL
r.gy.BC <- NULL
#r.gy.BL <- NULL
r.gy.RK <- NULL
#r.gy.SV <- NULL
the.coefficients <- NULL
the.coefficients.E <- NULL
the.coefficients.BA <- NULL
#the.coefficients.BB <- NULL
the.coefficients.BC <- NULL
# the.coefficients.BL <- NULL
the.coefficients.RK <- NULL
#the.coefficients.SV <- NULL
#for (p in 2:ncol(the.phenotypes)){
for (p in 2:ncol(the.phenotypes)){
if((p >2)&(floor(p/10)==p/10)) {print(paste("--------------------------Working on the ", (p-1), "'th trait--------------------------------",sep=""))}
y <- as.matrix(the.phenotypes[,p])
Za <- diag(length(y))
Zd <- diag(length(y))
Ze <- diag(length(y))
for (i in 0:k){
print(paste("-------Now fitting the RR-BLUP model for fold ", (i+1), " -----------", sep = ""))
pred <- random.sample[((increment*i)+1):min(((increment*i)+increment) , sample.size)]
train <- random.sample[-(((increment*i)+1):min(((increment*i)+increment) , sample.size))]
yNA <- y
yNA <- as.vector(yNA)
yNA[pred] <- NA
data1 <- data.frame(y=yNA,gid=1:length(y), cv = cv.for.rrBLUP)
the.cv.names <- NULL
for(j in 1:ncol(cv.for.rrBLUP)) the.cv.names <- c(the.cv.names, paste("CV_",j,sep = ""))
# Fit rrBLP
colnames(data1) <- c("y","gid", the.cv.names)
rownames(A1) <- 1:nrow(A1) #A1 is created on line 114
ans1 <- kin.blup(data1,K=A1,geno="gid",pheno="y", covariate = the.cv.names)
#Measure correclation between OBS and Pred in validation set (V.S.)
r.gy <- c(r.gy, cor(ans1$g[pred], y[pred]))
#Fit a linear regression model, where the Y variable is the observed value and the x variabls is the predicted value
the.fitted.model <- lm(y[pred] ~ ans1$g[pred])
the.coefficients <- c(the.coefficients, the.fitted.model$coefficients[1], the.fitted.model$coefficients[2])
# Fit sommer models
rownames(E1) <- 1:nrow(E1)
ETA.E <- list(add=list(Z=Za,K=A2), dom=list(Z=Zd,K=D1), epi=list(Z=Ze,K=E1))
ans.E <- mmer(Y=data1$y, Z=ETA.E)
r.gy.E <- c(r.gy.E, cor(ans.E$fitted.y[pred], y[pred]) )
the.fitted.model.E <- lm(y[pred] ~ ans.E$fitted.y[pred])
the.coefficients.E <- c(the.coefficients.E, the.fitted.model.E$coefficients[1], the.fitted.model.E$coefficients[2])
#### BayesA Parametric MODEL ####
ETA<-list(list(X=X,model='BayesA'))
fm.BA<-BGLR(y=yNA,ETA=ETA,response_type="gaussian" ,nIter=Iter, burnIn=burn)
r.gy.BA <- c(r.gy.BA, cor(fm.BA$yHat[pred], y[pred]) )
the.fitted.model.BA <- lm(y[pred] ~ fm.BA$yHat[pred])
the.coefficients.BA <- c(the.coefficients.BA, the.fitted.model.BA$coefficients[1], the.fitted.model.BA$coefficients[2])
#### BayesC Parametric MODEL ####
ETA<-list(list(X=X,model='BayesC'))
fm.BC<-BGLR(y=yNA,ETA=ETA,response_type="gaussian" ,nIter=Iter, burnIn=burn)
r.gy.BC <- c(r.gy.BC, cor(fm.BC$yHat[pred], y[pred]) )
the.fitted.model.BC <- lm(y[pred] ~ fm.BC$yHat[pred])
the.coefficients.BC <- c(the.coefficients.BC, the.fitted.model.BC$coefficients[1], the.fitted.model.BC$coefficients[2])
#### Bayes RKHS Semi Parametric MODEL ####
ETA<-list(list(K=M,model='RKHS'))
fm.RK<-BGLR(y=yNA,ETA=ETA,response_type="gaussian" ,nIter=Iter, burnIn=burn)
r.gy.RK <- c(r.gy.RK, cor(fm.RK$yHat[pred], y[pred]) )
the.fitted.model.RK <- lm(y[pred] ~ fm.RK$yHat[pred])
the.coefficients.RK <- c(the.coefficients.RK, the.fitted.model.RK$coefficients[1], the.fitted.model.RK$coefficients[2])
#Obtain an object that has three columns. The first column is the taxa names in the validation population, the second column is the observed
# phenotypic value, and the third column is the predicted phenotypic value
the.taxa.in.the.validation.population <- as.character(the.phenotypes[as.numeric(rownames(ans1$g[pred])),1])
these.observed.and.predicted.phenotypic.values <- data.frame(the.taxa.in.the.validation.population, y[pred], ans1$g[pred])
colnames(these.observed.and.predicted.phenotypic.values) <- c("Taxa", "Observed.Value", "Predicted.Value")
Emat <- as.vector(ans.E$fitted.y)
names(Emat) <- 1:length(Emat)
the.taxa.in.the.validation.population.E <- as.character(the.phenotypes[as.numeric(names(Emat[pred])),1])
these.observed.and.predicted.phenotypic.values.E <- data.frame(the.taxa.in.the.validation.population.E, y[pred], Emat[pred])
colnames(these.observed.and.predicted.phenotypic.values.E) <- c("Taxa", "Observed.Value", "Predicted.Value")
BAmat <- as.vector(fm.BA$yHat)
names(BAmat) <- 1:length(BAmat)
the.taxa.in.the.validation.population.BA <- as.character(the.phenotypes[as.numeric(names(BAmat[pred])),1])
these.observed.and.predicted.phenotypic.values.BA <- data.frame(the.taxa.in.the.validation.population.BA, y[pred], BAmat[pred])
colnames(these.observed.and.predicted.phenotypic.values.BA) <- c("Taxa", "Observed.Value", "Predicted.Value")
BCmat <- as.vector(fm.BC$yHat)
names(BCmat) <- 1:length(BCmat)
the.taxa.in.the.validation.population.BC <- as.character(the.phenotypes[as.numeric(names(BCmat[pred])),1])
these.observed.and.predicted.phenotypic.values.BC <- data.frame(the.taxa.in.the.validation.population.BC, y[pred], BCmat[pred])
colnames(these.observed.and.predicted.phenotypic.values.BC) <- c("Taxa", "Observed.Value", "Predicted.Value")
RKmat <- as.vector(fm.RK$yHat)
names(RKmat) <- 1:length(RKmat)
the.taxa.in.the.validation.population.RK <- as.character(the.phenotypes[as.numeric(names(RKmat[pred])),1])
these.observed.and.predicted.phenotypic.values.RK <- data.frame(the.taxa.in.the.validation.population.RK, y[pred], RKmat[pred])
colnames(these.observed.and.predicted.phenotypic.values.RK) <- c("Taxa", "Observed.Value", "Predicted.Value")
if(i == 0){
the.observed.and.predicted.phenotypic.values <- these.observed.and.predicted.phenotypic.values
the.observed.and.predicted.phenotypic.values.E <- these.observed.and.predicted.phenotypic.values.E
the.observed.and.predicted.phenotypic.values.BA <- these.observed.and.predicted.phenotypic.values.BA
the.observed.and.predicted.phenotypic.values.BC <- these.observed.and.predicted.phenotypic.values.BC
the.observed.and.predicted.phenotypic.values.RK <- these.observed.and.predicted.phenotypic.values.RK
}else{
the.observed.and.predicted.phenotypic.values <- rbind(the.observed.and.predicted.phenotypic.values,these.observed.and.predicted.phenotypic.values)
the.observed.and.predicted.phenotypic.values.E <- rbind(the.observed.and.predicted.phenotypic.values.E,these.observed.and.predicted.phenotypic.values.E)
the.observed.and.predicted.phenotypic.values.BA <- rbind(the.observed.and.predicted.phenotypic.values.BA,the.observed.and.predicted.phenotypic.values.BA)
the.observed.and.predicted.phenotypic.values.BC <- rbind(the.observed.and.predicted.phenotypic.values.BC,these.observed.and.predicted.phenotypic.values.BC)
the.observed.and.predicted.phenotypic.values.RK <- rbind(the.observed.and.predicted.phenotypic.values.RK,these.observed.and.predicted.phenotypic.values.RK)
}#end if(i == 0)
}#end for (i in 0:k)
#calcualte the average and std. deviation
# rrBLUP
r.gy <- c(r.gy, mean(r.gy), sd(r.gy),(p-1))
the.coefficients <- c(the.coefficients, (p-1))
r.gy.output <- t(as.matrix(r.gy))
the.coefficients.output <- t(as.matrix(the.coefficients))
# sommer Epistasis
r.gy.E <- c(r.gy.E, mean(r.gy.E), sd(r.gy.E),(p-1))
the.coefficients.E <- c(the.coefficients.E, (p-1))
r.gy.output.E <- t(as.matrix(r.gy.E))
the.coefficients.output.E <- t(as.matrix(the.coefficients.E))
# BayesA
r.gy.BA <- c(r.gy.BA, mean(r.gy.BA), sd(r.gy.BA),(p-1))
the.coefficients.BA <- c(the.coefficients.BA, (p-1))
r.gy.output.BA <- t(as.matrix(r.gy.BA))
the.coefficients.output.BA <- t(as.matrix(the.coefficients.BA))
#BayesC
r.gy.BC <- c(r.gy.BC, mean(r.gy.BC), sd(r.gy.BC),(p-1))
the.coefficients.BC <- c(the.coefficients.BC, (p-1))
r.gy.output.BC <- t(as.matrix(r.gy.BC))
the.coefficients.output.BC <- t(as.matrix(the.coefficients.BC))
#BayesRK
r.gy.RK <- c(r.gy.RK, mean(r.gy.RK), sd(r.gy.RK),(p-1))
the.coefficients.RK <- c(the.coefficients.RK, (p-1))
r.gy.output.RK <- t(as.matrix(r.gy.RK))
the.coefficients.output.RK <- t(as.matrix(the.coefficients.RK))
#Sort the table of observed and predicted phenotypic values from smallest to largest
the.observed.and.predicted.phenotypic.values <- the.observed.and.predicted.phenotypic.values[order(the.observed.and.predicted.phenotypic.values$Predicted.Value, decreasing = TRUE),]
the.observed.and.predicted.phenotypic.values.E <- the.observed.and.predicted.phenotypic.values.E[order(the.observed.and.predicted.phenotypic.values.E$Predicted.Value, decreasing = TRUE),]
the.observed.and.predicted.phenotypic.values.BA <- the.observed.and.predicted.phenotypic.values.BA[order(the.observed.and.predicted.phenotypic.values.BA$Predicted.Value, decreasing = TRUE),]
the.observed.and.predicted.phenotypic.values.BC <- the.observed.and.predicted.phenotypic.values.BC[order(the.observed.and.predicted.phenotypic.values.BC$Predicted.Value, decreasing = TRUE),]
the.observed.and.predicted.phenotypic.values.RK <- the.observed.and.predicted.phenotypic.values.RK[order(the.observed.and.predicted.phenotypic.values.RK$Predicted.Value, decreasing = TRUE),]
#Output the table of observed and predicted phenotypic values
#write.table(the.observed.and.predicted.phenotypic.values ,paste( "Pred.Vals.rrBLUP.trait.",(p-1),".txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
#write.table(the.observed.and.predicted.phenotypic.values.E ,paste( "Pred.Vals.sommer.ADE.trait",(p-1),".txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
#write.table(the.observed.and.predicted.phenotypic.values.BA ,paste( "Pred.Vals.BGLR.BA.trait",(p-1),".txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
#write.table(the.observed.and.predicted.phenotypic.values.BC ,paste( "Pred.Vals.BGLR.BC.trait",(p-1),".txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
#write.table(the.observed.and.predicted.phenotypic.values.RK ,paste( "Pred.Vals.BGLR.RK.trait",(p-1),".txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
number.of.taxa.to.select <- round((proportion.to.select*length(y)),0)
if(count == 0){
r.gy.m <- r.gy.output
r.gy.m.E <- r.gy.output.E
r.gy.m.BA <- r.gy.output.BA
r.gy.m.BC <- r.gy.output.BC
r.gy.m.RK <- r.gy.output.RK
the.coefficients.m <- the.coefficients.output
the.selected.taxa <- as.character(the.observed.and.predicted.phenotypic.values[1:number.of.taxa.to.select,1])
the.coefficients.m.E <- the.coefficients.output.E
the.selected.taxa.E <- as.character(the.observed.and.predicted.phenotypic.values.E[1:number.of.taxa.to.select,1])
the.coefficients.m.BA <- the.coefficients.output.BA
the.selected.taxa.BA <- as.character(the.observed.and.predicted.phenotypic.values.BA[1:number.of.taxa.to.select,1])
the.coefficients.m.BC <- the.coefficients.output.BC
the.selected.taxa.BC<- as.character(the.observed.and.predicted.phenotypic.values.BC[1:number.of.taxa.to.select,1])
the.coefficients.m.RK <- the.coefficients.output.RK
the.selected.taxa.RK <- as.character(the.observed.and.predicted.phenotypic.values.RK[1:number.of.taxa.to.select,1])
}else{
r.gy.m <- rbind(r.gy.m,r.gy.output)
the.coefficients.m <- rbind(the.coefficients.m, the.coefficients.output)
the.selected.taxa <- c(the.selected.taxa, as.character(the.observed.and.predicted.phenotypic.values[1:number.of.taxa.to.select,1]))
r.gy.m.E <- rbind(r.gy.m.E,r.gy.output.E)
the.coefficients.m.E <- rbind(the.coefficients.m.E, the.coefficients.output.E)
the.selected.taxa.E <- c(the.selected.taxa.E, as.character(the.observed.and.predicted.phenotypic.values.E[1:number.of.taxa.to.select,1]))
r.gy.m.BA <- rbind(r.gy.m.BA,r.gy.output.BA)
the.coefficients.m.BA <- rbind(the.coefficients.m.BA, the.coefficients.output.BA)
the.selected.taxa.BA <- c(the.selected.taxa.BA, as.character(the.observed.and.predicted.phenotypic.values.BA[1:number.of.taxa.to.select,1]))
r.gy.m.BC <- rbind(r.gy.m.BC,r.gy.output.BC)
the.coefficients.m.BC <- rbind(the.coefficients.m.BC, the.coefficients.output.BC)
the.selected.taxa.BC <- c(the.selected.taxa.BC, as.character(the.observed.and.predicted.phenotypic.values.BC[1:number.of.taxa.to.select,1]))
r.gy.m.RK <- rbind(r.gy.m.RK,r.gy.output.RK)
the.coefficients.m.RK <- rbind(the.coefficients.m.RK, the.coefficients.output.RK)
the.selected.taxa.RK <- c(the.selected.taxa.RK, as.character(the.observed.and.predicted.phenotypic.values.RK[1:number.of.taxa.to.select,1]))
}#endif(count == 0)
#Create a list of the top X% lines with optimal predicted phenotypes
count <- count+1
#Reset the "the.observed.and.predicted.phenotypic.values" to NULL
r.gy <- NULL
the.coefficients <- NULL
the.observed.and.predicted.phenotypic.values <- NULL
r.gy.E <- NULL
the.coefficients.E <- NULL
the.observed.and.predicted.phenotypic.values.E <- NULL
r.gy.BA <- NULL
the.coefficients.BA <- NULL
the.observed.and.predicted.phenotypic.values.BA <- NULL
r.gy.BC <- NULL
the.coefficients.BC <- NULL
the.observed.and.predicted.phenotypic.values.BC <- NULL
r.gy.RK <- NULL
the.coefficients.RK <- NULL
the.observed.and.predicted.phenotypic.values.RK <- NULL
dev.off()
}#for (p in 1:100){
colnames(r.gy.m)<-c("fold.1","fold.2","fold.3","fold.4","fold.5","mean","sd","trait")
colnames(the.coefficients.m) <- c("Intercpet.fold.1", "Slope.fold.1", "Intercpet.fold.2", "Slope.fold.2",
"Intercpet.fold.3", "Slope.fold.3", "Intercpet.fold.4", "Slope.fold.4",
"Intercpet.fold.5", "Slope.fold.5","trait")
write.table(r.gy.m ,paste( "Cor.betw.obs.pred.",ncycle,"_Cycle_",number.of.folds,".folds.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
write.table(the.coefficients.m, paste( "SLR.int.slope.Y.obs.X.pred",ncycle,"_Cycle_",number.of.folds,".folds.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
the.selected.taxa <- the.selected.taxa[!the.selected.taxa%in%parents.vec]
the.unique.selected.taxa <- unique(the.selected.taxa)
write.table(the.selected.taxa, paste("the.sel.taxa.RR_BLUP",ncycle,"_Cycle",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
write.table(the.unique.selected.taxa, paste("the.uniq.sel.taxa.RR_BLUP",ncycle,"_Cycle",".txt"), sep="\t", quote=F, row.names=F, col.names=F)
colnames(r.gy.m.E)<-c("fold.1","fold.2","fold.3","fold.4","fold.5","mean","sd","trait")
colnames(the.coefficients.m.E) <- c("Intercpet.fold.1", "Slope.fold.1", "Intercpet.fold.2", "Slope.fold.2",
"Intercpet.fold.3", "Slope.fold.3", "Intercpet.fold.4", "Slope.fold.4",
"Intercpet.fold.5", "Slope.fold.5","trait")
write.table(r.gy.m.E ,paste( "Cor.betw.obs.pred.",ncycle,"_Cycle_",number.of.folds,".folds.sommerADE.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
write.table(the.coefficients.m.E, paste( "SLR.int.slope.Y.obs.X.pred",ncycle,"_Cycle_",number.of.folds,".folds.sommerADE.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
the.selected.taxa.E <- the.selected.taxa.E[!the.selected.taxa.E%in%parents.vec]
the.unique.selected.taxa.E <- unique(the.selected.taxa.E)
write.table(the.selected.taxa.E, paste("the.sel.taxa.E",ncycle,"_Cycle",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
write.table(the.unique.selected.taxa.E, paste("the.uniq.sel.taxa.sommer.E",ncycle,"_Cycle",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
colnames(r.gy.m.BA)<-c("fold.1","fold.2","fold.3","fold.4","fold.5","mean","sd","trait")
colnames(the.coefficients.m.BA) <- c("Intercpet.fold.1", "Slope.fold.1", "Intercpet.fold.2", "Slope.fold.2",
"Intercpet.fold.3", "Slope.fold.3", "Intercpet.fold.4", "Slope.fold.4",
"Intercpet.fold.5", "Slope.fold.5","trait")
write.table(r.gy.m.BA ,paste( "Cor.betw.obs.pred.",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.BA.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
write.table(the.coefficients.m.BA, paste( "SLR.int.slope.Y.obs.X.pred",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.BA.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
the.selected.taxa.BA <- the.selected.taxa.BA[!the.selected.taxa.BA%in%parents.vec]
the.unique.selected.taxa.BA <- unique(the.selected.taxa.BA)
write.table(the.selected.taxa.BA, paste("the.sel.taxa.BA",ncycle,"_Cycle_",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
write.table(the.unique.selected.taxa.BA, paste("the.uniq.sel.taxa.BGLR.BA",ncycle,"_Cycle_",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
colnames(r.gy.m.BC)<-c("fold.1","fold.2","fold.3","fold.4","fold.5","mean","sd","trait")
colnames(the.coefficients.m.BC) <- c("Intercpet.fold.1", "Slope.fold.1", "Intercpet.fold.2", "Slope.fold.2",
"Intercpet.fold.3", "Slope.fold.3", "Intercpet.fold.4", "Slope.fold.4",
"Intercpet.fold.5", "Slope.fold.5","trait")
write.table(r.gy.m.BC ,paste( "Cor.betw.obs.pred.",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.BC.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
write.table(the.coefficients.m.BC, paste( "SLR.int.slope.Y.obs.X.pred",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.BC.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
the.selected.taxa.BC <- the.selected.taxa.BC[!the.selected.taxa.BC%in%parents.vec]
the.unique.selected.taxa.BC <- unique(the.selected.taxa.BC)
write.table(the.selected.taxa.BC, paste("the.sel.taxa.BC",ncycle,"_Cycle_",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
write.table(the.unique.selected.taxa.BC, paste("the.uniq.sel.taxa.BGLR.BC",ncycle,"_Cycle_",".txt", sep=""), sep="\t", quote=F, row.names=F, col.names=F)
colnames(r.gy.m.RK)<-c("fold.1","fold.2","fold.3","fold.4","fold.5","mean","sd","trait")
colnames(the.coefficients.m.RK) <- c("Intercpet.fold.1", "Slope.fold.1", "Intercpet.fold.2", "Slope.fold.2",
"Intercpet.fold.3", "Slope.fold.3", "Intercpet.fold.4", "Slope.fold.4",
"Intercpet.fold.5", "Slope.fold.5","trait")
write.table(r.gy.m.RK ,paste( "Cor.betw.obs.pred.",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.RK.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
write.table(the.coefficients.m.RK, paste( "SLR.int.slope.Y.obs.X.pred",ncycle,"_Cycle_",number.of.folds,".folds.BGLR.RK.txt",sep = ""),sep = "\t", quote = FALSE, row.names = FALSE,col.names = TRUE)
the.selected.taxa.RK <- the.selected.taxa.RK[!the.selected.taxa.RK%in%parents.vec]
the.unique.selected.taxa.RK <- unique(the.selected.taxa.RK)
write.table(the.selected.taxa.RK, paste("the.sel.taxa.BGLR.RK",ncycle,"_Cycle",".txt",sep=""), sep="\t", quote=F, row.names=F, col.names=F)
write.table(the.unique.selected.taxa.RK, paste("the.uniq.sel.taxa.BGLR.RK",ncycle,"_Cycle",".txt",sep=""), sep="\t", quote=F, row.names=F, col.names=F)
#return(list(the.unique.selected.taxa=the.unique.selected.taxa, the.unique.selected.taxa.A=the.unique.selected.taxa.A, the.unique.selected.taxa.D=the.unique.selected.taxa.D,
#the.unique.selected.taxa.E=the.unique.selected.taxa.E, the.unique.selected.taxa.BA=the.unique.selected.taxa.BA, the.unique.selected.taxa.BB=the.unique.selected.taxa.BB,
#the.unique.selected.taxa.BC=the.unique.selected.taxa.BC, the.unique.selected.taxa.BL=the.unique.selected.taxa.BL, the.unique.selected.taxa.BR=the.unique.selected.taxa.BR,
#the.unique.selected.taxa.RK=the.unique.selected.taxa.RK, the.unique.selected.taxa.SV=the.unique.selected.taxa.SV))
}# Cycles per density
# Marker density
}#end run.k.fold.CV.using.rrBLUP
|
dc0db60ec1ca802ec8f32fe3ce179afbf77c86a4
|
6beb664cff08e5875853f22441fadf0879e713d5
|
/R/mvCircTruncNormal.R
|
3c54580cd8385849d95cae3dbb317a0db133c9d5
|
[
"BSD-3-Clause"
] |
permissive
|
lrodriguezlujan/mvcircular
|
ef703a53fcb85cf7c01544611e739f79cc0de7a6
|
f42966ebfc69a742c0b7cbd63d850e99936d57b9
|
refs/heads/master
| 2021-01-10T08:17:19.638751
| 2016-04-12T14:59:53
| 2016-04-12T14:59:53
| 45,543,757
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,775
|
r
|
mvCircTruncNormal.R
|
#' @name mvCircTruncNormal
#' @rdname mvCircTruncNormal
#' @title Multivariate circular truncated normal distribution
#'
#' @description
#' These functions implement diverse functionality over the
#' multivariate wrapped normal distribution given its parameters mu, the circular mean vector,
#' and sigma, the variance covariance matrix.
#'
#' @author Luis Rodriguez Lujan
#'
#' @keywords multivariate normal circular truncated
#'
#' @seealso \code{\link{mvCircTruncNormal}}
#' @export
NULL
MVCIRCTRUNCNORMAL_CLASS <- "mvCircTruncNormal"
#' @param mu Circular mean vector
#' @param sigma a positive-definite symmetric matrix that specifies covariance matrix
#' @param \dots (Constructor) Named list with additional attributes to add to the object
#'
#' @examples
#' mvCircTruncNormal(rep(0,3), diag(3) )
#'
#' @importFrom circular is.circular conversion.circular as.circular
#'
#' @export
mvCircTruncNormal <- function(mu, sigma, ...){
  # Construct a multivariate circular truncated normal distribution object.
  #
  # mu:    circular mean vector (circular or numeric); coerced to [0, 2*pi).
  # sigma: positive-definite symmetric covariance matrix, dim == length(mu).
  # ...:   extra named attributes forwarded to mvCircularProbDist().

  # Coerce mu to a circular vector expressed in radians modulo 2*pi.
  if ( circular::is.circular(mu) ) {
    mu <- circular::conversion.circular(mu, modulo = "2pi")
  } else if ( is.numeric(mu) ) {
    mu <- circular::as.circular(mu, modulo = "2pi", zero = 0, template = "none",
                                type = "angles", units = "radians", rotation = "counter" )
  } else {
    stop("Mu must be circular or numeric. see circular::as.circular() ")
  }

  # Validate sigma: square numeric matrix matching the length of mu.
  # (The previous message referred to "Lambda", a parameter this
  # constructor does not have.)
  if ( !is.numeric(sigma) || !is.matrix(sigma) ) {
    stop("Sigma should be a numeric matrix")
  } else if ( length(mu) != nrow(sigma) ) {
    stop("Parameters length do not match")
  } else if ( nrow(sigma) != ncol(sigma) ) {
    stop("sigma is not a square matrix")
  }

  # Create base object and attach the distribution parameters.
  obj <- mvCircularProbDist( length(mu), ... )
  obj$mu <- mu
  obj$sigma <- sigma
  # Truncation bounds: one full period centred on the mean.
  obj$lower <- as.numeric(mu) - pi
  obj$upper <- as.numeric(mu) + pi

  # Add classes (probdist + truncated normal).
  class(obj) <- append( class(obj), MVCIRCTRUNCNORMAL_CLASS)
  return(obj)
}
#' Fit method uses data matrix or dataframe \code{samples} to compute the ML parameters of the distribution
#'
#' @param samples Matrix or DF with multivariate circular samples
#' @param zero.threshold Any sigma value that verifies that \code{abs(x) < zero.threshold } is returned as zero
#'
#' @return \code{mvCircTruncNormal} returns a mvCircTruncNormal object
#'
#' @importFrom Matrix nearPD
#' @importFrom tmvtnorm mle.tmvnorm
#'
#' @examples
#' samples <- rmvCircTruncNormal(100000, rep(pi,3), matrix( c(3,1,-1,1,3,0,-1,0,3), ncol = 3 , nrow = 3 ) )
#' obj <- mvCircTruncNormal.fit(samples)
#' sum(abs(obj$mu - rep(pi,3)))
#' sum(abs(obj$sigma - matrix( c(3,1,-1,1,3,0,-1,0,3), ncol = 3 , nrow = 3 ) ))
#' plot(obj, obj$fitted.data [1:1000,])
#'
#' @rdname mvCircTruncNormal
#' @export
mvCircTruncNormal.fit <- function(samples, zero.threshold = 1E-2, ...){
  # ML fit of a multivariate circular truncated normal to a matrix/data.frame
  # of angular samples (one row per observation, one column per dimension).
  # The mean is estimated from the circular sample mean; the covariance is
  # estimated by truncated-normal maximum likelihood with the mean held fixed.

  # number of variables
  ndim <- ncol(samples)
  nsamples <- nrow(samples)  # NOTE(review): computed but not used below
  # First: Convert samples to complex numbers (unit vectors on the circle),
  # so the circular mean can be computed component by component.
  samples.complex <- data.frame( lapply(samples, function(x){ complex(argument = x) } ))
  # Then: compute mu component by component
  samples.complex.mean <- unlist(lapply(samples.complex, function(x){ mean(x) } ))
  # Mean vector and lower/upper truncation bounds (one period around mu).
  mu <- Arg( samples.complex.mean ) %% (2*pi)
  lower <- as.numeric(mu) - pi
  upper <- as.numeric(mu) + pi
  # Put points in the truncation region [mu - pi, mu + pi).
  samples <- sweep( (samples %% (2*pi)) - pi, 2, mu, FUN = "+" )
  # ML estimation of the truncated normal; mu is fixed so only the
  # covariance coefficients are optimized.
  fit <- tmvtnorm::mle.tmvnorm(
    as.matrix( samples ),
    lower = lower,
    upper = upper,
    start = list( mu = as.numeric(mu), sigma = diag(ncol(samples) ) ),
    fixed = list( "mu" ))
  # Rebuild the symmetric sigma matrix from the lower-triangular
  # coefficients returned by the fit (the first ndim coefficients are mu).
  sigma <- matrix(0, ncol = ndim, nrow = ndim)
  sigma[lower.tri(sigma,diag = T)] <- attr(fit,"coef")[-(1:ndim)]
  sigma <- sigma + t(sigma)
  diag(sigma) <- diag(sigma)/2
  # Any value under the threshold is treated as exactly 0.
  sigma[ abs(sigma) < zero.threshold ] <- 0
  # Sigma must be Positive Definite (unclear whether the MLE guarantees it,
  # so project onto the nearest PD matrix).
  sigma <- as.matrix(Matrix::nearPD(sigma)$mat)
  # Return the fitted distribution object, keeping the shifted samples.
  return( mvCircTruncNormal(mu, sigma, fitted.data = samples) )
}
#' @param n Number of samples to generate
#' @param ... (\code{rmvCircTruncNormal}) Additional parameters for \code{\link{tmvtnorm::rtmvnorm} }
#'
#' @return \code{rmvCircTruncNormal} returns a multivariate circular dataframe with \code{n}
#' samples from a truncated circular normal distribution
#'
#' @examples
#' samples <- rmvCircTruncNormal(100, rep(pi,2), diag(2) )
#' plot(as.numeric(samples[,1]), as.numeric(samples[,2]) )
#'
#' @importFrom tmvtnorm rtmvnorm
#'
#' @rdname mvCircTruncNormal
#' @export
rmvCircTruncNormal <- function(n, mu, sigma, ...){
  # Draw n samples from a multivariate circular truncated normal with mean
  # vector mu and covariance sigma; extra args go to tmvtnorm::rtmvnorm.
  # Returns a multivariate circular data frame with angles in [0, 2*pi).

  # n must be a positive whole number.
  if ( !is.numeric(n) || n <= 0 || floor(n) != n ) stop("The number of samples should be a positive integer")

  # Sample the underlying truncated normal on [mu - pi, mu + pi], then
  # wrap the draws back into [0, 2*pi).
  center <- as.numeric(mu) %% (2*pi)
  draws <- tmvtnorm::rtmvnorm(n, mean = center, sigma = sigma,
                              lower = center - pi, upper = center + pi, ...)
  draws <- draws %% (2*pi)

  # Return as a multivariate circular data frame.
  return( as.mvCircular(draws) )
}
#' \code{dmvCircTruncNormal} computes multivariate circular normal densitiy function approximately.
#'
#' @param x The point to evaluate
#' @param ... (\code{dmvCircTruncNormal}) extra arguments for \code{\link{tmvtnorm::dtmvnorm}}
#'
#' @return \code{dmvCircTruncNormal} returns the density function evaluated at \code{x}
#'
#' @importFrom tmvtnorm dtmvnorm
#'
#' @rdname mvCircTruncNormal
#' @export
#'
#' @examples
#' dmvCircTruncNormal(c(0,0,0), rep(0,3), 1000*diag(3) )
#' dmvCircTruncNormal(c(0,0,0), rep(0,3), 0.1*diag(3))
#' dmvCircTruncNormal(c(pi,pi,pi), rep(0,3), diag(3) )
dmvCircTruncNormal <- function(x, mu, sigma, ...){
  # Density of the multivariate circular truncated normal at x.
  #
  # x:     numeric vector of length(mu), or a matrix/data.frame with one
  #        observation per row and length(mu) columns.
  # mu:    circular mean vector; reduced modulo 2*pi.
  # sigma: covariance matrix of the underlying truncated normal.
  # Returns the density value(s) from tmvtnorm::dtmvnorm.

  # Validate inputs.
  if ( (is.matrix(x) || is.data.frame(x) ) ) {
    if (ncol(x) != length(mu)) stop("ncol(x) must equal length(mu)")
    x <- as.matrix(x)
  }
  else if (is.numeric(x)) {
    if ( length(x) != length(mu) ) stop("length(x) must equal length(mu)")
    else x <- matrix(x,nrow = 1)
  }
  else stop("x must be a numeric vector, a matrix or a data.frame")

  mu <- as.numeric(mu) %% (2*pi)

  # Wrap every observation into the truncation region [mu - pi, mu + pi),
  # preserving each angle modulo 2*pi.  The previous code computed
  # (x %% 2*pi) + mu - pi, which *shifted* the angles (e.g. x = mu was
  # mapped to mu - pi, so the density examples evaluated the wrong point)
  # and relied on column-major recycling when x had several rows.
  x <- sweep(x, 2, mu)              # center each column on mu
  x <- ((x + pi) %% (2*pi)) - pi    # wrap into [-pi, pi)
  x <- sweep(x, 2, mu, FUN = "+")   # shift back to [mu - pi, mu + pi)

  return( tmvtnorm::dtmvnorm(x,
                             mean = mu,
                             sigma = sigma,
                             lower = mu - pi,
                             upper = mu + pi) )
}
#' @examples
#' obj <- mvCircTruncNormal(rep(pi,2), diag(2) )
#' samples <- getSamples(obj,100)
#' plot(as.numeric(samples[,1]), as.numeric(samples[,2]) )
#'
#' @rdname mvCircTruncNormal
#' @export
getSamples.mvCircTruncNormal <- function(obj, n, ...) {
  # Draw n samples from this distribution object.  Extra arguments are now
  # forwarded to the sampler (the original accepted `...` but silently
  # dropped it).
  # Returns a multivariate circular data frame.
  return( rmvCircTruncNormal(n, obj$mu, obj$sigma, ...) )
}
#' @examples
#' obj <- mvCircTruncNormal(rep(0,3), diag(3) )
#' fval(obj,c(0,0,0))
#' fval(obj,c(2*pi,2*pi,2*pi))
#' fval(obj,c(pi,pi,pi))
#'
#' obj <- mvCircTruncNormal(rep(0,3), 1000*diag(3) )
#' fval(obj,c(0,0,0))
#'
#' obj <- mvCircTruncNormal(rep(0,3), 0.1*diag(3) )
#' fval(obj,c(0,0,0))
#'
#' @rdname mvCircTruncNormal
#' @export
fval.mvCircTruncNormal <- function(obj, x, ... ) {
  # Evaluate the density at x by delegating to the stand-alone density
  # function with this object's parameters.
  dmvCircTruncNormal(x, mu = obj$mu, sigma = obj$sigma, ...)
}
#'@importFrom tmvtnorm dtmvnorm.marginal
#'@rdname mvCircTruncNormal
circMarginal.mvCircTruncNormal <- function(obj, x, i){
  # Marginal density along dimension i of the underlying truncated normal,
  # evaluated at x.
  mean.vec <- as.numeric(obj$mu)
  marginal <- tmvtnorm::dtmvnorm.marginal(x, n = i,
                                          mean  = mean.vec,
                                          sigma = obj$sigma,
                                          lower = obj$lower,
                                          upper = obj$upper)
  return( marginal )
}
#'@rdname mvCircTruncNormal
circMarginalMean.mvCircTruncNormal <- function(obj , i){
  # The circular marginal mean is simply the i-th component of mu.
  obj$mu[i]
}
#'@rdname mvCircTruncNormal
circMarginalConcentration.mvCircTruncNormal <- function(obj, i){
  # Concentration measure derived from the i-th diagonal entry of sigma:
  # exp(-sigma_ii^2 / 2), matching the original implementation.
  v <- obj$sigma[i, i]
  exp(-(v^2) / 2)
}
#'@importFrom MASS ginv
#'@rdname mvCircTruncNormal
circCor.mvCircTruncNormal <- function(obj, i, j){
  # Entry (i, j) of the Moore-Penrose generalized inverse of sigma.
  precision <- MASS::ginv(obj$sigma)
  precision[i, j]
}
|
43c9bb69f1a7d98371258b74e30ecfc98825bab6
|
eafe89a586691ba69c445cdb27e171e42f7b9602
|
/GraphicsColor.R
|
b00d6143baf5215c5368f7fb2785e4cf2e725ce5
|
[] |
no_license
|
BMVerrico/Bio381_ComputationalBiology
|
073ff775d727c6435ceaf7104f1b8ffbd79e6984
|
283bc58c9508655b9d3a7d97c7a6b59ed2e3a3fa
|
refs/heads/master
| 2021-09-14T05:53:49.323026
| 2018-05-08T23:10:11
| 2018-05-08T23:10:11
| 117,879,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,478
|
r
|
GraphicsColor.R
|
# ggplot graphics
# 04.17.2018
library(ggplot2)
library(ggthemes)
library(wesanderson)
library(TeachingDemos)
char2seed("short capo")
# colors in graphics
# aesthetics
# large geom (fills) -- pale colors
# small geom (points, lines) -- brighter colors
# dichromat() -- visible with people with RG color blindness
# emphasize or de-emphasize graphic elements
# pale, grey to de-emphasize
# bright and saturated colors to emphasize
# how to use color to convey additional important information about the data
# discrete scale
# fills in boxplot, histo
# same color = similar treatments
# neutral colors (black, white, gray) = control treatments
# symbolic colors (heat: red, orange; cool: blues)
# photosynthesis/growth: green tones
# low nutrients: blue tones
# over enriched: brown
# disease dynamics : infected ind--red.
# monochromatic scale: different degrees/scales/shades of that color
# dichromatic scale: colors go from low to high value
# divergent scale: low, baseline, high colors
# Demonstration of discrete color scales on the mpg data set.
d=mpg # get data set
p1=ggplot(data= d,
          mapping = aes(x=fl, y=hwy, group=fl))
p1 +geom_violin(fill="red")
# One fill color per fuel-type group (5 groups in mpg$fl).
myColors=c("red", "green", "pink", "blue", "orange")
p1 +geom_boxplot(fill=myColors)
# gray(0.7) returns a hex string for a 70% gray.
myGray=gray(0.7)
p1 +geom_boxplot(fill=myGray)
print(myGray) # hexadecimal representation of the color
# red green blue tones in plot, expressed in base-16 pairs
test="#C9B4B5"
p1 +geom_boxplot(fill=test)
# Build a semi-transparent red by hand: RGB triple -> [0,1] -> hex + alpha.
.=col2rgb("red")
print(.)
. = . /255
print(.)
.= rgb(t(.), alpha=0.5)
print(.) # the trailing 80 is the transparency value in the hexadecimal string
myPaleRed=.
p1 +geom_boxplot(fill=myPaleRed)
# A 5-step monochromatic gray scale via gray(seq(...)).
p1 +geom_boxplot(fill=gray(seq(from=0.1,to=0.9,length=5)))
# Overlapping histograms: transparency distinguishes the two groups.
x1 <- rnorm(n=100,mean=0)
x2 <- rnorm(n=100,mean=3)
dFrame <- data.frame(v1=c(x1,x2))
lab <- rep(c("Control","Treatment"),each=100)
dFrame <- cbind(dFrame,lab)
str(dFrame)
h1 <- ggplot(data=dFrame,mapping=aes(x=v1,fill=lab))
h1 + geom_histogram(position="identity",alpha=0.5,color="black")
# Predefined palettes: wesanderson, a manual red monochrome, and canva.
p1 + geom_boxplot(fill=wes_palettes[["Rushmore"]])
redmono = c("#99000D", "#CB181D", "#EF3B2C", "#FB6A4A", "#FC9272")
p1 + geom_boxplot(fill=redmono)
p1 + geom_boxplot(fill=c(gray(0.5), canva_palettes[[1]]))
# Map fill to the fl variable and override the palette with a scale.
p2=ggplot(data=d,
          mapping=aes(x=fl, y=hwy, fill=fl)) + geom_boxplot() +
  scale_fill_manual(values=wes_palettes[["Rushmore"]]) # override colors. # fill is different than color
print(p2)
# ColorBrewer palette applied to the discrete fill scale.
p2 + scale_fill_brewer(palette = "Blues")
# continuous scale colors
p3 = ggplot(data=d,
            mapping=aes(x=displ, y=hwy, color=fl)) +
  geom_point() +
  scale_color_brewer(palette = "Spectral")
print(p3)
# Map color to a continuous variable (city mpg) instead of a factor.
p3 <- ggplot(data=d, mapping=aes(x=displ,y=hwy,color=cty)) +
  geom_point()
print(p3)
# Two-color (dichromatic) gradient: low -> high.
p3 +
  scale_color_gradient(low="red",high="blue")
# Divergent gradient around a fixed midpoint.
p3 + scale_color_gradient2(midpoint=20,low="red",
                           mid=grey(0.5),high="darkblue")
# Divergent gradient around the data mean.
z=mean(d$cty)
p3 +
  scale_color_gradient2(midpoint=z,low="red",mid="pink",
                        high="purple",space="Lab")
# space is required. but no real definition as to what it is. why it is needed.
# use scale_color_gradientn for multicolored changes
p3+scale_color_gradientn(colours = rainbow(5))
# heat map: fill of each tile encodes zVar on a divergent gradient
xVar <- 1:30
yVar <- 1:5
myData <- expand.grid(xVar=xVar,yVar=yVar)
head(myData)
# z = x + y plus noise; 150 = 30 * 5 grid cells.
zVar <- myData$xVar + myData$yVar + 2*rnorm(n=150)
myData <- cbind(myData,zVar)
head(myData)
p4 <- ggplot(data=myData,mapping=aes(x=xVar,y=yVar,fill=zVar))
p4 + geom_tile() + scale_fill_gradient2(midpoint=19,low="red",mid="pink",
                                        high="purple")
|
67a0d8ff43311db6c9343ba3fbbf57c2929e2614
|
4ea40094ed91786879c2ea4b4b39ca30aa99261f
|
/r/get_evi_nc_func.R
|
525597219618d74a35e7ff4848ed186a2e1c9512
|
[] |
no_license
|
Jinyan-Yang/get_evi
|
c1862308255f6add86c3afcb273687785cf63b21
|
43a35b510179a070f345301e474dd610de64f8ce
|
refs/heads/master
| 2021-06-25T04:53:35.681932
| 2021-03-10T23:21:21
| 2021-03-10T23:21:21
| 214,079,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,087
|
r
|
get_evi_nc_func.R
|
# TERN AusCover. [Year and version number of data product]. [Data product title]. Obtained from [server URL], made available by the AusCover facility (http://www.auscover.org.au) of the Terrestrial Ecosystem Research Network (TERN, http://www.tern.org.au). Accessed [Date accessed].
#This dataset has been developed by the Time Series Remote Sensing Team, CSIRO Marine and Atmospheric Research. The original data were supplied by the Land Processes Distributed Active Archive Center (LPDAAC), located at the U.S. Geological Survey (USGS) Earth Resources Observation and Science Center (EROS) http://lpdaac.usgs.gov.
# library(curl)####
library(raster)
library(ncdf4)
library(gdalUtils)
library(R.utils)
library(lubridate)
#function to downloaded file from url with test ####
curl_download_withtest <- function(filename, url,...){
  # Download `url` to `filename` via the "curl" method, unless the target
  # file already exists (in which case the download is skipped with a
  # message).  Extra arguments are forwarded to download.file().
  if (file.exists(filename)) {
    message("Skipped ",filename, "; already exists.")
  } else {
    download.file(url, filename,"curl",quiet = FALSE,...)
  }
}
get.modis.nc.func <- function(year){
  # Download all 16-day MOD13Q1 EVI mosaics (netCDF) for one calendar year
  # from the NCI/AusCover archive into downloads/modis/.
  #
  # year: a single calendar year, e.g. 2016.
  #
  # Remote layout example:
  #   .../MOD13Q1.005/2000.02.18/MOD13Q1.2000.049.aust.005.b02.250m_evi.hdf.gz
  urlBase <- 'http://remote-sensing.nci.org.au/u39/public/data/modis/lpdaac-mosaics-cmar/v2-nc4/aust/MOD13Q1.005/'
  # 16-day composite start dates for the year and their day-of-year numbers.
  # (The unused `yearDate` local from the original has been removed.)
  date.seq <- seq(as.Date(sprintf("%s-01-01",year)),
                  as.Date(sprintf("%s-12-31",year)),by=16)
  doy <- yday(date.seq)
  for (i in seq_along(doy)){
    # Remote URL and local file name for this composite period.
    urlZip <- paste0(urlBase,format(date.seq[i],"%Y.%m.%d"),
                     "/MOD13Q1.",year,".",sprintf("%03d.aust.005.", doy[i]),"enhanced_vegetation_index.nc")
    fn <- sprintf("MOD.%s.%03d.250m_evi.nc",as.character(year),doy[i])
    # Download, skipping files already present; try() keeps the loop going
    # when an individual download fails.
    try(curl_download_withtest(file.path('downloads','modis',fn),urlZip))
  }
}
get.evi.coords.ns.func <- function(year.in,lat,lon){
  # Extract the EVI time series for the grid cell nearest to (lat, lon)
  # from previously downloaded MOD13Q1 netCDF files (see get.modis.nc.func).
  #
  # year.in: vector of years to read.
  # lat, lon: target coordinates in decimal degrees.
  # Returns a data.frame with Date, year, doy, lat, lon and evi columns.

  # 16-day composite start dates for a single year.
  creat.date.func <- function(year){
    seq(as.Date(sprintf("%s-01-01",year)),
        as.Date(sprintf("%s-12-31",year)),by=16)
  }
  # Read one netCDF file and return the EVI value of the nearest grid cell.
  read.func <- function(fn){
    nc_data <- nc_open(fn)
    # Close the handle even if reading fails below.  The original version
    # never called nc_close(), leaking one open file per composite period.
    on.exit(nc_close(nc_data), add = TRUE)
    lon.vec <- ncvar_get(nc_data, "longitude")
    lat.vec <- ncvar_get(nc_data, "latitude")
    evi <- ncvar_get(nc_data, "evi")
    # Nearest-neighbour lookup of the target coordinates.
    lat.index <- which.min(abs(lat.vec - lat))
    lon.index <- which.min(abs(lon.vec - lon))
    value.tar <- evi[lon.index,lat.index]
    rm(evi)  # free the full EVI grid before returning
    return(value.tar)
  }
  # Build the full date table across all requested years.
  date.ls <- list()
  for (i in seq_along(year.in)){
    date.ls[[i]] <- data.frame(Date = creat.date.func(year.in[i]))
  }
  out.df <- do.call(rbind,date.ls)
  out.df$year <- year(out.df$Date)
  out.df$doy <- yday(out.df$Date)
  out.df$lat <- lat
  out.df$lon <- lon
  # File names follow the naming convention used by get.modis.nc.func.
  file.nm.vec <- sprintf("downloads/modis/MOD.%s.%03d.250m_evi.nc",out.df$year,out.df$doy)
  out.df$evi <- mapply(read.func,file.nm.vec)
  return(out.df)
}
# get.modis.nc.func(2016)
# download evi####
# Fetch every 16-day EVI mosaic for 2001-2016 (skips files already present).
for(year in 2001:2016){
  get.modis.nc.func(year)
}
# get and save evi for coords####
# Extract and cache the 2001-2016 EVI time series for four sites.
# get ym (Yarramundi)
year.in <- 2001:2016
lat <- -33.610412
lon <- 150.73394
evi.ym.df <- get.evi.coords.ns.func(year.in,lat = lat ,lon=lon)
saveRDS(evi.ym.df,'ym20012016.rds')
# get qp
# -26.577250, 144.619028
year.in <- 2001:2016
lat <- -26.577250
lon <- 144.619028
evi.qp.df <- get.evi.coords.ns.func(year.in,lat = lat ,lon=lon)
saveRDS(evi.qp.df,'qp20012016.rds')
# ng
# -31.645194, 146.641889
year.in <- 2001:2016
lat <- -31.645194
lon <- 146.641889
evi.ng.df <- get.evi.coords.ns.func(year.in,lat = lat ,lon=lon)
saveRDS(evi.ng.df,'ng20012016.rds')
# DP
# -36.271964, 146.309585
year.in <- 2001:2016
lat <- -36.271964
lon <- 146.309585
evi.dp.df <- get.evi.coords.ns.func(year.in,lat = lat ,lon=lon)
saveRDS(evi.dp.df,'dp20012016.rds')
# make plot####
# Plot the cached EVI series and overlay the model output (LAI) for 2013-14.
evi.ym.df <- readRDS('ym20012016.rds')
plot(evi~Date,data = evi.ym.df,type='b',pch=16,col='grey80',ylab='EVI')
plot(evi~Date,data = evi.ym.df[evi.ym.df$year %in% c('2013','2014'),],type='b',pch=16,col='grey',ylab='EVI')
huf.df <- read.csv('outPutHufken.csv')
pdf('evi_model.pdf',width = 8,height = 8*0.618)
par(mar=c(3,5,1,1))
par(bg = rgb(240/255,241/255,211/255))
plot(evi~Date,data = evi.ym.df[evi.ym.df$year %in% c('2013','2014'),],type='b',
     lwd=3,pch=16,col='grey80',
     xlim=c(as.Date('2013-1-1'),as.Date('2014-12-31')),
     xlab='',ylab='EVI')
par(new=T)
plot(huf.df$lai,type='s',ann=F,axes=F)
# Fixed: the original call had a stray empty argument ("lwd=2,,col=...").
legend('topright',legend = c('MODIS','MODEL'),lty='solid',lwd=2,col=c('grey','black'),bty='n')
dev.off()
# One panel per year, 2011-2016.
par(mfrow=c(3,2))
for (i in 2011:2016) {
  plot(evi~Date,data = evi.ym.df[evi.ym.df$year %in% i,],type='b',pch=16,col='grey',ylab='EVI')
  legend('top',legend = i,bty='n')
}
# get watson evi####
# Extract and cache EVI for every site listed in watson_site.csv.
# Expected columns (by position): 1 = site name, 2 = latitude, 3 = longitude.
watson.sites <- read.csv('watson_site.csv')
save.evi.sites.func <- function(row.watson){
  # row.watson: one row of watson.sites, as a character vector (from apply).
  year.in <- 2001:2016
  tmp.df <- get.evi.coords.ns.func(year.in,lat = as.numeric(row.watson[2]),lon=as.numeric(row.watson[3]))
  saveRDS(tmp.df,sprintf('cache/evi.%s20012016.rds',row.watson[1]))
}
# apply() over rows coerces each row to character; the function converts
# lat/lon back to numeric before use.
apply(watson.sites,1,save.evi.sites.func)
|
05191368240b244644dca5f8abd67058485b6ba6
|
a7e3f0739f609ca7d81468a89348f55633c22d44
|
/src/mgqueue/README.R
|
33de564fac9f2695a9df7c4cd5deec799bd3c5cf
|
[] |
no_license
|
kkaloudis/approximate-bayesian-computation
|
1d3b0c752a691ac687657d92f12f43dc14fad333
|
6fa28c285661cbb22cd006940f8b86a71f5a7254
|
refs/heads/master
| 2023-03-18T14:29:28.375220
| 2020-05-28T17:45:16
| 2020-05-28T17:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
README.R
|
Reproduce results for the queueing model in Section 5.3 of the article.
===
queue_wsmc_marginal_intermediate.R: loads the data set (RData file in the folder)
and approximates the WABC posterior based on the Wasserstein distance between the
marginal distributions of the synthetic and observed data sets.
queue_pmmh_intermediate.R: loads results from above script, uses them to tune the parameters
of a PMMH algorithm to target the exact posterior distribution, and runs PMMH.
queue_abctools.R: runs semi-automatic ABC using abctools package
(install.packages("abctools"))
queue_plots_compare.R: loads results from above scripts,
and creates the three plots in Figure 8 (a,b,c).
|
2a5257caf680e8dfc74ce77649f3048d6a4789d4
|
410af461636b0463e12703fd35cd966d6d8d9e27
|
/R/stratifier.R
|
71de39420810dd93ba5d0b73da7b063e7d542dd4
|
[] |
no_license
|
Generalizer/thegeneralizer
|
a8b354999c178225505f0f02065586e9d1670841
|
1921658e94b99b07188ec0a1c308ae2ec504972f
|
refs/heads/master
| 2022-11-12T08:49:17.682394
| 2020-07-01T18:54:25
| 2020-07-01T18:54:25
| 210,443,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,848
|
r
|
stratifier.R
|
#'Stratify a population data frame.
#'
#'This function is designed to receive a data frame containing information about
#'an inference population.
#'
#'...
#'
#'To complete the generalization process, the user should run this function on
#'their desired inference population data frame and save the output to an object.
#'The resulting object, referred to as \code{solution} in the package documentation,
#'should then be provided as input to the \code{mean_table()} and \code{schools_table()}
#'functions.
#'
#'@param data A data frame containing information about schools in your
#'  inference population. Must contain a column of ID numbers identifying each
#'  school, along with columns of stratifying variables.
#'@param metric A character string specifying the metric to be used in
#'  calculating distances for observations. The default is \code{gower}; other
#'  options are \code{euclidean} and \code{manhattan}.
#'@param clusters Number of strata (clusters) to divide the population into.
#'  Required when \code{guided = FALSE}; ignored in guided mode, where the
#'  user is prompted to choose a value between 4 and 8.
#'@param autosave Logical. In non-guided mode, gates the confirmation message
#'  and invisible return. NOTE(review): the assignment to
#'  \code{generalizer_output} in the global environment happens regardless of
#'  this flag -- confirm whether that is intended.
#'@param guided Logical. If \code{TRUE} (default), run interactively with
#'  prompts and menus; if \code{FALSE}, run non-interactively, requiring
#'  \code{clusters} and \code{idnum} to be supplied.
#'@param idnum Character string naming the ID column of \code{data}. Required
#'  when \code{guided = FALSE}; prompted for in guided mode.
#'@return A list. The first element contains the raw results of stratifying the
#'  data into the user-selected number of clusters. The second element contains
#'  several lists, each ranking the observations within clusters.
#'@seealso \url{http://thegeneralizer.org/}, also add other resources
#' @examples
#' \dontrun{
#' stratifier(x)
#' }
stratifier <- function(data, metric = "gower", clusters = NULL,
                       autosave = TRUE, guided = TRUE, idnum = NULL){
  # ---- Guided (interactive) mode: prompt the user at every decision point ----
  if(guided == TRUE){
    cat("Your chosen inference population is the '", deparse(substitute(data)),
        "' dataset.", sep="")
    # Prompt (recursively) until the user names a column that exists in `data`.
    read_id <- function(){
      cat("\n")
      n <- readline(prompt = "Enter the name of the ID Variable in your dataset: ")
      if(!n %in% names(data))
      {
        cat("We could not find that variable. Please make sure your dataset contains an ID variable.",
            "If necessary, press 'esc' to leave stratifier()")
        return(read_id())
      }
      return(n)
    }
    idnum <- read_id()
    cat("\n\nIf you want to adjust or restrict your inference population (e.g., if you are interested in only one location, etc.), make sure that you have altered the data frame appropriately. \n")
    # }
    filterhelp <- menu(choices = c("Ready to proceed", "Need help filtering"),
                       title = cat("If you need to alter the data frame, enter 0 to exit; you can use " %+% blue$bold("dplyr::filter()") %+% " or " %+% blue$bold("set_percentile_limits()") %+% " and return.\n"))
    if(filterhelp == 2){
      inference_help()
      return()
    }
    # Split the ID column(s) off from the stratifying data.
    id <- data %>% select(idnum)
    data <- data %>% select(-idnum)
    # Select stratifying variables --------------------------------------------
    cat("\nYou're now ready to select your stratification variables. The following are the variables available in your dataset. \nDo note that the stratifier function will only accept continuous and binary variables.")
    var_select <- function(){
      names <- names(data)
      n <- select.list(choices = names,
                       title = cat("\nWhich key variables do you think may explain variation in your treatment effect?",
                                   "Typically, studies include up to 10 variables for stratification.\n"),
                       graphics = FALSE, multiple = TRUE)
      return(n)
    }
    variables <- var_select()
    if(length(variables) >= 1){
      data <- data %>%
        select(variables)
    }else{
      stop("You have to select some stratifying variables.")
    }
    # Verifying the variables
    if(menu(choices=c("Yes", "No"),
            title=cat("\nYou have selected the following stratifying variables: ",
                      paste(colnames(data), collapse=', '), paste(". \nIs this correct?\n"), sep="")) == 1){
    }else{
      stop("Make sure that you have selected the stratifying variables you want to use.")
    }
    # Distribution and summary info about stratifying variables -----------
    # summarize_all() suffixes each stat's columns with fn1..fn4, which are
    # pulled apart below to rebuild a stat-by-variable table.
    cat("\nHere are summary statistics and histograms of each of your stratifying variables.\n\n")
    sumstats <- data %>%
      summarize_all(list(base::mean, stats::sd, base::min, base::max), na.rm=TRUE)
    means <- sumstats %>% select(contains("fn1"))
    sds <- sumstats %>% select(contains("fn2"))
    mins <- sumstats %>% select(contains("fn3"))
    maxs <- sumstats %>% select(contains("fn4"))
    colnames(means) <- colnames(data); colnames(sds) <- colnames(data)
    colnames(mins) <- colnames(data); colnames(maxs) <- colnames(data)
    sumstats_table <- data.frame(rbind(means, sds, mins, maxs))
    row.names(sumstats_table) <- c("Mean", "Standard Deviation", "Minimum Value", "Maximum Value")
    print(sumstats_table, digits = 4)
    # One histogram per stratifying variable; par(ask = TRUE) pauses between plots.
    suppressMessages(
      meltedx <- data %>%
        melt(variable.name = 'variable')
    )
    suppressWarnings(
      suppressMessages(
        for(i in 1:(length(levels(meltedx$variable)))){
          df <- meltedx %>%
            dplyr::filter(as.numeric(meltedx$variable) == i)
          hist <- ggplot(data = df, aes(value)) +
            geom_histogram(bins = 30) +
            theme_base() +
            labs(title = paste("Histogram of", levels(meltedx$variable)[i]))
          print(hist)
          par(ask = TRUE)
        }
      )
    )
    par(ask = FALSE)
    if(menu(choices = c("Yes", "No"), title = cat("\nDo you wish to proceed?")) == 1){
    }else{
      stop("You have stopped the stratification process.")
    }
    # Clustering ---------------------------------------------------------
    # menu() returns the 1-based index of the chosen item; + 3 maps 1..5 to 4..8.
    clusterchoice <- (menu(choices = c(4, 5, 6, 7, 8),
                           title = cat("Choose a number of strata to divide your population into.",
                                       "\n\nTypically, the more strata, the better.",
                                       "However, increasing\nthe number of strata uses more resources,",
                                       "because you must sample a given \nnumber of observations from each stratum.",
                                       "\n\nTherefore, choose a larger number if possible, and only if you have the",
                                       "resources to accommodate it. Otherwise, choose a smaller number.")) + 3)
    if(clusterchoice < 4 | clusterchoice > 8){
      stop("You should choose a number of clusters between 4 and 8.")
    }
    cat("This might take a little while. Please bear with us.")
    # Kmeans breaks if there are ANY missing values in the distance matrix.
    # It seems that, although the gower metric is pretty good at handling missing data,
    # if there is a TON of missing data, it will still fail and leave NAs in distance.
    # So this next line says to automatically use multiple imputation (mice) to fill in missing values.
    # Sometimes it may not be necessary at all, but some variables have a lot of missing data.
    suppressMessages(
      data <- mice::complete(mice(data, print = FALSE, m = 1), 1)
    )
    cat("\n1: Imputed missing data using mice package.")
    suppressWarnings(distance <- daisy(data, metric=metric))
    cat("\n2: Calculated distance matrix.")
    solution <- KMeans_rcpp(as.matrix(distance), clusters=clusterchoice, verbose = TRUE)
    # Reattaching ID variable -------------------------------------------------
    x2 <- data.frame(id, data, clusterID = solution$clusters)
    sortedschools <- list(NULL)
    for(i in 1:clusterchoice){
      dat3 <- x2 %>%
        dplyr::filter(clusterID == i)
      idvar <- dat3 %>% select(idnum)
      dat4 <- dat3 %>% select(-c(idnum, clusterID))
      mu <- moment(dat4, order=1, central=FALSE) # population mean of stratifying vars
      v <- var(dat4)
      a <- diag(v)
      # Zero variances would make the covariance matrix singular for
      # mahalanobis(); replace them with a tiny positive constant.
      if(any(a == 0)){ a[which(a == 0)] <- 0.00000001 }
      cov.dat <- diag(a)
      ma.s <- mahalanobis(dat4,mu,cov.dat)
      dat4 <- data.frame(idvar, dat4, distance = ma.s, clusterID = dat3$clusterID)
      sortedschools[[i]] <- dat4 %>% # Produces a list of data frames, one per stratum, sorted by
        # distance (so the top N schools in each data frame are the "best," etc.)
        arrange(distance) %>%
        select(idnum)
    }
    cat(blue$bold("Congratulations, you have successfully grouped your data into", clusterchoice,
                  "strata! \nNext, run mean_table(generalizer_output) and recruitment(generalizer_output)."))
    # NOTE(review): `<<-` writes into the caller's global environment as a side
    # effect; downstream functions appear to rely on this name existing.
    generalizer_output <<- list(solution, sortedschools, data=data, iddata=x2, idvar=idnum)
    return(invisible(generalizer_output))
  }
  # ---- Non-guided (programmatic) mode: same pipeline, no prompts, plus a
  # summary table and heat map of the resulting strata ----
  if(guided == FALSE){
    if(is.null(clusters)){
      stop("You should choose a number of clusters to stratify your data into.")
    }
    if(is.null(idnum)){
      stop("Need to specify an identifying column or columns.")
    }
    if(!idnum %in% names(data)){
      stop("We could not find that variable. Please make sure your dataset has a variable containing IDs of your data.")
    }
    # Selecting the ID column(s)
    id <- data %>% select(idnum)
    # From now on the id column(s) is separated from the rest of the data frame, they're stored as "id".
    # "idnum" is a vector of the id column(s).
    data <- data %>% select(-idnum)
    # Select stratifying variables --------------------------------------------
    cat("Your ID Variable is ", blue(idnum), ".")
    cat("\nYou have chosen to stratify your data into ", blue(clusters), " clusters using",
        "the following variables: \n\n")
    cat(paste(names(data),collapse=", "))
    # Clustering ---------------------------------------------------------
    cat("This might take a little while. Please bear with us.")
    # Impute missing values first; NAs in the distance matrix break k-means.
    suppressMessages(
      data <- mice::complete(mice(data, print = FALSE, m = 1), 1)
    )
    cat("\n1: Imputed missing data using mice package.")
    suppressWarnings(distance <- daisy(data, metric=metric))
    cat("\n2: Calculated distance matrix.")
    solution <- KMeans_rcpp(as.matrix(distance), clusters=clusters, verbose = TRUE)
    # Reattaching ID variable -------------------------------------------------
    x2 <- data.frame(id, data, clusterID = solution$clusters)
    sortedschools <- list(NULL)
    for(i in 1:clusters){
      dat3 <- x2 %>%
        dplyr::filter(clusterID == i)
      idvar <- dat3 %>% select(idnum)
      dat4 <- dat3 %>% select(-c(idnum, clusterID))
      mu <- moment(dat4, order=1, central=FALSE) # population mean of stratifying vars
      v <- var(dat4)
      a <- diag(v)
      # Guard against singular covariance for mahalanobis().
      if(any(a == 0)){ a[which(a == 0)] <- 0.00000001 }
      cov.dat <- diag(a)
      ma.s <- mahalanobis(dat4,mu,cov.dat)
      dat4 <- data.frame(idvar, dat4, distance = ma.s, clusterID = dat3$clusterID)
      sortedschools[[i]] <- dat4 %>% # Produces a list of data frames, one per stratum, sorted by
        # distance (so the top N schools in each data frame are the "best," etc.)
        arrange(distance) %>%
        select(idnum)
    }
    generalizer_output <<- list(solution, sortedschools, data=data, iddata=x2, idvar=idnum)
    readline(prompt = "Press [enter] to view the results")
    # Heatmap generation -------------------------------------------
    n_clusters <- max(solution$clusters)
    cat("\n\nYou have specified ")
    cat(bold (n_clusters))
    cat(" strata, which explains ")
    cat(bold(100*round(solution$between.SS_DIV_total.SS, 4), "%"))
    cat(" of the total variation in the population")
    cat("\n\nThe following table presents the average value (mean) for each covariate for each stratum.\nThe first row, 'All,' presents the average values for the entire inference population. \nThe last column, '# of Schools,' lists the total number of schools in the inference population \nthat fall within each stratum.\n\n")
    data <- data.frame(data, clusters=as.character(solution$clusters))
    # Per-stratum covariate means ...
    simtab_pop <- data %>%
      dplyr::group_by(clusters) %>%
      dplyr::summarize_if(is.numeric, base::mean) %>%
      select(-clusters)
    # ... and per-stratum school counts, with the population total prepended.
    num_schools <- data %>%
      group_by(clusters) %>%
      summarize(count = n()) %>%
      mutate("Number of Schools" = count) %>%
      select(-c(clusters, count)) %>%
      add_row("Number of Schools" = length(solution$clusters), .before=1)
    Clusters <- "Population"
    for(i in 2:(n_clusters+1)){
      Clusters[i] <- paste("Stratum", (i - 1))
    }
    simtab_m <- data %>%
      select(-clusters) %>%
      summarize_all(base::mean)
    simtab_sd <- data %>% select(-clusters) %>% summarize_all(sd)
    simtab_sd_pop <- data %>% group_by(clusters) %>% summarize_if(is.numeric, sd) %>%
      mutate(clusters = factor(clusters)) %>% select(-clusters)
    meantab <- bind_rows(simtab_m, simtab_pop)
    sdtab <- bind_rows(simtab_sd, simtab_sd_pop)
    # Interleave mean and SD columns: odd columns = means, even columns = SDs.
    final_table <- data.frame(matrix(NA, ncol=(ncol(meantab)*2), nrow=(nrow(meantab))))
    odd_vals <- seq(1, ncol(final_table), by=2)
    even_vals <- seq(2, ncol(final_table), by=2)
    final_table[,odd_vals] <- meantab
    final_table[,even_vals] <- sdtab
    final_table <- data.frame(Clusters, final_table, num_schools)
    colnames(final_table)[(even_vals+1)] <- "SD"
    colnames(final_table)[(odd_vals+1)] <- colnames(simtab_m)
    colnames(final_table)[ncol(final_table)] <- "# of Schools"
    print(final_table, digits = 4)
    #Heatmap function
    mean <- simtab_m %>%
      tidyr::gather(key = Stratifying_Variable, value = Pop_Mean)
    sd_heat <- sdtab %>%
      mutate(Clusters = Clusters) %>%
      gather(key = Stratifying_Variable, value = SD, - Clusters)
    schools <- num_schools %>%
      mutate(Clusters = Clusters) %>%
      rename(Schools = `Number of Schools`)
    mean_heat <- meantab %>%
      mutate(Clusters = Clusters) %>%
      gather(key = Stratifying_Variable, value = Mean, - Clusters)
    # Deviation = relative difference from the population mean, clamped to
    # +/- 0.7 so extreme strata don't wash out the fill scale.
    heatdata <- mean_heat %>%
      left_join(sd_heat, by = c("Clusters", "Stratifying_Variable")) %>%
      left_join(mean, by = "Stratifying_Variable") %>%
      left_join(schools, by = "Clusters") %>%
      mutate(Deviation = case_when((Mean - Pop_Mean)/Pop_Mean >= 0.7 ~ 0.7,
                                   (Mean - Pop_Mean)/Pop_Mean <= -0.7 ~ -0.7,
                                   TRUE ~ (Mean - Pop_Mean)/Pop_Mean)) %>%
      mutate(Clusters = ifelse(Clusters == "Population", "Total", Clusters))
    #Preserve levels
    heatdata$Stratifying_Variable <- factor(heatdata$Stratifying_Variable,
                                            levels = rev(unique(heatdata$Stratifying_Variable)))
    #Heatmap
    heat <- ggplot(data = heatdata) +
      geom_tile(aes(x = Clusters, y = Stratifying_Variable, fill = Deviation), width = 0.95) +
      geom_text(aes(x = Clusters, y = (ncol(final_table)/2 - 0.15), label = paste(Schools, "\nschools")),
                size = 3.4) +
      geom_label(aes(x = Clusters, y = Stratifying_Variable,
                     label = paste0(round(Mean, 1), "\n(", round(SD, 1), ")")),
                 colour = "black", alpha = 0.7, size = ifelse(ncol(final_table)/2 > 7, 2, 3.5)) +
      geom_hline(yintercept = seq(1.5,ncol(final_table) - 2 ,1), linetype = "dotted", colour = "white") +
      scale_fill_gradientn(name = NULL,
                           breaks=c(-0.5, 0, 0.5), labels = c("50% \nBelow Mean","Population\nMean","50% \nAbove Mean"),
                           colours=c("#990000","#CC0000", "white", "#3D85C6", "#0B5294"),
                           limits = c(-0.7, 0.7)) +
      scale_x_discrete(position = "top", expand = c(0, 0), labels = c(Clusters[-1], "Population")) +
      expand_limits(y = c(0, ncol(final_table)/2 + 0.1)) +
      labs(y = NULL, x = NULL) +
      theme(panel.background = element_blank(),
            axis.ticks = element_blank(),
            axis.text = element_text(size = 10, colour = "grey15"),
            legend.key.height = unit(1, "cm"),
            legend.text = element_text(size = 10),
            legend.position = "right")
    print(heat)
    cat("\nThis heat map compares the average values of the covariates in \nthe strata to the average values in the population.",
        "Strata with \nhigher average values are blue; strata with lower average values \nare red.",
        "Strata whose average values are close to that of the \npopulation are white. The number in each tile is the actual mean value. \n\n")
    # NOTE(review): generalizer_output was already assigned above, so
    # autosave = FALSE suppresses only this message and the return value.
    if(autosave){
      cat(blue("We've saved your stratification output as"), blue$bold("'generalizer_output'"),
          blue("in the Global Environment. To generate recruitment lists, run recruitment_list(generalizer_output)."))
      return(invisible(generalizer_output))
    }
  }
}
|
982dfb6c33e8a2df558b6fc39a84c77a2b53c795
|
df3b3e2cff3f789a3e91e561d7e3121603d51546
|
/R/ShroutFleissICC1k.R
|
4418f666cb3c4317ee54632019c10501092501db
|
[] |
no_license
|
humanfactors/superb
|
39218b3458d8d8d834b844412a22b9a4bf5a8746
|
cdb7a903d84c2a83d4a4c7c94a97a2d4bc2221a4
|
refs/heads/master
| 2023-07-16T21:47:00.530298
| 2021-09-04T16:52:48
| 2021-09-04T16:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,969
|
r
|
ShroutFleissICC1k.R
|
######################################################################################
#' @title Shrout and Fleiss intra-class correlation functions
#'
#' @aliases ShroutFleissICC1 ShroutFleissICC11 ShroutFleissICC1k
#'
#' @md
#'
#' @description The functions ShroutFleissICC1, ShroutFleissICC11
#' and ShroutFleissICC1k computes the intra-class correlation ICC
#' for a given data frame containing repeated measures in columns cols
#' when the measures are in distinct clusters, identified in column clustercol.
#' See \insertCite{sf79}{superb}.
#'
#' @param dta A data frame containing within-subject measures, one participant per line;
#' @param clustercol is the column index where cluster belonging are given;
#' @param cols A vector indicating the columns containing the measures.
#'
#' @return ICC the intra-class measure of association.
#'
#' @examples
#' # creates a small data frames with 4 subject's scores for 5 measures:
#' dta <- data.frame(cbind(
#'     clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
#'     col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9)
#' ))
#'
#' ShroutFleissICC1(dta, 1, 2)
#' # 0.434343434
#' ShroutFleissICC11(dta[, 1], dta[,2])
#' # 0.434343434
#'
#' dta2 <- data.frame(cbind(
#'     clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
#'     col1 <- c(1, 3, 3, 5, 3, 4, 7, 7, 4, 7, 8, 8),
#'     col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9),
#'     col1 <- c(3, 5, 5, 7, 5, 6, 9, 9, 6, 9, 10, 10)
#' ))
#'
#' ShroutFleissICC1(dta2, 1, 2:4)
#' # 0.7543859649
#' ShroutFleissICC1k(dta2[, 1], dta2[,2:4])
#' # 0.7543859649
#'
#' @references
#' \insertAllCited{}
#'
#' @export ShroutFleissICC1
#' @export ShroutFleissICC11
#' @export ShroutFleissICC1k
#
ShroutFleissICC1 <- function(dta, clustercol, cols) {
    # Coerce the cluster column to a plain numeric vector; a factor column
    # is mapped back to the numeric values of its levels.
    grouping <- dta[, clustercol]
    if (is.factor(grouping)) {
        grp <- as.numeric(levels(grouping))[grouping]
    } else {
        grp <- as.numeric(as.vector(as.matrix(dta[clustercol])))
    }
    # A single measurement column gives ICC(1,1); several columns give
    # ICC(1,k), computed on their row-wise mean.
    if (length(cols) == 1) {
        ShroutFleissICC11(grp, as.vector(as.matrix(dta[cols])))
    } else {
        ShroutFleissICC1k(grp, as.matrix(dta[cols]))
    }
}
ShroutFleissICC11 <- function(clusters, scores) {
    # The Shrout and Fleiss ICC(1,1): the intra-class correlation of a single
    # measurement under a one-way random-effects ANOVA.
    # `clusters` and `scores` MUST be vectors of equal length.
    #
    # Total sum of squares around the grand mean.
    SStotal <- sum((scores - mean(scores))^2)
    # Within-cluster (error) sum of squares. Base tapply() replaces the former
    # plyr::ddply() call; the result is identical (a per-cluster SS, summed).
    SSerror <- sum(tapply(scores, clusters, function(s) sum((s - mean(s))^2)))
    SSeffect <- SStotal - SSerror
    dleffect <- length(unique(clusters)) - 1
    dlerror  <- length(scores) - length(unique(clusters))
    MSeffect <- SSeffect / dleffect
    MSerror  <- SSerror / dlerror
    # Average cluster size; the formula assumes balanced clusters.
    n <- length(scores) / length(unique(clusters))
    (MSeffect - MSerror) / (MSeffect + (n - 1) * MSerror)
}
ShroutFleissICC1k <- function(clusters, multiplescores) {
    # The Shrout and Fleiss ICC(1,k): the intra-class correlation of the mean
    # of k measurements; it assumes that m == k.
    # `clusters` MUST be a vector; `multiplescores` a matrix (or data frame)
    # with one column per measurement.
    #
    # The analysis is run on the per-row mean of the k measurements.
    score <- rowMeans(multiplescores)
    SStotal <- sum((score - mean(score))^2)
    # Within-cluster (error) sum of squares. Base tapply() replaces the former
    # plyr::ddply() call; the result is identical (a per-cluster SS, summed).
    SSerror <- sum(tapply(score, clusters, function(s) sum((s - mean(s))^2)))
    SSeffect <- SStotal - SSerror
    dleffect <- length(unique(clusters)) - 1
    dlerror  <- length(clusters) - length(unique(clusters))
    MSeffect <- SSeffect / dleffect
    MSerror  <- SSerror / dlerror
    (MSeffect - MSerror) / MSeffect
}
######################################################################
## subsidiary functions
######################################################################
# Sum of squared deviations from the mean, computed on the second column
# of `v` (a two-column matrix or data frame of (cluster, score) pairs).
SS <- function(v) {
    scores <- v[, 2]
    sum((scores - mean(scores))^2)
}
# Compute the correction factor as per Cousineau & Laurencelle, 2016,
# Psych. Methods. `paramvector` packs (r, k, n_1, ..., n_k): the ICC, the
# number of clusters, and the per-cluster sample sizes. (The cluster count
# in slot 2 is not needed by the formula; the previous version extracted it
# into an unused local.)
lambda <- function(paramvector) {
    r <- paramvector[1]           # i.e. ICC
    r <- max(-0.2, r)             # clamp strongly negative ICC estimates
    ns <- paramvector[c(-1, -2)]  # drop r and k, leaving the cluster sizes
    M <- sum(ns^2)
    N <- sum(ns)
    nbar <- mean(ns)
    return(sqrt((1 + (M/N - 1) * r) / (1 - (nbar - 1)/(N - 1) * r)))
}
# Extract the number of clusters k and the per-cluster sample sizes ns from
# column `clustercol` of `dta`; returned concatenated as c(k, n_1, ..., n_k),
# the layout expected by lambda() above.
getKNs<-function(dta, clustercol) {
    k <- length(unique(dta[,clustercol]))
    # ddply with .fun = dim produces one row per cluster whose V1 column is
    # nrow of that cluster's sub-data-frame, i.e. the cluster's size.
    # NOTE(review): ns follows plyr's group ordering -- confirm callers do not
    # assume the order of appearance in `dta`.
    ns <- plyr::ddply(as.data.frame(dta[clustercol]), .fun = dim, .variables = clustercol)$V1
    return( c(k, ns) )
}
|
e76d1a9d8f8df781dda5f0efde715b86b39f2e8f
|
760ef5fb8162d11468999fe7656269dcafe1a12f
|
/R/swlist.R
|
1cadbb66bbb7f59067bc5ccab758b8f8787e9b98
|
[] |
no_license
|
FredHutch/swift
|
2779abf143c410b4c90528e84bf30be28b073082
|
7a791238b2455362f953066122b7f0b2c3b46862
|
refs/heads/master
| 2021-04-26T23:27:30.577739
| 2015-01-15T05:52:45
| 2015-01-15T05:52:45
| 27,573,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,982
|
r
|
swlist.R
|
# List Swift containers (when `container` is NULL) or the objects inside one
# container, paging through the REST API until the listing is complete.
# `format` selects the columns returned: "abbrv" -> names only, "short" ->
# human-readable size + last-modified + name, "long" -> raw bytes + hash too.
# `prefix`/`delimiter` are passed through as Swift listing filters.
swlist <-
    function(container=NULL, format=c("short", "abbrv", "long"), ...,
             prefix=NULL, delimiter=NULL)
{
    stopifnot(missing(container) || .isString(container))
    format <- match.arg(format)
    stopifnot(is.null(prefix) || .isString(prefix))
    stopifnot(is.null(delimiter) || .isString(delimiter, nchar=1L))
    # `marker` is the pagination cursor: the last name seen on the previous page.
    marker <- NULL
    curl <- RCurl::getCurlHandle()
    hdr <- .swauth(curl)
    # Pages are accumulated in an environment keyed by page number `ith`.
    result <- new.env(parent=emptyenv())
    ith <- 0L
    repeat {
        url <- .RESTurl(hdr[["X-Storage-Url"]], container, format="json",
                        prefix=prefix, delimiter=delimiter, marker=marker, ...)
        contents <- .RESTmetadata(curl, hdr, url)
        # NOTE(review): the loop exits BEFORE processing `contents` on the
        # "complete" page -- confirm that a complete response carries no
        # entries, otherwise the final page is silently dropped.
        if (identical(attr(contents, "status"), "complete"))
            break
        marker <- attr(contents, "marker")
        # Container listings: hide ".trash*" containers.
        # Object listings: keep only full 5-field object records (pseudo-dir
        # entries produced by `delimiter` have fewer fields).
        idx <- if (is.null(container)) {
            !grepl("^\\.trash.*", vapply(contents, "[[", character(1), "name"))
        } else {
            vapply(contents, length, integer(1)) == 5L
        }
        contents <- contents[idx]
        ith <- ith + 1L
        bytes <- sapply(contents, "[[", "bytes")
        last_modified <- .NULLas(sapply(contents, "[[", "last_modified"))
        name <- .NULLas(sapply(contents, "[[", "name"))
        result[[as.character(ith)]] <- switch(format, abbrv={
            data.frame(name=name, stringsAsFactors=FALSE)
        }, short={
            # NOTE(review): utils:::format.object_size is an unexported
            # internal -- fragile across R versions.
            FUN <- utils:::format.object_size
            size <- sapply(bytes, FUN, "auto")
            data.frame(size=size, last_modified=last_modified, name=name,
                       stringsAsFactors=FALSE)
        }, long={
            hash <- .NULLas(sapply(contents, "[[", "hash"))
            data.frame(bytes=bytes, last_modified=last_modified, hash=hash,
                       name=name, stringsAsFactors=FALSE)
        })
    }
    # Reassemble pages in numeric page order (environment order is unsorted).
    df <- do.call(rbind, as.list(result)[as.character(seq_along(result))])
    rownames(df) <- NULL
    if (format == "abbrv")
        df$name
    else df
}
|
a6df03b6d0dc4a86f2a2b667554ef2bea091b9a9
|
6376f6678c494a6f87ddbe5d1d4afc8b2018600c
|
/R/mod_rural.R
|
01693e6350c8265f3ee0728f7828cec5d1663ac4
|
[
"MIT"
] |
permissive
|
databrew/hefpi
|
d6a666dc87f6de8a09781b4cdb56692320fe15c9
|
16c2a877b6ef44a5e8aa456f4964d8b506b7e0cf
|
refs/heads/master
| 2023-04-06T03:06:03.761124
| 2023-03-23T23:02:39
| 2023-03-23T23:02:39
| 242,064,106
| 0
| 1
|
NOASSERTION
| 2023-03-23T23:02:41
| 2020-02-21T05:45:43
|
R
|
UTF-8
|
R
| false
| false
| 16,139
|
r
|
mod_rural.R
|
# Module: urban/rural comparison (UI half)
#' @title mod_rural.R
#' @description UI half of the urban-vs-rural Shiny module: a plotly bar chart
#'   on the left and country / year / indicator controls on the right.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @keywords internal
#' @export
mod_rural_ui <- function(id){
  # let leaflet know that selections should persist
  # options(persistent = TRUE)
  ns <- shiny::NS(id)
  shiny::fluidRow(
    # Left column: dynamic title + the bar chart itself.
    shiny::column(8,
                  shiny::uiOutput(ns('map_title_ui')),
                  plotly::plotlyOutput(
                    ns('recent_sub_mean_plot'), height = 550
                  )),
    # Right column: inputs. Year and indicator choices start empty and are
    # populated server-side via updateSelectInput().
    shiny::column(4,
                  # NOTE(review): p() is unprefixed -- assumes shiny is attached.
                  p('Country'),
                  shiny::selectInput(ns('country'),
                                     label = NULL,
                                     choices = unique(hefpi::hefpi_df$country), selected = 'Morocco'),
                  # sliderInput(ns('date_range'),
                  #             label = 'Year',
                  #             min = 1982,
                  #             max = 2021,
                  #             value = c(1982, 2021),
                  #             step = 1,
                  #             sep = ''),
                  shiny::selectInput(ns('date_range'),
                                     label = 'Year',
                                     choices = NULL
                  ),
                  shiny::selectInput(ns('indicator'),
                                     label = 'Indicator',
                                     choices = NULL
                  ),
                  # uiOutput(ns('ind_ui')),
                  shiny::uiOutput(ns('axis_ui')),
                  shiny::downloadButton(ns("dl_plot"), label = 'Download image', class = 'btn-primary'),
                  shiny::downloadButton(ns("dl_data"), label = 'Download data', class = 'btn-primary')
    )
  )
}
# SERVER FOR MOST RECENT VALUE SUBNATIONAL MEAN
mod_rural_server <- function(input, output, session){
# # ---- UI output for region within country ---#
# output$ind_ui <- renderUI({
# #cn = 'India'
# #plot_years = c(1982, 2017)
# req(input$country)
#
# cn = input$country
# plot_years <- input$date_range
#
# hefpi::hefpi_sub_df
#
# ind <- hefpi::hefpi_df %>%
# as_tibble() %>%
# select(country, year, regioncode, indic, urb, rur) %>%
# filter(country == cn) %>%
# # filter(year == plot_years) %>%
# left_join(
# hefpi::indicators %>% select(good_or_bad, variable_name, indicator_short_name, unit_of_measure),
# by = c('indic' = 'variable_name')
# ) %>%
# select(indicator_short_name) %>%
# pull() %>%
# unique() %>%
# sort()
#
# indicator_intersect <- indicators_list
# indicator_intersect$`Financial Protection` <- intersect(indicators_list$`Financial Protection`, ind) %>% as.list()
# indicator_intersect$`Healthcare Coverage` <- intersect(indicators_list$`Healthcare Coverage`, ind) %>% as.list()
# indicator_intersect$`Health Outcomes` <- intersect(indicators_list$`Health Outcomes`, ind) %>% as.list()
#
# fluidPage(
# fluidRow(
# selectInput(inputId = session$ns('indicator'),
# label = 'Indicator',
# choices = indicator_intersect,
# selected = indicator_intersect[[1]])
#
# )
# )
#
#
# })
shiny::observeEvent(input$country, {
shiny::req(input$country)
cn = input$country
plot_years <- input$date_range
hefpi::hefpi_sub_df
ind <- hefpi::hefpi_df %>%
tidyr::as_tibble() %>%
dplyr::select(country, year, regioncode, indic, urb, rur) %>%
dplyr::filter(country == cn) %>%
# filter(year == plot_years) %>%
dplyr::left_join(
hefpi::indicators %>% dplyr::select(good_or_bad, variable_name, indicator_short_name, unit_of_measure),
by = c('indic' = 'variable_name')
) %>%
dplyr::select(indicator_short_name) %>%
dplyr::pull() %>%
unique() %>%
sort()
indicator_intersect <- hefpi::indicators_list_v2
indicator_intersect$`Financial Protection` <- dplyr::intersect(indicators_list$`Financial Protection`, ind) %>% as.list()
indicator_intersect$`Healthcare Coverage` <- dplyr::intersect(indicators_list$`Healthcare Coverage`, ind) %>% as.list()
indicator_intersect$`Health Outcomes` <- dplyr::intersect(indicators_list$`Health Outcomes`, ind) %>% as.list()
shiny::updateSelectInput(session,
inputId = "indicator",
choices = indicator_intersect,
selected = indicator_intersect[[1]])
})
shiny::observeEvent(input$indicator, {
shiny::req(input$country)
shiny::req(input$indicator)
ind_selected = input$indicator
measure_unit <- hefpi::indicators_dat_country %>%
dplyr::select(indicator_short_name, unit_of_measure) %>%
dplyr::filter(indicator_short_name == ind_selected) %>%
dplyr::distinct() %>%
dplyr::slice(1) %>%
dplyr::select(unit_of_measure) %>%
dplyr::pull()
if(!is.null(input$country)) {
indicator <- input$indicator
# indicator <- 'Diastolic blood pressure (mmHg)'
# region <- input$region
# region <- 'Europe & Central Asia'
cn <- input$country
# country_name <- 'Ukraine'
# get data
# TEMPORARILY COMMENT OUT CODE FOR FAKE DATA BELOW
pd <- hefpi::hefpi_df
years <- pd %>%
tidyr::as_tibble() %>%
dplyr::select(country, year, regioncode, indic, urb, rur) %>%
dplyr::filter(country == cn) %>%
dplyr::left_join(
hefpi::indicators %>% dplyr::select(good_or_bad, variable_name, indicator_short_name, unit_of_measure),
by = c('indic' = 'variable_name')
) %>%
dplyr::filter(indicator_short_name == indicator) %>%
dplyr::select(year) %>%
dplyr::pull() %>%
unique() %>%
sort(decreasing = TRUE)
shiny::updateSelectInput(session,
inputId = "date_range",
label = 'Year',
choices = years,
selected = years[1]
)
}
if(stringr::str_detect(measure_unit, '%')) {
output$axis_ui <- shiny::renderUI({
shiny::fluidPage(
shiny::fluidRow(
shiny::sliderInput(inputId = session$ns('axis'),
label = 'Y - Axis',
min = 0,
max = 100,
step = 1,
value = 100)
)
)
})
} else {
output$axis_ui <- shiny::renderUI({
shiny::fluidPage(
shiny::fluidRow(
NULL
)
)
})
}
})
# ----------- REACTIVE DATA ---------------#
hefpi_sub_df__reactive <- shiny::reactive({
shiny::req(input$country)
shiny::req(input$indicator)
shiny::req(input$date_range)
# cn = 'St. Lucia'
# indicator = 'BMI, adults (BMI)'
# year = c(2012)
#cn = 'India'
#plot_years = c(2019)
#indicator = "4+ antenatal care visits (%)"
#indicator = "Height, adults (Centimeter)"
#rn = rn[1]
# get inputs
# plot_years <- c(min(input$date_range):max(input$date_range))
plot_years <- input$date_range
indicator <- input$indicator
cn <- input$country
# rn <- input$region_name
# while map (generate from reactive object) is null, plot is null
if(any(is.null(indicator))){
return(NULL)
} else {
df <- hefpi::hefpi_df %>%
dplyr::filter(country == cn) %>%
tidyr::as_tibble() %>%
dplyr::select(country, year, regioncode, indic, urb, rur) %>%
# filter(country == cn) %>%
dplyr::left_join(
hefpi::indicators_dat_country %>% dplyr::select(good_or_bad, variable_name, indicator_short_name, unit_of_measure),
by = c('indic' = 'variable_name')
) %>%
dplyr::filter(indicator_short_name == indicator) %>%
dplyr::filter(year %in% plot_years) %>%
tidyr::pivot_longer(cols = c('urb', 'rur'), names_to = 'urb_rur') %>%
dplyr::group_by(urb_rur) %>%
dplyr::filter(year == max(year, na.rm = TRUE)) %>%
dplyr::reframe(value = first(value),
indic = indic,
year = year,
# region_name = rn,
#survey_list = survey_list,
indicator_short_name = indicator_short_name,
good_or_bad = good_or_bad,
unit_of_measure = unit_of_measure) %>%
tidyr::drop_na(value)
return(df)
}
})
# ---- PLOT FROM REACTIVE DATA ---- #
hefpi_sub_plot__reactive <- shiny::reactive({
shiny::req(hefpi_sub_df__reactive())
plot_years <- input$date_range
indicator <- input$indicator
cn <- input$country
# print('DF output')
# print(hefpi_sub_df__reactive())
if(is.null(hefpi_sub_df__reactive())){
NULL
} else {
df <- hefpi_sub_df__reactive()
# create null plot if data is empty
if(nrow(df)==0){
empty_plot <- function(title = NULL){
p <- plotly::plotly_empty(type = "scatter", mode = "markers") %>%
plotly::config(
displayModeBar = FALSE
) %>%
plotly::layout(
title = list(
text = title,
yref = "paper",
y = 0.5
)
)
return(p)
}
p <- empty_plot("No data available for the selected inputs")
} else {
# get data
unit_of_measure <- unique(df$unit_of_measure)
good_or_bad = unique(df$good_or_bad)
temp <- df
# get plot
if(good_or_bad == 'Good'){
bar_palette = 'Greens'
} else {
bar_palette = 'Reds'
}
# relevel factor for chart
temp$urb_rur <- factor(temp$urb_rur, levels = unique(temp$urb_rur)[order(temp$value, decreasing = TRUE)])
# get plot objects
plot_text <- paste(
"Indicator: ", indicator,' (',unit_of_measure,')',"<br>",
"Economy: ", as.character(temp$urb_rur),"<br>",
"Value: ", paste0(ifelse(unit_of_measure == '%', round(temp$value, digits = 2) * 100, round(temp$value, digits = 2)), ' (', unit_of_measure, ')'), "<br>",
# 'Value: ', round(temp$value, digits = 2),' (',unit_of_measure,')',"<br>",
# "Year: ", as.character(temp$year),"<br>",
sep="") %>%
lapply(htmltools::HTML)
y_axis_text = paste0(indicator)
# Create value_color vector, identical to value
temp$value_col <- temp$value
temp$urb_rur <- ifelse(temp$urb_rur == 'urb', 'Urban', 'Rural')
# the selected country gets a value of NA which the palette will make black.
# temp$value_col[temp$key == rn] <- NA
# add higlight functionality to plot
temp <- plotly::highlight_key(temp, key=~urb_rur)
if(length(df$urb_rur) > 5) {
gg <- ggplot2::ggplot(temp, ggplot2::aes(forcats::fct_rev(factor(urb_rur)), value, text = plot_text))
} else {
gg <- ggplot2::ggplot(temp, ggplot2::aes(urb_rur, value, text = plot_text))
}
if(nchar(y_axis_text) > 55) {
y_axis_text_split <- y_axis_text
y_axis_text <- as.character(str_glue('{substr(y_axis_text_split, 1, 55)}\n - {substr(y_axis_text_split, 56, nchar(y_axis_text_split))}'))
}
# If unit_of_measure is '%'
if(str_detect(unit_of_measure, '%')) {
p <- gg +
ggplot2::geom_bar(stat = 'identity', ggplot2::aes(fill = value_col)) +
ggplot2::scale_fill_distiller(palette = bar_palette, direction = 1) +
ggplot2::scale_y_continuous(limits = c(0, input$axis/100), labels = function(x) paste0(x*100)) +
# scale_y_continuous(labels = function(x) paste0(x*100)) +
ggplot2::labs(x = '',
y = y_axis_text) +
hefpi::theme_hefpi(grid_major_x=NA,
x_axis_angle = 45,
x_axis_line = NA,
axis_title_size = 0.51,
legend_position = 'none')
} else {
p <- gg +
ggplot2::geom_bar(stat = 'identity', aes(fill = value_col)) +
ggplot2::scale_fill_distiller(palette = bar_palette, direction = 1) +
ggplot2::labs(x='',
y = y_axis_text) +
hefpi::theme_hefpi(grid_major_x=NA,
x_axis_angle = 45,
x_axis_line = NA,
legend_position = 'none')
}
if(length(df$urb_rur) > 5) {
p <- p +
ggplot2::coord_flip()
}
return(p)
}
}
})
# ---- RENDER PLOT FROM REACTIVE DATA ---- #
output$recent_sub_mean_plot <- plotly::renderPlotly({
#cn = 'India'
#plot_years = c(1982, 2017)
#indicator = "4+ antenatal care visits (%)"
#rn = rn[1]
# get inputs
plot_years <- input$date_range
indicator <- input$indicator
cn <- input$country
shiny::req(hefpi_sub_plot__reactive())
# rn <- input$region_name
# while map (generate from reactive object) is null, plot is null
if(is.null(hefpi_sub_plot__reactive())){
NULL
} else {
p <- plotly::ggplotly(
plotly::ggplotly(
hefpi_sub_plot__reactive(),
tooltip = 'text')
) %>%
plotly::config(displayModeBar = T) %>%
plotly::highlight(on='plotly_hover',
off = 'plotly_doubleclick',
persistent = FALSE,
color = 'black',
opacityDim = 0.6) %>%
plotly::layout(xaxis = list(fixedrange = TRUE), yaxis = list(fixedrange = TRUE))
p
}
})
  # ---- DOWNLOAD PLOT IMAGE ---- #
  # Save the current reactive ggplot as a date-stamped 300 dpi PNG.
  output$dl_plot <- shiny::downloadHandler(
    filename = function() {
      paste0("barchart_", Sys.Date(), ".png")
    },
    content = function(file) {
      # Custom device so ggsave writes a high-resolution PNG in inches.
      device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 300, units = "in")
      ggplot2::ggsave(file, plot = hefpi_sub_plot__reactive(), device = device)
    }
  )
# ---- DOWNLOAD DATA FROM MAP ---- #
output$dl_data <- shiny::downloadHandler(
filename = function() {
paste0("most_recent_value_mean_regional_barchart_", Sys.Date(), ".csv")
},
content = function(file) {
# get data
hefpi_sub_df__reactive <- hefpi_sub_df__reactive()
if(is.null(hefpi_sub_df__reactive)){
NULL
} else {
if(is.na(hefpi_sub_df__reactive)){
temp <- data_frame()
write.csv(temp, file)
} else {
write.csv(hefpi_sub_df__reactive(), file)
}
}
}
)
  # Header row above the chart: a fixed "Urban-rural" label plus the
  # currently selected indicator name.
  output$map_title_ui <- shiny::renderUI({
    shiny::req(input$indicator)
    indicator_name <- input$indicator
    shiny::fluidPage(
      shiny::fluidRow(
        # str_glue() interpolates {indicator_name} into the raw HTML template.
        HTML(stringr::str_glue('
                    <div class="chart-header-labels-row">
                    <div class="chart-label"> Urban-rural </div>
                    <div class="chart-label"> {indicator_name} </div>
                     </div>
                     '))
      )
    )
  })
}
## To be copied in the UI
# mod_rural_ui("rural")
## To be copied in the server
# callModule(mod_rural_server, 'rural')
|
51380656434fea9b08371e07566096df6c802043
|
41220a1d459375277408dfd5c15530871e1b8585
|
/man/get_size.Rd
|
6f667a5a3485b65c549af21ee50cdda4a8be9e27
|
[
"MIT"
] |
permissive
|
emilyriederer/wigo
|
219380437072078436095d0541d79b5a40347d73
|
4ffccd3c65733745fe505a753e715a8f5ba26549
|
refs/heads/master
| 2020-11-30T20:46:33.119555
| 2020-01-08T12:47:46
| 2020-01-08T12:47:46
| 230,475,879
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 292
|
rd
|
get_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_size}
\alias{get_size}
\title{Format object size with different units based on size}
\usage{
get_size(x)
}
\description{
Format object size with different units based on size
}
\keyword{internal}
|
dcf518cb66cf58cb6b05bf18f204913378c3369a
|
76a61cd24a8e8f8b7a5c146538cc6dc7d93d975a
|
/r_dataset_titanic.R
|
192528b9c4ba6b76806ea024b1416b4b97ec5102
|
[] |
no_license
|
yuu-ito/yuu-ito.github.io
|
0534b313cca8756ef651c19ae78da7907b5c4387
|
b1c3bbb01788007667a5f20219af8173cff7c0b1
|
refs/heads/master
| 2021-01-01T06:11:46.990074
| 2014-06-18T13:57:01
| 2014-06-18T13:57:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 937
|
r
|
r_dataset_titanic.R
|
# refs
# - http://ww2.coastal.edu/kingw/statistics/R-tutorials/loglin.html
data(Titanic)
# ?Titanic
dimnames(Titanic)
require(graphics)
mosaicplot(Titanic, main = "Survival on the Titanic")
# Cross-tabulate sex (dim 2) by survival (dim 4).
margin.table(Titanic, c(2,4))
# Odds ratio of survival, female vs. male
(odds_male_survived <- 367/1364)
(odds_female_survived <- 344/126)
(odds_rate_F <-odds_female_survived/odds_male_survived) # females survive at roughly 10x the odds of males
# Likelihood ratio / risk ratio (female survival proportion over male)
(344/(344+126)) / (367/(367+1364))
t <- Titanic
t.df <- data.frame(t)
names(t.df) <- tolower(names(t.df))
require("reshape2")
# Reshape to one row per class/sex/age with Yes/No survival counts.
t.cast.df <- dcast(t.df,class+sex+age~survived,value.var="freq")
(t.cast.df)
str(t.cast.df)
# Logistic regression on grouped survival counts (main effects only).
t.glm <- glm(cbind(Yes,No)~class+sex+age, family=binomial, data=t.cast.df)
summary(t.glm)
# Variable selection with stepAIC, starting from all two-way interactions.
t.glm <- glm(cbind(Yes,No)~(.)^2, family=binomial, data=t.cast.df)
require("MASS")
stepAIC(t.glm)
|
65d75203cf321e8f7b8268d03a4d3b7da01210a4
|
0f669180ea51e8b804b5a2b776398e0e22a7d721
|
/scripts/interaction_figs.R
|
17585f30cd5fa798046db2e7a6f1c34f373ba10b
|
[
"MIT"
] |
permissive
|
fergusca/NLA_lk_hydro
|
e4ac8a171802d374685769976afad5b1df6d8b71
|
9a7090bc001d9052402468a600b8afd1e685defe
|
refs/heads/main
| 2023-06-19T04:59:23.166498
| 2021-07-15T18:37:11
| 2021-07-15T18:37:11
| 382,412,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,231
|
r
|
interaction_figs.R
|
#######################
## INTERACTION PLOTS
## EFFECT SIZE CURVE USING JOHN GELDHOF'S Spreadsheet
## Simplified biplot of VertDD ~ PHDI by Hydrap class
## Will combine for a conceptual figure showing interaction
##
## 7/9/21
#######################
remove(list=ls()) # Clear environment (NOTE(review): clearing the workspace in a script is discouraged)
library(dplyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(lattice)
library(cowplot)
library(GGally)
library(ggpubr)
library(tidyr)
library(FSA)
#############
## LOAD DATA
# CONUS ALL LAKES n = 1716
dat <- read.csv("~/NLA_hydro/NLA_hydro_driver/data_processed/conus_NLA_opt1_1716.csv")
# Drop leftover row-index columns written by earlier exports.
todrop<-names(dat)%in%c("X1","X1_1")
dat<-dat[!todrop]
## CREATE DATASET WITH CATEGORICAL VARIABLE INDICATING WHETHER LOW HYDRO-ALTERATION LAKE OR NOT
dat$hydro_dist <-dat$HydrAP
dat$hydro_dist <-as.factor(dat$hydro_dist)
str(dat$hydro_dist)
# Create subgroups: minimally hydro-altered lakes (HydrAP 0-2) vs. all others (3-7).
levels(dat$hydro_dist) <- list("Minimal" = c("0","1","2"), "CONUS" = c("3","4","5","6","7"))
# Lakes with missing HydrAP fall into the catch-all CONUS group.
dat<-dat %>% replace_na(list(hydro_dist = "CONUS"))
table(dat$hydro_dist)
#Minimal   CONUS
#    553    1163
#################
## CREATE HYDRAP BINS
## Follows bins from HydrAP paper (Ecological Indicators 2021)
dat$hydrap_bin <- dat$HydrAP
dat$hydrap_bin[dat$HydrAP <(3)]<- "Low"
dat$hydrap_bin[dat$HydrAP >=3 & dat$HydrAP <=5] <- "Moderate"
dat$hydrap_bin[dat$HydrAP > 5] <- "High"
# Bin PHDI values into drought, normal, and wet conditions
dat$PHDI_bin <- dat$PHDI
dat$PHDI_bin[dat$PHDI < (-0.5)] <- "drought"
dat$PHDI_bin[dat$PHDI >=(-0.5) & dat$PHDI <= 0.5] <- "normal"
dat$PHDI_bin[dat$PHDI > (0.5)] <- "wet"
table(dat$PHDI_bin)
## GRAND MEAN SCALED 30 yr normal precipitation
dat$Precip8110Ws_grd_sc <- scale(dat$Precip8110Ws, scale=T)
## GROUP MEAN SCALED 30 yr normal PRECIPITATION BY ECOREGION (scale = centers by mean and divides by std dev within an ecoregion group)
dat <- dat %>%
  group_by(ECOREG_rev) %>%
  mutate(Precip8110Ws_grp_sc = scale(Precip8110Ws, scale=TRUE)) # scaling is the default but just in case
summary(dat$Precip8110Ws_grp_sc)
###########
## ORDER CATEGORIES
# ORDER REVISED FIVE ECOREGIONS - already in order from west to east but if want to make sure
dat$ECOREG_rev<-ordered(dat$ECOREG_rev, levels=c("West","Great Plains","Midwest",
                                  "Appalachians","Coastal Plains"))
# ORDER HYDRAP BINS
dat$hydrap_bin <- ordered(dat$hydrap_bin, levels=c("Low", "Moderate", "High"))
#####################
## MIDWEST ORIGINAL DATA n= 482
#########
mwest<-dat %>%
  filter(ECOREG_rev=="Midwest")
###################
## PLOTTING SPECIFICATIONS
#####################
## Set font for plot (Windows-only device font registration)
windowsFonts(RMN=windowsFont("Times New Roman")) #RMN = Times New Roman
############
## MIDWEST - SIMPLIFIED INTERACTION PLOT (ONLY LOW AND HIGH HYDRAP)
# https://cmdlinetips.com/2021/05/tips-to-customize-text-color-font-size-in-ggplot2-with-element_text/#plotcaption
# Remove lakes missing HydrAP ranks
mwest_mod <- mwest %>%
  drop_na(HydrAP)
# Only Low and High HydrAP lakes (Moderate dropped to simplify the contrast)
mwest_mod <- mwest_mod %>%
  filter(hydrap_bin=="Low" | hydrap_bin =="High")
# Biplot of vertical decline vs. PHDI with one fitted line per HydrAP class
# (log10 y-axis); the crossing lines visualize the interaction.
simple_graph_mwest<-ggplot(mwest_mod, aes(x= PHDI, y=VertDD_use, color=hydrap_bin,linetype=hydrap_bin))+ #, shape=hydrap_bin))+
  scale_y_continuous(trans="log10",limits=c(NA,0.5),labels=function(x) format(x,scientific = FALSE))+
  xlim(-4, 4)+
  geom_smooth(method=lm, se=FALSE, fullrange=FALSE)+#
  #scale_linetype_manual(values=c("dashed","solid"))+
  scale_color_manual( values=c("#4682b4", "#b4464b"),na.value= "#999999")+ #"#2c7bb6", "#fdae61","#d7191c"
  theme_bw(base_size=12)+
  theme(plot.title = element_text(family = "RMN", face="plain", size = 12, hjust=0.5),
        axis.text.x = element_text(family = "RMN", angle=45, hjust=1),
        axis.text.y = element_text(family = "RMN"),
        axis.title.y=element_text(family="RMN"),
        axis.title.x=element_text(family="RMN"),
        panel.grid.major = element_line(colour = NA),
        panel.grid.minor=element_line(colour = NA),
        strip.text = element_text(family = "RMN"),
        legend.title = element_text(family="RMN"),
        legend.text = element_text(family="RMN"),
        legend.position=c(0.5,0.86),
        legend.direction="horizontal",
        plot.tag=element_text(family="RMN"))+
  #legend.position="bottom")+
  #annotate("text", x=c(-3,3.5),y=0.08,label=c("Drought","Wet"), family="RMN", size=5)+
  # Annotations: blue = Low HydrAP class, red = High HydrAP class.
  annotate("text",x=c(-3,3),y=c(0.28,0.10),label=c("More decline","Less decline"),family="RMN",color="#4682b4")+
  annotate("text",x=c(-3,3),y=c(0.115,0.28),label=c("Less decline","More decline"),family="RMN",color="#b4464b")+
  #annotate("text", x=c(3,3),y=c(0.10,0.28),label=c("Less decline","More decline"), family="RMN")+
  labs(x=expression("PHDI"), #
       y=expression("Vertical decline (m)"),
       color="HydrAP class",
       linetype="HydrAP class",
       tag="b.")#,
###########################
## EFFECT SIZE CURVE
###########################
## LOAD DATA
# Compiled estimated partial total effects of vertdd ~ HydrAP given a level of PHDI
## copied values from John's excel spreadsheet
# MIDWEST Vertical decline HydrAP relationships vs. PHDI
peff <- read.csv("~/NLA_hydro/NLA_hydro_driver/data_processed/partial_total/phdi_vertdd_midwest.csv")
# EFFECT SIZE CURVE: total effect of HydrAP on vertical decline as a
# function of PHDI (panel "a." stacked above the biplot "b.").
effect_size_mwest<-ggplot(peff, aes(x= PHDI, y=partial_total))+ #
  #scale_y_continuous(trans="log10",limits=c(NA,0.5),labels=function(x) format(x,scientific = FALSE))+
  xlim(-4, 4)+
  #geom_point() +
  geom_smooth(method=lm, se=FALSE, fullrange=FALSE, color="black")+
  #xlim(NA,350)+
  #scale_color_manual(name="HydrAP class", values=c("#4682b4", "#b4464b"),na.value= "#999999")+ #"#2c7bb6", "#fdae61","#d7191c"
  #stat_cor(aes(color=hydrap_bin, family="RMN"), label.x=1)+
  theme_bw(base_size=12)+
  theme(plot.title = element_text(family = "RMN", face="plain", size = 12, hjust=0.5),
        axis.text.x = element_blank(),#element_text(family = "RMN", angle=45, hjust=1),
        axis.text.y = element_text(family = "RMN"),
        axis.title.y=element_text(family="RMN"),
        axis.title.x=element_blank(),#axis.title.x=element_text(family="RMN"),
        axis.ticks=element_blank(),
        panel.grid.major = element_line(colour = NA),
        panel.grid.minor=element_line(colour = NA),
        strip.text = element_text(family = "RMN"),
        #plot.margin=margin(3,3,3,15),
        # panel.spacing = unit(c(1,1,0,4), "lines"),
        legend.title = element_text(family="RMN"),
        legend.text = element_text(family="RMN"),
        legend.position="bottom",
        plot.tag=element_text(family="RMN"))+#c(0.90,0.25),#"right" # 1st = x-axis (0-1), 2nd = y-axis (0-1)
  labs(x=expression("PHDI"), #
       y=expression("Total effect HydrAP~vertical"),
       tag="a.")#,
##########
## SAVE MIDWEST INTERACTION PLOTS TOGETHER
# Stack the effect-size curve (panel a) above the interaction biplot
# (panel b) and write a 5x6 in, 400 dpi TIFF.
tiff(filename="~/NLA_hydro/NLA_hydro_driver/figures/stacked_interaction_plot_MIDWEST.tiff",
     width=5, height=6, units="in", res=400)
grid.arrange(arrangeGrob(effect_size_mwest,
                         simple_graph_mwest,
                         nrow=2))
#grid.arrange(west_pct_eff_stacked_bar, midwest_pct_eff_stacked_bar, legend, ncol=2, nrow=2, widths=c(3.0, 2.8))
dev.off()
|
9c17ae3e9c00cbccded93b452d2e5fd8a67d4f2e
|
f8ce1034cef41685ab2387fa42084ac1ee5c96cf
|
/chapter13/setleaves.R
|
1109c14956aaad6cf0c2e8213c998e590aa7023a
|
[] |
no_license
|
AnguillaJaponica/RProgramming
|
ab7e15d99153ebe585745289490e6e3df118a35c
|
ae1401920b5a52795cffc95bd83922f15761e531
|
refs/heads/master
| 2020-11-28T10:07:02.579925
| 2019-12-23T15:28:23
| 2019-12-23T15:28:23
| 229,777,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
setleaves.R
|
# Change the dendrogram's leaf point symbol, point size, point color,
# and label text size (dendextend-style pipeline), then plot it.
dend %>%
   set("leaves_pch", 19) %>%
   set("leaves_cex", 0.5) %>%
   set("leaves_col", "blue") %>%
   set("labels_cex", 0.7) %>%
   plot
|
627b44c4d31baf61eecf288ea2a7b60e85a54a72
|
0545754d49da01622f87c46f69ae29693a25b6de
|
/cachematrix.R
|
17b7da31adb60859d3a4d377a8abbe0f0dece624
|
[] |
no_license
|
cosereagheorghe/ProgrammingAssignment2
|
62cc9e7a2e1f977d38fe20579e63a2b13dfbc546
|
08914b855c10fd29eb05a00f0b5806ab6724a1e6
|
refs/heads/master
| 2020-12-25T09:08:53.891986
| 2014-06-20T05:23:43
| 2014-06-20T05:23:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,394
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a "cache matrix" -- a square matrix bundled
## with a memoised slot for its inverse, so the inverse can be computed
## at most once and reused afterwards.
##
## The returned object is a plain list of closures (set/get/set.inv/
## get.inv); the cached fields `mat` and `inv` live in the environment
## of this constructor.
makeCacheMatrix <- function(x = matrix())
{
  ## Refuse anything that is not a square matrix.
  stopifnot(is.matrix(x))
  stopifnot(nrow(x) == ncol(x))

  mat <- x
  inv <- NULL

  ## Replacing the matrix invalidates any cached inverse.
  replace_matrix <- function(x) {
    inv <<- NULL
    mat <<- x
    return(mat)
  }
  fetch_matrix  <- function() mat
  fetch_inverse <- function() inv
  store_inverse <- function(x) {
    inv <<- x
    return(x)
  }

  list(set = replace_matrix,
       get = fetch_matrix,
       set.inv = store_inverse,
       get.inv = fetch_inverse)
}
## cacheSolve: return the inverse of a "cache matrix" created by
## makeCacheMatrix, computing it on the first call and serving the
## cached value on every call after that.
##
## Fix: `...` was declared but never forwarded; it is now passed on to
## solve(), as the interface advertises.
##
## If the matrix is not invertible, a warning is issued and an NA-filled
## matrix with transposed dimensions is cached and returned.
cacheSolve <- function(x, ...)
{
  inv <- x$get.inv()
  if (is.null(inv)) {
    mat <- x$get()
    tryCatch(inv <- solve(mat, ...),
             error = function(err) {
               # <<- writes into this call's frame, where `inv` lives.
               inv <<- matrix(nrow = ncol(mat), ncol = nrow(mat))
               warning("matrix is not invertible", call. = FALSE)
             })
    # Cache whatever we got (inverse or NA matrix) for future calls.
    x$set.inv(inv)
  }
  return(inv)
}
|
c05792072d74ebaaed3ffd85a54ea4c5cce6f2ab
|
a74fb6bf52760d2df735721a3afb560b4a265cad
|
/COSTT4_A_vs_PINCP.R
|
78bdcaa13239dcf06c8b7aa455254a40802ab63c
|
[] |
no_license
|
cmw72d/Community-Survey-College-Scorecard
|
70a046cfc030dc2027af60ff6e9d2ce879a621c0
|
c8b934ce616b1e79cdd898be474105173d849eea
|
refs/heads/master
| 2016-09-14T07:00:38.361129
| 2016-04-25T18:32:58
| 2016-04-25T18:32:58
| 56,654,259
| 0
| 2
| null | 2016-04-20T05:23:01
| 2016-04-20T04:04:39
|
Python
|
UTF-8
|
R
| false
| false
| 3,373
|
r
|
COSTT4_A_vs_PINCP.R
|
library(ggplot2)
library(dplyr)

# Mean total cost of attendance (COSTT4_A) per state from the Scorecard
# data. The original filtered with `COSTT4_A != "NA"`, which only works
# by accident of coercion; use is.na() explicitly.
scorecard <- Scorecard %>%
  group_by(st_fips) %>%
  filter(!is.na(COSTT4_A)) %>%
  summarize(mean_cost = mean(COSTT4_A))

# FIPS state code -> state name lookup (replaces 60 copy-pasted recodes).
# Fix: the original mapped Georgia to code "14"; the correct FIPS code
# for Georgia is "13" (14 is unassigned).
fips_to_state <- c(
  "1"  = "Alabama",        "2"  = "Alaska",         "4"  = "Arizona",
  "5"  = "Arkansas",       "6"  = "California",     "8"  = "Colorado",
  "9"  = "Connecticut",    "10" = "Delaware",       "11" = "District of Columbia",
  "12" = "Florida",        "13" = "Georgia",        "15" = "Hawaii",
  "16" = "Idaho",          "17" = "Illinois",       "18" = "Indiana",
  "19" = "Iowa",           "20" = "Kansas",         "21" = "Kentucky",
  "22" = "Louisiana",      "23" = "Maine",          "24" = "Maryland",
  "25" = "Massachusetts",  "26" = "Michigan",       "27" = "Minnesota",
  "28" = "Mississippi",    "29" = "Missouri",       "30" = "Montana",
  "31" = "Nebraska",       "32" = "Nevada",         "33" = "New Hampshire",
  "34" = "New Jersey",     "35" = "New Mexico",     "36" = "New York",
  "37" = "North Carolina", "38" = "North Dakota",   "39" = "Ohio",
  "40" = "Oklahoma",       "41" = "Oregon",         "42" = "Pennsylvania",
  "44" = "Rhode Island",   "45" = "South Carolina", "46" = "South Dakota",
  "47" = "Tennessee",      "48" = "Texas",          "49" = "Utah",
  "50" = "Vermont",        "51" = "Virginia",       "53" = "Washington",
  "54" = "West Virginia",  "55" = "Wisconsin",      "56" = "Wyoming",
  "60" = "American Samoa", "64" = "Federated States of Micronesia",
  "66" = "Guam",           "69" = "Northern Mariana Islands",
  "70" = "Palau",          "72" = "Puerto Rico",    "78" = "Virgin Islands"
)
ss13pusa$ST <- unname(fips_to_state[as.character(ss13pusa$ST)])

# Mean personal income (PINCP) per state. The original used the scalar
# operators `&&` inside filter(); conditions in filter() must be
# elementwise, so list them separately (implicit AND).
survey <- ss13pusa %>%
  filter(!is.na(PINCP), PINCP > 0, !is.na(ST)) %>%
  group_by(ST) %>%
  summarize(mean_income = mean(PINCP))

# Join the two state-level summaries. NOTE(review): this assumes
# Scorecard$st_fips holds state *names* that match the recoded ST
# column -- confirm against the Scorecard data dictionary.
total <- merge(scorecard, survey, by.x = "st_fips", by.y = "ST")

# One point per state: mean cost vs. mean income, colored by state.
# (The original aes() referenced COSTT4_A / PINCP / ST, none of which
# exist in the merged summary table.)
ggplot(total, aes(x = mean_cost, y = mean_income, col = st_fips)) +
  geom_point()
|
e240318f56ba8bd32516a14ddf4a192c60f7134f
|
192fd3dbc491d3c36bd9351f02cf9b5957ea56d1
|
/Methodology/Survival Model Research/ggRandomForests/ggRandomForests-master/man/calc_roc.rfsrc.Rd
|
b4ca1f348f3286d45ff2e16a6a4a9d0460ad6c81
|
[] |
no_license
|
ryerex/Research_and_Methods
|
d4d211defdbee83e47ecc72c59944c3f60a3bcca
|
4010b75a5521c2c18ee624d48257ee99b29a7777
|
refs/heads/master
| 2023-05-26T01:54:17.048907
| 2020-08-05T16:14:29
| 2020-08-05T16:14:29
| 91,369,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,374
|
rd
|
calc_roc.rfsrc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_roc.R
\name{calc_roc.rfsrc}
\alias{calc_roc}
\alias{calc_roc.randomForest}
\alias{calc_roc.rfsrc}
\title{Receiver Operating Characteristic calculator}
\usage{
calc_roc.rfsrc(object, yvar, which.outcome = "all", oob = TRUE)
}
\arguments{
\item{object}{\code{\link[randomForestSRC]{rfsrc}} or
\code{\link[randomForestSRC]{predict.rfsrc}} object
containing predicted response}
\item{yvar}{True response variable}
\item{which.outcome}{If defined, only show ROC for this response.}
\item{oob}{Use OOB estimates, the normal validation method (TRUE)}
}
\value{
A \code{gg_roc} object
}
\description{
Receiver Operating Characteristic calculator
}
\details{
For a randomForestSRC prediction and the actual
response value, calculate the specificity (1-False Positive Rate) and sensitivity
(True Positive Rate) of a predictor.
This is a helper function for the \code{\link{gg_roc}} functions, and not intended
for use by the end user.
}
\examples{
## Taken from the gg_roc example
# rfsrc_iris <- rfsrc(Species ~ ., data = iris)
data(rfsrc_iris)
gg_dta <- calc_roc.rfsrc(rfsrc_iris, rfsrc_iris$yvar, which.outcome=1, oob=TRUE)
gg_dta <- calc_roc.rfsrc(rfsrc_iris, rfsrc_iris$yvar, which.outcome=1, oob=FALSE)
}
\seealso{
\code{\link{calc_auc}} \code{\link{gg_roc}} \code{\link{plot.gg_roc}}
}
|
ef918afaae1b53a7906bdf1862f4781370c1be22
|
c123ffc8e23813033f1a8c90c23f6ee28d13ed13
|
/Rejection likelihood MCMC ASR Super Subject.R
|
a2ccea2779ced553230b2c88e283d6dc6c9fde4a
|
[] |
no_license
|
noahmthomas-nmt/ABC_Chapter
|
6aba6de5dc07bb7b9a1976eac867e3511847c02f
|
73eda7d3fc9535806599063315f674562d6d1bef
|
refs/heads/main
| 2023-09-05T16:26:57.443473
| 2021-11-02T17:42:01
| 2021-11-02T17:42:01
| 423,571,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,107
|
r
|
Rejection likelihood MCMC ASR Super Subject.R
|
rm(list = ls()) # Clear environment (NOTE(review): discouraged inside scripts)
# ------------------------------------------------------------- Set up
# Rejection based MCMC for ASR
set.seed(43210) # set seed for reproducibility
library("tidyverse") # import tidy function
library("compiler") # import R compiler
library("MCMCpack") # MH sampler algorithm
library("msm") # Probability density functions
library("brms") # Probability density functions
source("asr_distribution.R") # ASR probability density functions
# functions to transform parameters to normal or ASR param space
source("transformations_for_asr.R")
source("priors_asr.R") # priors for ASR model
source("ABC_functions.R") # functions to run ABC
source("log_densities_asr.R") # likelihood and prior functions
source("limits.r") # for plotting
# ------------------------------------------------------------- ASR Experiment Parameters
conds <- c(0, 1) # names of conditions
# NOTE(review): `n` is not defined earlier in this script -- presumably
# set by one of the source()d files above; confirm before running.
condition_table <- c(n, n) # make a table of the condition/stimuli
names(condition_table) <- conds
# ------------------------------------------------------------- Initialization
# Read the conflict-task data (Inc = incongruent flag, RT = response time).
Data <- read_table2("Conflict_Data_2.0 (1).txt",
                    col_types = cols(Inc = col_integer(),
                                     SOA = col_integer(), Sub = col_integer())) # import data
# Wrap the two columns of interest in a one-element list ("super subject").
data_list <- list()
data_list[[1]] <- Data %>%
  dplyr::select(Inc, RT) %>%
  as.list()
Data <- data_list
# Probability density of the ASR (Activation Suppression Race) model.
#
# t     : vector of response times at which to evaluate the density
# theta : named parameter vector with elements alpha, beta (exponential
#         parameters for racers A and B; inverted below), mu, sigma
#         (Gaussian stage C), and lambda (incongruent-trial delay)
# inc   : 0 = congruent trial, 1 = incongruent trial (mixes the two
#         density branches below)
# soa   : stimulus-onset asynchrony -- accepted but unused here
#
# Relies on dexgaussian() (ex-Gaussian density, parameterised by the
# scale, i.e. 1/rate -- hence the 1/beta etc. in the calls).
dasr <- function(t, theta, inc=0, soa=0) {
  alpha <- 1/theta["alpha"] # Exponential scale for A
  beta <- 1/theta["beta"] # Exponential scale for B
  mu <- theta["mu"] # Mean for C
  sigma <- theta["sigma"] # SD for C
  lambda <- theta["lambda"] # Incongruent delay
  p <- beta/(alpha + beta) # P(B<A)
  # Dexgaussian uses the scale, not the rate
  # Mixture: congruent branch when inc == 0; incongruent branch (race
  # between A and B, weighted by p) when inc == 1.
  pdf <- (1-inc)*dexgaussian(t,mu+1/beta,sigma,1/beta) + inc*
    ( p*
        dexgaussian(t,mu+1/(alpha+beta)+lambda,sigma,1/(alpha+beta)) +
        (1-p)*
        ((1 + beta/alpha)*
           dexgaussian(t,mu+1/beta,sigma,1/beta) -
           (beta/alpha)*
           dexgaussian(t,mu+1/(alpha+beta),sigma,1/(alpha+beta))))
  return(pdf)
}
# Log "posterior" for MCMCmetrop1R. Currently it is the log likelihood
# only: the prior term is commented out and ldp is fixed at 0, which
# also makes the !is.finite(ldp) penalty branch unreachable --
# NOTE(review): confirm this is intentional.
# The `data` argument is unused; the function reads the global `Data`.
log_post <- function(theta, data){
  names(theta) <- param_names
  ldp <- 0 #dens_prior(theta, LOG = T)
  lds <- 0
  # Map sampled (unconstrained) parameters back to ASR parameter space.
  theta_ <- transform_to_asr(theta)
  if(!is.finite(ldp)){
    # Penalty path: -740 is near log(.Machine$double.xmin), i.e. an
    # effectively impossible log density.
    lds <- lds + rep(-740, length(Data[[1]]$RT))
    lds <- lds + -740
  }else{
    # log likelihood
    ld_con <- dasr(t = Data[[1]]$RT[Data[[1]]$Inc == 0], theta_, inc=0, soa=0)
    ld_incon <- dasr(t = Data[[1]]$RT[Data[[1]]$Inc == 1], theta_, inc=1, soa=0)
    # Clip tiny negative densities (floating-point error) before log().
    ld_incon <- pmax(ld_incon, 0)
    ld_con <- pmax(ld_con, 0)
    # NOTE(review): ld_con and ld_incon may have different lengths, in
    # which case `+` recycles silently -- verify the trial counts match,
    # or sum the two vectors separately.
    lds <- lds + log(ld_con) + log(ld_incon)
  }
  out <- sum(lds + ldp)
  # print(theta_)
  # print(out)
  out
}
# Run the random-walk Metropolis sampler: 100k kept iterations with a
# 10% burn-in, starting from the prior means (mu_mean_vec, defined in a
# source()d file).
mcmc <- 100000
theta_init <- mu_mean_vec
out <- MCMCmetrop1R(fun = log_post,
                    theta.init = theta_init,
                    burnin = mcmc*.1,
                    mcmc = mcmc,
                    thin = 1,
                    tune = 1.3,
                    seed = 1,
                    logfun = T,
                    optim.method = "Nelder-Mead",
                    force.samp = T,
                    verbose = T)
# --------------------------------------------- prior/posterior density plots
# Extract posteriors for each param (columns of the mcmc chain)
alpha <- out[,1]
beta <- out[,2]
mu <- out[,3]
sigma <- out[,4]
lambda <- out[,5]
# ground truth (generating parameter values, on the ASR scale)
theta <- c(.0075,.01,350,50,100)
# Make plots. Each panel: posterior density curve, a dotted vertical
# line at the (log-transformed) true value, and a thick horizontal bar
# for the 95% credible interval. Axis limits (*.x, *.y) come from limits.r.
par(mfrow=c(1,3),cex=2.5)
cs.alpha <- quantile(alpha,c(0,.025,.975,1))
range.alpha <- alpha.y[2]-alpha.y[1]
plot(density(alpha),xlim=alpha.x,ylim=alpha.y,
     xlab="Log Alpha",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(1/theta[1]),2),alpha.y,lty=3)
lines(cs.alpha[2:3],rep(-.005*range.alpha,2),lwd=5)
abline(h=1,lwd=1)
cs.beta <- quantile(beta,c(0,.025,.975,1))
range.beta <- beta.y[2]-beta.y[1]
plot(density(beta),xlim=beta.x,ylim=beta.y,
     xlab="Log Beta",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(1/theta[2]),2),beta.y,lty=3)
lines(cs.beta[2:3],rep(-.005*range.beta,2),lwd=5)
abline(h=1,lwd=1)
cs.lambda <- quantile(lambda,c(0,.025,.975,1))
range.lambda <- lambda.y[2]-lambda.y[1]
plot(density(lambda),xlim=lambda.x,ylim=lambda.y,
     xlab="Log Lambda",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[5]),2),lambda.y,lty=3)
lines(cs.lambda[2:3],rep(-.005*range.lambda,2),lwd=5)
abline(h=1,lwd=1)
# Second figure: the two Gaussian-stage parameters.
par(mfrow=c(1,2),cex=2.5)
cs.mu <- quantile(mu,c(0,.025,.975,1))
range.mu <- mu.y[2]-mu.y[1]
plot(density(mu),xlim=mu.x,ylim=mu.y,
     xlab="Log Mu",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[3]),2),mu.y,lty=3)
lines(cs.mu[2:3],rep(-.005*range.mu,2),lwd=5)
abline(h=1,lwd=1)
cs.sigma <- quantile(sigma,c(0,.025,.975,1))
range.sigma <- sigma.y[2]-sigma.y[1]
plot(density(sigma),xlim=sigma.x,ylim=sigma.y,
     xlab="Log Sigma",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[4]),2),sigma.y,lty=3)
lines(cs.sigma[2:3],rep(-.005*range.sigma,2),lwd=5)
abline(h=1,lwd=1)
|
d75fef31f85df98ebb56d20b6be3a1b18cc6385c
|
e89596399215b86d90385143b93ef718a5d3fc5e
|
/tests/testthat/test_ux.R
|
49c0c8e8de8c92f232331201e2d99f4187d8a328
|
[
"MIT"
] |
permissive
|
fdrennan/rjsonpath
|
a8096134f3d0684d5632a1fa6e9dcb0d4f9a3c79
|
dc6877270d7d688f925dcc386b386a45b3a02e92
|
refs/heads/master
| 2021-12-14T20:04:08.603066
| 2017-06-03T22:35:30
| 2017-06-03T22:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 81
|
r
|
test_ux.R
|
context("Error messages")
# TODO: placeholder -- no expectations yet; add assertions that errors
# name the missing nested fields.
test_that("errors report missing nested fields", {
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.