blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4c78c60e4d1be70c708d229bf831be730be3de1b | 7f25d453a80f55795830a282c70fecb1336f2d49 | /man/oneParamPlot.Rd | 2a63e967ce1e3828b8b1a98f84e7ae0dc07e1c5a | [] | no_license | acgerstein/diskImageR | 2ad6382ffd7b87adacf80ea05b007fc277659ee8 | 60817781292e9fd088de337c136e63002a6f8f49 | refs/heads/master | 2023-08-31T17:49:19.538196 | 2023-08-24T15:10:13 | 2023-08-24T15:10:13 | 29,325,026 | 6 | 6 | null | 2023-08-18T15:59:02 | 2015-01-16T00:25:09 | HTML | UTF-8 | R | false | true | 3,055 | rd | oneParamPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oneParamPlot.R
\name{oneParamPlot}
\alias{oneParamPlot}
\title{Used to plot a single parameter}
\usage{
oneParamPlot(
projectName,
type,
param = "RAD20",
ymin = 0,
ymax = 100,
width = 6,
height = 4,
xlabels = "line",
xlabAngle = NA,
order = NA,
orderFactor = "line",
overwrite = TRUE,
savePDF = TRUE,
popUp = TRUE,
barplot = TRUE
)
}
\arguments{
\item{projectName}{the short name to be used for the project}
\item{type}{specify whether the dataset to use is a dataframe with all data ("df") or an aggregated dataframe ("ag")}
\item{param}{what parameter to plot (supported: "RAD20", "RAD50", "RAD80", "FoG20", "FoG50", "FoG80", "slope"), default = "RAD20"}
\item{ymin}{a numeric value indicating the minimum y value plotted in each plot}
\item{ymax}{a numeric value indicating the maximum y value plotted in each plot}
\item{width}{a numeric value indicating the width of the pdf file generated}
\item{height}{a numeric value indicating the height of the pdf file generated}
\item{xlabels}{either a vector containing the desired x-axis labels, or a single value indicating the column name that contains the values to use (likely either the 'line' column or one of the type columns), default = "line".}
\item{xlabAngle}{indicates whether to print the x-axis labels at an angle; if a number is provided, this will be the angle used. The default is not to plot at an angle, default = NA.}
\item{order}{can be either "factor" or "custom". If custom, supply a numerical vector the same length as the dataframe to indicate the desired order. If factor, supply the column name in \code{orderFactor} to be used to factor.}
\item{orderFactor}{if \code{order = "factor"} supply the column name to be used to factor.}
\item{overwrite}{a logical value indicating whether to overwrite existing figures created on the same day for the same project name}
\item{savePDF}{a logical value indicating whether to save a PDF file or open a new quartz window. Defaults to TRUE (saves a pdf file).}
\item{popUp}{a logical value indicating whether to pop up the figure after it has been created}
\item{barplot}{whether to plot values as a barplot (barplot = TRUE) or dotplot (barplot = FALSE), default = TRUE. Only possible when \code{type = "ag"}}
}
\value{
Either a pdf figure (projectName_RAD-FoG.pdf) saved to the 'figures' directory or a figure on screen
}
\description{
This function creates a pdf figure of plots showing the results of the imageJ analysis for resistance (radius from the disk, RAD), sensitivity (slope) and tolerance (fraction of growth above RAD, FoG).
}
\details{
Basic parameter plotting functions to plot a single parameter. Input can be the dataframe from either \code{\link{createDataframe}} \code{type="df"} or from \code{\link{aggregateData}} \code{type=="ag"}.
}
\seealso{
\code{\link{twoParamPlot}} for a similar figure with two parameters or \code{\link{threeParamPlot}} for a similar figure with three parameters
}
|
b0a1628b296287be956a8029c01c6e8bdf28d18d | caf361bdbc2459187fb58fae876bad5497e532a1 | /man/process.Rd | d3c5c74e978789d7d21476e5168b2913dfe11567 | [
"MIT"
] | permissive | ddiez/scmisc | 35efffabe859ddc6ac9c2c20f00d283a376def44 | f19819e7e736cfd167fd4b0c29d7290d66ab961a | refs/heads/master | 2023-08-17T04:08:03.971880 | 2023-08-06T13:35:17 | 2023-08-06T13:35:17 | 180,719,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,096 | rd | process.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{process}
\alias{process}
\alias{process.Seurat}
\title{process}
\usage{
process(x, ...)
\method{process}{Seurat}(
x,
assay = NULL,
dims = 1:10,
algorithm = 1,
resolution = 0.6,
nfeatures = 2000,
metric = "cosine",
n.neighbors = 30L,
min.dist = 0.3,
spread = 1,
verbose = FALSE,
...
)
}
\arguments{
\item{x}{an object of class Seurat.}
\item{...}{arguments passed down to methods.}
\item{assay}{assay to use for processing.}
\item{dims}{PCA dimensions to use for UMAP and clustering.}
\item{algorithm}{algorithm to use for clustering.}
\item{resolution}{resolution to use for clustering.}
\item{nfeatures}{number of features for FindVariableFeatures().}
\item{metric}{metric used for UMAP.}
\item{n.neighbors}{number of nearest-neighbors to use for UMAP.}
\item{min.dist}{min.dist for UMAP.}
\item{spread}{spread for UMAP.}
\item{verbose}{whether to output diagnostic information.}
}
\description{
Applies several processing steps to a single cell genomics object.
}
|
bd02c20a3d9271348a684699f5e23c931351d095 | 2c0e460a0d1a229640da96cd4a1bb2f1ba2ab8b7 | /man/gp_mse_othermodel.Rd | 68461692a9d4550c6c6039633ec36c7592dcc330 | [
"MIT"
] | permissive | ensley/gpcovr | d24aa981d485e06e54d9e8de1776c388e1a02af3 | 8d96197b965e9807f7b9c17fc4fed7c34163617a | refs/heads/master | 2021-05-08T17:25:39.116358 | 2018-01-30T02:41:46 | 2018-01-30T02:41:46 | 119,471,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 730 | rd | gp_mse_othermodel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_output.R
\name{gp_mse_othermodel}
\alias{gp_mse_othermodel}
\title{Optimally predict points using an arbitrary covariance model and find the
mean squared error}
\usage{
gp_mse_othermodel(gpEst, model)
}
\arguments{
\item{gpEst}{A \code{GPsimulated} object}
\item{model}{A covariance model made using the \code{RandomFields} package}
}
\value{
The mean squared error
}
\description{
To predict points from another model, such as the best-fitting Matern model,
create it using \code{RandomFields} and specify it here.
}
\details{
If the predictions have already been found via
\code{\link{predict.GPsimulated}}, use \code{\link{mse}} instead.
}
|
06c5d4a04b20cffcff130f9ecbfc761602d5b0b5 | 51f78fdbe81750ce5b0b270527c28d83f93e3a22 | /TestFile.R | 4fd0207109c1b14da524f7f034195a9d75f309c2 | [] | no_license | tkarishma25/IT497 | 2b7d86c4645e62a55d1f5929d44a045e54d9fa27 | a39231f11e751a95c501b0d9d572acab83a3508c | refs/heads/master | 2021-05-03T20:37:55.007611 | 2016-10-19T20:58:10 | 2016-10-19T20:58:10 | 71,396,071 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 102 | r | TestFile.R | df <- read.table("http://en.wikipedia.org/wiki/List_of_cities_proper_by_population")
head(df)
tail(df) |
dec36d379af0767c64f42623603222c9d4705754 | 55d60dce45c675777d67b1975398f9ed7442c45e | /man/split_nrange.Rd | 5168a2807fac65a5353671b4fce812c5d7b2bbf2 | [
"MIT"
] | permissive | overdodactyl/mRclwhip | 2c77de810b87fd412ffa9dc77ad301b0b6911435 | 7f5ba4f1414c92068b4e651ddcf38a8a99cfe995 | refs/heads/master | 2023-07-08T08:33:43.928531 | 2023-06-28T18:38:32 | 2023-06-28T18:38:32 | 219,841,535 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 683 | rd | split_nrange.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_nrange.R
\name{split_nrange}
\alias{split_nrange}
\title{Split character vector of N (low, high) into three columns}
\usage{
split_nrange(.data, col, remove = TRUE)
}
\arguments{
\item{.data}{A tbl.}
\item{col}{Column to separate}
\item{remove}{If TRUE, remove input columns from output data frame.}
}
\value{
An object of the same class as .data.
}
\description{
Split character vector of N (low, high) into three columns
}
\examples{
tmp <- data.frame(
obs = "A",
val = "1224.11 (119.3214, 134.21)",
stringsAsFactors = FALSE
)
tmp \%>\%
split_nrange(val) \%>\%
nrange(n, low, high)
}
|
6b5b8f3a76c650780f87450c9be8c9c896f20a05 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/drake/examples/build_graph.Rd.R | e7417ec749605eb1f109f86101b1c3df70894fec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | build_graph.Rd.R | library(drake)
### Name: build_graph
### Title: Deprecated function 'build_graph'
### Aliases: build_graph
### Keywords: internal
### ** Examples
# See ?drake_config for examples.
|
4d0d2fb02c89c4df4e7518b8cbbd5b27394073ac | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /kcpRS/R/kcpRS_workflow.default.R | 37264fa36c41e76824f7ff7396f06401bedbd7dc | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,150 | r | kcpRS_workflow.default.R | #' @export
kcpRS_workflow.default <- function(data,
                                   RS_funs = c("runMean", "runVar", "runAR", "runCorr"),
                                   wsize = 25,
                                   nperm = 1000,
                                   Kmax = 10,
                                   alpha = .05,
                                   varTest = FALSE,
                                   bcorr = TRUE,
                                   ncpu = 1
                                   ) {
  # Full KCP-RS change point workflow on a multivariate time series:
  #   1. Scan the running means for change points (if requested).
  #   2. If mean change points are found, centre every variable within each
  #      mean-based phase so later tests are not driven by mean shifts.
  #   3. Scan running variance, autocorrelation and correlation (if requested)
  #      on the (possibly centred) data.
  #
  # Arguments mirror kcpRS(): 'RS_funs' selects which running statistics to
  # test, 'bcorr' applies a Bonferroni correction (alpha / number of tests),
  # and 'ncpu' is the number of cores used by the permutation test.
  #
  # Returns a list of class "kcpRS_workflow" with elements kcpMean, kcpVar,
  # kcpAR and kcpCorr (NULL for tests that were not requested).

  # Previously the function silently returned NULL when ncpu exceeded the
  # machine's core count; warn and fall back to the available cores instead.
  avail_cores <- detectCores()
  if (ncpu > avail_cores) {
    warning("'ncpu' (", ncpu, ") exceeds the number of available cores; using ",
            avail_cores, " core(s) instead.", call. = FALSE)
    ncpu <- avail_cores
  }

  # Which running-statistic tests were requested.
  rm <- "runMean" %in% RS_funs
  rv <- "runVar" %in% RS_funs
  ra <- "runAR" %in% RS_funs
  rc <- "runCorr" %in% RS_funs

  kcp_mean <- NULL
  kcp_var <- NULL
  kcp_corr <- NULL
  kcp_AR <- NULL

  # Bonferroni correction: divide alpha over the number of requested tests.
  ntest <- rm + rv + ra + rc
  alpha_per_test <- if (isTRUE(bcorr)) alpha / ntest else alpha

  # Step 1: running means.
  if (rm) {
    kcp_mean <- kcpRS(
      data,
      RS_fun = runMean,
      RS_name = "Mean",
      wsize,
      nperm,
      Kmax,
      alpha = alpha_per_test,
      varTest,
      ncpu
    )
  }

  # Step 2: phase-wise mean centering, only when mean change points exist and
  # at least one further test will be run on the data.
  ncp_mean <- length(kcp_mean$changePoints)
  if (rm && ncp_mean > 0 && (rv || ra || rc)) {
    cps <- as.numeric(kcp_mean$changePoints)
    nv <- ncol(data)
    N <- nrow(data)
    # Phase k - 1 covers rows bounds[k - 1]:(bounds[k] - 1).  The final
    # boundary is N + 1: the original code used N, which left the last row of
    # dat_centered at its initial value of 0 instead of its centred value.
    bounds <- c(1, cps, N + 1)
    nbounds <- length(bounds)
    dat_centered <- matrix(0, nrow = N, ncol = nv)
    for (v in seq_len(nv)) {
      for (k in 2:nbounds) {
        rows <- bounds[k - 1]:(bounds[k] - 1)
        dat_centered[rows, v] <- data[rows, v] - mean(data[rows, v])
      }
    }
    dat_centered <- as.data.frame(dat_centered)
    colnames(dat_centered) <- colnames(data)
    data <- dat_centered
  }

  # Step 3: remaining running statistics on the (possibly centred) data.
  if (rv) {
    kcp_var <- kcpRS(
      data,
      RS_fun = runVar,
      RS_name = "Variance",
      wsize,
      nperm,
      Kmax,
      alpha = alpha_per_test,
      varTest,
      ncpu
    )
  }
  if (ra) {
    kcp_AR <- kcpRS(
      data,
      RS_fun = runAR,
      RS_name = "Autocorrelation",
      wsize,
      nperm,
      Kmax,
      alpha = alpha_per_test,
      varTest,
      ncpu
    )
  }
  if (rc) {
    kcp_corr <- kcpRS(
      data,
      RS_fun = runCorr,
      RS_name = "Correlation",
      wsize,
      nperm,
      Kmax,
      alpha = alpha_per_test,
      varTest,
      ncpu
    )
  }

  output <- list(
    "kcpMean" = kcp_mean,
    "kcpVar" = kcp_var,
    "kcpAR" = kcp_AR,
    "kcpCorr" = kcp_corr
  )
  class(output) <- "kcpRS_workflow"
  output
}
|
fa22af462cdbe5c8f18946b5fe9ff771c652466a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mrfDepth/examples/projdepth.Rd.R | a34792fed2437c840bb0e862b59ca994be1ccbf1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,204 | r | projdepth.Rd.R | library(mrfDepth)
### Name: projdepth
### Title: Projection depth of points relative to a dataset
### Aliases: projdepth
### Keywords: multivariate
### ** Examples
# Auto-extracted Rd example for mrfDepth's projdepth() (generated script).
# Compute the projection depth of a simple two-dimensional dataset.
# Outliers are plotted in red.
if (requireNamespace("robustbase", quietly = TRUE)) {
  # Preferred: real bivariate data (log-transformed Animals2 from robustbase).
  BivData <- log(robustbase::Animals2)
} else {
  # Fallback: simulated bivariate data with two planted outlying points.
  BivData <- matrix(rnorm(120), ncol = 2)
  BivData <- rbind(BivData, matrix(c(6,6, 6, -2), ncol = 2))
}
Result <- projdepth(x = BivData)
# flagX is FALSE for observations flagged as outliers; invert to index them.
IndOutliers <- which(!Result$flagX)
plot(BivData)
points(BivData[IndOutliers,], col = "red")
# A multivariate rainbowplot may be obtained using mrainbowplot.
plot.options = list(legend.title = "PD")
mrainbowplot(x = BivData,
             depths = Result$depthX, plot.options = plot.options)
# Options for the underlying outlyingness routine may be passed
# using the options argument.
Result <- projdepth(x = BivData,
                    options = list(type = "Affine",
                                   ndir = 1000,
                                   stand = "MedMad",
                                   h = nrow(BivData)
                                   )
                    )
|
901a29d64420f578f36d99c9bad4127d47f0976e | 5a221ae7a3c5431579e461772f87443aa58493ba | /nimbleSCR/man/dbernppAC.Rd | c3c63fcad350dad85303c045eab8554ec6c4093a | [] | no_license | torbjore/nimbleSCR | 0200949946e6668c5e7d98c205778e9b8ed9cd3a | 61264c603a725856fb150e3357c43b48758c4743 | refs/heads/master | 2023-08-28T11:53:16.676865 | 2021-11-13T22:06:27 | 2021-11-13T22:06:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,145 | rd | dbernppAC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbernppAC.R
\name{dbernppAC}
\alias{dbernppAC}
\alias{rbernppAC}
\title{Bernoulli point process for the distribution of activity centers}
\usage{
dbernppAC(
x,
lowerCoords,
upperCoords,
logIntensities,
logSumIntensity,
habitatGrid,
numGridRows,
numGridCols,
log = 0
)
rbernppAC(
n,
lowerCoords,
upperCoords,
logIntensities,
logSumIntensity,
habitatGrid,
numGridRows,
numGridCols
)
}
\arguments{
\item{x}{Vector of x- and y-coordinates of a single spatial point (i.e. AC location).}
\item{lowerCoords, upperCoords}{Matrices of lower and upper x- and y-coordinates of all habitat windows. One row for each window.
Each window should be of size 1x1 (after rescaling if necessary).}
\item{logIntensities}{Vector of log habitat intensities for all habitat windows.}
\item{logSumIntensity}{Log of the sum of habitat intensities over all windows.}
\item{habitatGrid}{Matrix of habitat window indices. Only needed for \code{dbernppAC}.
Habitat window indices should match the order in \code{lowerCoords}, \code{upperCoords}, and \code{logIntensities}.
When the grid has only one row/column, artificial indices have to be provided to inflate \code{habitatGrid} in order
to be able to use the distribution in \code{nimble} model code.}
\item{numGridRows, numGridCols}{Numbers of rows and columns of the habitat grid.}
\item{log}{Logical argument, specifying whether to return the log-probability of the distribution.}
\item{n}{Integer specifying the number of realisations to generate. Only n = 1 is supported.}
}
\value{
\code{dbernppAC} gives the (log) probability density of the observation vector \code{x}.
\code{rbernppAC} gives coordinates of a randomly generated spatial point.
}
\description{
Density and random generation functions of the Bernoulli point process for the distribution of activity centers.
}
\details{
The \code{dbernppAC} distribution is a NIMBLE custom distribution which can be used to model and simulate
the activity center location (\emph{x}) of a single individual in continuous space over a set of habitat windows defined by their upper and lower
coordinates (\emph{lowerCoords,upperCoords}). The distribution assumes that the activity center
follows a Bernoulli point process with intensity = \emph{exp(logIntensities)}.
}
\examples{
# Use the distribution in R
lowerCoords <- matrix(c(0, 0, 1, 0, 0, 1, 1, 1), nrow = 4, byrow = TRUE)
upperCoords <- matrix(c(1, 1, 2, 1, 1, 2, 2, 2), nrow = 4, byrow = TRUE)
logIntensities <- log(c(1:4))
logSumIntensity <- log(sum(c(1:4)))
habitatGrid <- matrix(c(1:4), nrow = 2, byrow = TRUE)
numGridRows <- nrow(habitatGrid)
numGridCols <- ncol(habitatGrid)
dbernppAC(c(0.5, 1.5), lowerCoords, upperCoords, logIntensities, logSumIntensity,
habitatGrid, numGridRows, numGridCols, log = TRUE)
}
\references{
W. Zhang, J. D. Chipperfield, J. B. Illian, P. Dupont, C. Milleret, P. de Valpine and R. Bischof. 2020.
A hierarchical point process model for spatial capture-recapture data. bioRxiv. DOI 10.1101/2020.10.06.325035
}
\author{
Wei Zhang
}
|
45c46693187a079ab68b0c55805ab3ea3bf6fba2 | 15bfe89b3c2fc88eb656093976e36497dcd4ba4f | /R/spread_each.R | 51cab23d520b30fc0657ad3d9ed9c369e20a67e7 | [
"MIT"
] | permissive | halpo/tidymargins | a39c59cfa82dfa3c2c59452f0275505972657264 | 136cf1585d35fc2a200ca7a28582ee9e9808ca9f | refs/heads/master | 2020-06-18T21:52:11.441212 | 2019-09-05T17:45:07 | 2019-09-05T17:45:07 | 196,463,637 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,375 | r | spread_each.R | #' Spread multiple variables
#'
#' This is a multiple variable version of the function
#' [tidyr::spread()].
#'
#' @inheritParams tidyr::spread
#' @param ... the columns to act as the values to spread out.
#' @param sep the separator placed between the key level and the value column name when building the output column names.
#' @param key.first If `TRUE`, the default, the columns are named
#' `{key level}{sep}{value column name}`,
#' otherwise the format is `{value column name}{sep}{key level}{sep}`
#'
#' @return A wide [`tbl_df`][tibble::tbl_df], with multiple value columns spread out.
#'
#' @seealso
#' * [Wide versus long data](https://en.wikipedia.org/wiki/Wide_and_narrow_data) (also known as narrow data) on Wikipedia.
#' * [tidyr::spread()] for the single variable version.
#' @export
#' @example inst/examples/ex-spread_each.R
spread_each <-
function( data #< A tibble or compatible object.
        , key #< key to be used as for the 'super-columns'
        , ... #< Value variables to be spread
        , fill=NA #< a single value or a named list of values to fill in structural missing.
        , convert=FALSE #< See <spread>
        , drop=FALSE #< See <spread>
        , sep='.' #< the separator to be used to separate the names of the super- and sub-columns.
        , key.first=TRUE
        ){
    # Spread several value columns at once: for every level of `key` and every
    # value column in `...` an output column is created, named either
    # "<level><sep><value>" (key.first = TRUE) or "<value><sep><level>".
    key <- rlang::enquo(key)
    dots <- rlang::quos(...)
    assert_that( is.flag(convert)
               , is.flag(drop)
               , is.string(sep)
               )
    # Resolve the key column, the value columns, and the columns that identify
    # rows in the output (everything that is neither key nor value).
    key.var <- tidyselect::vars_pull(tbl_vars(data), !!key)
    value.cols <- tidyselect::vars_select(tbl_vars(data), !!!dots)
    retained.groups <- dplyr::group_vars(data) %>% setdiff(key.var)
    grouping.cols <- tbl_vars(data) %>% setdiff(key.var) %>% setdiff(value.cols)
    assert_that(rlang::is_dictionaryish(value.cols))
    # Normalise `fill` to a fully named vector/list with one entry per value
    # column.  A partially named `fill` may contain exactly one unnamed entry,
    # which acts as the default for value columns not named explicitly.
    if (!is.null(names(fill))) {
        if (all(. <- rlang::have_name(fill))) {
            assert_that( rlang::is_dictionaryish(fill)
                       , all(names(value.cols) %in% names(fill))
                       )
        } else {
            assert_that(sum(!.)==1L, msg='`fill` should have only one default/unnamed value')
            fill <- fill[match(value.cols, names(fill), nomatch = which(!.))] %>%
                rlang::set_names(value.cols)
        }
    } else {
        fill <- rlang::rep_along(value.cols, fill) %>% rlang::set_names(value.cols)
    }
    key.levels <- levels2(pull(data, key.var))
    # Spread a single value column `col` (named `name`) and rename the
    # resulting level columns to the combined "<level><sep><name>" (or
    # "<name><sep><level>") form.
    f <- function(col, name){
        new.names <-
            rlang::set_names(key.levels
                            , if (key.first) paste(key.levels, name, sep=sep)
                            else paste(name, key.levels, sep=sep))
        data %>% dplyr::ungroup() %>%
            dplyr::select( key.var, col, grouping.cols) %>%
            tidyr::spread( key = key.var
                         , value = col
                         , fill = fill[[name]]
                         , sep = NULL
                         ) %>%
            dplyr::rename(., !!!(new.names[new.names %in% names(.)]))
    }
    # Output column order: grouped by key level when key.first = TRUE,
    # otherwise grouped by value column.
    col.order <- purrr::map(if (key.first) key.levels
                            else names(rlang::quos_auto_name(dots))
                            , ~rlang::expr(starts_with(!!.)))
    # Spread each value column separately, join the pieces back together on
    # the identifying columns, order the columns, and restore row grouping.
    value.cols %>%
        purrr::imap(f) %>%
        purrr::reduce(full_join, by=grouping.cols) %>%
        dplyr::select( !!!grouping.cols
                     , !!!(col.order)
                     ) %>%
        dplyr::group_by(!!!rlang::syms(retained.groups))
}
# The blocks below are wrapped in `if(F)`/`if(FALSE)` so they never run when
# the file is sourced; the `#@example` / `#@testing` tags mark them for
# extraction by a literate-testing tool.
if(F){#@example
    data <- expand.grid( x = c( 'a', 'b', 'c')
                       , y = c( 'd', 'e', 'f')
                       , .rep = 1:10
                       ) %>%
        mutate( v = rnorm(90)) %>%
        select(-.rep)
    long <- summarise(group_by(data, x, y),N=n(), sum=sum(v))
    spread_each(long, y, N, sum)
}
if(F){#@testing
    data <- expand.grid( x = c( 'a', 'b', 'c')
                       , y = c( 'd', 'e', 'f')
                       , .rep = 1:10
                       ) %>%
        mutate( v = rep(c(-1, 0, 1), length.out=90)) %>%
        select(-.rep)
    long <- data %>%
        group_by(x, y) %>%
        summarise(N=n(), sum=sum(v))
    val <- spread_each(long, y, N, sum)
    expect_equal(dim(val), c(3L,7L))
    expect_equal(names(val), c('x', 'd.N', 'd.sum', 'e.N', 'e.sum'
                              , 'f.N', 'f.sum'))
    val2 <- spread_each(long, y, N, sum, key.first=FALSE)
    expect_equal(dim(val2), c(3L,7L))
    expect_equal(names(val2), c('x'
                               , paste0('N', '.', c( 'd', 'e', 'f'))
                               , paste0('sum', '.', c( 'd', 'e', 'f'))
                               ))
}
if(FALSE){#@testing spread_each(fill=...)
    data <- expand.grid( x = c( 'a', 'b', 'c')
                       , y = c( 'd', 'e', 'f')
                       , .rep = 1:10
                       ) %>%
        mutate( v = rep(c(-1, 0, 1), length.out=90)) %>%
        select(-.rep)
    # One cell (x == 'b', y == 'e') is removed to create a structural missing
    # that `fill` must supply.
    long <- data %>%
        group_by(x, y) %>%
        summarise(N=n(), sum=sum(v)) %>%
        filter(!(x=='b' & y=='e'))
    val <- spread_each(long, y, N, sum, fill=list(N='#N/A', sum='???'))
    expect_is(val, 'tbl')
    expect_equal( val[2,c('e.N', 'e.sum')]
                , tibble(e.N = '#N/A', e.sum = '???')
                )
    expect_error(spread_each(long, y, N, sum, fill=list(N='#N/A')))
    expect_error(spread_each(long, y, N, sum, fill=list('#N/A', '???', x='.')))
    val2 <- spread_each(long, y, N, sum, fill=list(N='#N/A', '???'))
    expect_is(val2, 'tbl')
    expect_equal( val2[2,c('e.N', 'e.sum')]
                , tibble(e.N = '#N/A', e.sum = '???')
                )
}
levels2 <- function(x){
    # Return the "levels" of `x` as a character vector: a factor keeps its
    # declared level order; any other input is coerced to character and its
    # distinct values are returned in sorted order.
    if (inherits(x, 'factor'))
        return(levels(x))
    sort(unique(as.character(x)))
}
# Inline tests for levels2() (never run when sourced; the `#@testing` tag
# marks the block for extraction by a literate-testing tool).
if(F){#@testing
    x <- ordered(c('b', 'a', 'c'), levels=c('c', 'b', 'a'))
    expect_equal(levels2(x), levels(x))
    x <- c('c', 'b', 'a')
    expect_equal(levels2(x), c('a', 'b', 'c'))
    x <- 1:3
    expect_equal(levels2(x), c('1','2','3'))
}
|
7b2b46bc8a772ae08fd3d568ee6e5ab97131ef15 | 994768a737bb232129cf171d6617277141459fd1 | /ui.R | 4cfea60121a483606c4212e3711d8c64c4565914 | [] | no_license | patzaw/irisGlmPred | 9d15bb25f9960831b3f940f7256eb804fc3c533f | a04601ea98a39926d31fac4d0d692b8e2944333e | refs/heads/master | 2021-01-23T19:39:01.450746 | 2018-10-28T05:50:18 | 2018-10-28T05:50:18 | 26,760,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 954 | r | ui.R | library(shiny)
# Shiny UI: the sidebar collects the predictor input (feature + value); the
# main panel shows the legend and the clickable prediction plot ("mpClick").
shinyUI(pageWithSidebar(
  headerPanel("Simple glm predictors of Iris species"),
  sidebarPanel(
    h3("Feed the predictor"),
    # Feature fed to the glm predictors (one of the four iris measurements).
    selectInput(
      inputId="feature",
      label="1) Select the feature to perform the prediction",
      choices=c(
        "Sepal length"="Sepal.Length",
        "Sepal width"="Sepal.Width",
        "Petal length"="Petal.Length",
        "Petal width"="Petal.Width"
      ),
      selected="Petal.Length"
    ),
    # Value inputs are rendered server-side (uiOutput) so their range can
    # depend on the currently selected feature.
    uiOutput("valSlider"),
    uiOutput("valSel"),
    p("... or click on the graph."),
    h3("Interpret the results"),
    # NOTE(review): "higlited" below is a typo for "highlighted"; it is
    # user-facing runtime text and is left unchanged here.
    p(
      "The higlited species is the most probable one according to the provided value.
      The prediction responses are displayed on the right of the graph."
    ),
    p(
      "The curves correspond to the predictive functions for each species."
    )
  ),
  mainPanel(
    plotOutput("modPlLeg", width="100%", height="100px"),
    plotOutput("modPlot", width = "100%", height="600px", clickId = "mpClick")
  )
))
ad1e6ad43c7754e33490db04591837f3c3eec403 | e3d2f8e956c3e6bdeac4a46eb52f3d3350b1d2d8 | /presentation/A Team Has No Name/Water/App/server.R | d0989e40084c770b54130d78d2a024be06b48fc8 | [] | no_license | socalrug/hackathon-2019-05 | 2bbf6e951361476d054bb13da243784f531adbc0 | 0b40c3e51d7dbc681baf7458cbbf5e4e5f14c82f | refs/heads/master | 2022-01-25T17:52:25.465946 | 2019-05-21T15:21:41 | 2019-05-21T15:21:41 | 175,350,381 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,806 | r | server.R | shinyServer(function(input, output, session) {
  # Holds the currently built county/ZIP choropleth so renderLeaflet can
  # react to it.
  imap <- reactiveVal(NULL)

  # Rebuild the choropleth whenever the aggregation level (input$area) changes.
  observe({
    if(input$area == "COUNTY") {
      # Count active violations per county; counties absent from ActVi keep 0.
      counties <- CAcounties$COUNTY
      statcounts <- rep(0, length(counties))
      violations <- ActVi[, .N, by = .(COUNTY)]
      statcounts[match(violations$COUNTY, counties)] <- violations$N
      popup <- ""
      pal <- colorNumeric(palette = "viridis", domain = statcounts)
      imap <- leaflet(CAcounties) %>% addProviderTiles("CartoDB.DarkMatterNoLabels") %>%
        addPolygons(stroke = F, smoothFactor = 0.2, fillOpacity = 0.8,
                    color = ~pal(statcounts), popup = popup)
    } else {
      # Same idea at ZIP-code level, using data.table update-by-reference.
      zips <- data.table(ZIPCODE = as.character(CAzips@data$ZCTA5CE10), N = 0)
      zipcounts <- ActVi[, .N, by = .(ZIPCODE)]
      zips[match(zipcounts$ZIPCODE, zips$ZIPCODE), N := zipcounts$N]
      zippal <- colorNumeric(palette = "viridis", zips$N)
      popup <- ""
      imap <- leaflet(CAzips) %>% addProviderTiles("CartoDB.DarkMatterNoLabels") %>%
        addPolygons(stroke = F, smoothFactor = 0.2, fillOpacity = 0.8,
                    color = ~zippal(zips$N), popup = popup)
    }
    # NOTE(review): the local `imap` (a leaflet widget) shadows the reactiveVal
    # of the same name.  This call still works because R skips non-function
    # bindings when resolving `imap` as a function, but renaming the local
    # variable would make the intent clearer.
    imap(imap)
  })

  # Render the reactive county/ZIP choropleth.
  output$imap <- renderLeaflet({
    imap()
  })

  # Static station map: circle markers coloured by the Dry factor.
  output$imap2 <- renderLeaflet({
    factpal <- colorFactor(palette = "viridis", Stations$Dry)
    # numpal <- colorNumeric(palette = "viridis", Stations$DryHistory)
    imap <- leaflet(Stations) %>% addProviderTiles("CartoDB.DarkMatterNoLabels") %>%
      addCircles(lng = ~LONGITUDE, lat = ~LATITUDE, radius = 1,
                 color = ~factpal(Stations$Dry), fill = F)
    imap
  })
})
|
e4711cdc2bf4448ccb7ce874c649e0db43fb98a3 | 4179c1525e5fbe925044cb01ffb40e6f0c07f71c | /man/eucliDist.Rd | 84b4457c969a657bcf591eca89e0dc8d9fa62185 | [] | no_license | hetong007/SwarmSVM | 20e58c0887da3eb90483759119af3a343e9862bd | a82b7eb37d3adb51decfc98f637d9bc32ba5b652 | refs/heads/master | 2022-12-27T04:53:10.674589 | 2022-12-15T08:38:34 | 2022-12-15T08:38:34 | 36,259,233 | 15 | 8 | null | null | null | null | UTF-8 | R | false | true | 318 | rd | eucliDist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{eucliDist}
\alias{eucliDist}
\title{Euclidean Distance calculation}
\usage{
eucliDist(x, centers)
}
\arguments{
\item{x}{the data matrix}
\item{centers}{the matrix of centers}
}
\description{
Euclidean Distance calculation
}
|
d7785311b58647b8c5f0ab6c29aed075f4118fa6 | 5258a7ec2449f8babf435728526a8ae9dece933e | /Justin Fungi/AMF reorg.R | d43e175054fc631cdfe2ef08d321e68fb4ff62d9 | [] | no_license | HallettLab/usda-compost | 54675ff3be2c2354ce5e9f0a3a8e310e616cacb0 | 879c5425b03668d3fa296d718594e470b21a3ef9 | refs/heads/master | 2023-09-03T15:09:42.248305 | 2023-08-07T19:30:11 | 2023-08-07T19:30:11 | 136,986,215 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,980 | r | AMF reorg.R | library(tidyverse)
library(ggplot2) #for plots
library(nlme)#for mixed effect models to test effects of treatments
library(lsmeans)#post hoc test for significance between treatments
library(vegan)
# Import csv file, call it data. Import soil moisture data, call it moisture.data
# NOTE(review): setwd() ties this script to one machine; relative paths (or the
# here package) would make it portable.
setwd("C:/Users/Owner/Desktop")
# Fungal colonization scores; ppt_trt (precipitation) and nut_trt (nutrient)
# are ordered factors so plots and contrasts follow d < xc < w and c < f < n.
data<-read.csv("compost.fungi.csv",header=T) %>%
  mutate(ppt_trt=ordered(ppt_trt, levels = c(d="d", xc="xc", w="w"))) %>% #orders factors
  mutate(nut_trt=ordered(nut_trt, levels = c(c="c", f="f", n="n"))) #orders factors
str(data)
levels(data$ppt_trt)#check levels of precipitation treatment factor
levels(data$nut_trt)#check levels of nutrient treatment factor
levels(data$fungi)#check levels of fungi
data$block <- as.factor(data$block)
data$root <- as.factor(data$root)
data$rep <- as.factor(data$rep)
#import soil moisture data
moisture.data <- read.csv("moisture.csv", header=T) %>%
  mutate(ppt_trt=ordered(ppt_trt, levels = c(d="d", xc="xc", w="w"))) %>% #orders factors
  mutate(nut_trt=ordered(nut_trt, levels = c(c="c", f="f", n="n")))
str(moisture.data)
moisture.data$block <- as.factor(moisture.data$block)
levels(moisture.data$block)
levels(moisture.data$ppt_trt)
levels(moisture.data$nut_trt)
#import root biomass data (belowground net primary productivity, BNPP)
BNPP <- read.csv("BNPP.csv", header=T) %>%
  mutate(ppt_trt=ordered(ppt_trt, levels = c(d="d", xc="xc", w="w"))) %>% #orders factors
  mutate(nut_trt=ordered(nut_trt, levels = c(c="c", f="f", n="n")))
str(BNPP)
BNPP$block <- as.factor(BNPP$block)
levels(BNPP$block)
levels(BNPP$ppt_trt)
levels(BNPP$nut_trt)
#colonization of amf by ppt, nut,root, and block
# Proportion of intersections scored positive per root (sum/length == mean).
# NOTE(review): filter(count != "NA") compares against the string "NA"; rows
# with genuine NA are dropped only because filter() discards NA conditions.
# filter(!is.na(count)) would state the intent directly -- confirm which the
# data requires.
colonization <- data %>% group_by(block, ppt_trt, nut_trt, root, fungi) %>% filter(count != "NA") %>%
  summarize(percent=sum(count)/length(count))
#mean and standard deviation
col.plot.1 <- colonization %>% group_by(ppt_trt, nut_trt, fungi) %>%
  summarize(mean=mean(percent), stdev= sd(percent), se=sd(percent)/sqrt(length(percent)))
#formating moisture.data. Calculating soil moisture
#AS: I changed the formula to calculate % water out of DRY soil
moisture.data$dry_wt <- moisture.data$dry_soil_tin - moisture.data$tin_wt
moisture.data$water_wt <- moisture.data$wet_soil - moisture.data$dry_wt
# NOTE(review): the denominator is the raw `dry_soil` column, not the
# tin-corrected `dry_wt` computed above -- verify which is intended.
moisture.data$percent_moisture <- (moisture.data$water_wt / moisture.data$dry_soil) * 100 #changed to dry soil
#mean, sd, and se of soil moisture data
#AS: fixed error in se calculation (needed square root of n, my mistake on thursday)
moisture.stat <- moisture.data %>% group_by(ppt_trt, nut_trt) %>%
  summarize(mean=mean(percent_moisture), se=sd(percent_moisture)/sqrt(length(percent_moisture)))
#add soil moisture to colonization data
#AS: nice job joining these!! but I think I would use the average values (all 5 roots averaged per block)
#AS: I added col.plot.2 to average colonization, leaving block in
#AS: Then I joined moisture to the averaged colonization data in col.moist.plot2
col.moist.plot <- full_join(colonization, moisture.data)
col.plot.2 <- colonization %>% group_by(block, ppt_trt, nut_trt, fungi) %>%
  summarize(mean=mean(percent), stdev= sd(percent), se=sd(percent)/sqrt(length(percent)))
col.moist.plot2 <- full_join(col.plot.2, moisture.data)
#JD BNPP mean, sd, se
BNPP.stat <- BNPP %>% group_by(nut_trt, ppt_trt)%>%
  summarize(mean=mean(BNPP), stdev= sd(BNPP), se=sd(BNPP)/sqrt(length(BNPP)))
#add BNPP data to colonization and moisture data
col.moist.plot2<-merge(col.moist.plot2, BNPP)
#import plant composition data
plant.data <- read.csv("Compost_Cover_LongClean.csv", header=T)
levels(plant.data$ppt_trt) <- c("D"="d","W"="w","XC"="xc")#Change factors to lower case
levels(plant.data$nut_trt) <- c("C"="c", "F"="f", "N"="n")
str(plant.data)
plant.data$block <- as.factor(plant.data$block)
levels(plant.data$block)
levels(plant.data$ppt_trt)
levels(plant.data$nut_trt)
levels(plant.data$fxnl_grp)
levels(plant.data$Duration)
levels(plant.data$nativity)
levels(plant.data$date)
#percent grass/forb
plant1 <- plant.data%>%
dplyr::select(block, nut_trt, ppt_trt, pct_grass, pct_forb, pct_bare, pct_litter, litter_depth_cm)%>%
group_by(block, ppt_trt, nut_trt)%>%
summarise(pct.grass=max(pct_grass), pct.forb = max(pct_forb), pct.bare = max(pct_bare), pct.litter=max(pct_litter),litter.depth.cm=max(litter_depth_cm))
plant2 <- full_join(amf.moist, plant1)
#species data/ diversity
plant3 <- plant.data%>%
dplyr::select(block, ppt_trt, nut_trt, species, pct_cover, date)%>%
filter(date!="2019-04-19", date!="2019-04-20")%>%
spread(species, pct_cover)
cover <- plant3%>%
dplyr::select(5:56)
cover[is.na(cover)] <- 0
plant2$diversity <- diversity(cover)
#richness
plant2$richness <- specnumber(cover)
#Evenness diversity
#
#Needs Debugging
plant2$evenness <- plant2$diversity/log(specnumber(cover))
#functional group
plant4 <- plant.data%>%
dplyr::select(block, ppt_trt, nut_trt, fxnl_grp, pct_cover, date)%>%
filter(date!="2019-04-19", date!="2019-04-20")%>%
mutate(ppt_trt=ordered(ppt_trt, levels=c("d","xc","w")))%>%
mutate(nut_trt=ordered(nut_trt, levels=c("n","f","c")))
levels(plant4$ppt_trt)
levels(plant4$nut_trt)
plant4 <- plant4%>%
group_by(block, nut_trt, ppt_trt, fxnl_grp)%>%
summarise(percent=sum(pct_cover))%>%
spread(fxnl_grp, percent)
plant4 <- merge(plant4, plant2)
plant4 <- plant4%>%
select(-pct.grass, -pct.forb)
str(plant4)
colnames(plant4)[colnames(plant4) == "N-fixer"] <- "nfixer"
#calculations for Variance
var(plant4$mean)
#histogram of all data, looking for normality
ggplot(data=plant4, aes(x=mean))+
geom_density()
#transformation of data using asin(sqrt(mean))
#This works! Data is normal! p=0.1031
ggplot(data=plant4, aes(x=asin(sqrt(mean))))+
geom_density()
plant4$norm_mean <- asin(sqrt(plant4$mean))
shapiro.test(plant4$norm_mean)
#variance of normalized data
var(plant4$norm_mean)
#variance of AMF colonization within and between treatments
bartlett.test(norm_mean ~ ppt_trt, plant4)
bartlett.test(norm_mean ~ nut_trt, plant4)
#PLANT COMPOSITION STATS
#
#
#ANOVA for AMF and diversity
#significant relationship between AMF colonization and diversity (AMF decline with increasing diversity)
p1 = lme ( mean ~ diversity, random=~1|block, plant4, na.action=na.exclude)
summary(p1)
anova(p1)
ggplot(plant4, aes(x=diversity, y=mean))+
geom_point()+
geom_smooth(method="lm")
#ANOVA for AMF and richness
#significant relationship between AMF colonization and diversity (AMF decline with increasing diversity)
p1a = lme ( mean ~ richness, random=~1|block, plant4, na.action=na.exclude)
summary(p1a)
anova(p1a)
ggplot(plant4, aes(x=richness, y=mean))+
geom_point()+
geom_smooth(method="lm")
#ANOVA for AMF and Forb
#no significance
p2 = lme ( mean ~ Forb, random=~1|block, plant4, na.action=na.exclude)
summary(p2)
anova(p2)
#ANOVA for AMF and Grass
#no significance
p3 = lme ( mean ~ Grass, random=~1|block, plant4, na.action=na.exclude)
summary(p3)
anova(p3)
#ANOVA for AMF and N-fixer
#significant effects of nfixers on AMF, where AMF declines with increasing Nfixer cover
p4 = lme ( mean ~ nfixer, random=~1|block, plant4, na.action=na.exclude)
summary(p4)
anova(p4)
ggplot(plant4, aes(x=nfixer, y=mean))+
geom_point()+
geom_smooth(method="lm")
#ANOVA for AMF and evenness
#ANOVA for AMF richness
#ANOVA for forb and treatment
#significance for diversity X nut_trt, but not combined treatments
m1 = lm (diversity ~ ppt_trt + nut_trt + ppt_trt:nut_trt,
data = plant4)
summary(m1)
anova(m1)
#ANOVA for nfixer and treatment
#no significance
m2 = lm (nfixer ~ ppt_trt + nut_trt + ppt_trt:nut_trt,
data = plant4)
summary(m2)
anova(m2)
#ANOVA for forb and treatment
#no significance
m3 = lm (Forb ~ ppt_trt + nut_trt + ppt_trt:nut_trt,
data = plant4)
summary(m3)
anova(m3)
#ANOVA for grass and treatment
#no significance
m4 = lm (Grass ~ ppt_trt + nut_trt + ppt_trt:nut_trt,
data = plant4)
summary(m4)
anova(m4)
#across treatments
q1 = lme ( mean ~ diversity*nut_trt*ppt_trt, random=~1|block, plant4, na.action=na.exclude)
summary(q1)
anova(q1)
#Richness and AMF
#Significant intercept with ppt_trt
q2 = lme ( richness ~ mean*nut_trt*ppt_trt, random=~1|block, plant4, na.action=na.exclude)
summary(q2)
anova(q2)
#difference in richness with colonization
lq2 <- lsmeans(q2, ~mean*ppt_trt)
contrast(lq2, "pairwise")
#evenness
q3 = lme ( evenness ~ mean*nut_trt*ppt_trt, random=~1|block, plant4, na.action=na.exclude)
summary(q3)
anova(q3)
#FiGURES
#
#
#new data set for plots specifically
plot_data <- plant4
plot_data<- plot_data %>% mutate(nut_trt=ifelse(nut_trt=="c", "Compost",
ifelse(nut_trt=="f", "Fertilizer",
ifelse(nut_trt=="n", "No Amendment", nut_trt))))
plot_data<- plot_data %>% mutate(ppt_trt=ifelse(ppt_trt=="d", "Drought",
ifelse(ppt_trt=="xc", "Ambient",
ifelse(ppt_trt=="w", "Wet", ppt_trt))))
plot_data <- plot_data%>%
mutate(ppt_trt=ordered(ppt_trt, levels=c("Drought","Ambient","Wet")))%>%
mutate(nut_trt=ordered(nut_trt, levels=c("No Amendment","Fertilizer","Compost")))
str(plot_data)
levels(plot_data$ppt_trt)
levels(plot_data$nut_trt)
#diversity*amf
ggplot(subset(plot_data,fungi=="amf"), aes(y=diversity,x=mean))+
geom_point()+
geom_smooth(method="lm", se=F)+
facet_wrap(~nut_trt)+
ylab("diversity")+
xlab("AMF colonization (% root)")+
ggtitle("AMF vs. diversity")+
theme_classic() +
theme(legend.position="none", axis.text=element_text(size=16), axis.title=element_text(size=16), plot.title = element_text(size = 18, face = "bold"), strip.text.x = element_text(size = 16))
#nfixer*amf
ggplot(subset(plot_data,fungi=="amf"), aes(y=mean,x=nfixer))+
geom_point()+
geom_smooth(method="lm", se=F)+
ylab("AMF colonization")+
xlab("nitrogen fixers")+
ggtitle("AMF vs. nfixer")+
theme_classic() +
theme(legend.position="none", axis.text=element_text(size=16), axis.title=element_text(size=16), plot.title = element_text(size = 18, face = "bold"), strip.text.x = element_text(size = 16))
#richness*amf
ggplot(subset(plot_data,fungi=="amf"), aes(y=richness,x=mean, color=ppt_trt))+
geom_point()+
geom_smooth(method="lm", se=F)+
facet_wrap(~ppt_trt)+
xlab("AMF colonization")+
ylab("richness")+
ggtitle("Regression of Plot Richness with AMF Colonization")+
theme_classic()+
theme(legend.position="none", axis.text=element_text(size=16), axis.title=element_text(size=16), plot.title = element_text(size = 18, face = "bold"), strip.text.x = element_text(size = 16))+
scale_color_manual(values = c( "indianred1","lightgoldenrod2","skyblue2" ),
guide = guide_legend(title = "Precipitation Treatment"),
labels=c("Drought", "Ambient", "High"))
#diversity*nutrients
ggplot(plot_data,aes(x=nut_trt, y=diversity))+
geom_bar(stat="identity", position="dodge") +
ylab("diversity")+
xlab("")+
ggtitle("")+
scale_x_discrete(labels=c("Compost", "Fertilizer","No Amendment")) +
theme(legend.position=c(0.8,0.8), legend.title=element_text(size=14), legend.text=element_text(size=12), axis.text=element_text(size=16), axis.title=element_text(size=16), plot.title = element_text(size = 18, face = "bold"))+
#evenness*amf
ggplot(subset(plot_data,fungi=="amf"), aes(y=evenness,x=mean))+
geom_point()+
geom_smooth(method="lm", se=F)+
ylab("evenness")+
xlab("AMF colonization (% root)")+
ggtitle("AMF vs. evenness")+
theme_classic() +
theme(legend.position="none", axis.text=element_text(size=16), axis.title=element_text(size=16), plot.title = element_text(size = 18, face = "bold"), strip.text.x = element_text(size = 16))
#Boxplot of amf colonization across nut and ppt treatments.
ggplot(plot_data, aes(x=nut_trt, y=mean, fill=ppt_trt))+
geom_boxplot()+
scale_fill_manual(values = c( "indianred1","lightgoldenrod2","skyblue2" ),
guide = guide_legend(title = "Precipitation Treatment"),
labels=c("Drought","Ambient", "Wet"))
#Boxplot of BNPP across nut and ppt treatments.
ggplot(plot_data, aes(x=nut_trt, y=BNPP, fill=ppt_trt))+
geom_boxplot()+
scale_fill_manual(values = c( "indianred1","lightgoldenrod2","skyblue2" ),
guide = guide_legend(title = "Precipitation Treatment"),
labels=c( "Drought", "Ambient","Wet"))
|
0723982149fe516b6f1393263496a7f5ec17759c | 05a62c2797d2ab194e82498122e855c9b1537559 | /subclustering.R | 825bc7483cd8417cf3fe680904bb0c9df239d8e1 | [] | no_license | jdavisucd/Single-cell-multiomics-reveals-the-complexity-of-TGF-signalling-to-chromatin-in-iPSC-derived-kidney | e7a81a4b96680b11b2fca38090e05f0f085798d4 | b38f29307cd6b6a0d9af1c6c76319f638eaf10ed | refs/heads/main | 2023-04-15T22:02:18.736365 | 2022-11-03T10:50:47 | 2022-11-03T10:50:47 | 564,415,654 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,206 | r | subclustering.R | seurat <- FindSubCluster(seurat,
cluster = "Stroma 2/3_0",
algorithm = 3,
resolution = 0.5,
graph.name = "peaks_snn")
DimPlot(object = seurat, label = TRUE, repel = TRUE, group.by = "sub.cluster")
DimPlot(object = seurat, label = TRUE, repel = TRUE)
DimPlot(object = seurat, label = TRUE, repel = TRUE, group.by = "orig.ident")
DimPlot(object = subset(seurat, idents = "Stroma 2/3_0"), label = TRUE, repel = TRUE, group.by = "sub.cluster")
DimPlot(object = subset(seurat, idents = "Stroma 2/3_0"), label = TRUE, repel = TRUE, group.by = "orig.ident")
seurat <- RunUMAP(seurat, dims = 2:50, reduction = "harmony", reduction.name = "umap_harmony")
seurat <- RenameIdents(seurat, "Stroma 2/3_0_1" = "Stroma 3", "Stroma 2/3_0_2" = "Stroma 3", "Stroma 2/3_0_3" = "Stroma 2", "Stroma 2/3_0_0" = "Stroma 3")
Idents(seurat) <- seurat$sub.cluster
seurat$clusters <- Idents(seurat)
levels <- c("Stroma 1", "Stroma 2", "Stroma 3", "Stroma 4", "Podocyte", "Mixed Distal Endothelial", "Kidney Progenitor 1", "Kidney Progenitor 2", "Muscle Progenitor", "Glial Progenitor", "Neural Progenitor 1", "Neural Progenitor 2", "Neural Progenitor 3",)
|
67a06a8593481fc1b22eedf5e510e866863f8ee9 | 722e13d427cc095153233052b1f90ed138484cc3 | /man/spectra.Rd | 477bb91e6d0c3c9ef3aab4d027e8299d7362265f | [] | no_license | annakat/casper_defunct | 2eb0a261c67af7e3299c64816ec3e7113034c6dd | ed378b53ec54c104bfe66d615d944bf3622d3cfe | refs/heads/master | 2020-12-24T06:03:06.612672 | 2016-11-15T17:22:21 | 2016-11-15T17:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 654 | rd | spectra.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spectra.R
\name{spectra}
\alias{spectra}
\title{Create a spectra object}
\usage{
spectra(reflectance, wavelengths, names, meta = NULL, ...)
}
\arguments{
\item{reflectance}{N by M numeric matrix. N samples in rows. values between 0 and 1.}
\item{wavelengths}{wavelength names in vector of length M}
\item{names}{sample names in vector of length N}
\item{meta}{spectra metadata. defaults to NULL. Must be either of length or nrow
equals to the number of samples (i.e. nrow(reflectance) or length(names) )}
}
\value{
spectra object
}
\description{
Create a spectra object
}
|
85f3e34a43246339a83ecbc487322f4e13e0e6f4 | 86be8d8b3e5803515b1c693823d538717d02e4da | /asremlAIC.R | 70dda0979f68ec88801d14f956e533ea55cfceeb | [] | no_license | lisamarieharrison/R-functions-southern-ocean | 37c2aac28733af42fc8f0771ca64e3203b6cb460 | 071a808b889da8c3143a70b84179af9252173a54 | refs/heads/master | 2021-01-10T08:41:29.092535 | 2017-02-13T03:11:12 | 2017-02-13T03:11:12 | 46,317,376 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 282 | r | asremlAIC.R | asremlAIC <- function(obj) {
#calculates AIC for an asreml model
#obj = asreml model object
#returns list containing log likelihood, number of parameters and AIC
l <- obj$logl
K <- length(obj$gammas)
AIC <- -2*l + 2*K
return(list(l = l, K = K, AIC = AIC))
} |
9db34eec52def42f5c49cdf03407beec4107efff | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /highfrequency/man/selectExchange.Rd | 3faa079b6a71801c7ceb74be8f9196c8642cb671 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 984 | rd | selectExchange.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_handling.R
\name{selectExchange}
\alias{selectExchange}
\title{Retain only data from a single stock exchange}
\usage{
selectExchange(data, exch = "N")
}
\arguments{
\item{data}{an xts or data.table object containing the time series data.
The object should have a column "EX", indicating the exchange by its symbol.}
\item{exch}{The (vector of) symbol(s) of the stock exchange(s) that should be selected.
By default the NYSE is chosen (exch = "N"). Other exchange symbols are:
\itemize{
\item A: AMEX
\item N: NYSE
\item B: Boston
\item P: Arca
\item C: NSX
\item T/Q: NASDAQ
\item D: NASD ADF and TRF
\item X: Philadelphia
\item I: ISE
\item M: Chicago
\item W: CBOE
\item Z: BATS
}}
}
\value{
xts or data.table object depending on input
}
\description{
Function returns an xts object containing the data of only 1 stock exchange.
}
\author{
Jonathan Cornelissen and Kris Boudt
}
\keyword{cleaning}
|
272057422bb0ec40d8acd73ae361bc0d1ac6b865 | 85e365dd17802995c9ac3014001fb21deedeca3b | /IS 608 Knowledge and Visual Analytics/Assignment 3/PS1/ggplot/ui.R | 34fbcf08ce083fb5f56fc1eb528fa5611fed95b7 | [] | no_license | cspitmit03/CUNY | 3ebd6ee7bcf9cedc3816038bd7cc121bf41841f3 | 483fc6763cc82baa460cdd0626b6f76b8ac312e1 | refs/heads/master | 2020-03-15T21:29:04.027464 | 2016-12-01T04:24:31 | 2016-12-01T04:24:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 632 | r | ui.R | # This is the user interface part of the script
#
library('shiny')
library('ggplot2')
setwd('/Users/Charley/Downloads/cuny/IS 608 Knowledge and Visual Analytics/Assignment 3/PS1/ggplot')
# let's create a list of potential states and years
mort_ui <- read.csv('cleaned-cdc-mortality-1999-2010.csv')
cause <- lapply(unique(mort_ui$ICD.Chapter), as.character)
# shiny UI
shinyUI(pageWithSidebar(
headerPanel('Cause of Death by Year, by Type'),
sidebarPanel(selectInput("cause", "Cause: ",
choices=cause, selected='Certain infectious and parasitic diseases')),
mainPanel(plotOutput('values')))
)
|
76ccc1b93e10c6bb4caa883211e6571e89832c07 | e7b4e87c79608b120e69ab7b60f156d9f0e65b42 | /cachematrix.R | 2fb93ad0dd3743972b782500b972d67aedcc8644 | [] | no_license | celinechu/ProgrammingAssignment2 | 2e3325083646475f6d7ed00d631ec37766e2db3a | aa8df8ec038092fe8f3d372b359cdfa988cd1770 | refs/heads/master | 2021-01-17T08:00:53.013802 | 2015-06-18T04:02:26 | 2015-06-18T04:02:26 | 37,572,690 | 0 | 0 | null | 2015-06-17T04:34:25 | 2015-06-17T04:34:25 | null | UTF-8 | R | false | false | 1,726 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Function makeCacheMatrix creates a special "matrix" object that
## can cache its invese.
##
makeCacheMatrix <- function(x = matrix()) { # the function takes a matrix as input
inv <- NULL
set <- function(y) { # set(y) will pass y value to x, and saved in a special environment
x <<- y
inv <<- NULL #inside the set function, inv is set to be NULL
}
get <- function() x # this will get the x value
setinverse <- function(inverse) inv <<- inverse # this assign the inverse value to inv and saved in a special environment
getinverse <- function() inv # this retrieves the inverse value, which is inv.
list(set = set, get = get,
setinvere = setinverse, getinverse = getinverse) #the function returns a list of varibles
}
## Write a short comment describing this function
## Function cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then cacheSolve should retrieve
## in inverse from the cache.
cacheSolve <- function(x, ...) { #this function takes a list generated above as input
inv <- x$getinverse() # check to see whether there is a cached inv saved in the special environment
if(!is.null(inv)) { #if yes, then return the saved inverse value, and throw a message.
message("getting cache data")
return(inv)
}
data <- x$get() #if not, then get the actual matrix
inv <- solve(data,...) # calculate the inverse of the matrix
x$setinv(inv) #save the inverse value to the cache
inv #return the inverse value
}
|
43517247a08034380df677276629eb2b0bf596c1 | 1ddcc1392054036a2609a410a79730c16de66dd3 | /Week3/Code/PP_Lattice.R | 64feb6e4afec198f9fb61fa876dfeba1b01babad | [] | no_license | JiqiuWu/CMEECourseWork | de4bad2a2a67aa1dd5be8474a6228ef29873aee1 | 6950bd63ce095a19280e20472b8bb628dfcfe5f7 | refs/heads/master | 2020-03-30T16:05:36.070368 | 2019-09-09T20:35:03 | 2019-09-09T20:35:03 | 151,392,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | r | PP_Lattice.R | MyData <- read.csv("../data/EcolArchives-E089-51-D1.csv", header = T, stringsAsFactors = F)
library(lattice)
library(plyr)
pdf("../results/Pred_Lattice.pdf")
densityplot(~log(Predator.mass) | Type.of.feeding.interaction, data=MyData)
graphics.off()
pdf("../results/Prey_Lattice.pdf")
densityplot(~log(Prey.mass) | Type.of.feeding.interaction, data=MyData)
graphics.off()
pdf("../results/SizeRation_Lattice.pdf")
densityplot(~log(Prey.mass)/log(Predator.mass) | Type.of.feeding.interaction, data=MyData)
graphics.off()
PPResults <- ddply(MyData, ~ Type.of.feeding.interaction, summarize,
MeanMassPred = mean(log(Predator.mass)),
MedianMassPred = median(log(Predator.mass)),
MeanMassPrey = mean(log(Prey.mass)),
MedianMassPrey = median(log(Prey.mass)),
MeanRatio = mean(log(Predator.mass/Prey.mass)),
MedianRatio = median(log(Predator.mass/Prey.mass)))
write.csv(PPResults, file = "../results/PP_Results.csv", row.names = F)
|
182c8a489f42c936407230bde8906da18e5b7e82 | 23660ade69cb1c41b49a8a86e6c4197cab331ffa | /man/sgpop.Rd | dd42f7b29f2fb1eb18a14f08ce557345589fa357 | [] | no_license | FelipeJColon/SpatialDengue | e4c871557ec94a6786c683e2054e3b3958e7dff8 | a130214a52796093e6682973ba922ab7b8c04719 | refs/heads/master | 2023-05-31T18:53:40.367373 | 2020-11-26T15:46:11 | 2020-11-26T15:46:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 587 | rd | sgpop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sgpop.R
\docType{data}
\name{sgpop}
\alias{sgpop}
\title{Population of Singapore in raster format}
\format{A georeferenced raster file with pixels aligned in a 184 row, 271 column grid:
\describe{
\item{value}{number of people residing in pixel}
}}
\source{
\url{https://www.openstreetmap.org/#map=6/54.910/-3.432}
}
\usage{
sgpop
}
\description{
A dataset containing the number of people residing in each 100m x 100m pixel in the nation of Singapore using openstreetmap building data.
}
\keyword{datasets}
|
e249527822da2e4f857a21733958cfcdb7e9eea9 | b022e68f0139455784d95133deb4cf8f487142ce | /man/predict.WGLVmix.Rd | 80df7d5f69e5987f3701efd7481dee2a804d9e10 | [] | no_license | cran/REBayes | 1e311610a28f37509d2da28f81385b17e84b4bbf | e26237baf78f2dc4bb776ae29a2ddfce68963435 | refs/heads/master | 2022-05-13T04:13:53.603760 | 2022-03-22T17:20:02 | 2022-03-22T17:20:02 | 17,681,954 | 4 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,254 | rd | predict.WGLVmix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.WGLVmix.R
\name{predict.WGLVmix}
\alias{predict.WGLVmix}
\title{Predict Method for WGLVmix}
\usage{
\method{predict}{WGLVmix}(object, newdata, Loss = 2, ...)
}
\arguments{
\item{object}{Fitted object of class "GLVmix"}
\item{newdata}{data.frame with components(y,id,w) at which prediction is desired
this data structure must be compatible with that of \code{WGLVmix}, if newdata$w
is NULL then w is replaced by a vector of ones of length(y)}
\item{Loss}{Loss function used to generate prediction: Currently supported values:
2 to get mean predictions, 1 to get median predictions, 0 to get modal predictions
or any tau in (0,1) to get tau-th quantile predictions.}
\item{...}{optional arguments to predict}
}
\value{
A vector of predictions
}
\description{
Predict Method for Gaussian Location-scale Mixtures (Longitudinal Version)
}
\details{
The predict method for \code{WGLmix} objects will compute means, quantiles or
modes of the posterior according to the \code{Loss} argument. Typically,
\code{newdata} would be passed to \code{predict}. Note that these predictions
are for the location parameter only.
}
\author{
Roger Koenker
}
\keyword{nonparametric}
|
273b2c95a43eff5bba300abb9b98f9391b3d1fb6 | 5080d3c020721003422074648d4505b5e49a8e68 | /week2.R | 229f2ddac742a8e0f024d69b419accbd10830845 | [] | no_license | kala28/Week2Demo | 2435e0922924b7d19dec677f9f786d493d7722b2 | 2b2d691b25bba5f7f4f930d1c32e65fbd7f46281 | refs/heads/master | 2021-02-17T23:46:00.283067 | 2020-03-05T12:29:15 | 2020-03-05T12:29:15 | 245,137,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 573 | r | week2.R | #adding variables
x <- 3
x
y <- 6
y
#defining whether the x is numeric or not. if true it returns to YES or NO
is.numeric(x)
#Adding two Vectors
vec1 <- c(1, 2, 4, 9)
vec4 <- c(1, 9, 6)
vect_total = vec1 + vec4
vect_total
#
vec_A <- c("Hockey", "Foootball", "baseball", "curling", "rugby", "hurling", "basketball", "tennis", "cricket", "lacrosse")
vec_B <- c(vec_A, "Hockey", "lacrosse", "Hockey", "water polo", "hockey", "lacrosse")
#addi
vec_C <- vec_B[c(1,3,6)]
vec_C
vec_C_factor <- as.factor(vec_C)
class(vec_C_factor)
#find the type of the vectors.
class((vec_C)) |
37764024a17b311f1285b51fc3b165b5bdc613d2 | c0c0aa2d061bb15890cfb34cd4028a2f76df1035 | /ALBERTPAPERS/Figures/FiguresWord.R | 20a335aaccbeb3a07cb400267f279dd8dc678ae9 | [] | no_license | timriffe/ViennaDiagnostics | 82f88ae2b12e416a0ba46a89eae0348b3402a34f | aafba7388ee9a7ffc053686d5f86ea4eaffc9b08 | refs/heads/master | 2020-04-12T21:36:06.831560 | 2012-12-13T08:33:54 | 2012-12-13T08:33:54 | 2,657,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 56,563 | r | FiguresWord.R |
# TODO: Add comment
#
# Author: Tim Riffe
###############################################################################
setwd("C:/Users/triffe/git/ViennaPaper/ALBERTPAPERS/Figures/figsW")
setwd("/home/triffe/git/ViennaDiagnostics/ALBERTPAPERS/Figures/figsW")
#install.packages("devEMF")
library(devEMF)
#install.packages("Cairo")
library(Cairo)
#
# Figure 1a
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure1a.emf",width=7,height=7)
omar <- par("mar")
par("xaxs"="i","yaxs"="i",mar=c(10,4,1,2))
plot(NULL,type="n",xlim=c(11.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=1.5)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")
abline(v=seq(12,24,by=2),col="white")
abline(h=seq(20,80,by=20),col="white")
for (i in 12:24){
# in school
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
x <- rep(i-.3,length(y))
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.4,FN[1],i-.2,FN[3],col="#EEC900") #IQR box
segments(i-.4,FN[2],i-.2,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.3,FN[1],i-.3,mincut,lty=2,col="#EEC900") # lower whisker
segments(i-.3,FN[3],i-.3,maxcut,lty=2,col="#EEC900") # upper whisker
points(c(i-.3,i-.3),c(mincut,maxcut),pch=19,col="#EEC900",cex=.5)
if (i > 14){
# in union
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_union"]
x <- rep(i,length(y))
#points(jitter(x,amount=.05),y,col="#7D26CD30",pch=19)
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.1,FN[1],i+.1,FN[3],col="#7D26CD")
segments(i-.1,FN[2],i+.1,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i,FN[1],i,mincut,lty=2,col="#7D26CD")
segments(i,FN[3],i,maxcut,lty=2,col="#7D26CD")
points(c(i,i),c(mincut,maxcut),pch=19,col="#7D26CD",cex=.5)
# mother
y <- 100*(1-DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_childless"])
x <- rep(i+.3,length(y))
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.2,FN[1],i+.4,FN[3],col="#FF69B4")
segments(i+.2,FN[2],i+.4,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.3,FN[1],i+.3,mincut,lty=2,col="#FF69B4")
segments(i+.3,FN[3],i+.3,maxcut,lty=2,col="#FF69B4")
points(c(i+.3,i+.3),c(mincut,maxcut),pch=19,col="#FF69B4",cex=.5)
}
}
legend(10.5,-13.5,fill=c("#EEC900","#7D26CD","#FF69B4"),legend=c("% enrolled","% in union","% mother"),xpd=T,cex=1.5)
par(mar=omar)
dev.off()
##################################
# Figure 1b (males)
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure1b.emf",width=7,height=7)
omar <- par("mar")
par("xaxs"="i","yaxs"="i",mar=c(10,4,1,2))
plot(NULL,type="n",xlim=c(11.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=1.5)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")
abline(v=seq(12,24,by=2),col="white")
abline(h=seq(20,80,by=20),col="white")
for (i in 12:24){
# in school
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000,"prop_school"]
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.3,FN[1],i-.1,FN[3],col="#EEC900") #IQR box
segments(i-.3,FN[2],i-.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.2,FN[1],i-.2,mincut,lty=2,col="#EEC900") # lower whisker
segments(i-.2,FN[3],i-.2,maxcut,lty=2,col="#EEC900") # upper whisker
points(c(i-.2,i-.2),c(mincut,maxcut),pch=19,col="#EEC900",cex=.5)
if (i >14){
# in union
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000,"prop_union"]
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.1,FN[1],i+.3,FN[3],col="#7D26CD")
segments(i+.1,FN[2],i+.3,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.2,FN[1],i+.2,mincut,lty=2,col="#7D26CD")
segments(i+.2,FN[3],i+.2,maxcut,lty=2,col="#7D26CD")
points(c(i+.2,i+.2),c(mincut,maxcut),pch=19,col="#7D26CD",cex=.5)
}
}
legend(10.5,-13,fill=c("#EEC900","#7D26CD"),legend=c("% enrolled","% in union"),xpd=T,cex=1.5)
par(mar=omar)
dev.off()
#######################
# Figure 2a
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
CairoPNG("Figure2a.png",width=1000,height=1000,pointsize=25)
# figure 2adots
png("Figure2adots.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",cex.lab=1.5,axes=F,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
#segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]
}
#rect(57,60,100,100,col="white")
#legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
png("Figure2aline.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",cex.lab=1.5,axes=F,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]
}
rect(57,60,100,100,col="white")
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
#############################################################
# Figure 2c
# need to remove thailand, iran, nepal, palestine, sudan, re: email from Jeroen, 28 Nov, 2011:
# decision based on low response rates: probable bias leads to high leverage of particular points in plot
# that then overly determine the slope.
# I argued for weighting based on a combo of response rate and proportion significance
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# removing some countries (see above comment)
indrm <- DATA$country %in% c("Thailand", "Iran", "Nepal", "Palestine", "Sudan")
DATA <- DATA[!indrm,]
#CairoPNG("Figure2cdots.png",width=1000,height=1000,pointsize=25)
png("Figure2cline.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
sps <- spsprint <- c()
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% mother",axes=F,cex.lab=1.5,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# x = % mothers, y = % enrolled; one OLS fit and point cloud per age
for (i in 1:length(ages)){
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_childless"])
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"country"]  # NOTE(review): ctyi/yri/nas computed but never used in this loop; appears copied from the sibling loops -- confirm before removing
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"year"]
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
}
rect(57,60,100,100,col="white")  # legend backdrop
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
#############################
# Figure 2b
#------------------------------------------------------
# Male Scatter, all ages, Enrollment vs In Union (dots only;
# the fitted lines are drawn in the companion "lines" pass below)
#------------------------------------------------------
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# BUG FIX: the original opened two graphics devices back to back,
#   CairoPNG("Figure2b.png",...) ; png("Figure2bdots.png",...)
# All drawing then went to the second (png) device, the single dev.off()
# below closed only that device, and the Cairo device was leaked with
# "Figure2b.png" never finalized. Open only the device actually drawn to.
png("Figure2bdots.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",axes=F,cex.lab=1.5,asp=1)
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# One OLS fit per single year of age; only the translucent points are drawn
# here (the segments() call is deliberately commented out for the dots pass).
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
#segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
}
#rect(60,60,100,100,col="white")
#legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)  # redraw the plot frame on top
dev.off()
# BUG FIX: this second pass (regression lines drawn) originally wrote to
# "Figure2bdots.png" again, silently overwriting the dots-only figure produced
# just above. Renamed to "Figure2blines.png" (consistent with "Figure2cline.png").
png("Figure2blines.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",axes=F,cex.lab=1.5,asp=1)
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# Same loop as the dots pass, but with the per-age OLS line segments drawn.
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
}
#rect(60,60,100,100,col="white")
#legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)  # redraw the plot frame on top
dev.off()
################################################
# Figure 3a
#------------------------------------------------------
# Female boxplots split on school attendance
#------------------------------------------------------
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure3.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure3a.emf")  # devEMF device (Windows metafile output)
# FIX: snapshot only the *settable* graphics parameters. A bare par() call
# also returns read-only entries (cin, cra, csi, cxy, din, page), which the
# par(opar) restore at the end of this figure cannot set and warns about.
opar <- par(no.readonly=TRUE)
cols <- c("#EEC900","#FF69B4","#CD5B45","#8B008B")
par("xaxs"="i","yaxs"="i",mar=c(11,4,1,2))  # big bottom margin for the legend
plot(NULL,type="n",xlim=c(14.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=1.5)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")  # grey plot background
abline(v=seq(12,24,by=2),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
# iterate over ages
# The original loop repeated the identical box-and-whisker drawing stanza four
# times per age, differing only in data column, school flag, x-offset, and
# color. Factored into one helper; the drawing calls and geometry are the same.
#
# Draw one IQR box-and-whisker glyph centered at x = i + xmid.
# y:    vector of country-level percentages for one age/subgroup.
# Whiskers are clipped at 1.5 * IQR beyond the box (Tukey rule), with the
# whisker ends marked by small filled points.
drawBox <- function(i, xmid, y, col){
  FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
  rect(i+xmid-.1,FN[1],i+xmid+.1,FN[3],col=col)  # IQR box
  segments(i+xmid-.1,FN[2],i+xmid+.1,FN[2])      # median line
  w <- 1.5*abs(diff(range(FN)))                  # 1.5 * IQR (FN is sorted)
  maxcut <- min(max(y,na.rm=TRUE),FN[3]+w)       # upper whisker end, clipped
  mincut <- max(min(y,na.rm=TRUE),FN[1]-w)       # lower whisker end, clipped
  segments(i+xmid,FN[1],i+xmid,mincut,lty=2,col=col)  # lower whisker
  segments(i+xmid,FN[3],i+xmid,maxcut,lty=2,col=col)  # upper whisker
  points(c(i+xmid,i+xmid),c(mincut,maxcut),pch=19,col=col,cex=.5)
}
# Four glyphs per age, offset around the age tick:
# school x (union, child) combinations, colored per the legend below.
for (i in 15:24){
  # in school, in union
  drawBox(i, -.37, 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==1,"prop_union2"], cols[1])
  # in school, has child
  drawBox(i, -.12, 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==1,"prop_child2"], cols[2])
  # not in school, in union
  drawBox(i, .12, 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==0,"prop_union2"], cols[3])
  # not in school, has child
  drawBox(i, .37, 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==0,"prop_child2"], cols[4])
}
# Legend placed below the x-axis (negative y; xpd=T permits drawing in the margin)
legend(13.5,-13,fill=cols,legend=c("in school, in union","in school, mother",
"not in school, in union","not in school, mother"),xpd=T,cex=1.5)
par(opar)  # restore the graphics parameters saved at the top of this figure
dev.off()
##########################
# Figure 3b
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure3.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure3b.emf")  # devEMF device (Windows metafile output)
omar <- par("mar")  # only the margins are saved/restored for this figure
cols <- c("#EEC900","#CD5B45")
QuantilesMat <- matrix(ncol=4,nrow=13)  # NOTE(review): appears unused in this section -- confirm before removing
par("xaxs"="i","yaxs"="i",mar=c(11,4,1,2))  # big bottom margin for the legend
plot(NULL,type="n",xlim=c(14.5,24.5),ylim=c(0,100),ylab="% in union",xlab="Age",cex.lab=1.5)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")  # grey plot background
abline(v=seq(12,24,by=2),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
# Two box-and-whisker glyphs per age for males: in-school vs not-in-school
# proportions in union. Whiskers clipped at 1.5 * IQR beyond the box.
for (i in 15:24){
###################
# in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000 & DATA$school==1,"prop_union2"]
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.3,FN[1],i-.1,FN[3],col=cols[1]) #IQR box
segments(i-.3,FN[2],i-.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.2,FN[1],i-.2,mincut,lty=2,col=cols[1]) # lower whisker
segments(i-.2,FN[3],i-.2,maxcut,lty=2,col=cols[1]) # upper whisker
points(c(i-.2,i-.2),c(mincut,maxcut),pch=19,col=cols[1],cex=.5)  # whisker end markers
###################
# not in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000 & DATA$school==0,"prop_union2"]
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.1,FN[1],i+.3,FN[3],col=cols[2])
segments(i+.1,FN[2],i+.3,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.2,FN[1],i+.2,mincut,lty=2,col=cols[2])
segments(i+.2,FN[3],i+.2,maxcut,lty=2,col=cols[2])
points(c(i+.2,i+.2),c(mincut,maxcut),pch=19,col=cols[2],cex=.5)
}
# Legend placed below the x-axis (negative y; xpd=T permits drawing in the margin)
legend(13.5,-13,fill=cols,legend=c("in school, in union","not in school, in union"),xpd=T,cex=1.5)
par(mar=omar)  # restore the margins saved at the top of this figure
dev.off()
########################
# Figure 4
# NOTE(review): Figure 4 reads "figure5.tab" -- the data filenames are offset
# from the figure numbers in this part of the script; confirm against the source.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure5.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
CairoPNG("Figure4.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% in school total pop",
xlab="% mother of those enrolled",cex.lab=1.5,asp=1,axes=F)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# x = % mothers among the enrolled, y = % enrolled overall; one OLS per age.
# Surveys from 1998 onward are used here (not the round==2000 filter above).
for (i in 1:length(ages)){
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless_att"])
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]  # slope standard error
}
rect(60,60,100,100,col="white")  # legend backdrop
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
####################
# Figure 5
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure6.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
CairoPNG("Figure5.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i","yaxs"="i")
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% mother total pop",
xlab="% mother of those enrolled",cex.lab=1.5,asp=1,axes=FALSE)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# x = % mothers among the enrolled, y = % mothers overall; one OLS per age.
for (i in 1:length(ages)){
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless_att"])
y <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless"])
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]  # slope standard error
}
# Legend in the lower-right corner here (points cluster top-left in this figure)
rect(55,0,100,46,col="white",border="black")
legend(x=55,y=46,col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),bty="o",box.col="transparent")
rect(55,0,100,46)
rect(0,0,100,100)
dev.off()
####################
# Figure 6
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure7.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# Females, bivariate relationship, percentage in school and in union versus in union in the overall population
CairoPNG("Figure6.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i","yaxs"="i")
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% in union total pop",
xlab="% in union of those enrolled",cex.lab=1.5,axes=FALSE,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# x = % in union among the enrolled, y = % in union overall; one OLS per age.
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_union_att"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_union"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]  # slope standard error
}
# Legend in the lower-right corner
rect(55,0,100,46,col="white",border="black")
legend(x=55,y=46,col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),bty="o",box.col="transparent")
rect(55,0,100,46)
rect(0,0,100,100)
dev.off()
#######################
# Figure 8 (old figure 7 deprecated)
# NOTE(review): absolute Windows path -- not portable; this will fail on any
# other machine. Consider a relative path or a URL like the other sections.
DATA <- read.table("C:\\Users\\triffe\\git\\ViennaPaper\\ALBERTPAPERS\\Figures\\figsW\\data\\Figure8.txt",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
# Append a 2-digit hex alpha channel to one or more R colors.
#
# color: character vector of colors understood by col2rgb()
#        (names like "red" or "#RRGGBB" strings).
# alpha: hex alpha string, e.g. "80" (applied to every color).
# Returns a character vector of "#RRGGBBAA" strings, named after `color`
# (matching the names the original sapply()-based version produced).
colalpha <- function(color,alpha){
	# col2rgb() and rgb() are both vectorized, so the whole vector converts
	# in one call instead of one sapply() iteration per element.
	hex <- rgb(t(col2rgb(color)/255))
	setNames(paste(hex,alpha,sep=""), color)
}
# NOTE(review): this figure is truncated here -- a second script's header
# follows immediately below, so within this file the CairoPNG device opened
# here is never drawn to further nor closed with dev.off(). Confirm whether
# the remainder of this section was lost in a merge.
CairoPNG("Figure7.png",width=1000,height=1000,pointsize=25)
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),
ylab="% mother total pop (20)",xlab="% mother of those in school (20)",
axes=FALSE,cex.lab=1.5,asp=1)
#
# --- Start of a second, concatenated script (same figures, revised styling) ---
# Author: Tim Riffe
###############################################################################
# NOTE(review): setwd() with an absolute user path is not portable; fine for a
# one-off figure script, but breaks anywhere else.
setwd("C:/Users/triffe/git/ViennaPaper/ALBERTPAPERS/Figures/figsW")
#install.packages("devEMF")
library(devEMF)   # emf() device for Windows metafile output
#install.packages("Cairo")
library(Cairo)    # CairoPNG() device
#
# global parameters: depends on whether they go to ppt or docx:
# for docx: cex.lab should be 1
# for ppt should be 1.5
cex.lab <- 1  # axis-label magnification used by every plot() call below
# Figure 1a
# Female box-and-whisker glyphs by age: % enrolled (all ages), plus % in union
# and % mother from age 15 on. Whiskers clipped at 1.5 * IQR beyond the box.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure1a.emf",width=7,height=7)
omar <- par("mar")  # only the margins are saved/restored for this figure
par("xaxs"="i","yaxs"="i",mar=c(10,4,1,2))  # big bottom margin for the legend
plot(NULL,type="n",xlim=c(11.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=cex.lab)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")  # grey plot background
abline(v=seq(12,24,by=2),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
for (i in 12:24){
# in school
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
x <- rep(i-.3,length(y))  # NOTE(review): x is unused below (leftover from a jittered-points variant)
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.4,FN[1],i-.2,FN[3],col="#EEC900") #IQR box
segments(i-.4,FN[2],i-.2,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.3,FN[1],i-.3,mincut,lty=2,col="#EEC900") # lower whisker
segments(i-.3,FN[3],i-.3,maxcut,lty=2,col="#EEC900") # upper whisker
points(c(i-.3,i-.3),c(mincut,maxcut),pch=19,col="#EEC900",cex=.5)  # whisker end markers
if (i > 14){  # union/motherhood glyphs only from age 15 on
# in union
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_union"]
x <- rep(i,length(y))
#points(jitter(x,amount=.05),y,col="#7D26CD30",pch=19)
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.1,FN[1],i+.1,FN[3],col="#7D26CD")
segments(i-.1,FN[2],i+.1,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i,FN[1],i,mincut,lty=2,col="#7D26CD")
segments(i,FN[3],i,maxcut,lty=2,col="#7D26CD")
points(c(i,i),c(mincut,maxcut),pch=19,col="#7D26CD",cex=.5)
# mother
y <- 100*(1-DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000,"prop_childless"])
x <- rep(i+.3,length(y))
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.2,FN[1],i+.4,FN[3],col="#FF69B4")
segments(i+.2,FN[2],i+.4,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.3,FN[1],i+.3,mincut,lty=2,col="#FF69B4")
segments(i+.3,FN[3],i+.3,maxcut,lty=2,col="#FF69B4")
points(c(i+.3,i+.3),c(mincut,maxcut),pch=19,col="#FF69B4",cex=.5)
}
}
# Legend below the x-axis (negative y; xpd=T permits drawing in the margin)
legend(10.5,-13.5,fill=c("#EEC900","#7D26CD","#FF69B4"),legend=c("% enrolled","% in union","% mother"),xpd=T,cex=cex.lab)
par(mar=omar)  # restore margins
dev.off()
##################################
# Figure 1b (males)
# Same design as Figure 1a but for males, with only the enrolled/in-union glyphs.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure1b.emf",width=7,height=7)
omar <- par("mar")  # only the margins are saved/restored for this figure
par("xaxs"="i","yaxs"="i",mar=c(10,4,1,2))  # big bottom margin for the legend
plot(NULL,type="n",xlim=c(11.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=cex.lab)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")  # grey plot background
abline(v=seq(12,24,by=2),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
for (i in 12:24){
# in school
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000,"prop_school"]
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.3,FN[1],i-.1,FN[3],col="#EEC900") #IQR box
segments(i-.3,FN[2],i-.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.2,FN[1],i-.2,mincut,lty=2,col="#EEC900") # lower whisker
segments(i-.2,FN[3],i-.2,maxcut,lty=2,col="#EEC900") # upper whisker
points(c(i-.2,i-.2),c(mincut,maxcut),pch=19,col="#EEC900",cex=.5)  # whisker end markers
if (i >14){  # union glyph only from age 15 on
# in union
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000,"prop_union"]
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.1,FN[1],i+.3,FN[3],col="#7D26CD")
segments(i+.1,FN[2],i+.3,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.2,FN[1],i+.2,mincut,lty=2,col="#7D26CD")
segments(i+.2,FN[3],i+.2,maxcut,lty=2,col="#7D26CD")
points(c(i+.2,i+.2),c(mincut,maxcut),pch=19,col="#7D26CD",cex=.5)
}
}
# Legend below the x-axis (negative y; xpd=T permits drawing in the margin)
legend(10.5,-13,fill=c("#EEC900","#7D26CD"),legend=c("% enrolled","% in union"),xpd=T,cex=cex.lab)
par(mar=omar)  # restore margins
dev.off()
#######################
# Figure 2a
# Female scatter by age: % enrolled vs % in union, one OLS fit per age.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
CairoPNG("Figure2a.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","orange","magenta","blue"),space="Lab")  # Lab interpolation (revised palette vs the first script)
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",cex.lab=cex.lab,axes=F,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)  # "45" appended as hex alpha
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]  # slope standard error
}
rect(57,60,100,100,col="white")  # legend backdrop
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
#############################################################
# Figure 2c
# need to remove thailand, iran, nepal, palestine, sudan, re: email from Jeroen, 28 Nov, 2011:
# decision based on low response rates: probable bias leads to high leverage of particular points in plot
# that then overly determine the slope.
# I argued for weighting based on a combo of response rate and proportion significance
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# removing some countries (see above comment)
indrm <- DATA$country %in% c("Thailand", "Iran", "Nepal", "Palestine", "Sudan")
DATA <- DATA[!indrm,]
CairoPNG("Figure2c.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
sps <- spsprint <- c()
library(grDevices)
colsR <- colorRampPalette(c("green","orange","magenta","blue"),space="Lab")
cols <- colsR(length(ages))
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% mother",axes=F,cex.lab=cex.lab,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# x = % mothers, y = % enrolled; one OLS fit and point cloud per age
for (i in 1:length(ages)){
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_childless"])
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"country"]  # NOTE(review): ctyi/yri/nas computed but never used in this loop -- confirm before removing
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$round==2000,"year"]
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
}
rect(57,60,100,100,col="white")  # legend backdrop
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
#############################
# Figure 2b
#------------------------------------------------------
# Male Scatter, all ages, Enrollment vs In Union
#------------------------------------------------------
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure1_2.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
CairoPNG("Figure2b.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","orange","magenta","blue"),space="Lab")
cols <- colsR(length(ages))
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% enrolled",xlab="% in union",axes=F,cex.lab=cex.lab,asp=1)
rect(0,0,100,100,col="#EBEBEB")  # grey plot background
abline(v=seq(20,80,by=20),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_union"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"prop_school"]
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Male" & DATA$round==2000,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)  # running list of country-year labels with complete data
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25)) # NOTE(review): unused
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
}
rect(60,60,100,100,col="white")  # legend backdrop
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
################################################
# Figure 3a
#------------------------------------------------------
# Female boxplots split on school attendance
#------------------------------------------------------
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure3.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure3a.emf")  # devEMF device (Windows metafile output)
# FIX: snapshot only the *settable* graphics parameters. A bare par() call
# also returns read-only entries (cin, cra, csi, cxy, din, page), which the
# par(opar) restore at the end of this figure cannot set and warns about.
opar <- par(no.readonly=TRUE)
cols <- c("#EEC900","#FF69B4","#CD5B45","#8B008B")
par("xaxs"="i","yaxs"="i",mar=c(11,4,1,2))  # big bottom margin for the legend
plot(NULL,type="n",xlim=c(14.5,24.5),ylim=c(0,100),ylab="% population",xlab="Age",cex.lab=cex.lab)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")  # grey plot background
abline(v=seq(12,24,by=2),col="white")  # white gridlines
abline(h=seq(20,80,by=20),col="white")
# iterate over ages
for (i in 15:24){
###################
# in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==1,"prop_union2"]
x <- rep(i-.3,length(y))
xmid <- -.37
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+xmid-.1,FN[1],i+xmid+.1,FN[3],col=cols[1]) #IQR box
segments(i+xmid-.1,FN[2],i+xmid+.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+xmid,FN[1],i+xmid,mincut,lty=2,col=cols[1]) # lower whisker
segments(i+xmid,FN[3],i+xmid,maxcut,lty=2,col=cols[1]) # upper whisker
points(c(i+xmid,i+xmid),c(mincut,maxcut),pch=19,col=cols[1],cex=.5)
###################
# in school, has child
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==1,"prop_child2"]
x <- rep(i-.3,length(y))
xmid <- -.12
#points(jitter(x,amount=.05),y,col="#FF450050",pch=19)
# IQR box:"#FF69B4"
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+xmid-.1,FN[1],i+xmid+.1,FN[3],col=cols[2]) #IQR box
segments(i+xmid-.1,FN[2],i+xmid+.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+xmid,FN[1],i+xmid,mincut,lty=2,col=cols[2]) # lower whisker
segments(i+xmid,FN[3],i+xmid,maxcut,lty=2,col=cols[2]) # upper whisker
points(c(i+xmid,i+xmid),c(mincut,maxcut),pch=19,col=cols[2],cex=.5)
###################
# not in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==0,"prop_union2"]
x <- rep(i-.3,length(y))
xmid <- .12
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+xmid-.1,FN[1],i+xmid+.1,FN[3],col=cols[3]) #IQR box
segments(i+xmid-.1,FN[2],i+xmid+.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+xmid,FN[1],i+xmid,mincut,lty=2,col=cols[3]) # lower whisker
segments(i+xmid,FN[3],i+xmid,maxcut,lty=2,col=cols[3]) # upper whisker
points(c(i+xmid,i+xmid),c(mincut,maxcut),pch=19,col=cols[3],cex=.5)
###################
# not in school, has child
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Female" & DATA$round==2000 & DATA$school==0,"prop_child2"]
x <- rep(i-.3,length(y))
xmid <- .37
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+xmid-.1,FN[1],i+xmid+.1,FN[3],col=cols[4]) #IQR box
segments(i+xmid-.1,FN[2],i+xmid+.1,FN[2]) #median line
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+xmid,FN[1],i+xmid,mincut,lty=2,col=cols[4]) # lower whisker
segments(i+xmid,FN[3],i+xmid,maxcut,lty=2,col=cols[4]) # upper whisker
points(c(i+xmid,i+xmid),c(mincut,maxcut),pch=19,col=cols[4],cex=.5)
}
legend(13.5,-13,fill=cols,legend=c("in school, in union","in school, mother",
"not in school, in union","not in school, mother"),xpd=T,cex=cex.lab)
par(opar)
dev.off()
##########################
# Figure 3b
# Male version: % in union by age, boxplots split by school attendance
# (yellow = in school, red = not in school).
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure3.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
emf(file="Figure3b.emf")
omar <- par("mar")
cols <- c("#EEC900","#CD5B45")
# NOTE(review): QuantilesMat is never filled or used below -- dead code?
QuantilesMat <- matrix(ncol=4,nrow=13)
par("xaxs"="i","yaxs"="i",mar=c(11,4,1,2))
# NOTE(review): cex.lab is assumed to be defined earlier in the script.
plot(NULL,type="n",xlim=c(14.5,24.5),ylim=c(0,100),ylab="% in union",xlab="Age",cex.lab=cex.lab)
extr <- par("usr")
rect(extr[1],extr[3],extr[2],extr[4],col="#EBEBEB")
abline(v=seq(12,24,by=2),col="white")
abline(h=seq(20,80,by=20),col="white")
for (i in 15:24){
###################
# in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000 & DATA$school==1,"prop_union2"]
# IQR box:
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i-.3,FN[1],i-.1,FN[3],col=cols[1]) #IQR box
segments(i-.3,FN[2],i-.1,FN[2]) #median line
# Whiskers clipped at 1.5 * (Q3 - Q1) beyond the quartiles (Tukey rule).
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i-.2,FN[1],i-.2,mincut,lty=2,col=cols[1]) # lower whisker
segments(i-.2,FN[3],i-.2,maxcut,lty=2,col=cols[1]) # upper whisker
points(c(i-.2,i-.2),c(mincut,maxcut),pch=19,col=cols[1],cex=.5)
###################
# not in school, in union
###########
y <- 100*DATA[DATA$age==i & DATA$sex=="Male" & DATA$round==2000 & DATA$school==0,"prop_union2"]
FN <- quantile(y,probs=c(.25,.5,.75),na.rm=TRUE)
rect(i+.1,FN[1],i+.3,FN[3],col=cols[2])
segments(i+.1,FN[2],i+.3,FN[2])
maxcut <- ifelse(max(y,na.rm=T) > FN[3]+1.5*abs(diff(range(FN))),FN[3]+1.5*abs(diff(range(FN))),max(y,na.rm=T))
mincut <- ifelse(min(y,na.rm=T) < FN[1]-1.5*abs(diff(range(FN))),FN[1]-1.5*abs(diff(range(FN))),min(y,na.rm=T))
segments(i+.2,FN[1],i+.2,mincut,lty=2,col=cols[2])
segments(i+.2,FN[3],i+.2,maxcut,lty=2,col=cols[2])
points(c(i+.2,i+.2),c(mincut,maxcut),pch=19,col=cols[2],cex=.5)
}
legend(13.5,-13,fill=cols,legend=c("in school, in union","not in school, in union"),xpd=T,cex=cex.lab)
par(mar=omar)
dev.off()
########################
# Figure 4
# Females: % enrolled (total pop) vs % mothers among the enrolled, one OLS
# fit per single year of age, surveys from 1998 on.
# NOTE(review): this section reads figure5.tab -- the data-file numbering
# appears offset by one from the figure numbering; confirm against source.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure5.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# NOTE(review): CairoPNG() comes from the Cairo package, but library(Cairo)
# is only called in the Figure 5 section below -- assumed already attached.
CairoPNG("Figure4.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
# One color per age along a green-yellow-magenta-blue ramp.
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
# Accumulators: slope SEs, printable slope labels, slopes, country-year ids.
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i");par("yaxs"="i");par(mar=c(4,4,1,2))
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% in school total pop",
xlab="% mother of those enrolled",cex.lab=cex.lab,asp=1,axes=F)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
# x: % mothers among the enrolled (complement of prop_childless_att);
# y: % enrolled in total population; both in percent.
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless_att"])
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_school"]
# Track country-year identifiers of the non-missing observations.
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
# Per-age OLS fit, drawn only over the observed x-range.
LM <- lm(y~x)
# NOTE(review): xref is computed but unused in this section (only the
# Figure 8 section feeds it to predict()).
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
# Significance stars appended to the printed slope label.
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]
}
rect(60,60,100,100,col="white")
legend("topright",col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),box.col="transparent")
rect(0,0,100,100)
dev.off()
####################
# Figure 5
# Females: % mothers in total population vs % mothers among the enrolled,
# one OLS fit per single year of age, surveys from 1998 on.
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure6.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
library(Cairo)
cex.lab=1
CairoPNG("Figure5.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
# Accumulators: slope SEs, printable slope labels, slopes, country-year ids.
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i","yaxs"="i")
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% mother total pop",
xlab="% mother of those enrolled",cex.lab=cex.lab,asp=1,axes=FALSE)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
# Both axes are % mothers (complements of the childless proportions).
x <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless_att"])
y <- 100*(1-DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_childless"])
# Track country-year identifiers of the non-missing observations.
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
# Per-age OLS fit, drawn only over the observed x-range.
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
# Significance stars appended to the printed slope label.
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]
}
rect(55,0,100,46,col="white",border="black")
legend(x=55,y=46,col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),bty="o",box.col="transparent")
rect(55,0,100,46)
rect(0,0,100,100)
dev.off()
####################
# Figure 6
DATA <- read.table("http://www.ced.uab.es/worldfam/figures/figure7.tab",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
DATA$sex <- as.character(DATA$sex)
# Females, bivariate relationship, percentage in school and in union versus in union in the overall population
CairoPNG("Figure6.png",width=1000,height=1000,pointsize=25)
ages <- 15:24
library(grDevices)
colsR <- colorRampPalette(c("green","yellow","magenta","blue"))
cols <- colsR(length(ages))
# Accumulators: slope SEs, printable slope labels, slopes, country-year ids.
sdev <- spsprint <- sps <- cty <- c()
par("xaxs"="i","yaxs"="i")
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),ylab="% in union total pop",
xlab="% in union of those enrolled",cex.lab=cex.lab,axes=FALSE,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
for (i in 1:length(ages)){
# x: % in union among the enrolled; y: % in union in total population.
x <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_union_att"]
y <- 100*DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"prop_union"]
# Track country-year identifiers of the non-missing observations.
ctyi <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"country"]
yri <- DATA[DATA$age==ages[i] & DATA$sex=="Female" & DATA$year>=1998,"year"]
ctyi <- paste(ctyi,yri,sep="")
nax <- which(is.na(x)) ; nay <- which(is.na(y))
nas <- unique(c(nax,nay))
if (length(nas)>0){ ctyi <- ctyi[-nas]}
cty <- c(cty,ctyi)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
# Per-age OLS fit, drawn only over the observed x-range.
LM <- lm(y~x)
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=cols[i],lwd=2)
points(x,y,col=paste(cols[i],45,sep=""),pch=19)
pv <- summary(LM)$coefficients[2,4] # p val
# Significance stars appended to the printed slope label.
pv <- ifelse(pv<.0001,"***",ifelse(pv<.001,"**",ifelse(pv<.01,"*",ifelse(pv<.05,"'",""))))
sps[i] <- summary(LM)$coefficients[2,1]
spsprint[i] <- paste(round(sps[i],3),pv)
sdev[i] <- summary(LM)$coefficients[2,2]
}
rect(55,0,100,46,col="white",border="black")
legend(x=55,y=46,col=cols,lwd=2,legend=paste(ages,", slope = ",spsprint,sep=""),bty="o",box.col="transparent")
rect(55,0,100,46)
rect(0,0,100,100)
dev.off()
#######################
# Figure 8 (old figure 7 deprecated)
# NOTE(review): hard-coded absolute Windows path -- not portable; consider a
# relative path or file.path() rooted at a configurable data directory.
DATA <- read.table("C:\\Users\\triffe\\git\\ViennaPaper\\ALBERTPAPERS\\Figures\\figsW\\data\\Figure8.txt",header=T,sep="\t",na.strings = ".")
DATA$country <- as.character(DATA$country)
# Append an alpha suffix to each color's "#RRGGBB" hex code.
# `color` is a vector of R color names (or hex codes); `alpha` is pasted on
# verbatim (e.g. 65 -> "...65"). Returns a character vector named after the
# input colors, like the original sapply() did.
colalpha <- function(color,alpha){
  appendAlpha <- function(clr){
    hexCode <- rgb(t(col2rgb(clr)/255))
    paste0(hexCode, alpha)
  }
  vapply(color, appendAlpha, character(1))
}
# NOTE(review): the output file is "Figure7.png" although this section is
# labeled Figure 8 -- confirm the intended file name.
CairoPNG("Figure7.png",width=1000,height=1000,pointsize=25)
plot(NULL,type="n",xlim=c(0,100),ylim=c(0,100),
ylab="% mother total pop (20)",xlab="% mother of those in school (20)",
axes=FALSE,cex.lab=cex.lab,asp=1)
extr <- par("usr")
rect(0,0,100,100,col="#EBEBEB")
abline(v=seq(20,80,by=20),col="white")
abline(h=seq(20,80,by=20),col="white")
colsi <- c("purple","orange")
axis(1,cex=2,pos=0);axis(2,cex=2,pos=0)
# Two series taken from columns 3 and 4 of DATA (i+2); presumably primary
# vs secondary+ schooling, matching the legend below -- confirm.
for (i in 1:2){
x1 <- 100*DATA[,i+2]
y1 <- 100*DATA$prop_child
# Drop degenerate 0% / 100% observations on either axis.
x <- x1[!(x1==0 | x1 == 100 | y1==0 | y1 == 100)]
y <- y1[!(x1==0 | x1 == 100 | y1==0 | y1 == 100)]
points(x,y,col=colalpha(colsi[i],65),pch=19)
minx <- min(x,na.rm=T) ; maxx <- max(x,na.rm=T)
LM <- lm(y~x) # OLS
# 95% confidence band for the fitted line: shaded polygon plus dashed edges.
xref <- data.frame(x=seq(from=minx, to=maxx, length.out=25))
clim <- as.data.frame(predict(LM, xref, level=0.95, interval="confidence")) # confidence limits
#paste(cols[i],15,sep="")
polygon(c(xref$x,rev(xref$x)),c(clim$lwr,rev(clim$upr)),col="#30303010",border="transparent")
lines(cbind(xref,clim$lwr), col=colsi[i], lty="dashed")
lines(cbind(xref,clim$upr), col=colsi[i], lty="dashed")
segments(minx,LM$coef[1]+LM$coef[2]*minx,maxx,LM$coef[1]+LM$coef[2]*maxx,col=colsi[i],lwd=2)
}
rect(65,0,100,13,col="white",border="black")
legend(65,13,col=colsi,lty=1,lwd=2,legend=c("primary","secondary +"),bty="o",box.col="transparent")
rect(65,0,100,13)
rect(0,0,100,100)
dev.off()
|
5f6e306f835ba99950b0c8802c37fd5fab8ac141 | 4537ff4f3743a1716d23b39f68e268c0680f3c78 | /linmod/spring.r | c7de7a3bca342c7b312fefa71d29b93cad990b8d | [] | no_license | arnabc74/arnabc74.github.io | 699ce4967b8a17815d75ba95f75692c3d56bb2ca | 4dc1be6abf045eacdbcbd9c918777f01dd106f70 | refs/heads/main | 2023-08-23T13:34:24.047576 | 2023-08-22T02:41:42 | 2023-08-22T02:41:42 | 249,955,696 | 0 | 2 | null | 2020-06-11T17:06:50 | 2020-03-25T11:04:50 | JavaScript | UTF-8 | R | false | false | 106 | r | spring.r | x = rep(1:10,3)
# Simulate length as a linear function of weight with small Gaussian noise
# (x is defined on the preceding line as rep(1:10, 3)).
y = 2+3*x + rnorm(length(x))/3
plot(x,y)
# Redirect console output to spring.txt.
sink('spring.txt')
# NOTE(review): when this file is source()d, a bare expression is not
# auto-printed, so spring.txt would be empty; wrap in print() if the table
# is meant to be written.
data.frame(wt=x,len=y)
sink()
|
a093950d209256d7c3990182e75c314fda6862fc | 17f1b5b761a43ec178602a43f24ac72c2d5d01a9 | /hmlasso/inst/testfiles/softThresholdC/libFuzzer_softThresholdC/softThresholdC_valgrind_files/1609897526-test.R | 6d08d757bc38fb3991fff4a8a5b38f745db50a09 | [] | no_license | akhikolla/newtestfiles-2 | 3e1882e7eea3091f45003c3abb3e55bc9c2f8f56 | e539420696b7fdc05ce9bad66b5c7564c5b4dab2 | refs/heads/master | 2023-03-30T14:44:30.614977 | 2021-04-11T23:21:23 | 2021-04-11T23:21:23 | 356,957,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 115 | r | 1609897526-test.R | testlist <- list(g = 0, z = 2.56859788616406e-319)
# Auto-generated fuzzer regression: call the compiled soft-threshold kernel
# with the captured inputs in `testlist` (built on the preceding line).
result <- do.call(hmlasso:::softThresholdC,testlist)
str(result) |
cb8778cad5aa83e3fd96beb4b79ab59ba490919d | d21e26ba99c0cb19b73ebc1f3e989adb979d5826 | /R/Ω unused/excel_format_check.R | b31cba827d83e26aa81ec4f7f808091d11b280e7 | [] | no_license | chatchavan/Agora | 168b7c9c797c1053c6e00dd2f4c86888396183f0 | 547815a9915a5ad9f2a856528bdeb9a48bc031b9 | refs/heads/master | 2023-04-13T16:14:07.610749 | 2021-04-23T08:48:12 | 2021-04-23T08:48:12 | 308,588,718 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,381 | r | excel_format_check.R | # check formats that are used in the coding to narrow down formats that matters
library(tidyverse)
library(tidyxl)  # cell-level .xlsx import: xlsx_cells(), xlsx_formats()
library(fs)      # dir_ls() for recursive file listing
# Coding spreadsheets from both coders' transcript folders; the regexp
# "/..\\.xlsx$" matches files whose basename is exactly two characters.
paths_xlsx <-
  c(dir_ls("../../04 Transcripts/Chat/", recurse = TRUE, regexp = "/..\\.xlsx$"),
    dir_ls("../../04 Transcripts/Nacho/", recurse = TRUE, regexp = "/..\\.xlsx$"))
# One row per workbook: coder inferred from the path, plus nested tables of
# cell contents and cell formats for each file.
xlsx_df <-
  tibble(path = paths_xlsx) %>%
  mutate(
    coder = if_else(str_detect(path, "Chat"), "Chat", "Nacho"),
    cells = map(path, xlsx_cells),
    format = map(path, xlsx_formats))
#===============================================================================
# check the number of local format
# Histogram of distinct local_format_id values per workbook.
xlsx_df %>%
  mutate(
    unique_local_format_id = map_int(cells, ~ length(unique(.x$local_format_id)))) %>%
  pull(unique_local_format_id) %>%
  hist()
# conclusion: small amount. ignoring them
#===============================================================================
# check the data type of the cells
# Frequency of tidyxl cell data types per file, one column per type.
xlsx_df %>%
  mutate(data_type_freq = map(cells, function(cells) {
    cells %>%
      group_by(data_type) %>%
      summarize(n = n())
  })) %>%
  unnest(data_type_freq) %>%
  pivot_wider(id_cols = path, names_from = data_type, values_from = n) %>%
  view()
# observation: only three data types: "blank", "character", and "date". Only two cells out of all files are "date"
# Locate the stray "date" cells so they can be fixed in the source files.
xlsx_df %>%
  unnest(cells) %>%
  filter(data_type == "date")
# conclusion: manually fixed them to be a character type for a fuss-free processing
#===============================================================================
# narrow down the formats used
# One row per formatted character span within each cell.
char_span_df <-
  xlsx_df %>%
  unnest(cells) %>%
  select(path, coder, participant_id = sheet, row, col, character_raw = character, character_formatted) %>%
  unnest(character_formatted)
# Tally which formatting attributes are actually set (non-NA, non-FALSE),
# excluding attributes that carry no coding information.
char_span_df %>%
  mutate(across(bold:family, ~as.character(.x))) %>%
  pivot_longer(bold:family, names_to = "format_type", values_to = "format_value") %>%
  filter(!is.na(format_value),
         format_value != "FALSE",
         !is.element(format_type, c("size", "font", "family", "color_tint", "color_theme"))) %>%
  group_by(format_type, format_value) %>%
  summarize(n = n())
# conclusions: only formats that were used are "color_rgb" and "bold"
#===============================================================================
# check valid cell range
# Which spreadsheet columns actually contain coded spans.
char_span_df %>%
  pull(col) %>% unique()
|
a1991c2492fa49e88a1d6a1482abaa47f856b0be | 9fa889c10187dff24aecc072949db196562da19b | /RScripts_Recession/RScript05-4_IncPovAnalysis.R | 6601bf0780f14d8c71773670f8ee2e0809b3a1e5 | [] | no_license | snandi/Project_Recession | 098ef216939ec764cd9fef1b06fd2ee6ea640ac5 | 1bdbf170e4c9491def5af541f613b0846599e119 | refs/heads/master | 2020-04-12T09:41:40.357259 | 2017-08-21T05:55:17 | 2017-08-21T05:55:17 | 41,777,027 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,024 | r | RScript05-4_IncPovAnalysis.R | rm( list = ls( all.names = TRUE ) )
# NOTE(review): clearing the whole workspace inside a script is an
# anti-pattern (and duplicates the rm() on the previous line); prefer
# running the script in a fresh R session instead.
rm( list = objects( all.names = TRUE ) )
#dev.off( )
########################################################################
## This script analyzes the income poverty over time. This is similar to
## RScript05-3, with the following differences
## 1. This includes interactions between time and all demographic factors
########################################################################
# Persist a fitted model object to the directory named by the global
# RDataPath. Note: save() stores the object under the symbol name
# "modelData", so every file written here loads back as `modelData`
# regardless of the caller's variable name.
saveModel <- function( modelData, modelFilename ){
  destination <- paste0( RDataPath, modelFilename )
  save( modelData, file = destination )
}
# Derive model covariates on the income-poverty extract, save the
# model-ready data frame to RDataPath (global), and return it.
# Assumes Data has columns yearqtr, whfnwgt_qtr, hhid, race_origin, ms,
# gender and education; the factor level orders set below fix the
# reference categories used by the models further down.
preprocessAndSaveData <- function( Data, filenameModelData = 'Data_forIncPovModel_v5.RData' ){
# Calendar year = first four characters of yearqtr, stored as a factor.
Data$year <- substr( x = Data$yearqtr, start = 1, stop = 4 )
Data$year <- as.factor( Data$year )
# Quarterly household weight rescaled to thousands.
Data$wt <- Data$whfnwgt_qtr/1000
Data$hhid <- as.factor( Data$hhid )
# Numeric time index; assumes yearqtr is numeric-coercible (e.g. a
# zoo::yearqtr value) -- TODO confirm.
Data$yearqtrNum <- as.numeric( Data$yearqtr )
## This is necessary for post hoc test, otherwise it is throwing exception as the
## object not being a matrix, when difflsmeans is called
# Mean-centered time, stored under both names used downstream.
Data$Time <- Data$yearQtrNumCentered <- Data$yearqtrNum - mean( Data$yearqtrNum )
Data$race_origin <- factor( Data$race_origin, levels = c( "White", "Black", "Hispanic", "Others") )
Data$ms <- factor( Data$ms, levels = c( "Married", "Not married" ) )
Data$gender <- factor( Data$gender, levels = c( "Male", "Female" ) )
Data$education <- factor( Data$education,
levels = c( "Bachelors or higher", "Some college, diploma, assoc", "High School or less" ) )
Data_forIncPovModel <- Data
filenameData <- paste0( RDataPath, filenameModelData )
save( Data_forIncPovModel, file = filenameData )
return( Data_forIncPovModel )
}
#
# Format a (lmerTest-style) anova table for xtable output: drop rows with
# missing entries, optionally adjust the p-values for multiple testing,
# sort by significance, prettify the term labels, and return only the
# F.value and p.value columns (rounded).
#
# Args:
#   anovaTable: object whose primary class is "anova"; assumed to have the
#               6-column layout (Sum Sq, Mean Sq, NumDF, DenDF, F, p).
#   multipleCorrection: apply p.adjust() to the p-values?
#   multipleCorrectionMethod: method passed to p.adjust() ("BH" default).
formatAnovaTableForXtable <- function( anovaTable, multipleCorrection = TRUE,
                                       multipleCorrectionMethod = 'BH' ){
  # Reject anything whose primary class is not "anova".
  if( class( anovaTable )[ 1 ] != 'anova' ){
    stop( "Argument not an Anova table" )
  }
  # Drop incomplete rows and normalize the column names (positional).
  outTable <- na.omit( as.data.frame( anovaTable ) )
  colnames( outTable ) <- c( "Sum Sq", "Mean Sq", "NumDF", "DenDF", "F.value", "p.value" )
  outTable$DenDF <- NULL
  if( multipleCorrection ){
    outTable[[ 'p.value' ]] <- p.adjust( p = outTable[[ 'p.value' ]], method = multipleCorrectionMethod )
  }
  # Ascending p-value, ties broken by descending F.
  rowOrder <- order( outTable[[ 'p.value' ]], -outTable[[ 'F.value' ]], decreasing = FALSE )
  outTable <- outTable[ rowOrder, ]
  outTable[[ 'p.value' ]] <- round( outTable[[ 'p.value' ]], 4 )
  outTable[[ 'F.value' ]] <- round( outTable[[ 'F.value' ]], 2 )
  # Prettify term labels via the project-level getFactorName() helper
  # (sourced from the project library, not defined in this file).
  row.names( outTable ) <- sapply( X = row.names( outTable ), FUN = getFactorName )
  return( outTable[ , c( 'F.value', 'p.value' ) ] )
}
# Format a difflsmeans-style post hoc table for xtable output: strip the
# factor name out of the row labels, optionally adjust the p-values for
# multiple testing, sort by significance, and prettify the labels.
#
# Args:
#   postHocTable: object carrying $diffs.lsmeans.table with `p-value` and
#                 `t-value` columns (see the difflsmeans note above in
#                 preprocessAndSaveData).
#   multipleCorrection: apply p.adjust() to the p-values?
#   multipleCorrectionMethod: method passed to p.adjust() ("BH" default).
#   factorName: pattern removed from the row names. Defaults to the global
#               `Factor`, which the original code read implicitly; passing
#               it explicitly is preferred in new code.
formatPostHocTables <- function( postHocTable, multipleCorrection = TRUE,
                                 multipleCorrectionMethod = 'BH', factorName = Factor ){
  postHocTableDF <- as.data.frame( postHocTable$diffs.lsmeans.table )
  # Remove the factor name so only the contrasted levels remain visible.
  rownames( postHocTableDF ) <- gsub( pattern = factorName, replacement = '', x = rownames( postHocTableDF ) )
  postHocTableDF$`Factor Levels` <- rownames( postHocTableDF )
  if( multipleCorrection ){
    postHocTableDF$`p-value` <- p.adjust( p = postHocTableDF$`p-value`, method = multipleCorrectionMethod )
  }
  # Ascending p-value, ties broken by descending |t|.
  postHocTableDF <- postHocTableDF[ order( postHocTableDF[ , 'p-value' ], -abs( postHocTableDF[ , 't-value' ] ),
                                           decreasing = FALSE ), ]
  # Prettify labels via the project-level getFactorName() helper.
  postHocTableDF$`Factor Levels` <- sapply( X = postHocTableDF$`Factor Levels`, FUN = getFactorName )
  row.names( postHocTableDF ) <- sapply( X = row.names( postHocTableDF ), FUN = getFactorName )
  return( postHocTableDF )
}
########################################################################
## Run Path definition file ##
########################################################################
# All project paths hang off PathPrefix; switch to the commented value when
# running on the alternate machine.
PathPrefix <- '~/'
# PathPrefix <- '/Users/patron/Documents/snandi/'
RScriptPath <- paste0( PathPrefix, 'Project_Recession/RScripts_Recession/' )
DataPath <- paste0( PathPrefix, 'Project_Recession/Data/data_2015Dec/' )
RDataPath <- paste0( PathPrefix, 'Project_Recession/RData/data_2015Dec/' )
PlotPath <- paste0( PathPrefix, 'Project_Recession/Plots/' )
Filename.Header <- paste0( RScriptPath, 'HeaderFile_Recession.R' )
source( Filename.Header )
# Project helper library -- presumably defines getFactorName() used by the
# formatting functions above; confirm.
source( paste( RScriptPath, 'fn_Library_Recession.R', sep='' ) )
source( paste( RScriptPath, 'plotLSMeans.R', sep='' ) )
Today <- Sys.Date( )
########################################################################
## load income poverty data
########################################################################
#Filename <- paste0( RDataPath, 'Data_forIncPov_byRace.RData' )
#load( file = Filename )
##Filename <- paste0( RDataPath, 'Data_forIncPov.RData' )
##load( file = Filename )
# Loads Data_forIncPov into the workspace, derives the model covariates,
# then drops the raw copy.
Filename <- paste0( RDataPath, 'Data_forIncPov_v5_newWts.RData' )
load( file = Filename )
Data <- preprocessAndSaveData( Data = Data_forIncPov, filenameModelData = 'Data_forIncPovModel_v5.RData' )
rm( Data_forIncPov )
#######################################################################
## Mixed Effects Model ( MEM ) of Income Poverty Ratio
########################################################################
#library( lsmeans )
# FULLmodelFPL100 <- lmerTest::lmer(
#   FPL100_num ~ 1 + Time + I( Time^2 ) + adult_disb + gender + ms + race_origin + education +
#     adult_disb*gender + adult_disb*ms + adult_disb*race_origin + adult_disb*education + adult_disb*Time +
#     gender*ms + gender*race_origin + gender*education +
#     ms*race_origin + ms*education + race_origin*education +
#     ( 1 | hhid ), data = Data, weights = wt
# )
# finalModel <- lmerTest::step( model = FULLmodelFPL100 )
# Quadratic time trend plus demographics, disability-by-time/gender/education
# and time-by-demographics interactions, survey weights (wt), and a random
# intercept per household (1 | hhid).
modelFPL100 <- lmerTest::lmer(
  FPL100_num ~ 1 + Time + I( Time^2 ) + adult_disb + gender + ms + race_origin + education +
    adult_disb*Time + adult_disb*gender + adult_disb*education +
    Time*gender + Time*ms + Time*race_origin + Time*education +
    gender*ms + gender*education + ms*race_origin + ms*education + race_origin*education +
    ( 1 | hhid ), data = Data, weights = wt
)
saveModel( modelData = modelFPL100, modelFilename = 'modelFPL100_RS05-4.RData' )
# lmerTest::summary( modelFPL100 )
# lmerTest::anova( modelFPL100 )
# Residuals <- residuals( modelFPL100 )
# FittedValues <- fitted.values( modelFPL100 )
# qplot() + geom_point( aes( x = FittedValues, y = Residuals ) )
# ANOVA table and coefficient summary, printed for the log and persisted
# alongside the fitted model.
modelFPL100_Anova <- lmerTest::anova( modelFPL100 )
print( modelFPL100_Anova )
modelFPL100_Summary <- lmerTest::summary( modelFPL100 )
print( modelFPL100_Summary )
saveModel( modelData = modelFPL100_Summary, modelFilename = 'modelFPL100_Summary_RS05-4.RData' )
#######################################################################
## Model with Disabled only
########################################################################
# Same model restricted to households with a disabled adult; the
# adult_disb terms drop out because the subset is homogeneous in it.
DataDisb <- subset( Data, adult_disb == "yes" )
# FULLmodelFPL100Disab <- lmerTest::lmer(
#   FPL100_ ~ 1 + Time + I( Time^2 ) + gender + ms + race_origin + education +
#     gender*ms + gender*race_origin + gender*education +
#     ms*race_origin + ms*education + race_origin*education +
#     ( 1 | hhid ), data = DataDisb, weights = wt
# )
# finalModel <- lmerTest::step( model = FULLmodelFPL100Disab )
modelFPL100Disab <- lmerTest::lmer(
  FPL100_num ~ 1 + Time + I( Time^2 ) + gender + ms + race_origin + education +
    Time*gender + Time*ms + Time*race_origin + Time*education +
    gender*ms + gender*education + ms*race_origin + ms*education + race_origin*education +
    ( 1 | hhid ), data = DataDisb, weights = wt
)
saveModel( modelData = modelFPL100Disab, modelFilename = 'modelFPL100Disab_RS05-4.RData' )
# ANOVA table and coefficient summary, printed and persisted as above.
modelFPL100Disab_Anova <- lmerTest::anova( modelFPL100Disab )
print( modelFPL100Disab_Anova )
modelFPL100Disab_Summary <- lmerTest::summary( modelFPL100Disab )
print( modelFPL100Disab_Summary )
saveModel( modelData = modelFPL100Disab_Summary, modelFilename = 'modelFPL100Disab_Summary_RS05-4.RData' )
|
1247bbefb8113ffd4cde4a48b99e44e924995edb | ec8babbad68af8930385adf3153217149dde500b | /week3/run_ncov_leaflet.R | d1fd746a0033342d968e2df5156e0651f5361e83 | [] | no_license | cemalec/BIFX551 | d7b8728865fe9bc86aef5f93422816338a38eab1 | b33807326778e3108bc871e0ccbed09b51d084f1 | refs/heads/master | 2020-12-10T03:09:25.691385 | 2020-05-05T22:00:37 | 2020-05-05T22:00:37 | 233,489,415 | 0 | 8 | null | 2020-05-16T14:46:00 | 2020-01-13T01:48:47 | HTML | UTF-8 | R | false | false | 1,723 | r | run_ncov_leaflet.R | # Description: Sample plot of ncov using leaflets
# Author: Daniel Vogel
# Date: 2/5/2020
# Install required packages for leaflet map and plotting
#install.packages("leaflet")
#install.packages("sp")
#install.packages("tidyverse")
library(leaflet)
library(sp)
## for reading .csv files
library(tidyverse)
## load data files
# Expects ncov_outside_hubei.csv in the working directory: patient-level
# records with at least age, date_confirmation and coordinates used below.
ncov_outside_hubei<-read_csv("ncov_outside_hubei.csv")
# NOTE(review): attach() is an anti-pattern and unnecessary here -- all
# later code accesses columns via ncov_outside_hubei$... anyway.
attach(ncov_outside_hubei)
print("summary of data")
print(summary(ncov_outside_hubei))
##
## Plot an age histogram
##
# strtoi() parses the age strings as integers; non-integer entries become
# NA and are dropped by hist(). freq=FALSE plots densities, not counts.
hist(strtoi(ncov_outside_hubei$age),
     main="Ages of 2019 CoronaVirus Patients",
     xlab="Age",
     ylab="Density",
     col="darkmagenta",
     freq=FALSE
     )
# Pause to continue
#
readline("Plotting Reported Cases Per Date (shown in Viewer)")
plot.new()
# Tabulate confirmations per date; the first two characters of the date
# string (Var1) are taken as the day of month -- assumes a DD.MM.YYYY-style
# format, TODO confirm.
dates_df<-data.frame(table(ncov_outside_hubei$date_confirmation))
dates_df$DD<-with(dates_df,as.integer(substr(Var1,1,2)))
print(dates_df)
# Set up axes spanning the data (this plots the two range corner points),
# then overlay the daily counts as a connected line below.
xrange<- range(dates_df$DD)
yrange<- range(dates_df$Freq)
plot(xrange,yrange,
     main="Confirmed Cases in Jan 2020",
     xlab="Jan 12 - Jan 31",
     ylab="# Confirmed")
# Add a line
x_dates<-as.integer(substr(dates_df$Var1,1,2))
y_freq<-dates_df$Freq
lines(x_dates,y_freq, type="b")
# Pause to continue
#
readline("Plotting global patient locations (shown in Viewer)")
# Interactive world map centered near Wuhan (lng 114.27, lat 30.59), one
# small red circle per record. NOTE(review): addCircles() relies on leaflet
# auto-detecting latitude/longitude columns in the data -- confirm names.
m <- leaflet(data = ncov_outside_hubei) %>%
  setView(lng=114.27, lat=30.59, zoom=3) %>%
  addProviderTiles(providers$Esri.WorldStreetMap)%>% ##shows city names as well in English
  #addProviderTiles(providers$Esri.WorldTopoMap)%>% ##shows country names in English
  #addTiles() %>% ## default OpenStreet tiles which display country names in many languages
  addCircles(color="red",radius=1)
# show the map with plotted data
print(m)
|
2a3b08a4cf1d4772e4baca2fed4c2e01dd014add | e40d274ff6b9bd7e7f20998379f483543582c81f | /snapflex/inst/flex/flex-rsds-1.R | c0cb9f6171a20cffcebcfa393f28766fadb7c301 | [
"MIT"
] | permissive | ua-snap/snap-r-tools | 5be2dcc5171cf7289504f20e98ad3ec603e4ed57 | c3f573c2abf11633b5262c4d98cfbde39854dbf4 | refs/heads/master | 2020-03-22T05:57:29.239067 | 2019-01-08T03:11:17 | 2019-01-08T03:11:17 | 139,602,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,447 | r | flex-rsds-1.R | # @knitr setup
# Chunk libraries: dplyr/tidyr for wrangling, ggplot2/ggpubr for plots,
# snapplot for SNAP themes and palettes.
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggpubr)
library(snapplot)
# `params` is the R Markdown params list of this flexdashboard document;
# look up the requested theme function by its name.
plot_theme <- get(params$snaptheme)
library(showtext)
# register the requested Google font under the family name "gfont" and turn
# on showtext rendering for all subsequent plots
font_add_google(params$gfont, "gfont", regular.wt = params$regular, bold.wt = params$bold)
showtext_auto()
# Local patched copy of ggpubr::stat_compare_means.  It reaches into ggpubr
# internals (`:::`), so it is tied to the ggpubr version it was copied from --
# treat with care when upgrading packages.  Two branches:
#   * `comparisons` given  -> draw pairwise significance brackets via
#     ggsignif::geom_signif (this is the path the override adjusts: bracket
#     line `size` and text `textsize` are decoupled so thin brackets can carry
#     readable labels).
#   * otherwise            -> a single global-test label layer using ggpubr's
#     StatCompareMeans.
stat_compare_means <- function(mapping = NULL, data = NULL, method = NULL, paired = FALSE, # override issues in ggpubr
                               method.args = list(), ref.group = NULL, comparisons = NULL,
                               hide.ns = FALSE, label.sep = ", ", label = NULL, label.x.npc = "left",
                               label.y.npc = "top", label.x = NULL, label.y = NULL, tip.length = 0.03,
                               symnum.args = list(), geom = "text", position = "identity",
                               na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
{
  if (!is.null(comparisons)) {
    # resolve the test function name (e.g. "wilcox.test") from ggpubr's table
    method.info <- ggpubr:::.method_info(method)
    method <- method.info$method
    method.args <- ggpubr:::.add_item(method.args, paired = paired)
    if (method == "wilcox.test")
      method.args$exact <- FALSE
    # pull size/colour overrides out of `...`; defaults: thin bracket (0.3),
    # large label text (10), black
    pms <- list(...)
    size <- ifelse(is.null(pms$size), 0.3, pms$size)
    textsize <- ifelse(is.null(pms$size), 10, pms$size)
    color <- ifelse(is.null(pms$color), "black", pms$color)
    map_signif_level <- FALSE
    if (is.null(label))
      label <- "p.format"
    # star-style labels: map p-value bins to asterisks (optionally blanking
    # the "ns" label when hide.ns is requested)
    if (ggpubr:::.is_p.signif_in_mapping(mapping) | (label %in% "p.signif")) {
      map_signif_level <- c(`****` = 1e-04, `***` = 0.001,
                            `**` = 0.01, `*` = 0.05, ns = 1)
      if (hide.ns)
        names(map_signif_level)[5] <- " "
    }
    # stack brackets automatically unless explicit y positions were given
    step_increase <- ifelse(is.null(label.y), 0.12, 0)
    ggsignif::geom_signif(comparisons = comparisons, y_position = label.y,
                          test = method, test.args = method.args, step_increase = step_increase,
                          size = size, textsize = textsize, color = color, map_signif_level = map_signif_level,
                          tip_length = tip.length, data = data)
  }
  else {
    # global-test label layer (ggpubr's own stat)
    mapping <- ggpubr:::.update_mapping(mapping, label)
    layer(stat = ggpubr:::StatCompareMeans, data = data, mapping = mapping,
          geom = geom, position = position, show.legend = show.legend,
          inherit.aes = inherit.aes, params = list(label.x.npc = label.x.npc,
                                                   label.y.npc = label.y.npc, label.x = label.x,
                                                   label.y = label.y, label.sep = label.sep, method = method,
                                                   method.args = method.args, paired = paired, ref.group = ref.group,
                                                   symnum.args = symnum.args, hide.ns = hide.ns,
                                                   na.rm = na.rm, ...))
  }
}
# convert irradiance units: MJ/m^2/day -> kWh/m^2/day
d$value <- (24 * d$value) / (1000 * 0.0864) # MJ/m^2/day to kWh/m^2/day
# palettes: all six model series, the two periods, and the 30-year windows
clrs <- c("gray50", "#00AFBB", "#E7B800", snapplot::snapalettes()[c(4, 7, 8)])
clrs2 <- clrs[2:3]
clrs3 <- c("#00AFBB", "#E7B800", snapplot::snapalettes()[4])
# annotation/text colour chosen to contrast with the theme background
contrast <- ifelse(params$snaptheme %in% c("theme_snapdark"), "white", "black")
# GCM projections only (drop the CRU 4.0 observational series)
dsub <- filter(d, Model != "CRU 4.0")
# NOTE(review): this `dsum` is recomputed (with an extra Year filter) further
# below before it is ever used, so this block appears to be dead code.
dsum <- dsub %>%
  mutate(Window = ifelse(Year %in% 2010:2039, "2010 - 2039", ifelse(Year %in% 2040:2069, "2040 - 2069", "2070 - 2099"))) %>%
  mutate(Window = factor(Window, levels = unique(Window))) %>%
  group_by(Window, Model) %>% summarise(Mean = mean(value)) %>%
  mutate(Model_Window = paste(Window, Model)) %>% arrange(Window, Mean)
# overall linear trend of the projections, coefficients to 3 sig. figures
bhats <- signif(lm(value ~ Year, data = dsub)$coefficients, 3)
# Build a plotmath label for the linear trend of `value` on `Year`:
# "y = a + b . x, r^2 = ..." with coefficients to 3 significant figures.
# Returns a single character string for use with annotate(..., parse = TRUE).
lm_eqn <- function(df){
  trend_fit <- lm(value ~ Year, df)
  trend_coef <- coef(trend_fit)
  eq_expr <- substitute(
    ~~italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
    list(a = signif(trend_coef[1], 3),
         b = signif(trend_coef[2], 3),
         r2 = round(summary(trend_fit)$r.squared, 3))
  )
  as.character(as.expression(eq_expr))
}
# number of distinct projection years
n_proj <- length(unique(dsub$Year))
# fitted trend values at the first and last projection year
rsds1 <- bhats[1] + bhats[2] * min(dsub$Year)
rsds2 <- bhats[1] + bhats[2] * max(dsub$Year)
ratio <- rsds2 / rsds1
# total percent change over the projection period, and the equivalent
# geometric per-decade change
total_pct_change <- signif(100 * (ratio - 1), 2)
change_per_decade <- round(100 * (abs(ratio)^(10 / n_proj) - sign(ratio)), 1)
# plotmath annotation strings ('\045' is the octal escape for the % sign)
totpct <- paste0("~~Total~projected~change:~", total_pct_change, '*symbol("\045")')
decpct <- paste0("~~", change_per_decade, '*symbol("\045")/decade')
# y positions for the two annotations, just below the top of the data range
yrange <- diff(range(d$value))
totpos <- max(d$value) - 0.075 * yrange
decpos <- max(d$value) - 0.15 * yrange
# axis label (plotmath) and plain-text variable name used in titles
prime_lab <- expression(Solar~Irradiance~(kWh/m^2/day))
prime_lab2 <- "solar irradiance"
pct_change_statement <- paste0("The estimated projected percent change in solar irradiance over the period 2006 - 2100 using the five climate models is ", total_pct_change,
                               "%. This is approximately ", change_per_decade, "% change per decade during the period. These mean estimates are based on the linear regression in figure 1.")
# `simplify` is assumed to come from the R Markdown environment (not defined
# in this chunk) -- TODO confirm
p1size1 <- ifelse(simplify, 1, 0.5)
# Figure 1: annual values with per-model smoothed trends (dashed) plus the
# overall linear trend (solid) fitted to the projections only (dsub).
# `loc2` and `simplify` come from the surrounding R Markdown environment --
# TODO confirm.
p1 <- ggplot(d, aes(Year, value)) + geom_smooth(data = d, aes(colour = Model), se = FALSE, linetype = "longdash", size = p1size1) +
  geom_point(aes(colour = Model), alpha = 0.2)
p1 <- p1 + scale_colour_manual(values = clrs) +
  geom_smooth(data = dsub, colour = contrast, method = "lm", size = 1) +
  plot_theme(base_family = "gfont", base_size = 20) + theme(text = element_text(size=40), plot.margin = unit(c(5, 10, 5, 5), "mm"), axis.text = element_text(size = 40), legend.text = element_text(size = 40)) + guides(colour = guide_legend(override.aes = list(size=5, alpha = 0.5))) +
  scale_x_continuous(expand = c(0, 0)) +
  labs(title = paste("Projected trend in", prime_lab2, "in", loc2),
       subtitle = "By model and average", x = "Year", y = prime_lab)
# Add the equation/percent-change annotations in a separate guarded step.
# (Bug fix: previously the `if` expression was chained onto the plot with `+`,
# so when `simplify` was FALSE the branch returned a full ggplot object and
# plot + plot raised an error.)
if (!simplify) {
  p1 <- p1 + annotate("text", -Inf, Inf, label = lm_eqn(d), parse = TRUE, size = 12, colour = contrast, hjust = 0, vjust = 1) +
    annotate("text", -Inf, totpos, label = totpct, parse = TRUE, size = 14, colour = contrast, hjust = 0, vjust = 1) +
    annotate("text", -Inf, decpos, label = decpct, parse = TRUE, size = 14, colour = contrast, hjust = 0, vjust = 1)
}
# Figure 2: density of values by Period (observed vs projected), with rug
# and mean lines
p2 <- ggdensity(d, x = "value", add = "mean", rug = TRUE, color = "Period", fill = "Period",
                palette = clrs2, size = 1, ggtheme = plot_theme(base_family = "gfont", base_size = 20)) +
  theme(text = element_text(size=40), plot.margin = unit(c(5, 10, 5, 5), "mm"), axis.text = element_text(size = 40), legend.text = element_text(size = 40)) + guides(colour = guide_legend(override.aes = list(size=5))) +
  scale_x_continuous(expand = c(0, 0)) +
  labs(title = paste("Distributions of", prime_lab2, "in", loc2, "over time"),
       subtitle = "1950 - 2013 CRU 4.0 and 2006 - 2100 GCM outputs", x = prime_lab, y = "Density")
# order the model factor by median value for the boxplot, and reorder the
# palette (`idx`) so each model keeps its colour after reordering
d2 <- d
d2$Model <- reorder(d$Model, d$value, FUN=median)
idx <- match(levels(reorder(d$Model, d$value, FUN=median)), levels(d$Model))
# pairwise comparisons: the first model level (CRU 4.0) vs each of the others
comps <- purrr::map(2:6, ~c(levels(d$Model)[1], levels(d$Model)[.x]))
# Figure 3: boxplots by model with global and pairwise tests (uses the local
# stat_compare_means override defined above)
p3 <- ggboxplot(d2, x = "Model", y = "value",
                color = contrast, fill = "Model", palette = clrs[idx],
                add = "jitter", shape = 21, ggtheme = plot_theme(base_family = "gfont", base_size = 20)) +
  stat_compare_means(comparisons = comps, color = contrast, textsize = 20) +
  stat_compare_means(colour = contrast, size = 12) +
  theme(text = element_text(size=40), plot.margin = unit(c(5, 10, 5, 5), "mm"), legend.key.size = unit(1,"line"), axis.text = element_text(size = 40), legend.text = element_text(size = 40), legend.position = "none") +
  scale_x_discrete(expand = c(0, 0.4)) +
  labs(title = paste("Distributions of", prime_lab2, "in", loc2, "by model"),
       subtitle = "1950 - 2013 CRU 4.0 and 2006 - 2100 GCM outputs. Global and select pairwise tests for difference in means.", x = "Model", y = prime_lab)
# per-model means inside three 30-year windows (this supersedes the earlier,
# unused `dsum` computation)
dsum <- filter(d, Model != "CRU 4.0" & Year >= 2010 & Year < 2100) %>%
  mutate(Window = ifelse(Year %in% 2010:2039, "2010 - 2039", ifelse(Year %in% 2040:2069, "2040 - 2069", "2070 - 2099"))) %>%
  mutate(Window = factor(Window, levels = unique(Window))) %>%
  group_by(Window, Model) %>% summarise(Mean = mean(value)) %>%
  mutate(Model_Window = paste(Window, Model)) %>% arrange(Window, Mean)
# Figure 4: horizontal lollipop chart of window/model means
p4 <- ggplot(dsum, aes(factor(Model_Window, levels = unique(Model_Window)), Mean, colour = Window)) + scale_colour_manual(values = clrs3) +
  coord_flip() + plot_theme(base_family = "gfont", base_size = 20) +
  theme(text = element_text(size=40), plot.margin = unit(c(5, 10, 5, 5), "mm"), axis.text = element_text(size = 40), legend.text = element_text(size = 40)) + guides(colour = guide_legend(title = "Period"), override.aes=list(alpha=1)) +
  scale_y_continuous(expand = c(0.025, 0)) + scale_x_discrete(expand = c(0, 1)) +
  geom_segment(aes(y = min(Mean), xend = Model_Window, yend = Mean, colour = Window), size = 1) +
  geom_point(aes(colour = Window), shape = 19, size = 3) +
  geom_text(aes(label = round(Mean, 1)), colour = contrast, size = 10, vjust = 1.7) +
  labs(title = paste("Projected mean", prime_lab2, "by model and time period"),
       subtitle = loc2, x = NULL, y = prime_lab)
# Colour the axis tick labels of plot `g` to match the colours of its first
# layer. `data[[label]]` supplies the label text in plotting order and `axis`
# selects "x" or "y". Returns the modified ggplot object.
set_axis_label_colors <- function(g, data, label, axis){
  built_plot <- ggplot2::ggplot_build(g)
  tick_cols <- unlist(built_plot$data[[1]]["colour"])
  names(tick_cols) <- as.vector(data[[label]])
  if (axis == "x") {
    g + theme(axis.text.x = element_text(colour = tick_cols))
  } else {
    g + theme(axis.text.y = element_text(colour = tick_cols))
  }
}
# colour the y-axis labels of the lollipop chart to match their points
p4 <- set_axis_label_colors(p4, dsum, "Model_Window", "y")
|
534ba3706800597a0090d284a57639767a7f8dd4 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googledfareportingv27.auto/man/ListPopulationRule.Rd | a581a5dea320b76c3c6d4752dcc7a6a6f749400a | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 757 | rd | ListPopulationRule.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_objects.R
\name{ListPopulationRule}
\alias{ListPopulationRule}
\title{ListPopulationRule Object}
\usage{
ListPopulationRule(floodlightActivityId = NULL,
floodlightActivityName = NULL, listPopulationClauses = NULL)
}
\arguments{
\item{floodlightActivityId}{Floodlight activity ID associated with this rule}
\item{floodlightActivityName}{Name of floodlight activity associated with this rule}
\item{listPopulationClauses}{Clauses that make up this list population rule}
}
\value{
ListPopulationRule object
}
\description{
ListPopulationRule Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Remarketing List Population Rule.
}
|
60929b074656f93ba513681a11b2055cd63a4902 | 135e7e481cf0c859e9708444c672f395cf87b217 | /example_query.R | 6c117f0a54fd88ac897fc51a3dcb1760170cd3ff | [
"MIT"
] | permissive | MerrimanLab/merrimanDW | 96a7d7b9d0de58c1db10db0ba3430175d6536a1d | 4354ab3dd35449bb9bb8d466c2f26d43d4f486b8 | refs/heads/master | 2021-01-21T04:31:22.976091 | 2016-06-23T05:13:05 | 2016-06-23T05:13:05 | 55,732,731 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 942 | r | example_query.R | library(RMySQL)
library(data.table)
library(dplyr)
drv = dbDriver("MySQL")
db = dbConnect(drv, default.file = '~/.my.cnf', dbname="merrimanDW_test")
dbGetQuery()
ukvariant <- dbGetQuery(db, paste('select * from dimVariant where snp in (', paste(paste0("'",kot_t1$SNP,"'"), collapse = ','),")"))
dbGetQuery(db, "select * from dimExperiment where caseCondition IN (1,6) AND controlCondition = 1")
re <- dbGetQuery(db, "select dV.snp, dV.chromosome, dV.GRCh37_bp, dV.A1, dV.A2, fG.*, dE.Description from factGWAS as fG join dimVariant as dV on fG.variantID = dV.variantID join dimExperiment as dE on dE.experimentID = fG.experimentID where dV.chromosome = 3 AND dV.GRCh37_bp BETWEEN 195776000 and 195835000 AND dE.caseCondition IN (1,6) AND dE.controlCondition = 1")
re <- re[, !(names(re) %in% c("variantID", "infoscore","experimentID"))]
write.table(re, file = '~/TFRC.csv', col.names = TRUE, row.names = FALSE, quote = FALSE, sep=",")
|
da0e53ca389e4ba94cb54e40d8c5e3cb470dfe71 | eed02ca4981c27a9b77aaec2287fb91c5ae96d98 | /R/plot-osm-basemap.R | 3181ff3e1cc2300fe2f0bf3cecc31ef41f4e4893 | [] | no_license | jhollist/osmplotr | 572dd0d6c960b933d964ca571c05d4cacd457ac9 | a47701fc10719969d3fe493cfa81ec6cc4809d98 | refs/heads/master | 2020-12-25T22:08:03.656322 | 2016-03-25T13:40:09 | 2016-03-25T13:40:09 | 54,485,836 | 0 | 0 | null | 2016-03-22T15:19:32 | 2016-03-22T15:19:32 | null | UTF-8 | R | false | false | 2,425 | r | plot-osm-basemap.R | #' plot_osm_basemap
#'
#' Generates a base OSM plot ready for polygon, line, and point objects to be
#' overlain with add_osm_objects(). NOTE: Graphics files must be closed after
#' finishing map with dev.off() or graphics.off(). Unless specified, height of
#' graphics device is automatically calculated in proportion to the given width
#' according to the aspect ratio of the bounding box.
#'
#' @param bbox bounding box (Latitude-longitude range) to be plotted. A 2-by-2
#' matrix of 4 elements with columns of min and max values, and rows of x and y
#' values.
#' @param filename Name of plot file; default=NULL plots to screen device (low
#' quality and likely slow)
#' @param width Width of graphics file (in px; default 480).
#' @param structures Data frame returned by osm_structures() used here to
#' specify background colour of plot; if 'structs=NULL', the colour is specified
#' by 'bg'
#' @param bg Background colour of map (default = 'gray20' only if structs not
#' given)
#' @param graphic.device Type of graphic device to print to. For example, 'png'
#' (default), 'jpeg', 'png', or 'tiff'
#' @param ... Other parameters to be passed to graphic device (such as width and
#' height; see ?png, for example, for details)
#' @return nothing (generates file of specified type)
#' @export
#'
#' @examples
#' plot_osm_basemap (bbox=get_bbox (c (-0.15, 51.5, -0.1, 51.52)), col="gray20")
#' add_osm_objects (london$dat_BNR, col="gray40") # non-residential buildings
plot_osm_basemap <- function (bbox=bbox, filename=NULL, width=640,
structures=NULL, bg='gray20',
graphic.device='png', ...)
{
if (!is.null (structures))
bg = structure$cols [which (structures$structure == 'background')]
if (!is.null (filename))
if (nchar (filename) == 0)
filename <- NULL
if (is.null (filename) & width == 640)
width <- 7
height <- width * diff (bbox [2,]) / diff (bbox [1,])
if (!is.null (filename))
png (filename=filename, width=width, height=height,
type='cairo-png', bg='white', ...)
else
dev.new (width=width, height=height)
par (mar=rep (0, 4))
plot (NULL, NULL, xlim=bbox [1,], ylim=bbox [2,], xaxs='i', yaxs='i',
xaxt='n', yaxt='n', xlab='', ylab='', bty='n')
usr <- par ('usr')
rect (usr [1], usr [3], usr [2], usr [4], border=NA, col=bg)
}
|
06d0e8a42e1b8df67066e62434608d550449c5e1 | 86cb2d9a9c8aab4cfe59493d3a187a239451efd7 | /plots/typeDist.R | 9c1ad7ca558edf7fa723e983e1e9201ea86f0491 | [] | no_license | wckdouglas/tgirtERCC | 198878608cb9480847a907f7d22f5f234e791077 | fd807759c158b24d56a282bdbde3313406d9a2c1 | refs/heads/master | 2021-01-17T11:34:30.967484 | 2017-03-10T19:36:45 | 2017-03-10T19:36:45 | 41,743,074 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,484 | r | typeDist.R | #!/usr/bin/env Rscript
# Libraries: readr/dplyr/tidyr/stringr/stringi for wrangling, cowplot and
# RColorBrewer for plotting, data.table for speed, and tgirtABRF for the
# project helpers used below (getPrep/getTemplate/getReplicate/getLab/
# getAnnotation).
library(readr)
library(dplyr)
library(tidyr)
library(cowplot)
library(stringr)
library(stringi)
library(data.table)
library(RColorBrewer)
library(tgirtABRF)
# NOTE(review): hard-coded absolute user paths -- the script only runs as-is
# on the original author's machine.
datapath <- '/Users/wckdouglas/cellProject/result/countTables'
figurepath <- '/Users/wckdouglas/cellProject/figures'
# small non-coding RNA classes kept for the small-RNA breakdown
smncRNA=c('misc_RNA','snRNA','snoRNA','piRNA','miRNA','tRNA')
# Recode an annotation `type` based on the feature `name`: names containing
# 7SK, Y_RNA, 7SL or Vault (checked in that order of precedence) get their
# own category; everything else keeps its original type.
changeType <- function(type, name){
  if (grepl('7SK', name)) return('7SK')
  if (grepl('Y_RNA', name)) return('Y-RNA')
  if (grepl('7SL', name)) return('7SL')
  if (grepl('Vault', name)) return('Vault RNA')
  type
}
# palette and display order for the broad RNA-type plot
colorscale = brewer.pal(9,"Pastel1")
geneLevels <- c('Protein coding','lincRNA','Antisense','Pseudogenes','Other ncRNA','Small ncRNA','Mt','ERCC')
# per-sample fraction of reads by broad RNA type, excluding rRNA
df1 <- datapath %>%
  str_c('sumTable.short.tsv',sep='/') %>%
  read_tsv() %>%
  select(grep('type|count',names(.))) %>%
  select(grep('AS|AH',names(.),invert=T)) %>%
  gather(sample,count,-type) %>%
  mutate(sample = stri_list2matrix(stri_split(sample,fixed='_'))[2,]) %>%
  mutate(prep = getPrep(sample)) %>%
  mutate(template = sapply(sample,getTemplate)) %>%
  mutate(replicate = sapply(sample,getReplicate)) %>%
  mutate(replicate = str_sub(replicate,1,1)) %>%
  mutate(lab = getLab(sample)) %>%
  mutate(type = ifelse(type %in% c('miRNA','snoRNA','tRNA'),'Other sncRNA',type)) %>%
  mutate(type = ifelse(grepl('rRNA',type),'rRNA',type)) %>%
  mutate(name = paste0(template,replicate)) %>%
  mutate(annotation = getAnnotation(prep,lab)) %>%
  mutate(type = ifelse(grepl('sncRNA',type),'Small ncRNA',type)) %>%
  mutate(type = ifelse(grepl('antisense',type),'Antisense',type)) %>%
  filter(type != 'rRNA') %>%
  group_by(name,type,prep,annotation) %>%
  summarize(count = sum(count)) %>%
  ungroup() %>%
  group_by(name,annotation) %>%
  do(data.frame(count = .$count/sum(.$count),
                type = .$type)) %>%
  mutate(type = factor(type,levels=rev(geneLevels))) %>%
  arrange(type) %>%
  tbl_df
# stacked bar chart of read percentages by type, faceted by annotation
p1 <- ggplot(data=df1, aes(x = name, y = count*100 , fill = factor(type,levels=geneLevels)))+#,
  # order=factor(type,levels=rev(geneLevels)))) +
  geom_bar(stat='identity') +
  facet_grid(.~annotation,scale = 'free_x',space='free_x') +
  theme(axis.text.x = element_text(angle = 90, hjust = 0.5, vjust = 1)) +
  labs(x = ' ', y = 'Percentage of reads',fill='RNA type')+
  scale_fill_manual(values=colorscale) +
  theme(strip.text= element_text(size = 13,face = 'bold'))
figurename = paste(figurepath,'typeRatio.pdf',sep='/')
ggsave(p1,file=figurename,width=15,height = 10)
# palette and display order for the small-ncRNA plot
pastel <- c(brewer.pal(9,"Pastel1"),'gray74')
colorscale <- c('darkorange', pastel[c(6, 4, 3, 2, 1, 7,8)],'lightskyblue3' , pastel[10])
geneLevelsSmall <- c('tRNA','snoRNA','snRNA','7SK','7SL','miscRNA','Y-RNA','Vault RNA','piRNA','miRNA')
# per-sample percentage of small-ncRNA reads by class (changeType splits the
# misc_RNA class into 7SK/7SL/Y-RNA/Vault RNA)
df <- datapath %>%
  str_c('countsData.short.tsv',sep='/') %>%
  read_tsv() %>%
  filter(type %in% smncRNA) %>%
  mutate(type = mapply(changeType,type,name)) %>%
  gather(sample,counts,-id,-type,-name) %>%
  group_by(type,sample) %>%
  summarize(counts = sum(counts)) %>%
  ungroup %>%
  data.table %>%
  group_by(sample) %>%
  summarize(percentage = counts/sum(counts) * 100,
            type=type) %>%
  mutate(prep = getPrep(sample)) %>%
  mutate(template = sapply(sample,getTemplate)) %>%
  mutate(replicate = sapply(sample,getReplicate)) %>%
  mutate(replicate = str_sub(replicate,1,1)) %>%
  mutate(lab = getLab(sample)) %>%
  mutate(name = paste0(template,replicate)) %>%
  mutate(type = str_replace(type,'_','')) %>%
  # NOTE(review): `level=` relies on partial argument matching to `levels=`
  # -- it works, but should be spelled out.
  mutate(type = factor(type,level=unique(geneLevelsSmall))) %>%
  mutate(annotation = getAnnotation(prep,lab)) %>%
  mutate(type = factor(type,levels=rev(geneLevelsSmall))) %>%
  arrange(type)
# stacked bar chart of small-ncRNA composition, faceted by annotation
p2 <- ggplot(data=df,aes(x=name,y=percentage, fill = factor(type,levels=geneLevelsSmall))) +
  geom_bar(stat='identity') +
  facet_grid(.~annotation,scale = 'free_x',space='free_x') +
  labs(x = ' ', y = 'Percentage of reads',fill='RNA type')+
  scale_fill_manual(values=colorscale) +
  theme(strip.text= element_text(size = 13,face = 'bold')) +
  theme(text = element_text(face='bold')) +
  theme(axis.text.x = element_text(angle = 90, hjust = 0.5, vjust = 0.5))
figurename = paste(figurepath,'smallTypeRatio.pdf',sep='/')
ggsave(p2,file=figurename,width=16,height = 7)
# composite figure: p1 (top, x labels suppressed) over p2, labelled A/B
p <- ggdraw()+
  draw_plot(p1+theme(axis.text.x=element_blank(),
                     axis.ticks.x=element_blank()),
            0,0.5,1,0.5) +
  draw_plot(p2,0,0,0.986,0.55) +
  draw_plot_label(c('A','B'),c(0,0),c(1,0.55))
figurename = paste(figurepath,'figure6.pdf',sep='/')
ggsave(p,file=figurename,width=15,height = 10)
|
6cdcae7199aff7dadbbfbe2ab0bd3da7e1312693 | 1a47690a821bb6dffb39f4ddff9cae9f8b0708c1 | /analyses/SimWork/fit_conceptual_figure_delayeffects.R | 2c8cedbfb75222214b310036d4632c39c29bed15 | [] | no_license | mrc-ide/reestimate_covidIFR_analysis | 364d018137360f4d229281a6f794117dce00d8d0 | 075feced7d14a84d1f83b88fb8f720f5123abcc3 | refs/heads/master | 2023-04-13T20:17:30.407867 | 2021-11-15T00:12:42 | 2021-11-15T00:12:42 | 267,118,325 | 10 | 1 | null | 2021-06-03T23:17:02 | 2020-05-26T18:16:20 | R | UTF-8 | R | false | false | 12,572 | r | fit_conceptual_figure_delayeffects.R | ####################################################################################
## Purpose: Plot for Figure Showing Delays and Inference Framework
##
## Notes:
####################################################################################
library(COVIDCurve)
library(tidyverse)
library(drake)
# project helpers: model-building wrappers and plotting themes
source("R/covidcurve_helper_functions.R")
source("R/my_themes.R")
# fixed seed so the simulated data underlying the figure is reproducible
set.seed(48)
#............................................................
# Read in Various Scenarios for Incidence Curves
#...........................................................
infxn_shapes <- readr::read_csv("data/simdat/infxn_curve_shapes.csv")
interveneflat <- infxn_shapes$intervene
# note need more infxns for sensitivity to be apparent on conceptual diagrams
interveneflat <- interveneflat * 1.5
# extend the curve by a 100-day decline from the day-200 level down to 10
interveneflat <- c(interveneflat, round(seq(from = interveneflat[200],
                                            to = 10, length.out = 100)))
# read in fitted rate of seroreversion parameter
weibullparams <- readRDS("results/prior_inputs/weibull_params.RDS")
weibullparams$wscale <- weibullparams$wscale - 13.3 # account for delay in onset of symptoms to seroconversion
#............................................................
# setup fatality data
#............................................................
# make up fatality data
# three age strata with increasing infection fatality ratios and homogeneous
# attack rates (Rho = 1)
fatalitydata <- tibble::tibble(Strata = c("ma1", "ma2", "ma3"),
                               IFR = c(1e-3, 0.05, 0.1),
                               Rho = 1)
demog <- tibble::tibble(Strata = c("ma1", "ma2", "ma3"),
                        popN = c(1.3e6, 9e5, 8e5))
# run COVIDCurve sims for no seroreversion and seroreversion
dat <- COVIDCurve::Agesim_infxn_2_death(
  fatalitydata = fatalitydata,
  demog = demog,
  m_od = 19.8,
  s_od = 0.85,
  curr_day = 300,
  infections = interveneflat,
  simulate_seroreversion = FALSE,
  smplfrac = 1e-3,
  sens = 0.85,
  spec = 0.95,
  sero_delay_rate = 18.3,
  return_linelist = FALSE)
# same simulation settings but with antibody waning (seroreversion) following
# the fitted Weibull parameters read in above
serorev_dat <- COVIDCurve::Agesim_infxn_2_death(
  fatalitydata = fatalitydata,
  demog = demog,
  m_od = 19.8,
  s_od = 0.85,
  curr_day = 300,
  infections = interveneflat,
  simulate_seroreversion = TRUE,
  sero_rev_shape = weibullparams$wshape,
  sero_rev_scale = weibullparams$wscale,
  smplfrac = 1e-3,
  sens = 0.85,
  spec = 0.95,
  sero_delay_rate = 18.3,
  return_linelist = FALSE)
#............................................................
#----- Model & Fit #-----
#...........................................................
#......................
# wrangle input data from non-seroreversion fit
#......................
# liftover obs serology
# two serosurveys centred on days 150 and 200, each averaged over an 11-day
# window
sero_days <- c(150, 200)
sero_days <- lapply(sero_days, function(x){seq(from = (x-5), to = (x+5), by = 1)})
obs_serology <- dat$StrataAgg_Seroprev %>%
  dplyr::group_by(Strata) %>%
  dplyr::filter(ObsDay %in% unlist(sero_days)) %>%
  dplyr::mutate(serodaynum = sort(rep(1:length(sero_days), 11))) %>%
  dplyr::mutate(
    SeroPos = ObsPrev * testedN,
    SeroN = testedN ) %>%
  dplyr::group_by(Strata, serodaynum) %>%
  dplyr::summarise(SeroPos = mean(SeroPos),
                   SeroN = mean(SeroN)) %>% # seroN doesn't change
  dplyr::mutate(SeroStartSurvey = sapply(sero_days, median) - 5,
                SeroEndSurvey = sapply(sero_days, median) + 5,
                SeroPos = round(SeroPos),
                SeroPrev = SeroPos/SeroN,
                SeroLCI = NA,
                SeroUCI = NA) %>%
  dplyr::select(c("SeroStartSurvey", "SeroEndSurvey", "Strata", "SeroPos", "SeroN", "SeroPrev", "SeroLCI", "SeroUCI")) %>%
  dplyr::ungroup(.) %>%
  dplyr::arrange(SeroStartSurvey, Strata)
# proportion deaths
prop_deaths <- dat$StrataAgg_TimeSeries_Death %>%
  dplyr::group_by(Strata) %>%
  dplyr::summarise(deaths = sum(Deaths)) %>%
  dplyr::ungroup(.) %>%
  dplyr::mutate(PropDeaths = deaths/sum(dat$Agg_TimeSeries_Death$Deaths)) %>%
  dplyr::select(-c("deaths"))
# make data out
reginputdata <- list(obs_deaths = dat$Agg_TimeSeries_Death,
                     prop_deaths = prop_deaths,
                     obs_serology = obs_serology)
#......................
# wrangle input data from seroreversion fit
#......................
# sero tidy up
sero_days <- c(150, 200)
sero_days <- lapply(sero_days, function(x){seq(from = (x-5), to = (x+5), by = 1)})
obs_serology <- serorev_dat$StrataAgg_Seroprev %>%
  dplyr::group_by(Strata) %>%
  dplyr::filter(ObsDay %in% unlist(sero_days)) %>%
  dplyr::mutate(serodaynum = sort(rep(1:length(sero_days), 11))) %>%
  dplyr::mutate(
    SeroPos = ObsPrev * testedN,
    SeroN = testedN ) %>%
  dplyr::group_by(Strata, serodaynum) %>%
  dplyr::summarise(SeroPos = mean(SeroPos),
                   SeroN = mean(SeroN)) %>% # seroN doesn't change
  dplyr::mutate(SeroStartSurvey = sapply(sero_days, median) - 5,
                SeroEndSurvey = sapply(sero_days, median) + 5,
                SeroPos = round(SeroPos),
                SeroPrev = SeroPos/SeroN) %>%
  dplyr::select(c("SeroStartSurvey", "SeroEndSurvey", "Strata", "SeroPos", "SeroN", "SeroPrev")) %>%
  dplyr::ungroup(.) %>%
  dplyr::arrange(SeroStartSurvey, Strata) %>%
  dplyr::mutate(SeroLCI = NA,
                SeroUCI = NA) # just add these in for catch
# proportion deaths
prop_deaths <- serorev_dat$StrataAgg_TimeSeries_Death %>%
  dplyr::group_by(Strata) %>%
  dplyr::summarise(deaths = sum(Deaths)) %>%
  dplyr::ungroup(.) %>%
  dplyr::mutate(PropDeaths = deaths/sum(serorev_dat$Agg_TimeSeries_Death$Deaths)) %>%
  dplyr::select(-c("deaths"))
# make data out
# NOTE(review): obs_deaths here uses `dat` (the non-seroreversion sim) while
# prop_deaths above uses `serorev_dat` -- possibly intentional (deaths should
# not depend on seroreversion), but confirm this mix is deliberate.
serorev_inputdata <- list(obs_deaths = dat$Agg_TimeSeries_Death,
                          prop_deaths = prop_deaths,
                          obs_serology = obs_serology)
#......................
# make IFR model
#......................
# sens/spec
# beta priors for assay sensitivity/specificity (dsc1/dsc2 are the shape
# parameters; means match the simulated sens = 0.85 and spec = 0.95)
sens_spec_tbl <- tibble::tibble(name = c("sens", "spec"),
                                min = c(0.5, 0.5),
                                init = c(0.85, 0.95),
                                max = c(1, 1),
                                dsc1 = c(850.5, 950.5),
                                dsc2 = c(150.5, 50.5))
# delay priors
# onset-to-death mean (mod) and coefficient of variation (sod), plus the
# seroconversion delay rate; centred on the simulation truth
tod_paramsdf <- tibble::tibble(name = c("mod", "sod", "sero_con_rate"),
                               min = c(18, 0, 16),
                               init = c(19, 0.85, 18),
                               max = c(20, 1, 21),
                               dsc1 = c(19.8, 2550, 18.3),
                               dsc2 = c(0.1, 450, 0.1))
# Weibull seroreversion shape/scale priors centred on the fitted values
serorev <- tibble::tibble(name = c("sero_rev_shape", "sero_rev_scale"),
                          min = c(1, 197),
                          init = c(2.5, 202),
                          max = c(4, 207),
                          dsc1 = c(weibullparams$wshape, weibullparams$wscale),
                          dsc2 = c(0.5, 0.1))
# combine
tod_paramsdf_serorev <- rbind(tod_paramsdf, serorev)
# make param dfs
# helper-built parameter blocks: strata IFRs, infection spline knots (x) and
# heights (y), and per-stratum serology noise effects (Ne)
ifr_paramsdf <- make_ma_reparamdf(num_mas = 3, upperMa = 0.4)
knot_paramsdf <- make_splinex_reparamdf(max_xvec = list("name" = "x4", min = 286, init = 290, max = 300, dsc1 = 286, dsc2 = 300),
                                        num_xs = 4)
infxn_paramsdf <- make_spliney_reparamdf(max_yvec = list("name" = "y3", min = 0, init = 9, max = 14.92, dsc1 = 0, dsc2 = 14.92),
                                         num_ys = 5)
noise_paramsdf <- make_noiseeff_reparamdf(num_Nes = 3, min = 0.5, init = 1, max = 1.5)
# bring together
df_params_reg <- rbind.data.frame(ifr_paramsdf, infxn_paramsdf, noise_paramsdf, knot_paramsdf, sens_spec_tbl, tod_paramsdf)
df_params_serorev <- rbind.data.frame(ifr_paramsdf, infxn_paramsdf, noise_paramsdf, knot_paramsdf, sens_spec_tbl, tod_paramsdf_serorev)
#......................
# make model for serorev and regular
#......................
# reg
# R6 model object for the no-seroreversion fit; the serotest parameter set
# omits the Weibull seroreversion terms
mod1_reg <- COVIDCurve::make_IFRmodel_age$new()
mod1_reg$set_MeanTODparam("mod")
mod1_reg$set_CoefVarOnsetTODparam("sod")
mod1_reg$set_IFRparams(paste0("ma", 1:3))
mod1_reg$set_maxMa("ma3")
mod1_reg$set_Knotparams(paste0("x", 1:4))
mod1_reg$set_relKnot("x4")
mod1_reg$set_Infxnparams(paste0("y", 1:5))
mod1_reg$set_relInfxn("y3")
mod1_reg$set_Noiseparams(c(paste0("Ne", 1:3)))
mod1_reg$set_Serotestparams(c("sens", "spec", "sero_con_rate"))
mod1_reg$set_data(reginputdata)
mod1_reg$set_demog(demog)
mod1_reg$set_paramdf(df_params_reg)
mod1_reg$set_rcensor_day(.Machine$integer.max)
# serorev
# same model but fitting the seroreversion shape/scale as well
mod1_serorev <- COVIDCurve::make_IFRmodel_age$new()
mod1_serorev$set_MeanTODparam("mod")
mod1_serorev$set_CoefVarOnsetTODparam("sod")
mod1_serorev$set_IFRparams(paste0("ma", 1:3))
mod1_serorev$set_maxMa("ma3")
mod1_serorev$set_Knotparams(paste0("x", 1:4))
mod1_serorev$set_relKnot("x4")
mod1_serorev$set_Infxnparams(paste0("y", 1:5))
mod1_serorev$set_relInfxn("y3")
mod1_serorev$set_Noiseparams(c(paste0("Ne", 1:3)))
mod1_serorev$set_Serotestparams(c("sens", "spec", "sero_con_rate", "sero_rev_shape", "sero_rev_scale"))
mod1_serorev$set_data(serorev_inputdata)
mod1_serorev$set_demog(demog)
mod1_serorev$set_paramdf(df_params_serorev)
mod1_serorev$set_rcensor_day(.Machine$integer.max)
#............................................................
#---- Come Together #----
#...........................................................
# one row per fit (regular and seroreversion) with shared MCMC settings
fit_map <- tibble::tibble(
  name = c("reg_mod", "serorev_mod"),
  infxns = list(interveneflat, NULL), # Null since same infections
  simdat = list(dat, serorev_dat),
  modelobj = list(mod1_reg, mod1_serorev),
  rungs = 50,
  burnin = 1e4,
  samples = 1e4,
  thinning = 10)
#......................
# fitmap out
#......................
# select what we need for fits and make outpaths
# (note: `recursive = T` uses the reassignable T; TRUE would be safer)
dir.create("data/param_map/Fig_ConceptualFits/", recursive = T)
# write each row to its own RDS so each drake target reads one scenario
lapply(split(fit_map, 1:nrow(fit_map)), function(x){
  saveRDS(x, paste0("data/param_map/Fig_ConceptualFits/",
                    x$name, "_rung", x$rungs, "_burn", x$burnin, "_smpl", x$samples, ".RDS"))
})
#............................................................
# MCMC Object
#...........................................................
# Read a saved one-row parameter map from `path`, run the IFR age-model MCMC
# for that scenario, and write the fit to results/Fig_ConceptualFits/.
# Returns 0 so drake records a trivially small target value.
run_MCMC <- function(path) {
  param_row <- readRDS(path)
  # run the sampler with the settings stored in the parameter map
  mcmc_fit <- COVIDCurve::run_IFRmodel_age(IFRmodel = param_row$modelobj[[1]],
                                           reparamIFR = TRUE,
                                           reparamInfxn = TRUE,
                                           reparamKnots = TRUE,
                                           chains = 10,
                                           burnin = param_row$burnin,
                                           samples = param_row$samples,
                                           rungs = param_row$rungs,
                                           GTI_pow = 3.0,
                                           thinning = param_row$thinning)
  # write out the fit, one RDS per scenario
  dir.create("results/Fig_ConceptualFits/", recursive = TRUE)
  outpath <- paste0("results/Fig_ConceptualFits/",
                    param_row$name, "_rung", param_row$rungs, "_burn", param_row$burnin, "_smpl", param_row$samples, ".RDS")
  saveRDS(mcmc_fit, file = outpath)
  return(0)
}
#............................................................
# Make Drake Plan
#...........................................................
# due to R6 classes being stored in environment https://github.com/ropensci/drake/issues/961
# Drake can't find <environment> in memory (obviously).
# Need to either wrap out of figure out how to nest better
# read files in after sleeping to account for file lag
Sys.sleep(60)
# collect the parameter-map RDS files written above; one drake target each
file_param_map <- list.files(path = "data/param_map/Fig_ConceptualFits/",
                             pattern = "*.RDS",
                             full.names = TRUE)
file_param_map <- tibble::tibble(path = file_param_map)
#............................................................
# Make Drake Plan
#...........................................................
# static branching: one `fits` target per row (file path) in file_param_map
plan <- drake::drake_plan(
  fits = target(
    run_MCMC(path),
    transform = map(
      .data = !!file_param_map
    )
  )
)
#......................
# call drake to send out to slurm
#......................
options(clustermq.scheduler = "slurm",
        clustermq.template = "drake_clst/slurm_clustermq_LL.tmpl")
# one clustermq job per scenario; most bookkeeping disabled for speed
make(plan, parallelism = "clustermq", jobs = nrow(file_param_map),
     log_make = "ConceptFig_drake.log", verbose = 2,
     log_progress = TRUE,
     log_build_times = FALSE,
     recoverable = FALSE,
     history = FALSE,
     session_info = FALSE,
     lock_envir = FALSE, # unlock environment so parallel::clusterApplyLB in drjacoby can work
     lock_cache = FALSE)
cat("************** Drake Finished **************************")
|
f0c8a7761ab91f3cc0e93121a779048e8ccc0e64 | e9db1f5f3fd44b4e220780359eab9bdbcb1ebd3b | /Perfermance_Evaluation/plot_ROC_Epi.R | 6031fe6e587f028a8ff3fe0942b92aae40483846 | [] | no_license | programing-basic-Tutor/scripts_For_Seq | 4c49bd3a55d8add17c5ff569d1c49118219f019e | 7a74b6bda5f2a87c84bf4c18f8e66be84ea3c141 | refs/heads/master | 2022-10-10T00:29:16.452918 | 2020-05-31T02:00:43 | 2020-05-31T02:00:43 | 72,696,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 709 | r | plot_ROC_Epi.R | library(Epi)
data_A <- read.table("a1_MSI.txt", header=T, sep="\t")
x <- data_A$MSI_score
y <- data_A$MSI_status
z <- data_A$total_loci
rc <- ROC(form = y ~ x +z , plot="sp")
## optimal combination
opt <- which.max(rowSums(rc$res[, c("sens", "spec")]))
## optimal cut-off point
rc$res$lr.eta[opt]
ROC(form = y ~ x + z, plot = "ROC", MX = TRUE)
## ref https://stackoverflow.com/questions/23131897/how-can-i-get-the-optimal-cutoff-point-of-the-roc-in-logistic-regression-as-a-nu
## http://www.talkstats.com/threads/the-optimal-cutoff-score-in-the-classification-table.56212/
## https://smart-statistics.com/handling-roc-curves/
## http://ethen8181.github.io/machine-learning/unbalanced/unbalanced.html
|
1b90f5eeda2d5db9924ac197bfca73587461c508 | 05884bd8afb3222aec86c6a2b363e67ed3c64590 | /toolbox/examples/safeLibraryex.R | f5c051c19e87e38c39395a16c211a6d130d62857 | [] | no_license | nmarticorena/mineria_datos | bcfbea31e6de6f292e4404068b360638ab8a3cbb | 6e3f22c2fb79fe551a5d8c94136f495638088813 | refs/heads/master | 2020-03-09T00:36:28.806062 | 2018-06-14T03:12:35 | 2018-06-14T03:12:35 | 128,492,056 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 94 | r | safeLibraryex.R | #Safe Library example:
safeLibrary(roxygen2)
# useful package to documentate your packages
|
77dff884c3c37e4944963ed1ac7f368b8982865a | 60a532d56b30e7e8d833a5f10e4e55a2c9bb7c1b | /man/demo_se.Rd | 42305e7afeca45b9532814bf0b8cdc70e8d855d8 | [
"MIT"
] | permissive | mariakalimeri/forestplot | 2d0ffa5841b700af4b05fe27c67b4a3ba7cd8e8a | fef9aeacf9096f30d065ef72f42dec696d1879cd | refs/heads/master | 2021-07-08T00:39:12.708646 | 2018-09-12T13:59:55 | 2018-09-12T14:11:30 | 126,931,329 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 707 | rd | demo_se.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{demo_se}
\alias{demo_se}
\title{Standard Error Values of Linear Associations of NMR-quantified Biomarkers to BMI}
\format{A data frame (tibble) with 228 rows and 3 columns:
\describe{
\item{abbrev}{Biomarker abbreviation}
\item{cohort1}{Std. error values for simulated cohort 1}
\item{cohort2}{Std. error values for simulated cohort 2}
}}
\usage{
demo_se
}
\description{
A dataframe containing standard error values for linear associations of NMR-quantified biomarkers to BMI as estimated using simulated data. Std. error values correspond to the demo_beta dataframe values
}
\keyword{datasets}
|
e9a16265cfb1009025b676c183e86ddc18fdc38f | 8c30bb622e1aa34e74b297ec519fdc88ed73f2c0 | /R/main.R | e6abdba2fbba3f1fff2003f997acd14a4aa66b52 | [
"MIT"
] | permissive | EvgenyPetrovsky/scrambler | 0023529456a9e42b83ccb8f686b7adff6fa0e322 | 1da6ddbb3e4be8ecccd9cdfc866daaaf91af4df3 | refs/heads/master | 2020-03-14T22:00:22.294237 | 2018-10-28T19:42:17 | 2018-10-28T19:42:17 | 131,810,862 | 0 | 0 | MIT | 2018-10-25T12:09:41 | 2018-05-02T06:57:07 | R | UTF-8 | R | false | false | 4,727 | r | main.R | #' Process files
#' @description This procedure scrambles all files which meet selection criteria
#' according to scramble rules
#'
#' @export
#' @param input.folder input folder, word directory by default
#' @param file.names file wildcard to select files
#' @param output.folder folder name to store results. Folder should exist if
#' specified
#' @param rules.file filename with rules
#' @param seed seed value for random generation and sampling
#' @param skip.headlines number of lines in a file before data starts
#' @param skip.taillines number of lines before end of a file where data ends
#' @param data.header flag that data starts with header
#' @param chunksize specifies if file should be read and processed by portions,
#' portion denotes number of lines
processFiles <- function(
  input.folder = ".",
  file.names = "*",
  output.folder = "",
  rules.file = "",
  seed = 0,
  skip.headlines = 0,
  skip.taillines = 0,
  data.header = TRUE,
  chunksize = 0
) {
  # Scramble every file in input.folder matching file.names, writing results
  # to output.folder (or next to the input with a ".scrambled" suffix).
  # NOTE(review): file.names is documented as a wildcard but is passed to
  # dir() as a regular expression — the default "*" is not a valid regex;
  # callers appear expected to pass a regex-compatible pattern. Confirm.
  # log start
  write.log(
    "Staring process with parameters",
    "-input.folder:", input.folder,
    "-file.names:", file.names,
    "-output.folder:", output.folder,
    "-rules.file:", rules.file,
    "-seed:", seed,
    "-skip.headlines:", skip.headlines,
    "-skip.taillines:", skip.taillines,
    "-data.header:", data.header,
    "-chunksize:", chunksize
  )
  # rules: fall back to the package's bundled rule set when none is given
  rules <- if (rules.file == "") {
    scrambler::scrambling.rules
  } else {
    loadRules(rules.file)
  }
  # input file names (names only; paths are joined below)
  files.in <- dir(path = input.folder, pattern = file.names, full.names = FALSE)
  # output folder defaults to the input folder
  folder.out <- ifelse(output.folder == "", input.folder, output.folder)
  # walk through files and process 1 by 1
  if (length(files.in) == 0) {
    write.log("nothing to process")
  } else {
    for (file.in in files.in) {
      write.log("processing file", file.in)
      # file.path() inserts the path separator; the previous paste0() glued
      # folder and file name together, producing broken paths whenever the
      # folder did not end with "/" (e.g. the default ".").
      fin <- file.path(input.folder, file.in)
      # When writing back into the input folder, suffix the output so the
      # original file is never overwritten.
      fout <- file.path(
        folder.out,
        paste0(file.in, ifelse(folder.out == input.folder, ".scrambled", ""))
      )
      processFile(fin, fout, seed, rules, skip.headlines, skip.taillines, data.header, chunksize)
    }
  }
  write.log("Process complete")
}
# Scramble a single file: copy its header lines, transform the data body
# according to `rules` (optionally in chunks), then copy any footer lines.
# Relies on package-internal helpers (countFileLines, loadLines, loadData,
# saveLines, saveData, createFile, scrambleDataFrame, write.log) defined
# elsewhere in this package.
processFile <- function(
  file.in, file.out,
  seed, rules,
  skip.headlines, skip.taillines, data.header = T, chunksize = 0
) {
  write.log("processing original file", file.in)
  # count lines in file
  file.lines <- countFileLines(file.in)
  # Number of actual data rows between head/tail skip regions.
  # NOTE(review): if skip counts exceed the file length this goes negative
  # and the chunk arithmetic below misbehaves — confirm callers validate.
  data.lines <- file.lines - skip.headlines - as.integer(data.header) - skip.taillines
  # take rules related to file: keep rules whose File pattern matches the
  # base name of the input file (case-insensitive regex match)
  filteredRules <- if (nrow(rules) == 0) rules else {
    subset(
      rules,
      sapply(
        X = File, FUN = grepl, x = basename(file.in),
        ignore.case = T, USE.NAMES = F
      )
    )
  }
  # ----------------------------------------------------------------------------
  # process HEADER
  # always load header because we take table column names as they are
  header <- loadLines(
    file = file.in,
    start.line = 1,
    skip.headlines + as.integer(data.header)
  )
  createFile(file = file.out)
  saveLines(lines = header, file = file.out, append = T)
  # ----------------------------------------------------------------------------
  # process CONTENT
  # function to process chunks: scramble only when at least one rule applies,
  # otherwise pass the data through unchanged
  processData <- function(data) {
    scdata <- if (nrow(filteredRules) > 0) {
      write.log("scrambling data of", basename(file.in))
      scrambleDataFrame(data, seed, filteredRules)
    } else {
      data
    }
    scdata
  }
  # Three cases: no data rows at all, whole-file processing, or chunked
  # processing to bound memory use on large files.
  if (data.lines == 0) {
    NULL
  } else if (chunksize == 0) {
    data <- loadData(
      file = file.in,
      skip.lines = skip.headlines,
      max.lines = data.lines,
      header = data.header)
    scdata <- processData(data)
    saveData(data = scdata, file = file.out)
  } else {
    # Ceiling division: one extra chunk for any remainder rows.
    chunks <- (data.lines %/% chunksize) + if (data.lines %% chunksize > 0) 1 else 0
    for (chunk in 1:chunks) {
      data <- loadData(
        file = file.in,
        skip.lines = skip.headlines,
        max.lines = data.lines,
        header = data.header,
        chunk.no = chunk,
        chunk.size = chunksize)
      scdata <- processData(data)
      saveData(data = scdata, file = file.out)
    }
  }
  # ----------------------------------------------------------------------------
  # process FOOTER
  # load footer only if file has it
  if (skip.taillines > 0) {
    footer <- loadLines(file.in, file.lines - skip.taillines + 1, skip.taillines)
    saveLines(lines = footer, file = file.out, append = T)
  }
}
# in development
# Command-line entry point (work in progress): parses positional arguments
# into local variables but does not yet call processFiles(); the parsed
# values are currently discarded when the function returns.
main <- function() {
  args <- commandArgs(trailingOnly = T)
  folder.in <- args[1]
  file.names <- args[2]
  folder.out <- args[3]
  rules <- args[4]
  seed <- args[5]   # NOTE(review): still a character string here — needs as.numeric before use
  skip.headlines <- 0
  skip.taillines <- 0
}
|
6411879795412d8fb6a026293c6fac621df6d5c1 | e12d1fe4369c0859a945bac2cb79a7a5ae0d4557 | /Article/Analysis.R | 753d24dbe7d1786a27075ecaed0d9723d1793528 | [] | no_license | sportalier/Article_Tanguy | a5b550185475fde2513b91da83cdcdd9070dda42 | 5c00048bb9cbbbdb834743f972ce3bbc76ed35df | refs/heads/master | 2021-01-25T04:01:26.027648 | 2018-04-10T18:49:53 | 2018-04-10T18:49:53 | 30,258,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,739 | r | Analysis.R | # Competition herbivores
# 9 March 2018
library(deSolve)
# parameters
# plant 1
a1 = 0.01;
Qv1r1 = 0.2;
Qv1r2 = 0.08;
# plant 2
a2 = 0.01;
Qv2r1 = 0.1;
Qv2r2 = 0.25;
# herbivore 1
g11 = 0.15;
g12 = 0.11; # 0.1
Qh1r1 = 0.3;
Qh1r2 = 0.2;
m1 = 0.1;
# herbivore 2
g21 = 0.09;
g22 = 0.15;
Qh2r1 = 0.2;
Qh2r2 = 0.3;
m2 = 0.1;
# supply
S1 = 5;
S2 = 5;
supplyR1=S1*Qv1r1+S2*Qv2r1
supplyR2=S1*Qv1r2+S2*Qv2r2
# initial conditions
Vx0 = 10;
Vy0 = 10;
H10 = 0.1;
H20 = 0.1;
time=10000;
dt=1;
tvoulu=seq(0,time,by=dt);
limx=1.5
limy=1.5
# equilibrium point
V1barre=(m1*g22*Qh1r1*Qv2r2-g12*m2*Qh2r2*Qv2r1)/(g11*g22*Qv1r1*Qv2r2-g12*g21*Qv2r1*Qv1r2)
V2barre=(m2*Qh2r2-g21*V1barre*Qv1r2)/(g22*Qv2r2)
# feasibility cone
V1bound=Qv1r2/Qv1r1
V2bound=Qv2r2/Qv2r1
# ZNGI herbivore 1
R1st1=Qh1r1*m1/g11;
R1st2=Qh1r1*m1/g12;
R2st1=Qh1r2*m1/g11;
R2st2=Qh1r2*m1/g12;
zngixR1y1=seq(R2st1,limy,length.out=20)
zngixR1x1=rep(R1st1,length(zngixR1y1))
zngixR2x1=seq(R1st1,limy,length.out=20)
zngixR2y1=rep(R2st1,length(zngixR2x1))
zngiyR1y1=seq(R2st2,limy,length.out=20)
zngiyR1x1=rep(R1st2,length(zngiyR1y1))
zngiyR2x1=seq(R1st2,limy,length.out=20)
zngiyR2y1=rep(R2st2,length(zngiyR2x1))
# ZNGI herbivore 2
R1st1=Qh2r1*m2/g21;
R1st2=Qh2r1*m2/g22;
R2st1=Qh2r2*m2/g21;
R2st2=Qh2r2*m2/g22;
zngixR1y2=seq(R2st1,limy,length.out=20)
zngixR1x2=rep(R1st1,length(zngixR1y2))
zngixR2x2=seq(R1st1,limy,length.out=20)
zngixR2y2=rep(R2st1,length(zngixR2x2))
zngiyR1y2=seq(R2st2,limy,length.out=20)
zngiyR1x2=rep(R1st2,length(zngiyR1y2))
zngiyR2x2=seq(R1st2,limy,length.out=20)
zngiyR2y2=rep(R2st2,length(zngiyR2x2))
# polygon vertices
predx1=limy/V2bound
predy1=limx*V1bound
xpolygon=c(0,predx1,limx,limx)
ypolygon=c(0,limy,limy,predy1)
#plot
plot(0,0,type='l',xlim=c(0,limx),ylim=c(0,limy),xlab='',ylab='',xaxs='i',yaxs='i',lwd=2,axes=F)
axis(1,lwd.ticks=0,at=c(0,0.65,2),label=c("","",""),cex.axis=2)
mtext(side = 1, text = expression(paste("R"[1])), line = 3,cex=2)
#axis(1,lwd.ticks=0,at=c(0,0.65,2),label=c("",expression(paste("R"[1])),""),cex.axis=2)
axis(2,lwd.ticks=0,at=c(0,0.65,2),label=c("",expression(paste("R"[2])),""),las=1,cex.axis=2)
polygon(xpolygon,ypolygon,col='grey87',lty=0)
lines(zngixR1y1~zngixR1x1,lwd=2)
lines(zngixR2y1~zngixR2x1,lwd=2)
lines(zngiyR1y1~zngiyR1x1,lwd=2)
lines(zngiyR2y1~zngiyR2x1,lwd=2)
lines(zngixR1y2~zngixR1x2,lty=2,lwd=2)
lines(zngixR2y2~zngixR2x2,lty=2,lwd=2)
lines(zngiyR1y2~zngiyR1x2,lty=2,lwd=2)
lines(zngiyR2y2~zngiyR2x2,lty=2,lwd=2)
box()
#abline(0,V1bound,col='grey70',lwd=2)
#abline(0,V2bound,col='grey70',lwd=2)
#### real ZNGI ####
S1vect=seq(1,50)
S2vect=seq(1,50)
# ZNGI R1 H1
R1isoH1vect=matrix(0,nrow=2500,ncol=2)
R1isoH1vect=as.data.frame(R1isoH1vect)
index=1
for (i in 1:50){
S1=S1vect[i]
for (j in 1:50){
S2=S2vect[j]
apolinom = g12*g11*m1*Qh1r1
bpolinom = g11*g12*Qv2r1*S2+a2*g11*m1*Qh1r1-g12*g11*S1*Qv1r1+g12*a1*m1*Qh1r1
cpolinom = a1*g12*Qv2r1*S2-a2*S1*g11*Qv1r1+a2*a1*m1*Qh1r1
delta1= bpolinom^2 - 4*apolinom*cpolinom
sol1=(-bpolinom+sqrt(delta1))/(2*apolinom)
sol2=(-bpolinom-sqrt(delta1))/(2*apolinom)
H1barre=sol1
V2barre=(S1*g11*Qv1r1-a1*m1*Qh1r1-g11*m1*Qh1r1*H1barre)/(a1*g12*Qv2r1+g11*g12*Qv2r1*H1barre)
V1barre=(m1*Qh1r1-g12*Qv2r1*V2barre)/(g11*Qv1r1)
R1isoH1=V1barre*Qv1r1+V2barre*Qv2r1
R1isoH1vect[index,1]=R1isoH1
R1isoH1vect[index,2]=V1barre*Qv1r2+V2barre*Qv2r2
index=index+1
}
}
# ZNGI R2 H1
R2isoH1vect=matrix(0,nrow=2500,ncol=2)
R2isoH1vect=as.data.frame(R2isoH1vect)
index=1
for (i in 1:50){
S1=S1vect[i]
for (j in 1:50){
S2=S2vect[j]
apolinom = g12*g11*m1*Qh1r2
bpolinom = g11*g12*Qv2r2*S2+a2*g11*m1*Qh1r2-g12*g11*S1*Qv1r2+g12*a1*m1*Qh1r2
cpolinom = a1*g12*Qv2r2*S2-a2*S1*g11*Qv1r2+a2*a1*m1*Qh1r2
delta1= bpolinom^2 - 4*apolinom*cpolinom
sol1=(-bpolinom+sqrt(delta1))/(2*apolinom)
sol2=(-bpolinom-sqrt(delta1))/(2*apolinom)
H1barre=sol1
V2barre=(S1*g11*Qv1r2-a1*m1*Qh1r2-g11*m1*Qh1r2*H1barre)/(a1*g12*Qv2r2+g11*g12*Qv2r2*H1barre)
V1barre=(m1*Qh1r2-g12*Qv2r2*V2barre)/(g11*Qv1r2)
R2isoH1=V1barre*Qv1r2+V2barre*Qv2r2
R2isoH1vect[index,2]=R2isoH1
R2isoH1vect[index,1]=V1barre*Qv1r1+V2barre*Qv2r1
index=index+1
}
}
# plot(R1isoH1vect[,2]~R1isoH1vect[,1],type='l',ylim=c(-1,10),xlim=c(-1,10))
# lines(R2isoH1vect[,2]~R2isoH1vect[,1],col='red')
mod1=lm(R1isoH1vect[,2]~R1isoH1vect[,1])
mod2=lm(R2isoH1vect[,2]~R2isoH1vect[,1])
plot(0,0,type='n',xlim=c(0,4),ylim=c(0,4))
abline(mod1$coef)
abline(mod2$coef)
#### test of vectors ####
# equilibrium point
V1barre=(m1*g22*Qh1r1*Qv2r2-g12*m2*Qh2r2*Qv2r1)/(g11*g22*Qv1r1*Qv2r2-g12*g21*Qv2r1*Qv1r2)
V2barre=(m2*Qh2r2-g21*V1barre*Qv1r2)/(g22*Qv2r2)
# EDO
# ODE system for two herbivores (x[1], x[2]) feeding on two plants
# (x[3], x[4]) under Liebig's-law (minimum) growth, in the form required
# by deSolve::lsoda (returns a list whose first element is the derivative
# vector). parms = c(S1, S2) are the nutrient supply rates.
# Reads model constants (g*, Qv*, Qh*, m1, m2, a1, a2) from the calling
# environment, as set at the top of this script.
equadiff=function(t,x,parms){
  res=rep(0,length(x))
  S1=parms[1]
  S2=parms[2]
  # Herbivore 1 growth: minimum over the two nutrient conversion routes
  Min1=(g11*Qv1r1*x[3]+g12*Qv2r1*x[4])/Qh1r1
  Min2=(g11*Qv1r2*x[3]+g12*Qv2r2*x[4])/Qh1r2
  gh1=min(Min1,Min2)
  res[1]=(gh1-m1)*x[1]
  # Herbivore 2 growth, same structure
  Min1=(g21*Qv1r1*x[3]+g22*Qv2r1*x[4])/Qh2r1
  Min2=(g21*Qv1r2*x[3]+g22*Qv2r2*x[4])/Qh2r2
  gh2=min(Min1,Min2)
  res[2]=(gh2-m2)*x[2]
  # Plant dynamics: supply - background loss - consumption by each herbivore
  res[3]=S1-a1*x[3]-g11*x[3]*x[1]-g21*x[3]*x[2]
  # BUG FIX: the last consumption term previously used x[3] (plant 1)
  # instead of x[4] (plant 2); by symmetry with res[3], herbivore 2's
  # consumption of plant 2 must be proportional to plant 2's own biomass.
  res[4]=S2-a2*x[4]-g12*x[4]*x[1]-g22*x[4]*x[2]
  return(list(res))
}
suptest=seq(0.6,10,length.out=15)
leng=length(suptest)*length(suptest)
resu=matrix(nrow=leng,ncol=8)
resu=as.data.frame(resu)
names(resu)=c("H1","H2","V1","V2",'R1','R2','SR1','SR2')
count=1
for (i in 1:length(suptest)){
for (j in 1:length(suptest)){
S1=suptest[i]
S2=suptest[j]
parms=c(S1,S2)
result=lsoda(c(H10,H20,Vx0,Vy0),tvoulu,equadiff,parms=parms,rtol=1e-12)
endval=dim(result)[1]-1
resu[count,1]=result[endval,2]
resu[count,2]=result[endval,3]
resu[count,3]=result[endval,4]
resu[count,4]=result[endval,5]
resu[count,5]=result[endval,4]*Qv1r1+result[endval,5]*Qv2r1
resu[count,6]=result[endval,4]*Qv1r2+result[endval,5]*Qv2r2
resu[count,7]=S1*Qv1r1+S2*Qv2r1
resu[count,8]=S1*Qv1r2+S2*Qv2r2
count=count+1
print(count)
}
}
thres=1 #0.01
resu2=na.omit(resu)
for (i in 1:dim(resu2)[1]){
if (resu2[i,1]>thres && resu2[i,2]>thres){
points(resu2[i,8]~resu2[i,7],col='green')
}else{
if (resu2[i,1]<thres && resu2[i,2]>thres){
points(resu2[i,8]~resu2[i,7],col='blue')
}else{
if (resu2[i,1]>thres && resu2[i,2]<thres){
points(resu2[i,8]~resu2[i,7],col='red')
}
}
}
}
Vxequ=(m2*Qh2r2*g12*Qv2r1-g22*Qv2r2*m1*Qh1r1)/(g21*Qv1r2*g12*Qv2r1-g22*Qv2r2*g11*Qv1r1)
Vyequ=(m1*Qh1r1-g11*Vxequ*Qv1r1)/(g12*Qv2r1)
R1equ=Vxequ*Qv1r1+Vyequ*Qv2r1
R2equ=Vxequ*Qv1r2+Vyequ*Qv2r2
S1equ1=seq(1,10)
S1equ2=seq(1,10)
S2equ1=(S1equ1-a1)*(g12*Vyequ)/(g11*Vxequ)+a2*Vyequ
S2equ2=(S1equ1-a1)*(g22*Vyequ)/(g21*Vxequ)+a2*Vyequ
ch1=(S1equ1*Qv1r2+S2equ1*Qv2r2)/(S1equ1*Qv1r1+S2equ1*Qv2r1)
ch2=(S1equ2*Qv1r2+S2equ2*Qv2r2)/(S1equ2*Qv1r1+S2equ2*Qv2r1)
pente1=mean(ch1)
pente2=mean(ch2)
ordo1=R2equ-pente1*R1equ
ordo2=R2equ-pente2*R1equ
x0=R1equ-0.125
y0=pente1*x0+ordo1
x1=2
y1=x1*pente1+ordo1
arrows(x0,y0,x1,y1,code=1,lwd=2.5,col='grey45',length=0.2)
x0=R1equ-0.135
y0=pente2*x0+ordo2
x1=x0+0.01
y1=x1*pente2+ordo2
arrows(x0,y0,x1,y1,code=1,lwd=2.5,col='grey45',length=0.2)
x2=2
y2=x2*pente2+ordo2
lines(c(y1,y2)~c(x1,x2),lty=2,lwd=2.5,col='grey45')
pente3=(g11*V1barre*Qv1r2+g12*V2barre*Qv2r2)/(g11*V1barre*Qv1r1+g12*V2barre*Qv2r1)
pente4=(g21*V1barre*Qv1r2+g22*V2barre*Qv2r2)/(g21*V1barre*Qv1r1+g22*V2barre*Qv2r1)
ordo1=R2equ-pente3*R1equ
ordo2=R2equ-pente4*R1equ
x0=R1equ-0.125
y0=pente3*x0+ordo1
x1=2
y1=x1*pente3+ordo1
arrows(x0,y0,x1,y1,code=1,lwd=2.5,col='red',length=0.2)
x0=R1equ-0.135
y0=pente4*x0+ordo2
x1=x0+0.01
y1=x1*pente4+ordo2
arrows(x0,y0,x1,y1,code=1,lwd=2.5,col='red',length=0.2)
x2=2
y2=x2*pente4+ordo2
lines(c(y1,y2)~c(x1,x2),lty=2,lwd=2.5,col='red')
|
8c268594470a9071d86e979881d9c3f5420810e3 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /BiDAG/man/partitionMCMC.Rd | dace236859a56b6f985505ff60e26f0bf1bb2d62 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 4,865 | rd | partitionMCMC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{partitionMCMC}
\alias{partitionMCMC}
\title{DAG structure sampling with partition MCMC}
\usage{
partitionMCMC(
scorepar,
startspace = NULL,
blacklist = NULL,
scoretable = NULL,
startDAG = NULL,
moveprobs = NULL,
iterations = NULL,
stepsave = NULL,
gamma = 1,
verbose = TRUE
)
}
\arguments{
\item{scorepar}{an object of class \code{scoreparameters}, containing the data and scoring parameters; see constructor function \code{\link{scoreparameters}}.}
\item{startspace}{(optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix; if NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals to 1 (0) it means that the edge from node \code{i} to node \code{j} is included (excluded) from the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1.}
\item{blacklist}{(optional) a square matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space; if \code{blacklist[i,j]=1} it means that the edge from node \code{i} to node \code{j} is excluded from the search space}
\item{scoretable}{(optional) list of score tables; for example calculated at the last iteration of the function \code{iterativeMCMC}, to avoid their recomputation; the score tables must match the permissible parents in the search space defined by the startspace parameter}
\item{startDAG}{(optional) an adjacency matrix of dimensions equal to the number of nodes, representing a DAG in the search space defined by startspace. If startspace is defined but \code{startDAG} is not, an empty DAG will be used by default}
\item{moveprobs}{(optional) a numerical vector of 5 values in \code{\{0,1\}} corresponding to the following MCMC move probabilities in the space of partitions:
\itemize{
\item swap any two elements from different partition elements
\item swap any two elements in adjacent partition elements
\item split a partition element or join one
\item move a single node into another partition element or into a new one
\item stay still
}}
\item{iterations}{(optional) integer, the number of MCMC steps, the default value is \eqn{8n^{2}\log{n}}}
\item{stepsave}{(optional) integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}}
\item{gamma}{(optional) tuning parameter which transforms the score by raising it to this power, 1 by default}
\item{verbose}{logical, if set to TRUE (default) messages about progress will be printed}
}
\value{
an object of class \code{MCMCtrace}, which contains a list of 5 elements (each list contains \code{iterations/stepsave} elements):
\itemize{
\item incidence - contains a list of adjacency matrices of DAGs sampled at each step of MCMC
\item DAGscores - contains a list of scores of DAGs sampled at each step of MCMC
\item partitionscores - contains a list of scores of partitions of DAGs sampled at each step of MCMC
\item order - contains a list of permutations of the nodes in partitions of DAGs sampled at each step of MCMC
\item partition - contains a list of partitions of DAGs sampled at each step of MCMC
}
}
\description{
This function implements the partition MCMC algorithm for the structure learning of Bayesian networks. This procedure provides an unbiased sample from the posterior distribution of DAGs given the data.
The search space can be defined either by a preliminary run of the function \code{iterativeMCMC} or by a given adjacency matrix (which can be the full matrix with zero on the diagonal, to consider the entire space of DAGs, feasible only for a limited number of nodes).
}
\examples{
\dontrun{
myScore<-scoreparameters(14, "bge", Boston)
partfit<-partitionMCMC(myScore)
plot(partfit)
}
}
\references{
Kuipers J and Moffa G (2017). Partition MCMC for inference on acyclic digraphs. Journal of the American Statistical Association 112, 282-299.
Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
Heckerman D and Geiger D (1995). Learning Bayesian networks: A unification for discrete and Gaussian domains. In Eleventh Conference on Uncertainty in Artificial Intelligence, pages 274-284.
Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26.
Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian directed acyclic graphical models. The Annals of Statistics 42, 1689-1691.
}
|
634902f2552b236d530a247942a6730851d25a8d | 645f63b45d9dcdebc8f58bd71d186525d103e621 | /LAB2/testforbeginning.R | e5ec6d1c79dd15338ead664d5e16c260360c0ff0 | [
"Apache-2.0"
] | permissive | snotman/STT864 | 7a012446534ae9258cc832066969f7c9424a7a7d | a33df1d5f09510e372f71b4d0c701148c69f330d | refs/heads/master | 2021-01-19T01:03:40.665082 | 2017-04-04T18:31:33 | 2017-04-04T18:31:33 | 87,218,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,330 | r | testforbeginning.R | setwd("C://Users//nan66//Documents//MSU//stt864//LAB2")
flintlead<-read.csv(file="Flint-water-lead-dataset.csv",header=FALSE)
colnames(flintlead)=c("SampleID","Zip Code","Ward", "0sec", "5sec", "120sec")
time<-c(0, 45, 120)
flintlead2<-flintlead[flintlead[,5]<1000,]
f<-function(a,b,c,i){zipcode<-which(flintlead2[,2]==i)
subsetflintlead<-flintlead2[zipcode,]
responses1<-unlist(subsetflintlead[,4:6])
sampletime1<-rep(time,each=dim(subsetflintlead)[1])
nlsreg2<-nls(responses1~theta1/(1+theta2*(exp(sampletime1*theta3))),
start=list(theta1=a,theta2=b,theta3=c))
return(nlsreg2)
}
d<-function(i){zipcode<-which(flintlead2[,2]==i)
subsetflintlead<-flintlead2[zipcode,]
responses1<-unlist(subsetflintlead[,4:6])
sampletime1<-rep(time,each=dim(subsetflintlead)[1])
matplot(sampletime1,responses1,pch=18)
}
f<-function(a,b,c){
zipcode<-which(flintlead2[,2]==48504)
subsetflintlead<-flintlead2[zipcode,]
responses1<-unlist(subsetflintlead[,4:6])
sampletime1<-rep(time,each=dim(subsetflintlead)[1])
nlsreg2<-nls(responses1~theta1/(1+theta2*(exp(sampletime1*theta3))),
start=list(theta1=a,theta2=b,theta3=c))
return(nlsreg2)
}
##48503: 2.68,-0.75,-0.008
##48504?????????
##48505:1.44, -0.779, -0.0074
##48506: 2.19 , -0.81 -0.024
##48507 : 4.343 -0.61,-0.015 |
3d5bd09b52aa4ba516d4a2eb30487395f69b0930 | 9ab84dba6505ff02f06b8c8a7e58fa1bdf5113a8 | /R/getESFromChiSquared.R | 23daa073d05f3672aa42de94b89b1f554fe8820e | [] | no_license | marcionicolau/StatViz | 8a32ad2b9dc2128709cff9d856989df964904faf | c7b429eb707b2f8a4e55593a8a10a999055b3ed4 | refs/heads/master | 2020-12-26T13:51:21.469518 | 2013-12-11T12:14:01 | 2013-12-11T12:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 456 | r | getESFromChiSquared.R | getESFromChiSquared <- function(ChiSquared = "", n = "")
{
install.packages("compute.es");
library(compute.es);
if(ChiSquared == "")
{
ChiSquared = "3.14";
n = "40";
}
result = chies(eval(parse(text = ChiSquared)), eval(parse(text = n)));
# result = tes(3.14, 40, 40);
list(d = result$MeanDifference[["d"]], g = result$MeanDifference[["g"]], r = result$Correlation[["r"]], ChiSquared = ChiSquared, n = n);
}
|
a39fa335d5a23ef9dbdb027e9782a4989cea2f9e | ad6dfcab422765ece1a3fb9d6ea427fccdc29aae | /cachematrix.R | ac14e87d849caa72ff887f288c995f3adbfc0fa3 | [] | no_license | JorgeQuereda/Programming-Assignment-2 | 68a3203f6e8084738a2271ec3c4928bca9ef4cc9 | 73b2793b18b66fb07ab9d8b3964b4a7f06a47199 | refs/heads/master | 2021-01-10T16:39:35.596503 | 2015-10-21T12:49:26 | 2015-10-21T12:49:26 | 44,635,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,081 | r | cachematrix.R | ## Inverts a matrix x or reads its inverse from cache memory
## makeCacheMatrix creates a "matrix" object that can cache its inverse.
## x is the input matrix
## inv is the inverse of the matrix
## get reads the value of x
## set overwrites the value of x
## getinv reads the value of the inverse
##
# Build a "cache matrix" object: a closure-based container holding a matrix
# together with a memo slot for its inverse. Returns a list of accessors:
#   set(y)        replace the stored matrix and drop any cached inverse
#   get()         retrieve the stored matrix
#   setinv(v)     store a computed inverse in the cache
#   getinv()      retrieve the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    # Replacing the matrix invalidates whatever inverse was cached.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix. If the inverse has already been calculated
## (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
# Return the inverse of the matrix wrapped by a makeCacheMatrix object.
# If the object already holds a cached inverse, return it (announcing the
# cache hit); otherwise compute it with solve(), store it, and return it.
# Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh_inverse <- solve(x$get(), ...)
  x$setinv(fresh_inverse)
  fresh_inverse
}
|
90764bc0d09e3719158ebd6b15179b82e2e65a28 | b3277733361941befd94f4b1cd0a858d9f475747 | /man/JSTOR_corpusofnouns.Rd | 686652249fb10b800a76d5806e765fbb480f736b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pnandak/JSTORr | ce9299c465e6893a4f4fa3a401422a20cd565824 | 48e13a7a0c6cd41bda76f9cc98798fcc9875bb07 | refs/heads/master | 2021-01-15T10:05:38.151329 | 2014-02-05T06:19:28 | 2014-02-05T06:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,454 | rd | JSTOR_corpusofnouns.Rd | \name{JSTOR_corpusofnouns}
\alias{JSTOR_corpusofnouns}
\title{Remove all words except non-name nouns}
\usage{
JSTOR_corpusofnouns(x, parallel = FALSE)
}
\arguments{
\item{x}{object returned by the function JSTOR_unpack.}
\item{parallel}{if TRUE, apply function in parallel,
using the parallel library}
}
\value{
Returns a Document Term Matrix containing documents,
ready for more advanced text mining and topic modelling.
}
\description{
This function does part-of-speech tagging and removes all
parts of speech that are not non-name nouns. It also
removes punctuation, numbers, words with less than three
characters, stopwords and unusual characters (characters
not in ISO-8859-1, ie non-latin1-ASCII). For use with
JSTOR's Data for Research datasets
(http://dfr.jstor.org/). This function uses the stoplist
in the tm package. The location of tm's English stopwords
list can be found by entering this at the R prompt:
paste0(.libPaths()[1], "/tm/stopwords/english.dat") Note
that the part-of-speech tagging can result in the removal
of words of interest. To prevent the POS tagger from
removing these words, edit the tagdict file and add the
word(s) with a NN tag. To find the tagdict file, enter
  this at the R prompt:
paste0(.libPaths()[1],
"/openNLPmodels.en/models/parser/tagdict") and edit with
a text editor.
}
\examples{
## nouns <- JSTOR_corpusofnouns(unpack, parallel = TRUE)
}
|
55cfa4f3e6d7473ef40ed3c58168d723018a357b | 64b9d355734ac82c30aec9cf33fc08c24def620b | /batch 8 code.R | 8103c476a04bc9dae7e0ba4ac4de58b53fb9ba48 | [] | no_license | drstatsvenu/OU-Batch-2018-19-Sem3 | a38804d54f6dca93d91cd5114152ba714d60e19c | 7ab95f3a45ffe47b9635a5524d1ee878f9ab136b | refs/heads/master | 2020-09-07T12:33:34.968803 | 2019-11-10T11:46:28 | 2019-11-10T11:46:28 | 220,781,872 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,546 | r | batch 8 code.R | getwd()
setwd("C:/DMMLT")
getwd()
###################################################
#Reading data
###################################################################
data<-read.csv('email.csv',stringsAsFactors = T, na.strings = c(""," ","NA","?",NA))
View(data)
head(data,10)
tail(data)
#Structure of Dataset :
names(data)
pairs(data)
str(data)
summary(data)
dim(data)
###################################################################
# Correctign data types
###################################################################
data$Email_Status<-as.factor(data$Email_Status)
data$Time_Email_sent_Category<-as.factor(data$Time_Email_sent_Category)
data$Email_Type<-as.factor(data$Email_Type)
data$Email_Source_Type<-as.factor(data$Email_Source_Type)
data$Email_Campaign_Type<-as.factor(data$Email_Campaign_Type)
str(data)
###################################################################
#5. Find missing values in data set if any.
sum(is.na(data))
#Missing value Proportion for all the variables
sapply(data, function(df) {
(sum(is.na(df)==TRUE)/ length(df))*100;
})
###################################################################
#IMPUTATION
######################################3
install.packages("Hmisc")
library(Hmisc)
data$Total_Past_Communications[is.na(data$Total_Past_Communications)]<-median(data$Total_Past_Communications,na.rm=T)
data$Total_Links[is.na(data$Total_Links)]<-median(data$Total_Links,na.rm=T)
data$Total_Images[is.na(data$Total_Images)]<-median(data$Total_Images,na.rm=T)
#Imputing with most frequent occuring level
data$Customer_Location[is.na(data$Customer_Location)]<-'G'
sum(is.na(data))
#######
sapply(data, function(df) {
(sum(is.na(df)==TRUE)/ length(df))*100;
})
######################################3
# Removing outliers
######################################
boxplot(data$Subject_Hotness_Score)
boxplot(data$Total_Past_Communications)
boxplot(data$Total_Links)
boxplot(data$Total_Images)
data$Subject_Hotness_Score[data$Subject_Hotness_Score>quantile(data$Subject_Hotness_Score, 0.95)] <- quantile(data$Subject_Hotness_Score, 0.95)
data$Total_Links[data$Total_Links>quantile(data$Total_Links, 0.95)] <- quantile(data$Total_Links, 0.95)
data$Total_Images[data$Total_Images>quantile(data$Total_Images, 0.90)] <- quantile(data$Total_Images, 0.90)
boxplot(data$Subject_Hotness_Score)
boxplot(data$Total_Links)
boxplot(data$Total_Images)
names(data)
######################
# Hypothesis testing
######################
chisq.test(data$Email_Status, data$Email_Type, correct=FALSE)
chisq.test(data$Email_Status, data$Customer_Location , correct=FALSE)
chisq.test(data$Email_Status, data$Email_Campaign_Type , correct=FALSE)
#anova
x1=aov(data$Total_Links ~ data$Email_Status)
summary(x1)
x2=aov(data$Total_Images ~ data$Email_Status)
summary(x2)
summary(data)
######################################3
train_rows<- sample(1:nrow(data), size=0.7*nrow(data))
train_rows
training <- data[train_rows, ]
test <- data[-train_rows, ]
dim(data)
dim(training)
dim(test)
names(training)
names(test)
str(training)
######################################3
# set-up test options
install.packages("dplyr")
library(dplyr)
install.packages("caret")
library(caret)
control <- trainControl(method="repeatedcv", number=5)
seed <- 7
metric <- "Accuracy"
# Logistic regression (caret method "glm" fits a binomial GLM; use "multinom" for >2 classes)
set.seed(seed)
fit.glm1 <- train(Email_Status~., data=training, method="glm", metric=metric, trControl=control)
print(fit.glm1)
# CART
set.seed(seed)
fit.cart <- train(Email_Status~., data=training, method="rpart", metric=metric, trControl=control)
print(fit.cart)
# kNN
set.seed(seed)
fit.knn <- train(Email_Status~., data=training, method="knn", metric=metric, preProc=c("center", "scale"), trControl=control)
print(fit.knn)
# SVM
set.seed(seed)
fit.svm <- train(Email_Status~., data=training, method="svmRadial", metric=metric, preProc=c("center", "scale"), trControl=control, fit=FALSE)
print(fit.svm)
# Random Forest
set.seed(seed)
fit.rf <- train(Email_Status~., data=training, method="rf", metric=metric, trControl=control)
print(fit.rf)
# Compare algorithms
results <- resamples(list(logistic=fit.glm1,svm=fit.svm, knn=fit.knn, DT=fit.cart,rf=fit.rf ))
# Table comparison
summary(results)
# boxplot comparison
bwplot(results)
# Dot-plot comparison
dotplot(results)
###########################
#################################################################
# Checking Random Forest
#################################################################
# Custom caret model specification (passed later as train(method = customRF))
# that exposes BOTH mtry and ntree as tunable randomForest parameters;
# caret's built-in "rf" method only tunes mtry.
customRF <- list(type = "Classification", library = "randomForest", loop = NULL)
# Declare the two tuning parameters caret will grid over.
customRF$parameters <- data.frame(parameter = c("mtry", "ntree"), class = rep("numeric", 2), label = c("mtry", "ntree"))
# No automatic grid generation: the tuneGrid is always supplied explicitly.
customRF$grid <- function(x, y, len = NULL, search = "grid") {}
# Fit one randomForest per (mtry, ntree) combination from the grid.
customRF$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...) {
  randomForest(x, y, mtry = param$mtry, ntree=param$ntree, ...)
}
# Class predictions for resampling evaluation.
customRF$predict <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
  predict(modelFit, newdata)
# Class probabilities (needed for prob-based metrics).
customRF$prob <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
  predict(modelFit, newdata, type = "prob")
# Order candidate parameter rows from least to most complex.
customRF$sort <- function(x) x[order(x[,1]),]
# Report the outcome class levels from a fitted model.
customRF$levels <- function(x) x$classes
#########################################################################3
library(caret)
# NOTE(review): install.packages() inside an analysis script modifies the
# user's library on every run; consider removing once dependencies are installed.
install.packages("randomForest")
library(randomForest)
# 10-fold cross-validation repeated 3 times, shared by the tuning run below.
control <- trainControl(method="repeatedcv", number=10, repeats=3)
#tunegrid <- expand.grid(.mtry=c(1:5), .ntree=c(100, 150, 200, 250))
# Grid over mtry (1..5) and ntree (100/200/500) for the customRF model above.
tunegrid <- expand.grid(.mtry=c(1:5), .ntree=c(100,200,500))
set.seed(seed)
custom <- train(Email_Status~Email_Type+
                  Subject_Hotness_Score+ Email_Source_Type+
                  Customer_Location+ Email_Campaign_Type+
                  Total_Past_Communications+ Time_Email_sent_Category+
                  Word_Count+ Total_Links+
                  Total_Images,data = training,method=customRF, tuneGrid=tunegrid, trControl=control)
print(custom)
####################################################
library('randomForest')
# Fit a single random forest at the chosen hyperparameters (ntree=200, mtry=2).
# NOTE(review): trControl is not an argument of randomForest(); it is absorbed
# by `...` and presumably has no effect here -- confirm and consider dropping it.
rf_model <- randomForest(Email_Status~Email_Type+
                           Subject_Hotness_Score+ Email_Source_Type+
                           Customer_Location+ Email_Campaign_Type+
                           Total_Past_Communications+ Time_Email_sent_Category+
                           Word_Count+ Total_Links+
                           Total_Images,data = training,ntree=200,mtry=2,trControl=control)
#################################################################
# Variable Importance
#################################################################
# Plot the variable importance (MeanDecreaseGini) of the random forest model.
# NOTE(review): install.packages() inside an analysis script is a side effect
# on the user's library; consider moving it out of the script.
install.packages("ggthemes")
library('ggthemes')
library('dplyr')
# Renamed from `importance` so the result no longer shadows
# randomForest::importance().
imp_matrix <- importance(rf_model)
varImportance <- data.frame(Variables = row.names(imp_matrix),
                            Importance = round(imp_matrix[ ,'MeanDecreaseGini'],2))
# Create a rank variable based on importance (#1 = most important).
rankImportance <- varImportance %>%
  mutate(Rank = paste0('#',dense_rank(desc(Importance))))
# Use ggplot2 to visualize the relative importance of variables.
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
                           y = Importance, fill = Importance)) +
  geom_bar(stat='identity') +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust=0, vjust=0.55, size = 4, colour = 'red') +
  # Fix: the label was previously split across two source lines ('Vari\nables'),
  # which put a literal line break in the axis title.
  labs(x = 'Variables') +
  coord_flip() +
  theme_few()
############################################
#Final model
############################################
# Final model: binomial (logistic) regression via stats::glm().
# NOTE(review): library(nnet) is loaded but only glm() is used below; confirm
# whether nnet is still needed before removing it. install.packages() in a
# script is a side effect on the user's library.
install.packages("nnet")
library(nnet)
mymodel <- glm(Email_Status~Subject_Hotness_Score+Customer_Location+
                 Total_Past_Communications+Word_Count+Total_Links+Total_Images+Email_Campaign_Type, data=training,family='binomial')
# In-sample prediction: classify as 1 when the predicted probability exceeds 0.5.
p1 <- predict(mymodel, newdata=training, type = 'response')
pred1 <- ifelse(p1>0.5, 1, 0)
library(caret)
confusionMatrix(as.factor(pred1),training$Email_Status)
# For test data (out-of-sample performance).
# Fix: was `p2=...`; use `<-` for assignment, consistent with the rest of the
# script. The duplicate library(caret) call was dropped (already attached above).
p2 <- predict(mymodel, newdata=test, type = 'response')
pred2 <- ifelse(p2>0.5, 1, 0)
confusionMatrix(as.factor(pred2),test$Email_Status)
|
a11b1da8dca4830a949962972342835736b1044f | ddd3559b1f1e58944d679bcbaa813e8453eafc62 | /R/read_GPX.R | cc931d50599d045b3478f34a5937666dce32f5d3 | [] | no_license | cran/tmaptools | 8a7ce9d8ba47cc792cadf8dfe3f231f0db57f961 | d37013ad904edcc5c0dab4db1b6ad561694a64b2 | refs/heads/master | 2021-07-25T01:47:09.925914 | 2021-01-19T19:30:02 | 2021-01-19T19:30:02 | 77,929,207 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,367 | r | read_GPX.R | #' Read GPX file
#'
#' Read a GPX file. By default, it reads all possible GPX layers, and only returns shapes for layers that have any features.
#'
#' Note that this function returns \code{\link[sf:sf]{sf}} objects, but still uses methods from sp and rgdal internally.
#'
#' @param file a GPX filename (including directory)
#' @param layers vector of GPX layers. Possible options are \code{"waypoints"}, \code{"tracks"}, \code{"routes"}, \code{"track_points"}, \code{"route_points"}. By default, all those layers are read.
#' @param remove.empty.layers should empty layers (i.e. with 0 features) be removed from the list?
#' @param as.sf not used anymore
#' @return a list of sf objects, one for each layer
#' @export
read_GPX <- function(file, layers=c("waypoints", "routes", "tracks", "route_points", "track_points"), remove.empty.layers = TRUE, as.sf = TRUE) {
    # Reject any requested layer name outside the set a GPX file can contain.
    valid_layers <- c("waypoints", "routes", "tracks", "route_points", "track_points")
    if (!all(layers %in% valid_layers)) stop("Incorrect layer(s)", call. = FALSE)
    # Inventory of the layers (and their feature counts) actually in the file.
    layers_data <- sf::st_layers(file)
    if (!all(layers %in% layers_data$name)) stop("layers not found in GPX file")
    # Read each requested layer quietly and keep the results in a named list.
    res <- lapply(layers, function(one_layer) sf::st_read(file, layer = one_layer, quiet = TRUE))
    names(res) <- layers
    if (remove.empty.layers) {
        # Drop layers whose feature count in the file is zero.
        n_features <- layers_data$features[match(layers, layers_data$name)]
        res <- res[n_features > 0]
    }
    res
}
|
403e2d964082082717a0f23c2293ebbe6b9b89f2 | d1ae02ea69b02f4ccbb51b1352dbb66b11f8169d | /asn1.R | 77f5b652b55406f297e83ba26984b5db3312c282 | [] | no_license | thiyagarajans/RepData_PeerAssessment1 | 55f0ea2af94a296c6b256cd32340f9f284211d09 | 8542497533433706d7eb460b887f6ed2b46a6009 | refs/heads/master | 2021-01-17T20:23:36.005861 | 2015-06-05T07:56:19 | 2015-06-05T07:56:19 | 35,601,659 | 0 | 0 | null | 2015-05-14T08:58:44 | 2015-05-14T08:58:44 | null | UTF-8 | R | false | false | 2,289 | r | asn1.R | library(plyr)
library(dplyr)
library(ggplot2)
# Activity-monitoring data: columns steps, date, interval (one row per
# 5-minute interval per day). Assumes activity.csv is in the working
# directory -- TODO confirm.
d1 <- read.csv("activity.csv")
d1$date <- as.Date(d1$date,"%Y-%m-%d")
# Q1: total and mean steps per day, histogram of the daily totals
d3 <- group_by(d1, date)
d5 <- summarize(d3, sum(steps, na.rm=TRUE), mean(steps, na.rm=TRUE))
names(d5) <- c("Date", "Sum", "Mean")
hist(d5$Sum, xlab="", main="")
title(main="Total number of Steps", xlab="Number of Steps")
print("Mean steps per day")
mean(d5$Sum, na.rm=TRUE)
print ("Median steps per day")
median(d5$Sum, na.rm=TRUE)
#Q2: average daily activity pattern across 5-minute intervals
d2 <- d1
# Convert the HHMM interval code to a POSIXct time of day (for the x axis).
d2$interval <- as.POSIXct(sprintf("%04d",d2$interval), format="%H%M")
d4 <- group_by(d2,interval)
d6 <- summarize(d4,sum(steps, na.rm=TRUE),mean(steps, na.rm=TRUE))
names(d6) <- c("Interval", "Sum", "Mean")
plot(d6$Interval, d6$Mean, type="l", main="",xlab="",ylab="Mean Steps", xaxt="n")
title(main="Mean number of steps across hours of a day", xlab="Time interval (hrs)")
axis.POSIXct(side=1, at=window(d6$Interval,deltat=12), format="%H")
print("Interval with maximum steps is ")
max_interval <- d6[d6$Sum==max(d6$Sum),]$Interval
# Fix: the message above was printed without the value (a bare top-level
# assignment prints nothing under source()); show the interval itself.
print(max_interval)
#Q3.1: count rows with missing step counts
print("Number of rows with missing values")
sum(is.na(d1$steps))
#Q3.2: impute missing step counts
#find the index of missing values
missing_values <- which(is.na(d1$steps))
#Missing values in steps filled with Mean steps value of the corresponding
#time interval, averaged over all days.
fill_d1 <- d1
fill_d1$interval <- as.POSIXct(sprintf("%04d",fill_d1$interval), format="%H%M")
# Each missing row gets the interval mean looked up in d6 (computed in Q2).
for (i in missing_values) {fill_d1$steps[i] <- d6[d6$Interval==fill_d1$interval[i],]$Mean }
#Q3.3: redo the per-day totals on the imputed data and compare with Q1
d7 <- group_by(fill_d1, date)
d8 <- summarize(d7,sum(steps),mean(steps))
names(d8) <- c("Date", "Sum", "Mean")
hist(d8$Sum, xlab="", main="")
title(main="Total number of Steps", xlab="Number of Steps")
print("Mean steps per day")
mean(d8$Sum)
print ("Median steps per day")
median(d8$Sum)
#Mean remains the same but the median has slightly changed.
#Q4: compare weekday vs weekend activity patterns
# NOTE(review): weekdays() returns locale-dependent day names; matching on
# "Saturday"/"Sunday" assumes an English locale -- confirm.
mut_d1 <- mutate(fill_d1,day="weekday", value=weekdays(date))
mut_d1[mut_d1$value == "Saturday",]$day <- "weekend"
mut_d1[mut_d1$value == "Sunday",]$day <- "weekend"
d9 <- group_by(mut_d1,day,interval, add=TRUE)
d10 <- summarize(d9,sum(steps),mean(steps))
names(d10) <- c("day","interval","Sum","Mean")
# One panel per day type; the x axis is relabeled to hours in the next statement.
g <- ggplot(d10, aes(x=interval,y=Mean, col=day)) + geom_line(size=1.5) + facet_wrap(~day, ncol=1)
g + scale_x_continuous(breaks = seq(0,2400,100) , labels=c(0:24)) |
f1f48a2d951642b7444c40719d5e46054d4c0f5a | d0c72def22b832453eda4ecaf2d3ef350dc5fab0 | /hybrid_pipeline.R | 2c876d9bebbb3b47843826770cd7c0201804b8d6 | [] | no_license | diazrenata/ldats2020 | 9e12ac91ba4e4a172a3515880281a6632e63f7cb | 10dfafb89443a90a9c430328f52ef32058dcf443 | refs/heads/master | 2023-07-06T07:57:13.605804 | 2021-05-20T19:07:22 | 2021-05-20T19:07:22 | 294,782,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,706 | r | hybrid_pipeline.R | library(drake)
library(ggplot2)
library(dplyr)
source(here::here("analysis", "fxns", "crossval_fxns.R"))
source(here::here("analysis", "fxns", "hybrid_fxns.R"))
source(here::here("analysis", "fxns", "make_toy_data_objects.R"))
library(MATSS)
library(LDATS)
## include the functions in packages as dependencies
# - this is to help Drake recognize that targets need to be rebuilt if the
# functions have changed
## a Drake plan for creating the datasets
# - these are the default options, which don't include downloaded datasets
# Select the BBS dataset targets of interest plus a set of "story" routes.
datasets <- build_bbs_datasets_plan()
m <- which(grepl(datasets$target, pattern = "rtrg_1_11")) # wants many topics
# Fix: was `stories_codes = c(...)`; use `<-` for top-level assignment.
stories_codes <- c("rtrg_304_17",
                   "rtrg_102_18",
                   "rtrg_105_4",
                   "rtrg_133_6",
                   "rtrg_19_35",
                   "rtrg_172_14")
# First matching plan row for each story code (redundant return() removed).
stories_codes <- vapply(stories_codes, FUN = function(story) min(which(grepl(datasets$target, pattern = story))), FUN.VALUE = 1)
datasets <- datasets[c(m, stories_codes),]
# Toy datasets: strip the ".csv" suffix to get target names.
toy_dataset_files <- list.files(here::here("analysis", "toy_datasets"), pattern= ".csv")
# Fix: without fixed = TRUE the split pattern ".csv" is a regex in which "."
# matches any character (e.g. it would also split on "Xcsv").
toy_dataset_files <- unlist(strsplit(toy_dataset_files, split = ".csv", fixed = TRUE))
toy_path <- here::here("analysis", "toy_datasets")
toy_datasets <- drake::drake_plan(
  toy = target(get_toy_data(dataset_name, toy_datasets_path = toy_path),
               transform = map(dataset_name = !!toy_dataset_files))
)
#datasets <- bind_rows(datasets, toy_datasets)
# NOTE(review): this overrides the plan with a single toy dataset (row 7) --
# presumably a debugging restriction; confirm before a full run.
datasets <- toy_datasets[7, ]
#if(FALSE){
# Drake plan for the modeling targets:
#   ldats_fit  -- one LDATS hybrid fit per (dataset x k x seed) combination
#   ldats_eval -- cross-validation evaluation of each fit
#   all_evals  -- evaluation rows combined per dataset
# NOTE(review): use_folds = T uses the reassignable shorthand; TRUE is safer.
methods <- drake::drake_plan(
  ldats_fit = target(fit_ldats_hybrid(dataset, use_folds = T, n_folds = 20, n_timesteps = 2, buffer = 2, k = ks, seed = seeds, cpts = c(0:5), nit = 100),
                     transform = cross(
                       dataset = !!rlang::syms(datasets$target),
                       ks = !!c(2:10),
                       seeds = !!seq(4, 30, by = 2)
                     )),
  ldats_eval = target(eval_ldats_crossval(ldats_fit, use_folds = T),
                      transform = map(ldats_fit)
  ),
  all_evals = target(dplyr::bind_rows(ldats_eval),
                     transform = combine(ldats_eval, .by = dataset))
)
# } else {
# methods <- drake::drake_plan(
# ldats_fit = target(fit_ldats_crossval(dataset, buffer = 4, k = ks, seed = seeds, cpts = cpts, nit = 1000, fit_to_train = FALSE),
# transform = cross(
# dataset = !!rlang::syms(datasets$target),
# ks = !!c(2:5),
# seeds = !!seq(2, 50, by = 2),
# cpts = !!c(0:5)
# )),
# ldats_eval = target(eval_ldats_crossval(ldats_fit, nests = 1000),
# transform = map(ldats_fit)
# ),
# all_evals = target(dplyr::bind_rows(ldats_eval),
# transform = combine(ldats_eval, .by = dataset))
# )
# }
## The full workflow: dataset targets followed by the modeling targets
workflow <- dplyr::bind_rows(
  datasets,
  methods
)
## Set up the cache and config
# SQLite-backed storr cache, shared between local and cluster runs.
db <- DBI::dbConnect(RSQLite::SQLite(), here::here("analysis", "drake", "drake-cache-hybrid.sqlite"))
cache <- storr::storr_dbi("datatable", "keystable", db)
# Remove any stale session lock left by a previous (crashed) run.
cache$del(key = "lock", namespace = "session")
## View the graph of the plan (interactive sessions only)
if (interactive())
{
  config <- drake_config(workflow, cache = cache)
  sankey_drake_graph(config, build_times = "none") # requires "networkD3" package
  vis_drake_graph(config, build_times = "none") # requires "visNetwork" package
}
## Run the pipeline
# Dispatch on hostname: clustermq/SLURM parallelism on the HiPerGator cluster,
# a plain make() locally.
nodename <- Sys.info()["nodename"]
if(grepl("ufhpc", nodename)) {
  print("I know I am on the HiPerGator!")
  library(clustermq)
  options(clustermq.scheduler = "slurm", clustermq.template = "slurm_clustermq.tmpl")
  ## Run the pipeline parallelized for HiPerGator
  make(workflow,
       force = TRUE,
       cache = cache,
       cache_log_file = here::here("analysis", "drake", "cache_log_hybrid.txt"),
       verbose = 1,
       parallelism = "clustermq",
       jobs = 50,
       caching = "master", memory_strategy = "autoclean") # Important for DBI caches!
} else {
  # Run the pipeline locally
  # NOTE(review): no parallelism argument is given here, so this run is
  # presumably serial despite the old "multiple local cores" comment -- confirm.
  system.time(make(workflow, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_hybrid.txt")))
}
# Collect every per-dataset "all_evals" target from the cache, tag each with
# its target name, and write the combined table to CSV.
all_evals_objs <- methods$target[grepl("all_evals", methods$target)]
# Preallocate and use seq_along(): the original `1:length(x)` evaluates to
# c(1, 0) when no target matches, and T/F shorthands are replaced by TRUE/FALSE.
all_evals_list <- vector("list", length(all_evals_objs))
for (i in seq_along(all_evals_objs)) {
  all_evals_list[[i]] <- readd(all_evals_objs[i], character_only = TRUE, cache = cache)
  all_evals_list[[i]]$dataset <- all_evals_objs[i]
}
all_evals_df <- bind_rows(all_evals_list)
write.csv(all_evals_df, here::here("analysis", "all_evals_hybrid_portal.csv"), row.names = FALSE)
DBI::dbDisconnect(db)
rm(cache)
|
599ea0ae8f3f3babfb4c57ef4ff7094b613fd132 | 8052858568ded323d0bd79096c8b1d55a754818b | /tests/testthat/test-numeric_distributions.R | 787699e0131e542918c4b77639d4d3a46671db9a | [] | no_license | jmaspons/LHR | bfe77214eb16ca459a56705dad046ec823f6f9e2 | d3e48ad1c571a45b0016cd9f131d765166511bac | refs/heads/master | 2020-04-10T23:48:07.450826 | 2019-03-27T23:53:58 | 2019-03-27T23:53:58 | 161,365,369 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,762 | r | test-numeric_distributions.R | context("Class Numeric distributions (S3)")
## Check C-level memory management
# gctorture(on=TRUE)
# gctorture(on=FALSE)
test_that("constructor", {
  # Constructing a binomial distribution -- and a compound binomial built on
  # top of another numericDistri -- both return numericDistri objects.
  expect_is(distri<- distriBinom(2, .6), "numericDistri")
  expect_is(distriC<- distriBinom(distri, .3), "numericDistri")
  # Repeat each construction many times and check the total probability mass
  # is always exactly 1 (guards against C-level memory bugs; see header note).
  # Fix: preallocate the result vectors instead of growing them with
  # `numeric()` inside a 1000-iteration loop (results are identical).
  res<- resC<- resS<- resP<- numeric(1000)
  for (i in 1:1000){
    res[i]<- cumP(distriBinom(2, .6))$cump[3]
    resP[i]<- cumP(distri * 2)$cump[3]
    resC[i]<- cumP(distriBinom(distri, .3))$cump[3] ## Fixed
    resS[i]<- cumP(distri + distriC)$cump[5] ## Fixed
    # print(resS[i])
  }
  expect_equal(unique(res), 1)
  expect_equal(unique(resP), 1)
  expect_equal(unique(resC), 1)
  expect_equal(unique(resS), 1)
  ## Same checks with probabilities kept on the log scale (logP)
  distri<- distriBinom(2, .6, log=TRUE)
  distriC<- distriBinom(distri, .3, log=TRUE)
  res<- resC<- resS<- resP<- numeric(1000)
  for (i in 1:1000){
    res[i]<- cumP(distriBinom(2, .6, log=TRUE))$cump[3]
    resP[i]<- cumP(distri * 2)$cump[3]
    resC[i]<- cumP(distriBinom(distri, .3, log=TRUE))$cump[3] ## Fixed
    resS[i]<- cumP(distri + distriC)$cump[5] ## Fixed
    # print(resS[i])
  }
  expect_equal(unique(res), 1)
  expect_equal(unique(resP), 1)
  expect_equal(unique(resC), 1)
  expect_equal(unique(resS), 1)
})
test_that("methods", {
  # A simple binomial distribution, plus a compound one built from it
  # (the compound call also exercises the constructor on a numericDistri).
  simple_distri <- distriBinom(2, .6)
  compound_distri <- distriBinom(simple_distri, .3)
  # Summary methods return base numeric / data.frame types.
  expect_is(mean(simple_distri), "numeric")
  expect_is(var(simple_distri), "numeric")
  expect_is(summary(simple_distri), "data.frame")
  expect_is(quantile(simple_distri), c("numeric", "integer"))
  expect_is(sdistri(simple_distri), "data.frame")
  # The d/p/q/r functions follow the usual distribution-function conventions.
  expect_is(ddistri(0:2, simple_distri), "numeric")
  expect_is(pdistri(0:2, simple_distri), "numeric")
  expect_is(qdistri(seq(0, 1, length=5), simple_distri), c("numeric", "integer"))
  expect_is(rdistri(10, simple_distri), c("numeric", "integer"))
})
|
7943b34a5886a98d8a0ef75e4e25c7ec594db168 | 384c8d1f3a5b92604972b0bb3d2e1a82628d0117 | /R/prob-sim-menu.R | fafc691477f21be16605f117c45ddf933b23b823 | [] | no_license | cran/RcmdrPlugin.IPSUR | e59be088de00b3b1ce06c09001596af810e1691c | 8b19055f8dde7e5ec044ed578150f1f32369d778 | refs/heads/master | 2021-01-23T02:30:37.390231 | 2019-01-26T15:32:58 | 2019-01-26T15:32:58 | 17,693,100 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 138,299 | r | prob-sim-menu.R | # # Last modified Feb 14, 2008
# # simulations optimized by Tyler Drombosky 2007
#
# `betaSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Beta Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# shape1Var <- tclVar("1")
# shape1Entry <- tkentry(top, width = "6", textvariable = shape1Var)
# shape2Var <- tclVar("1")
# shape2Entry <- tkentry(top, width = "6", textvariable = shape2Var)
# ncpVar <- tclVar("0")
# ncpEntry <- tkentry(top, width = "6", textvariable = ncpVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# shape1 <- tclvalue(shape1Var)
# shape2 <- tclvalue(shape2Var)
# ncp <- tclvalue(ncpVar)
# if (is.na(nsamples)) {
# errorCondition(recall = betaSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(shape1)) {
# errorCondition(recall = betaSimulate.ipsur, message = gettextRcmdr("The shape1 parameter was not specified."))
# return()
# }
# if (is.na(shape2)) {
# errorCondition(recall = betaSimulate.ipsur, message = gettextRcmdr("The shape2 parameter was not specified."))
# return()
# }
# if (is.na(ncp)) {
# errorCondition(recall = betaSimulate.ipsur, message = gettextRcmdr("The noncentrality parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = betaSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = betaSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# betaSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = betaSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatebetasimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(beta.sim",
# getRcmdr("betasimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("betasimNumber"):(nsamples +
# getRcmdr("betasimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$beta.sim", k,
# " <- rbeta(", newSS, ", shape1=", shape1,
# ", shape2=", shape2, ", ncp=", ncp, ")",
# sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("betasimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 beta variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " beta variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rbeta")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = betaSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatebetasimNumber()
# for (k in getRcmdr("betasimNumber"):(nsamples + getRcmdr("betasimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$beta.sim", k,
# " <- rbeta(", samplesn, ", shape1=", shape1,
# ", shape2=", shape2, ", ncp=", ncp, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("betasimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 beta variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " beta variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rbeta")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("shape1")), shape1Entry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("shape2")), shape2Entry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("ncp (noncentrality parameter)")),
# ncpEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(shape1Entry, sticky = "w")
# tkgrid.configure(shape2Entry, sticky = "w")
# tkgrid.configure(ncpEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `binomialSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Binomial Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# sizeVar <- tclVar("1")
# sizeEntry <- tkentry(top, width = "6", textvariable = sizeVar)
# probVar <- tclVar("0.5")
# probEntry <- tkentry(top, width = "6", textvariable = probVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# size <- tclvalue(sizeVar)
# prob <- tclvalue(probVar)
# store <- tclvalue(locVariable)
# if (is.na(nsamples)) {
# errorCondition(recall = binomialSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(size)) {
# errorCondition(recall = binomialSimulate.ipsur, message = gettextRcmdr("Number of trials was not specified."))
# return()
# }
# if (is.na(prob)) {
# errorCondition(recall = binomialSimulate.ipsur, message = gettextRcmdr("The success probability was not specified."))
# return()
# }
# closeDialog()
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = binomialSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = binomialSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# binomialSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = binomialSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatebinomsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(binom.sim",
# getRcmdr("binomsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("binomsimNumber"):(nsamples +
# getRcmdr("binomsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$binom.sim", k,
# " <- rbinom(", newSS, ", size=", size, ", prob=",
# prob, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("binomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 binomial variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " binomial variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rbinom")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = binomialSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatebinomsimNumber()
# for (k in getRcmdr("binomsimNumber"):(nsamples +
# getRcmdr("binomsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$binom.sim",
# k, " <- rbinom(", samplesn, ", size=", size,
# ", prob=", prob, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("binomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 binomial variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " binomial variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rbinom")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("size (number of trials)")),
# sizeEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("prob (of success)")),
# probEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(sizeEntry, sticky = "w")
# tkgrid.configure(probEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `cauchySimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Cauchy Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# locationVar <- tclVar("0")
# locationEntry <- tkentry(top, width = "6", textvariable = locationVar)
# scale1Var <- tclVar("1")
# scale1Entry <- tkentry(top, width = "6", textvariable = scale1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# location <- tclvalue(locationVar)
# scale1 <- tclvalue(scale1Var)
# if (is.na(nsamples)) {
# errorCondition(recall = cauchySimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(location)) {
# errorCondition(recall = cauchySimulate.ipsur, message = gettextRcmdr("The location parameter was not specified."))
# return()
# }
# if (is.na(scale1)) {
# errorCondition(recall = cauchySimulate.ipsur, message = gettextRcmdr("The scale parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = cauchySimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = cauchySimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# cauchySimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = cauchySimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatecauchysimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(cauchy.sim",
# getRcmdr("cauchysimNumber"), "=1:", newSS,
# ")", sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("cauchysimNumber"):(nsamples +
# getRcmdr("cauchysimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$cauchy.sim",
# k, " <- rcauchy(", newSS, ", location=",
# location, ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("cauchysimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Cauchy variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Cauchy variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rcauchy")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = cauchySimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatecauchysimNumber()
# for (k in getRcmdr("cauchysimNumber"):(nsamples +
# getRcmdr("cauchysimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$cauchy.sim",
# k, " <- rcauchy(", samplesn, ", location=",
# location, ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("cauchysimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Cauchy variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Cauchy variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rcauchy")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("location")), locationEntry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("scale")), scale1Entry,
# sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(locationEntry, sticky = "w")
# tkgrid.configure(scale1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `chisqSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Chi-Squared Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# dfVar <- tclVar("1")
# dfEntry <- tkentry(top, width = "6", textvariable = dfVar)
# ncpVar <- tclVar("0")
# ncpEntry <- tkentry(top, width = "6", textvariable = ncpVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# df <- tclvalue(dfVar)
# ncp <- tclvalue(ncpVar)
# if (is.na(nsamples)) {
# errorCondition(recall = chisqSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(df)) {
# errorCondition(recall = chisqSimulate.ipsur, message = gettextRcmdr("The degrees of freedom were not specified."))
# return()
# }
# if (is.na(ncp)) {
# errorCondition(recall = chisqSimulate.ipsur, message = gettextRcmdr("The noncentrality parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = chisqSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = chisqSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# chisqSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = chisqSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatechisqsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(chisq.sim",
# getRcmdr("chisqsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("chisqsimNumber"):(nsamples +
# getRcmdr("chisqsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$chisq.sim", k,
# " <- rchisq(", newSS, ", df=", df, ", ncp=",
# ncp, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("chisqsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 chi-squared variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " chi-squared variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rchisq")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = chisqSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatechisqsimNumber()
# for (k in getRcmdr("chisqsimNumber"):(nsamples +
# getRcmdr("chisqsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$chisq.sim",
# k, " <- rchisq(", samplesn, ", df=", df, ", ncp=",
# ncp, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("chisqsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 chi-squared variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " chi-squared variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rchisq")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("df (degrees of freedom)")),
# dfEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("ncp (noncentrality parameter)")),
# ncpEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(dfEntry, sticky = "w")
# tkgrid.configure(ncpEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `disunifSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Discrete Uniform Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# from1Var <- tclVar("1")
# from1Entry <- tkentry(top, width = "6", textvariable = from1Var)
# to1Var <- tclVar("10")
# to1Entry <- tkentry(top, width = "6", textvariable = to1Var)
# by1Var <- tclVar("1")
# by1Entry <- tkentry(top, width = "6", textvariable = by1Var)
# userdefEntry <- tkentry(top, width = "30", textvariable = "")
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# from1 <- tclvalue(from1Var)
# to1 <- tclvalue(to1Var)
# by1 <- tclvalue(by1Var)
# if (is.na(nsamples)) {
# errorCondition(recall = disunifSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(from1)) {
# errorCondition(recall = disunifSimulate.ipsur, message = gettextRcmdr("The from parameter was not specified."))
# return()
# }
# if (is.na(to1)) {
# errorCondition(recall = disunifSimulate.ipsur, message = gettextRcmdr("The to parameter was not specified."))
# return()
# }
# if (is.na(by1)) {
# errorCondition(recall = disunifSimulate.ipsur, message = gettextRcmdr("The by parameter was not specified."))
# return()
# }
# closeDialog()
# command <- paste("support <- seq(", from1, ", ", to1, ", by=", by1,
# ")", sep = "")
# justDoIt(command)
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = disunifSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = disunifSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# disunifSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = disunifSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatedisunifsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(disunif.sim",
# getRcmdr("disunifsimNumber"), "=1:", newSS,
# ")", sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("disunifsimNumber"):(nsamples +
# getRcmdr("disunifsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$disunif.sim",
# k, " <- sample(support, size=", newSS, ", replace = TRUE)",
# sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("disunifsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 discrete uniform variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " discrete uniform variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rdisunif")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = disunifSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatedisunifsimNumber()
# for (k in getRcmdr("disunifsimNumber"):(nsamples +
# getRcmdr("disunifsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$disunif.sim",
# k, " <- sample(support, size=", samplesn, ", replace = TRUE)",
# sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("disunifsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 discrete uniform variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " discrete uniform variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# remove(support, envir = .GlobalEnv)
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rdisunif")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("from (lower limit)")),
# from1Entry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("to (upper limit)")),
# to1Entry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("by (step size)")),
# by1Entry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(from1Entry, sticky = "w")
# tkgrid.configure(to1Entry, sticky = "w")
# tkgrid.configure(by1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `expSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Exponential Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# rateVar <- tclVar("1")
# rateEntry <- tkentry(top, width = "6", textvariable = rateVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# rate <- tclvalue(rateVar)
# if (is.na(nsamples)) {
# errorCondition(recall = expSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(rate)) {
# errorCondition(recall = expSimulate.ipsur, message = gettextRcmdr("The rate parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = expSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = expSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# expSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = expSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdateexpsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(exp.sim",
# getRcmdr("expsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("expsimNumber"):(nsamples +
# getRcmdr("expsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$exp.sim", k,
# " <- rexp(", newSS, ", rate=", rate, ")",
# sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("expsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 exponential variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " exponential variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rexp")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = expSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdateexpsimNumber()
# for (k in getRcmdr("expsimNumber"):(nsamples + getRcmdr("expsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$exp.sim", k,
# " <- rexp(", samplesn, ", rate=", rate, ")",
# sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("expsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 exponential variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " exponential variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rexp")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("rate (of arrivals in unit time)")),
# rateEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(rateEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `fSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate F Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# df1Var <- tclVar("1")
# df1Entry <- tkentry(top, width = "6", textvariable = df1Var)
# df2Var <- tclVar("1")
# df2Entry <- tkentry(top, width = "6", textvariable = df2Var)
# ncpVar <- tclVar("0")
# ncpEntry <- tkentry(top, width = "6", textvariable = ncpVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# df1 <- tclvalue(df1Var)
# df2 <- tclvalue(df2Var)
# ncp <- tclvalue(ncpVar)
# if (is.na(nsamples)) {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(df1) || is.na(df2)) {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("Degrees of freedom were not specified."))
# return()
# }
# if (is.na(ncp)) {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("The noncentrality parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = fSimulate.ipsur, message = paste("\"",
# dsnameValue, "\" ", gettextRcmdr("is not a valid name."),
# sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# fSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatefsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(f.sim",
# getRcmdr("fsimNumber"), "=1:", newSS, ")",
# sep = ""))
#                 logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("fsimNumber"):(nsamples +
# getRcmdr("fsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$f.sim", k, " <- rf(",
# newSS, ", df1=", df1, ", df2=", df2, ", ncp=",
# ncp, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("fsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 F variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " F variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rf")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = fSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatefsimNumber()
# for (k in getRcmdr("fsimNumber"):(nsamples + getRcmdr("fsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$f.sim", k, " <- rf(",
# samplesn, ", df1=", df1, ", df2=", df2, ", ncp=",
# ncp, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("fsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 F variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " F variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rf")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("df1 (num degrees of freedom)")),
# df1Entry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("df2 (denom degrees of freedom)")),
# df2Entry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("ncp (noncentrality parameter)")),
# ncpEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(df1Entry, sticky = "w")
# tkgrid.configure(df2Entry, sticky = "w")
# tkgrid.configure(ncpEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `gammaSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Gamma Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# shapeVar <- tclVar("1")
# shapeEntry <- tkentry(top, width = "6", textvariable = shapeVar)
# scale1Var <- tclVar("1")
# scale1Entry <- tkentry(top, width = "6", textvariable = scale1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# shape <- tclvalue(shapeVar)
# scale1 <- tclvalue(scale1Var)
# if (is.na(nsamples)) {
# errorCondition(recall = gammaSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(shape)) {
# errorCondition(recall = gammaSimulate.ipsur, message = gettextRcmdr("The shape parameter was not specified."))
# return()
# }
# if (is.na(scale1)) {
# errorCondition(recall = gammaSimulate.ipsur, message = gettextRcmdr("The rate parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = gammaSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = gammaSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# gammaSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = gammaSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdategammasimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(gamma.sim",
# getRcmdr("gammasimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("gammasimNumber"):(nsamples +
# getRcmdr("gammasimNumber") - 1)) {
#                   justDoIt(paste(dsnameValue, "$gamma.sim", k,
#                     " <- rgamma(", newSS, ", shape=", shape,
#                     ", scale=", scale1, ")", sep = ""))
# NOTE(review): `scale1` holds the value of the dialog entry labelled
# "rate (= 1/scale)" (see the tkgrid call below), and the active-dataset
# branch of this same function passes it as `rate=` to rgamma(). Passing
# it here as `scale=` is inconsistent and yields a different distribution
# whenever the entered value != 1 — confirm intended parameterization
# before reviving this commented-out code.
# }
# activeDataSet(dsnameValue)
# putRcmdr("gammasimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 gamma variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " gamma variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rgamma")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = gammaSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdategammasimNumber()
# for (k in getRcmdr("gammasimNumber"):(nsamples +
# getRcmdr("gammasimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$gamma.sim",
# k, " <- rgamma(", samplesn, ", shape=", shape,
# ", rate=", scale1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("gammasimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 gamma variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " gamma variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rgamma")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("shape")), shapeEntry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("rate (= 1/scale)")),
# scale1Entry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(shapeEntry, sticky = "w")
# tkgrid.configure(scale1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `geomSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Geometric Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# probVar <- tclVar("0.5")
# probEntry <- tkentry(top, width = "6", textvariable = probVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# prob <- tclvalue(probVar)
# if (is.na(nsamples)) {
# errorCondition(recall = geomSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(prob)) {
# errorCondition(recall = geomSimulate.ipsur, message = gettextRcmdr("The probability of success was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = geomSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = geomSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# geomSimulate.ipsur()
# return()
# }
# }
#                 if (is.na(newSS)) {
#                   errorCondition(recall = geomSimulate.ipsur,
#                     message = gettextRcmdr("Sample Size must be a positive integer."))
#                   return()
#                 }
# UpdategeomsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(geom.sim",
# getRcmdr("geomsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("geomsimNumber"):(nsamples +
# getRcmdr("geomsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$geom.sim", k,
# " <- rgeom(", newSS, ", prob=", prob, ")",
# sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("geomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 geometric variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " geometric variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rgeom")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = geomSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdategeomsimNumber()
# for (k in getRcmdr("geomsimNumber"):(nsamples + getRcmdr("geomsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$geom.sim", k,
# " <- rgeom(", samplesn, ", prob=", prob, ")",
# sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("geomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 geometric variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " geometric variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rgeom")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("prob (of success in each trial)")),
# probEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(probEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `hyperSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Hypergeometric Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# mVar <- tclVar("1")
# mEntry <- tkentry(top, width = "6", textvariable = mVar)
# nVar <- tclVar("1")
# nEntry <- tkentry(top, width = "6", textvariable = nVar)
# k1Var <- tclVar("1")
# k1Entry <- tkentry(top, width = "6", textvariable = k1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# m <- tclvalue(mVar)
# n <- tclvalue(nVar)
# k1 <- tclvalue(k1Var)
# if (is.na(nsamples) || nsamples < 1) {
# errorCondition(recall = hyperSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(m)) {
# errorCondition(recall = hyperSimulate.ipsur, message = gettextRcmdr("The m parameter was not specified."))
# return()
# }
# if (is.na(n)) {
# errorCondition(recall = hyperSimulate.ipsur, message = gettextRcmdr("The n parameter was not specified."))
# return()
# }
# if (is.na(k1)) {
# errorCondition(recall = hyperSimulate.ipsur, message = gettextRcmdr("The k parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = hyperSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = hyperSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# hyperSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = hyperSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatehypersimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(hyper.sim",
# getRcmdr("hypersimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("hypersimNumber"):(nsamples +
# getRcmdr("hypersimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$hyper.sim", k,
# " <- rhyper(", newSS, ", m=", m, ", n=",
# n, ", k=", k1, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("hypersimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 hypergeometric variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# 				logger(paste("There were ", nsamples, " hypergeometric variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rhyper")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = hyperSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatehypersimNumber()
# for (k in getRcmdr("hypersimNumber"):(nsamples +
# getRcmdr("hypersimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$hyper.sim",
# k, " <- rhyper(", samplesn, ", m=", m, ", n=",
# n, ", k=", k1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("hypersimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 hypergeometric variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " hypergeometric variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rhyper")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("m (num of white balls in the urn)")),
# mEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("n (num of black balls in the urn)")),
# nEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("k (num of balls drawn from the urn)")),
# k1Entry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(mEntry, sticky = "w")
# tkgrid.configure(nEntry, sticky = "w")
# tkgrid.configure(k1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `lnormalSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Log Normal Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# mulogVar <- tclVar("0")
# mulogEntry <- tkentry(top, width = "6", textvariable = mulogVar)
# sigmalogVar <- tclVar("1")
# sigmalogEntry <- tkentry(top, width = "6", textvariable = sigmalogVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# mulog <- tclvalue(mulogVar)
# sigmalog <- tclvalue(sigmalogVar)
# if (is.na(nsamples)) {
# errorCondition(recall = lnormalSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(mulog)) {
# errorCondition(recall = lnormalSimulate.ipsur, message = gettextRcmdr("The mean was not specified."))
# return()
# }
# if (is.na(sigmalog)) {
# errorCondition(recall = lnormalSimulate.ipsur, message = gettextRcmdr("The standard deviation was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = lnormalSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = lnormalSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# lnormalSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = lnormalSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatelnormsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(lnorm.sim",
# getRcmdr("lnormsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("lnormsimNumber"):(nsamples +
# getRcmdr("lnormsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$lnorm.sim", k,
# " <- rlnorm(", newSS, ", meanlog=", mulog,
# ", sdlog=", sigmalog, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("lnormsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 log normal variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " log normal variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rlnorm")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = lnormalSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatelnormsimNumber()
# for (k in getRcmdr("lnormsimNumber"):(nsamples +
# getRcmdr("lnormsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$lnorm.sim",
# k, " <- rlnorm(", samplesn, ", meanlog=", mulog,
# ", sdlog=", sigmalog, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("lnormsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 log normal variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " log normal variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rlnorm")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("meanlog (mean of dist'n on log scale)")),
# mulogEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("sdlog (std dev of dist'n on log scale)")),
# sigmalogEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(mulogEntry, sticky = "w")
# tkgrid.configure(sigmalogEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
#
# `logisSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Logistic Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# locationVar <- tclVar("0")
# locationEntry <- tkentry(top, width = "6", textvariable = locationVar)
# scale1Var <- tclVar("1")
# scale1Entry <- tkentry(top, width = "6", textvariable = scale1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# location <- tclvalue(locationVar)
# scale1 <- tclvalue(scale1Var)
# if (is.na(nsamples) || nsamples < 1) {
# errorCondition(recall = logisSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(location)) {
# errorCondition(recall = logisSimulate.ipsur, message = gettextRcmdr("The location was not specified."))
# return()
# }
# if (is.na(scale1)) {
# errorCondition(recall = logisSimulate.ipsur, message = gettextRcmdr("The scale parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = logisSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = logisSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# logisSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = logisSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatelogissimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(logis.sim",
# getRcmdr("logissimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("logissimNumber"):(nsamples +
# getRcmdr("logissimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$logis.sim", k,
# " <- rlogis(", newSS, ", location=", location,
# ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("logissimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 logistic variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " logistic variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rlogis")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = logisSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatelogissimNumber()
# for (k in getRcmdr("logissimNumber"):(nsamples +
# getRcmdr("logissimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$logis.sim",
# k, " <- rlogis(", samplesn, ", location=",
# location, ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("logissimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 logistic variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " logistic variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rlogis")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("location")), locationEntry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("scale")), scale1Entry,
# sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(locationEntry, sticky = "w")
# tkgrid.configure(scale1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `nbinomSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Negative Binomial Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# sizeVar <- tclVar("1")
# sizeEntry <- tkentry(top, width = "6", textvariable = sizeVar)
# probVar <- tclVar("0.5")
# probEntry <- tkentry(top, width = "6", textvariable = probVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# size <- tclvalue(sizeVar)
# prob <- tclvalue(probVar)
# if (is.na(nsamples) || nsamples < 1) {
# errorCondition(recall = nbinomSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(size)) {
# errorCondition(recall = nbinomSimulate.ipsur, message = gettextRcmdr("The size was not specified."))
# return()
# }
# if (is.na(prob)) {
# errorCondition(recall = nbinomSimulate.ipsur, message = gettextRcmdr("The probability of success was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = nbinomSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = nbinomSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# nbinomSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = nbinomSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatenbinomsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(nbinom.sim",
# getRcmdr("nbinomsimNumber"), "=1:", newSS,
# ")", sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("nbinomsimNumber"):(nsamples +
# getRcmdr("nbinomsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$nbinom.sim",
# k, " <- rnbinom(", newSS, ", size=", size,
# ", prob=", prob, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("nbinomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 negative binomial variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " negative binomial variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rnbinom")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = nbinomSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatenbinomsimNumber()
# for (k in getRcmdr("nbinomsimNumber"):(nsamples +
# getRcmdr("nbinomsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$nbinom.sim",
# k, " <- rnbinom(", samplesn, ", size=", size,
# ", prob=", prob, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("nbinomsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 negative binomial variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " negative binomial variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rnbinom")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("size (target number of successes)")),
# sizeEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("prob (of success in each trial)")),
# probEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(sizeEntry, sticky = "w")
# tkgrid.configure(probEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `normalSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Normal Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# muVar <- tclVar("0")
# muEntry <- tkentry(top, width = "6", textvariable = muVar)
# sigmaVar <- tclVar("1")
# sigmaEntry <- tkentry(top, width = "6", textvariable = sigmaVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# mu <- tclvalue(muVar)
# sigma <- tclvalue(sigmaVar)
# if (is.na(nsamples)) {
# errorCondition(recall = normalSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(mu)) {
# errorCondition(recall = normalSimulate.ipsur, message = gettextRcmdr("The mean was not specified."))
# return()
# }
# if (is.na(sigma)) {
# errorCondition(recall = normalSimulate.ipsur, message = gettextRcmdr("The standard deviation was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = normalSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = normalSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# normalSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = normalSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatenormsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(norm.sim",
# getRcmdr("normsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("normsimNumber"):(nsamples +
# getRcmdr("normsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$norm.sim", k,
# " <- rnorm(", newSS, ", mean=", mu, ", sd=",
# sigma, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("normsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 normal variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " normal variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rnorm")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = normalSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatenormsimNumber()
# for (k in getRcmdr("normsimNumber"):(nsamples + getRcmdr("normsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$norm.sim", k,
# " <- rnorm(", samplesn, ", mean=", mu, ", sd=",
# sigma, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("normsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 normal variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " normal variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rnorm")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("mean (mu)")), muEntry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("sd (sigma)")), sigmaEntry,
# sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(muEntry, sticky = "w")
# tkgrid.configure(sigmaEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `poisSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Poisson Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# lambdaVar <- tclVar("1")
# lambdaEntry <- tkentry(top, width = "6", textvariable = lambdaVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# lambda <- tclvalue(lambdaVar)
# if (is.na(nsamples)) {
# errorCondition(recall = poisSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(lambda)) {
# errorCondition(recall = poisSimulate.ipsur, message = gettextRcmdr("The mean parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = poisSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = poisSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# poisSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = poisSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatepoissimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(pois.sim",
# getRcmdr("poissimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("poissimNumber"):(nsamples +
# getRcmdr("poissimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$pois.sim", k,
# " <- rpois(", newSS, ", lambda=", lambda,
# ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("poissimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Poisson variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Poisson variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rpois")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = poisSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatepoissimNumber()
# for (k in getRcmdr("poissimNumber"):(nsamples + getRcmdr("poissimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$pois.sim", k,
# " <- rpois(", samplesn, ", lambda=", lambda,
# ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("poissimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Poisson variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Poisson variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rpois")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("lambda (mean)")),
# lambdaEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(lambdaEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `RcmdrEnv` <-
# function ()
# {
# pos <- match("RcmdrEnv", search())
# if (is.na(pos)) {
# RcmdrEnv <- list()
# rm(RcmdrEnv)
# pos <- match("RcmdrEnv", search())
# }
# return(pos.to.env(pos))
# }
#
#
# `tSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate t Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# dfVar <- tclVar("1")
# dfEntry <- tkentry(top, width = "6", textvariable = dfVar)
# ncpVar <- tclVar("0")
# ncpEntry <- tkentry(top, width = "6", textvariable = ncpVar)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# df <- tclvalue(dfVar)
# ncp <- tclvalue(ncpVar)
# if (is.na(nsamples)) {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(df)) {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("The degrees of freedom were not specified."))
# return()
# }
# if (is.na(ncp)) {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("The noncentrality parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = tSimulate.ipsur, message = paste("\"",
# dsnameValue, "\" ", gettextRcmdr("is not a valid name."),
# sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# tSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdatetsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(t.sim",
# getRcmdr("tsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("tsimNumber"):(nsamples +
# getRcmdr("tsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$t.sim", k, " <- rt(",
# newSS, ", df=", df, ", ncp=", ncp, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("tsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Student's t variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Student's t variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rt")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = tSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdatetsimNumber()
# for (k in getRcmdr("tsimNumber"):(nsamples + getRcmdr("tsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$t.sim", k, " <- rt(",
# samplesn, ", df=", df, ", ncp=", ncp, ")",
# sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("tsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 Student's t variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " Student's t variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rt")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("df (degrees of freedom)")),
# dfEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("ncp (noncentrality parameter) ")),
# ncpEntry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(dfEntry, sticky = "w")
# tkgrid.configure(ncpEntry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `unifSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Uniform Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# min1Var <- tclVar("0")
# min1Entry <- tkentry(top, width = "6", textvariable = min1Var)
# max1Var <- tclVar("1")
# max1Entry <- tkentry(top, width = "6", textvariable = max1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# min1 <- tclvalue(min1Var)
# max1 <- tclvalue(max1Var)
# if (is.na(nsamples)) {
# errorCondition(recall = unifSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(min1)) {
# errorCondition(recall = unifSimulate.ipsur, message = gettextRcmdr("The lower limit(min) was not specified."))
# return()
# }
# if (is.na(max1)) {
# errorCondition(recall = unifSimulate.ipsur, message = gettextRcmdr("The upper limit(max) was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = unifSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = unifSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# unifSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = unifSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdateunifsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(unif.sim",
# getRcmdr("unifsimNumber"), "=1:", newSS, ")",
# sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("unifsimNumber"):(nsamples +
# getRcmdr("unifsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$unif.sim", k,
# " <- runif(", newSS, ", min=", min1, ", max=",
# max1, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("unifsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 uniform variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " uniform variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "runif")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = unifSimulate.ipsur, message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdateunifsimNumber()
# for (k in getRcmdr("unifsimNumber"):(nsamples + getRcmdr("unifsimNumber") -
# 1)) {
# justDoIt(paste(.activeDataSet, "$unif.sim", k,
# " <- runif(", samplesn, ", min=", min1, ", max=",
# max1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("unifsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 uniform variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " uniform variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "runif")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("min (lower limit of the distribution)")),
# min1Entry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("max (upper limit of the distribution)")),
# max1Entry, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(min1Entry, sticky = "w")
# tkgrid.configure(max1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
#
#
# `UpdatebetasimNumber` <-
# function (increment = 1)
# {
# betasimNumber <- getRcmdr("betasimNumber")
# putRcmdr("betasimNumber", betasimNumber + increment)
# }
# `UpdatebinomsimNumber` <-
# function (increment = 1)
# {
#     binomsimNumber <- getRcmdr("binomsimNumber")
#     putRcmdr("binomsimNumber", binomsimNumber + increment)
# }
# `UpdatecauchysimNumber` <-
# function (increment = 1)
# {
# cauchysimNumber <- getRcmdr("cauchysimNumber")
# putRcmdr("cauchysimNumber", cauchysimNumber + increment)
# }
# `UpdatechisqsimNumber` <-
# function (increment = 1)
# {
# chisqsimNumber <- getRcmdr("chisqsimNumber")
# putRcmdr("chisqsimNumber", chisqsimNumber + increment)
# }
# `UpdatedisunifsimNumber` <-
# function (increment = 1)
# {
# disunifsimNumber <- getRcmdr("disunifsimNumber")
# putRcmdr("disunifsimNumber", disunifsimNumber + increment)
# }
# `UpdateexpsimNumber` <-
# function (increment = 1)
# {
# expsimNumber <- getRcmdr("expsimNumber")
# putRcmdr("expsimNumber", expsimNumber + increment)
# }
# `UpdatefsimNumber` <-
# function (increment = 1)
# {
# fsimNumber <- getRcmdr("fsimNumber")
# putRcmdr("fsimNumber", fsimNumber + increment)
# }
# `UpdategammasimNumber` <-
# function (increment = 1)
# {
# gammasimNumber <- getRcmdr("gammasimNumber")
# putRcmdr("gammasimNumber", gammasimNumber + increment)
# }
# `UpdategeomsimNumber` <-
# function (increment = 1)
# {
# geomsimNumber <- getRcmdr("geomsimNumber")
# putRcmdr("geomsimNumber", geomsimNumber + increment)
# }
# `UpdatehypersimNumber` <-
# function (increment = 1)
# {
# hypersimNumber <- getRcmdr("hypersimNumber")
#     putRcmdr("hypersimNumber", hypersimNumber + increment)
# }
# `UpdatelnormsimNumber` <-
# function (increment = 1)
# {
# lnormsimNumber <- getRcmdr("lnormsimNumber")
# putRcmdr("lnormsimNumber", lnormsimNumber + increment)
# }
# `UpdatelogissimNumber` <-
# function (increment = 1)
# {
# logissimNumber <- getRcmdr("logissimNumber")
# putRcmdr("logissimNumber", logissimNumber + increment)
# }
# `UpdatenbinomsimNumber` <-
# function (increment = 1)
# {
# nbinomsimNumber <- getRcmdr("nbinomsimNumber")
# putRcmdr("nbinomsimNumber", nbinomsimNumber + increment)
# }
# `UpdatenormsimNumber` <-
# function (increment = 1)
# {
# normsimNumber <- getRcmdr("normsimNumber")
# putRcmdr("normsimNumber", normsimNumber + increment)
# }
# `UpdatepoissimNumber` <-
# function (increment = 1)
# {
# poissimNumber <- getRcmdr("poissimNumber")
# putRcmdr("poissimNumber", poissimNumber + increment)
# }
# `UpdatetsimNumber` <-
# function (increment = 1)
# {
# tsimNumber <- getRcmdr("tsimNumber")
# putRcmdr("tsimNumber", tsimNumber + increment)
# }
# `UpdateunifsimNumber` <-
# function (increment = 1)
# {
# unifsimNumber <- getRcmdr("unifsimNumber")
# putRcmdr("unifsimNumber", unifsimNumber + increment)
# }
# `UpdateweibullsimNumber` <-
# function (increment = 1)
# {
# weibullsimNumber <- getRcmdr("weibullsimNumber")
# putRcmdr("weibullsimNumber", weibullsimNumber + increment)
# }
#
#
# `weibullSimulate.ipsur` <-
# function ()
# {
# initializeDialog(title = gettextRcmdr("Simulate Weibull Variates"))
# parameterFrame <- tkframe(top)
# locationFrame <- tkframe(top)
# if (!is.character(ActiveDataSet())) {
# locVariable <- tclVar("new")
# }
# else {
# locVariable <- tclVar("add")
# }
# addtoactiveButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "add")
# newDataButton <- tkradiobutton(locationFrame, variable = locVariable,
# value = "new")
# samplesVar <- tclVar("1")
# samplesEntry <- tkentry(top, width = "6", textvariable = samplesVar)
# shapeVar <- tclVar("1")
# shapeEntry <- tkentry(top, width = "6", textvariable = shapeVar)
# scale1Var <- tclVar("1")
# scale1Entry <- tkentry(top, width = "6", textvariable = scale1Var)
# onOK <- function() {
# nsamples <- round(as.numeric(tclvalue(samplesVar)))
# shape <- tclvalue(shapeVar)
# scale1 <- tclvalue(scale1Var)
# if (is.na(nsamples)) {
# errorCondition(recall = weibullSimulate.ipsur, message = gettextRcmdr("Number of samples must be a positive integer."))
# return()
# }
# if (is.na(shape)) {
# errorCondition(recall = weibullSimulate.ipsur, message = gettextRcmdr("The shape parameter was not specified."))
# return()
# }
# if (is.na(scale1)) {
# errorCondition(recall = weibullSimulate.ipsur, message = gettextRcmdr("The scale parameter was not specified."))
# return()
# }
# closeDialog()
# store <- tclvalue(locVariable)
# if (store == "new") {
# initializeDialog(title = gettextRcmdr("Simulation Dataset"))
# dsname <- tclVar("Simset")
# entryDsname <- tkentry(top, width = "20", textvariable = dsname)
# newDataSS <- tclVar("100")
# entryNewDataSS <- tkentry(top, width = "6", textvariable = newDataSS)
# onOK <- function() {
# dsnameValue <- trim.blanks(tclvalue(dsname))
# newSS <- round(as.numeric(tclvalue(newDataSS)))
# closeDialog()
# if (dsnameValue == "") {
# errorCondition(recall = weibullSimulate.ipsur,
# message = gettextRcmdr("You must enter the name of a data set."))
# return()
# }
# if (!is.valid.name(dsnameValue)) {
# errorCondition(recall = weibullSimulate.ipsur,
# message = paste("\"", dsnameValue, "\" ",
# gettextRcmdr("is not a valid name."), sep = ""))
# return()
# }
# if (is.element(dsnameValue, listDataSets())) {
# if ("no" == tclvalue(checkReplace(dsnameValue,
# gettextRcmdr("Data set")))) {
# weibullSimulate.ipsur()
# return()
# }
# }
# if (is.na(newSS)) {
# errorCondition(recall = weibullSimulate.ipsur,
# message = gettextRcmdr("Sample Size must be a positive integer."))
# return()
# }
# UpdateweibullsimNumber()
# justDoIt(paste(dsnameValue, " = data.frame(weibull.sim",
# getRcmdr("weibullsimNumber"), "=1:", newSS,
# ")", sep = ""))
# logger(paste(dsnameValue, "has been initialized."))
# for (k in getRcmdr("weibullsimNumber"):(nsamples +
# getRcmdr("weibullsimNumber") - 1)) {
# justDoIt(paste(dsnameValue, "$weibull.sim",
# k, " <- rweibull(", newSS, ", shape=", shape,
# ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(dsnameValue)
# putRcmdr("weibullsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 weibull variate sample stored in ",
# dsnameValue, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " weibull variate samples stored in ",
# dsnameValue, ".", sep = ""))
# }
# }
# OKCancelHelp(helpSubject = "rweibull")
# tkgrid(tklabel(top, text = gettextRcmdr("Enter name for data set:")),
# entryDsname, sticky = "e")
# tkgrid(tklabel(top, text = gettextRcmdr("Sample Size (rows):")),
# entryNewDataSS, sticky = "e")
# tkgrid(buttonsFrame, columnspan = "2", sticky = "w")
# tkgrid.configure(entryDsname, sticky = "w")
# tkgrid.configure(entryNewDataSS, sticky = "w")
# tkfocus(CommanderWindow())
# dialogSuffix(rows = 2, columns = 2, focus = entryDsname)
# }
# else {
# if (!is.character(ActiveDataSet())) {
# errorCondition(recall = weibullSimulate.ipsur,
# message = gettextRcmdr("There is no active data set."))
# return()
# }
# .activeDataSet <- ActiveDataSet()
# justDoIt(paste("samplesn <- dim(", .activeDataSet,
# ")[1]", sep = ""))
# UpdateweibullsimNumber()
# for (k in getRcmdr("weibullsimNumber"):(nsamples +
# getRcmdr("weibullsimNumber") - 1)) {
# justDoIt(paste(.activeDataSet, "$weibull.sim",
# k, " <- rweibull(", samplesn, ", shape=", shape,
# ", scale=", scale1, ")", sep = ""))
# }
# activeDataSet(.activeDataSet)
# putRcmdr("weibullsimNumber", k)
# if (nsamples == 1) {
# logger(paste("There was 1 weibull variate sample stored in ",
# .activeDataSet, ".", sep = ""))
# }
# else {
# logger(paste("There were ", nsamples, " weibull variate samples stored in ",
# .activeDataSet, ".", sep = ""))
# }
# }
# tkfocus(CommanderWindow())
# }
# OKCancelHelp(helpSubject = "rweibull")
# tkgrid(tklabel(top, text = gettextRcmdr("Number of samples (columns):")),
# samplesEntry, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("Parameters:"), fg = "blue"),
# columnspan = 4, sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("shape")), shapeEntry,
# sticky = "w")
# tkgrid(tklabel(top, text = gettextRcmdr("scale")), scale1Entry,
# sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Store values in:"),
# fg = "blue"), columnspan = 4, sticky = "w")
# tkgrid(tklabel(locationFrame, text = gettextRcmdr("Active Dataset")),
# addtoactiveButton, sticky = "w")
# tkgrid(tklabel(locationFrame, text = "New Dataset"), newDataButton,
# sticky = "w")
# tkgrid.configure(samplesEntry, sticky = "w")
# tkgrid.configure(shapeEntry, sticky = "w")
# tkgrid.configure(scale1Entry, sticky = "w")
# tkgrid(locationFrame, sticky = "w")
# tkgrid(buttonsFrame, sticky = "w", columnspan = 2)
# dialogSuffix(rows = 6, columns = 1, focus = samplesEntry)
# }
|
ccc4a28c8d49c32140d1519e54d24c4032f99478 | ce6c631c021813b99eacddec65155777ca125703 | /R/mdlMLE.R | fa4119daf8bad162d0607ff9b19c976d6a124fea | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | Zhenglei-BCS/smwrQW | fdae2b1cf65854ca2af9cd9917b89790287e3eb6 | 9a5020aa3a5762025fa651517dbd05566a09c280 | refs/heads/master | 2023-09-03T04:04:55.153230 | 2020-05-24T15:57:06 | 2020-05-24T15:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,142 | r | mdlMLE.R | #' @title Estimate Statistics
#'
#' @description Support function for computing statistics for left-censored data using the
#'maximum likelihood method (Helsel and Cohn, 1988).
#'
#' @importFrom survival survreg Surv
#' @param x the data to estimate, Missing values permitted and ignored.
#'Must be an object of class "lcens," a numeric vector, or the output from censpp.
#' @param method the method to use, either "MLE" or "log MLE."
#' @param alpha the offset for plotting position, used to compute the filled in values.
#' @return A list containing the mean and standard deviation, filled in
#'values for the censored values, and the censored levels. If \code{method}
#'is "log MLE," then the list also contains the mean and standard deviation of the
#'natural log-transformed values computed by maximum likelihood.
#' @references Helsel, D.R. and Cohn, T.A., 1988, Estimation of descriptive statistics
#'for multiply censored water quality data: Water Resources Research v. 24, n.
#'12, p.1997--2004
#' @keywords misc
#' @export
mdlMLE <- function(x, method = "MLE", alpha = 0.4) {
  ## Coding history:
  ## 2012Mar09 DLLorenz original coding
  ## 2013Jan05 DLLorenz Roxygenized
  ## 2013Jan05 This version
  ##
  method <- match.arg(method, c("MLE", "log MLE"))
  ## If 'x' is not already the list produced by censpp(), compute the
  ## plotting-position summary now. Use inherits() rather than
  ## `class(x) != "list"`: some objects carry multi-element class vectors
  ## (e.g., matrices are c("matrix", "array")), which would make the
  ## original comparison a length > 1 condition -- an error in R >= 4.2.
  ## For the documented inputs (single-class objects) the result is
  ## identical.
  if (!inherits(x, "list"))
    x <- censpp(x, a = alpha)
  ## Build a left-censored survival object: uncensored observations (x$x)
  ## get status 1, censored observations (x$xcen) get status 0.
  step1 <- Surv(c(x$x, x$xcen),
                c(rep(1, length(x$x)), rep(0, length(x$xcen))),
                type = "left")
  if (method == "MLE") {
    ## Fit on the original scale; fill in the censored values from the
    ## fitted normal distribution evaluated at the censored plotting
    ## positions (x$ppcen).
    step2 <- survreg(step1 ~ 1, dist = "gaussian")
    coefs <- as.vector(c(step2$coefficients, step2$scale))
    step3 <- qnorm(x$ppcen) * coefs[2L] + coefs[1L]
    step4 <- as.vector(c(step3, x$x))
    retval <- list(mean = coefs[1L], sd = coefs[2L], fitted = step4)
  } else {
    ## Fit on the natural-log scale; back-transform the filled-in values
    ## with exp() so 'fitted' is reported on the original scale.
    step2 <- survreg(step1 ~ 1, dist = "lognormal")
    coefs <- as.vector(c(step2$coefficients, step2$scale))
    step3 <- qnorm(x$ppcen) * coefs[2L] + coefs[1L]
    step4 <- as.vector(c(exp(step3), x$x))
    retval <- list(meanlog = coefs[1L], sdlog = coefs[2L], fitted = step4)
  }
  ## Record the censoring levels; -Inf signals "no censored values."
  if (length(x$xcen) > 0L)
    retval$censorlevels <- x$xcen
  else
    retval$censorlevels <- -Inf
  return(retval)
}
|
cd8315c71350b0cec8eabe3b9b15c467ee1ca98e | 304fe6596b84657320497f82c73bd319497faf9a | /R/HSVencoding.R | ad3c822da1398c1306ae6b75c61fb104907199ed | [] | no_license | diegosainzg/STUtility | 30e76fd1c323b3d0806e43c85ca0b05922ed2ff4 | 1817a5dab56589459e0442216cbd9ee219842be3 | refs/heads/master | 2023-07-18T12:37:37.321338 | 2021-09-08T14:06:37 | 2021-09-08T14:06:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 36,847 | r | HSVencoding.R | #' HSV encoded plots
#'
#' Using an HSV encoding of feature values, this functions can be used to color
#' code expression profiles of multiple features and visualize spatially.
#'
#' Using RGB encoding, we can show up to 3 features at the same time in the
#' "red", "green" and "blue" color channels. Whenever two or three features overlap,
#' the color will be a mix of the three channels, e.g. 50% green and 50% red will give a yellow color.
#' This strategy is very effective when looking at features values with significant
#' overlap but is limited to show maximum three features.
#'
#' If we want to show more than three features in the same plot, this
#' function provides a strategy to do this as long as the overlap between features
#' is relatively low. First, a color is assigned to each of N features by cutting
#' the hue (H) into N values with an even interval. The feature values (e.g. gene expression)
#' are then scaled to a 0-1 range which is encoded in the Value channel (V).
#' For each spot, the color with the highest V is selected meaning that only the
#' feature with the highest value will be shown in the plot. This strategy works well
#' for features with no or very little overlap but gets cluttered when too many
#' features are included.
#'
#' This visualization method should be used only on carefully selected features and you should be
#' aware that color representation of quantitative data can be very misleading. It should only be
#' used to assess qualitative aspects of the data, for example if you wish to know where 5 "non-overlapping"
#' features are expressed spatially. You should therefore investigate beforehand if the features of interest
#' overlap or, otherwise the results can become very confusing.
#'
#' @section scaling of features:
#' All features are by default scaled independently to a 0 to 1 range which means that the relative
#' differences between the feature expression levels are not preserved. This is because some features
#' can still be very distinct for a region of interest even though their magnitude of expression is low.
#' If you want to preserve the relative differences you can set `rescale = FALSE`.
#'
#' @param object Seurat object
#' @param features
#' \itemize{
#' \item An \code{Assay} feature (e.g. a gene name - "MS4A1")
#' \item A column name from meta.data (e.g. mitochondrial percentage - "percent.mito")
#' }
#' @param plot.type Select one of 'spots' or 'smooth' [default: 'spots']
#' @param split.hsv Should the HSV colored features be split into separate plots? [default: FALSE]
#' @param rescale Rescale each feature column separately from 0 to 1 range. If set to FALSE, all feature columns
#' will be scaled together from 0 to 1 and preserve the relative differences
#' @param indices Numeric vector specifying sample indices to include in plot. Default is to show all samples.
#' @param spots Vector of spots to plot (default is all spots)
#' @param min.cutoff,max.cutoff Vector of minimum and maximum cutoff values for each feature,
#' may specify quantile in the form of 'q##' where '##' is the quantile (eg, 'q1', 'q10')
#' @param slot Which slot to pull expression data from?
#' @param pt.size Adjust point size for plotting
#' @param pt.alpha Adjust opacity of spots.
#' @param pt.border Should a border be drawn around the spots? [default: TRUE]
#' @param add.alpha Adds opacity to spots scaled by feature values. This will disable the pt.alpha parameter
#' @param shape.by If NULL, all points are circles (default). You can specify any spot attribute available in the meta.data slot
#' @param sigma Smoothing bandwidth; only active if \code{plot.type = 'smooth'}. A single positive number, a numeric vector of length 2, or a function that selects the bandwidth automatically [default: 2].
#' See \code{\link{density.ppp}} function from the \code{\link{spatstat}} package for more details.
#' @param highlight.edges Highlights the edges of the tissue. Only active if \code{plot.type = 'smooth'} and if the images have been masked.
#' @param grid.ncol Number of columns for display when combining plots
#' @param dark.theme Use a dark theme for plotting
#' @param theme Add a custom theme to the output ggplot object
#' @param scale.res Integer value setting the resolution of the output raster image. E.g. scale.res = 2 will double the
#' resolution of the output but will also take longer to render. Only active if plot.type is set to 'smooth'.
#' @param verbose Print messages
#' @param ... Extra parameters passed on to \code{\link{STPlot}}
#'
#' @inheritParams STPlot
#' @importFrom cowplot plot_grid
#' @importFrom scales rescale
#' @importFrom ggplot2 ggplot theme theme_void
#' @importFrom zeallot %<-%
#' @importFrom grDevices hsv
#' @importFrom imager imgradient enorm as.cimg
#' @importFrom magick image_crop image_info image_read image_composite image_border image_scale
#'
#' @return A ggplot object
#' @export
HSVPlot <- function (
object,
features,
ncol = NULL,
plot.type = 'spots',
split.hsv = FALSE,
rescale = TRUE,
indices = NULL,
spots = NULL,
min.cutoff = NA,
max.cutoff = NA,
slot = "data",
pt.size = 1,
pt.alpha = 1,
pt.border = FALSE,
add.alpha = FALSE,
shape.by = NULL,
sigma = 2,
highlight.edges = FALSE,
cols = NULL,
dark.theme = TRUE,
grid.ncol = NULL,
theme = theme_void(),
scale.res = 1,
custom.theme = NULL,
verbose = FALSE,
...
) {
# Check to see if Staffli object is present
if (!"Staffli" %in% names(object@tools)) stop("Staffli object is missing from Seurat object. Cannot plot without coordinates", call. = FALSE)
st.object <- object@tools$Staffli
# Collect data
spots <- spots %||% colnames(x = object)
data <- FetchData(object = object, vars = c(features), cells = spots, slot = slot)
data.type <- unique(sapply(data, class))
# Stop if feature classes are not numeric/integer
if (!all(data.type %in% c("numeric", "integer"))) {
stop("Only features of class 'integer' or 'numeric' are allowed ... ")
}
# Add group column to data
data[, "sample"] <- st.object[[spots, "sample", drop = TRUE]]
# Add shape column if specified
if (!is.null(x = shape.by)) {
if (!shape.by %in% colnames(object[[]])) {
stop(paste0("Shaping variable (shape.by) ", shape.by, " not found in meta.data slot"), call. = F)
}
data[, shape.by] <- as.character(object[[shape.by, drop = TRUE]])
}
# Obtain array coordinates
image.type <- "empty"
c(data, image.type) %<-% obtain.array.coords(st.object, data, image.type, spots)
# Raise error if features are not present in Seurat object
if (ncol(x = data) < 3) {
stop("None of the requested features were found: ",
paste(features, collapse = ", "),
" in slot ",
slot,
call. = FALSE)
}
data <- feature.scaler(data, features, min.cutoff, max.cutoff)
# Subset by index
if (!is.null(indices)) {
if (!all(as.character(indices) %in% data[, "sample"])) stop(paste0("Index out of range. "), call. = FALSE)
data <- data[data[, "sample"] %in% as.character(indices), ]
} else {
indices <- unique(data[, "sample"]) %>% as.numeric()
}
if (is.null(cols)) {
# Generate HSV encoded colors
if (verbose) cat(paste0("Defining Hue for ", length(x = features), " features ... \n"))
hue_breaks <- seq(0, 1, length.out = length(x = features) + 1)[1:length(x = features)]
hsv.matrix <- t(matrix(c(hue_breaks, rep(1, length(hue_breaks )), rep(1, length(hue_breaks))), ncol = 3))
rownames(hsv.matrix) <- c("h", "s", "v")
ann.cols <- apply(hsv.matrix, 2, function(x) hsv(x[1], x[2], x[3]))
} else {
if (length(x = features) != length(x = cols)) stop("Length of features and cols must match ...", call. = FALSE)
warning("Using user defined colors with opacity. HSV scale will not be used ...", call. = FALSE)
ann.cols <- cols
names(cols) <- features
}
# Rescale data 0 to 1
if (rescale) {
data[, features] <- apply(data[, features], 2, scales::rescale)
} else {
data[, features] <- setNames(data.frame(scales::rescale(data[, features] %>% as.matrix() %>% as.numeric()) %>% matrix(ncol = length(x = features))), nm = features)
}
# Disable pt.alpha if add.alpha is provided
if (add.alpha) pt.alpha <- NA
# Plot HSV encoded feature data
if (plot.type == 'spots') {
# Select highest V
# Select highest V
d <- create.array.from.feature.vals(data, features, hue_breaks, cols, dark.theme, verbose)
#red.cols <- data.frame()
if (verbose) cat("Selecting HSV colors for each spot ... \n")
data <- create.cols.from.array(data, d, features, cols, split.hsv, dark.theme, add.alpha)
if (verbose) cat("Plotting features:",
ifelse(length(features) == 1, features, paste0(paste(features[1:(length(features) - 1)], collapse = ", "), " and ", features[length(features)])))
# Normal visualization -------------------------------------------------------------------------------------
if (image.type != "empty") {
dims <- lapply(st.object@dims, function(x) {x[2:3] %>% as.numeric()})
} else {
dims <- st.object@limits
}
if (!is.null(indices)) dims <- dims[indices]
# Plot combined HSV
if (!split.hsv) {
plot <- STPlot(data, data.type, shape.by, NULL, pt.size, pt.alpha, pt.border = pt.border,
palette = "Reds", cols = NULL, ncol = ncol, spot.colors = data$cols,
center.zero = F, center.tissue = F, plot.title = "",
dims = dims, split.labels = FALSE, dark.theme = dark.theme,
pxum = NULL, sb.size = 2.5, custom.theme = custom.theme, ...)
if (dark.theme) {
plot <- plot + dark_theme()
}
plot <- plot +
geom_point(data = data.frame(x = rep(-1, length(features)), y = rep(-1, length(features)), features), aes(x, y, colour = features)) +
scale_color_manual(values = setNames(ann.cols, features))
return(plot)
} else {
plots <- lapply(seq_along(data), function (i) {
data <- data[[i]]
plot <- STPlot(data, data.type, shape.by, NULL, pt.size, pt.alpha, pt.border = pt.border,
palette = "Reds", cols = NULL, ncol = ncol, spot.colors = data$cols,
center.zero = F, center.tissue = F, plot.title = features[i],
dims = dims, split.labels = FALSE, dark.theme = dark.theme,
pxum = NULL, sb.size = 2.5, custom.theme = custom.theme, ...)
if (dark.theme) {
plot <- plot + dark_theme()
}
return(plot)
})
ncols <- grid.ncol %||% ceiling(sqrt(length(x = features)))
nrows <- ceiling(length(x = features)/ncols)
plot <- cowplot::plot_grid(plotlist = plots, ncol = ncols, nrow = nrows)
if (dark.theme) plot <- plot + dark_theme()
return(plot)
}
} else if (plot.type == 'smooth') {
feature.list <- list()
edges.list <- list()
for (ftr in features) {
val.limits <- range(data[, ftr])
p.list <- list()
for (i in 1:length(unique(data$sample))) {
data_subset <- subset(data, sample == i)
dims <- st.object@rasterlists$processed.masks[[i]] %>% dim()
if (image.type %in% c('raw', 'masked', 'processed')) {
extents <- st.object@dims[[i]][2:3] %>% as.numeric()
data_subset[, c("x", "y")] <- data_subset[, c("x", "y")]/((extents[1]/scale.res)/dims[2])
} else {
extents <- st.object@limits[[i]]
data_subset[, c("x", "y")] <- data_subset[, c("x", "y")]/((extents[1]/scale.res)*scale.res/dims[2])
}
ow <- spatstat.geom::owin(xrange = c(0, dims[2]*scale.res), yrange = c(0, dims[1]*scale.res))
p <- spatstat.geom::ppp(x = data_subset[, "x"], y = data_subset[, "y"], window = ow, marks = data_subset[, ftr])
suppressWarnings({s <- spatstat.core::Smooth(p, sigma*scale.res, dimyx = dims*scale.res)})
m <- as.matrix(s)
m[m < 0] <- 0
m <- m/max(m)
if (image.type %in% c('processed', 'masked')) {
msk.type <- paste0(image.type, ".masks")
msk <- st.object['processed.masks'][[i]]
if (scale.res != 1) {
msk <- image_read(msk) %>% image_scale(paste0(st.object@xdim*scale.res)) %>% magick2cimg()
} else {
msk <- msk %>% as.cimg()
}
if (highlight.edges) {
edges.list[[i]] <- imgradient(msk, "xy") %>% enorm()
}
msk <- msk[, , , 1] %>% as.cimg() %>% threshold()
m <- t(m) %>% as.cimg()
masked.m <- m*msk
p.list[[i]] <- masked.m
} else {
p.list[[i]] <- m %>% as.cimg()
}
}
feature.list[[ftr]] <- p.list
}
# HSV plot
hue_breaks <- seq(0, 1, length.out = length(x = features) + 1)[1:length(x = features)]
rsts <- list()
if (!split.hsv) {
for (j in 1:length(unique(data[, "sample"]))) {
ar <- array(dim = c(rev(dims*scale.res), length(features)))
n <- 1
for (i in features) {
ar[, , n] <- feature.list[[i]][[j]]
n <- n + 1
}
ftr.rst <- apply(ar[, , ], c(1, 2), function(x) {
if (is.null(cols)) {
hsvc <- hsv(h = hue_breaks[which.max(x)], s = ifelse(dark.theme, 1, max(x)), v = ifelse(dark.theme, max(x), 1))
} else {
hsvc <- cols[which.max(x)]
}
if (add.alpha) hsvc <- scales::alpha(hsvc, max(x))
return(hsvc)
}) %>% t() %>% as.raster() #%>% as.cimg()
if (length(edges.list) > 0) {
ftr.rst[t((edges.list[[j]] > 0)[, , , 1])] <- "#FFFFFF"
}
rsts[[j]] <- ftr.rst %>% as.raster()
}
ncols <- length(unique(data[, "sample"]))
nrows <- ceiling(length(unique(data[, "sample"]))/ncols)
} else {
for (j in 1:length(unique(data[, "sample"]))) {
feature.rsts <- list()
for (i in seq_along(features)) {
ftr.rst <- sapply(feature.list[[i]][[j]], function(x) {
if (is.null(cols)) {
hsvc <- hsv(h = hue_breaks[i], s = ifelse(dark.theme, 1, x), v = ifelse(dark.theme, x, 1))
} else {
hsvc <- cols[i]
}
if (add.alpha) hsvc <- scales::alpha(hsvc, x)
return(hsvc)
}) %>% matrix(nrow = dims[2]*scale.res, ncol = dims[1]*scale.res) %>% t() %>% as.raster() #%>% as.cimg()
if (length(edges.list) > 0) {
ftr.rst[t((edges.list[[j]] > 0)[, , , 1])] <- "#FFFFFF"
}
feature.rsts[[i]] <- ftr.rst %>% as.raster()
}
rsts[[j]] <- feature.rsts
}
rsts <- Reduce(c, rsts)
# rearrange results
reord <- rep(seq_along(features), each = 2)
reord[seq(2, length(reord), 2)] <- reord[seq(2, length(reord), 2)] + length(x = features)
rsts <- rsts[reord]
ncols <- length(unique(data[, "sample"]))
nrows <- length(x = features)
}
rsts <- lapply(seq_along(rsts), function(i) {
im <- rsts[[i]]
im <- im %>% image_read()
im <- image_border(im, ifelse(dark.theme, "#000000", "#FFFFFF"), paste(st.object@xdim*scale.res/10, st.object@xdim*scale.res/10, sep = "x"))
im <- image_annotate(im, text = i, size = round(st.object@xdim/10), color = ifelse(dark.theme, "#FFFFFF", "#000000"))
})
tmp.file <- tempfile(pattern = "", fileext = ".png")
png(width = st.object@xdim*ncols*scale.res, height = st.object@xdim*nrows*scale.res, file = tmp.file)
par(mfrow = c(nrows, ncols), mar = c(0, 0, 0, 0), bg = ifelse(dark.theme, "black", "white"))
for (rst in rsts) {
plot(rst)
}
dev.off()
im <- image_read(tmp.file)
if (!split.hsv) {
im <- image_border(im, ifelse(dark.theme, "#000000", "#FFFFFF"), paste0(st.object@xdim*scale.res/2))
} else {
im <- image_border(im, ifelse(dark.theme, "#000000", "#FFFFFF"), paste0(st.object@xdim*scale.res))
}
tmp.file <- tempfile(pattern = "", fileext = ".png")
lg <- g_legend(data.frame(x = 1, y = 1, feature = features), data.type = "character", variable = "feature", center.zero = FALSE, cols = ann.cols, val.limits = NULL, dark.theme = dark.theme)
grobHeight <- function(x) {
grid::convertHeight(sum(x$heights), "in", TRUE)
}
grobWidth <- function(x) {
grid::convertWidth(sum(x$widths), "in", TRUE)
}
ggsave(plot = lg, width = grobWidth(lg), height = grobHeight(lg), filename = tmp.file)
iminf <- image_info(im)[2:3] %>% as.numeric()
if (!split.hsv) {
lgim <- image_read(tmp.file) %>% image_scale(paste0(iminf[2]/5))
} else {
lgim <- image_read(tmp.file) %>% image_scale(paste0(iminf[2]/(nrows*2)))
}
iminf.lgm <- image_info(lgim)[2:3] %>% as.numeric()
lgim <- image_crop(lgim, paste0(iminf.lgm[1] - 2, "x", iminf.lgm[2] - 2, "x", 1, "x", 1))
if (!split.hsv) {
im <- image_composite(image = im, composite_image = lgim, offset = paste0("+", iminf[1] - st.object@xdim*scale.res/length(features), "+", (iminf[2])/2 - (iminf.lgm[2])/2))
} else {
im <- image_composite(image = im, composite_image = lgim, offset = paste0("+", st.object@xdim*ncols*scale.res*1.5, "+", (iminf[2])/2 - (iminf.lgm[2])/2))
}
par(mar = c(0, 0, 0, 0), bg = ifelse(dark.theme, "black", "white"))
plot(im %>% as.raster())
}
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# HSV plots on HE images
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#' Overlay HSVplot on one selected HE image
#'
#' Colors spots on an ST array grid according to a 'feature'
#' (i.e. gene expression (raw counts or scaled) and features available in the meta data slot).
#' NOTE that this function only draws a plot for one sample at the time.
#'
#' @param sample.index Index specifying the sample that you want to use for plotting
#' @param spots Character vector with spot IDs to plot [default: all spots]
#' @param type Image type to plot on. Here you can specify any of the images available in your Seurat object. To get this list you can
#' run the \code{\link{rasterlists}} function on your Seurat object. If the type is not specified, the images will be prioritized in the following
#' order if they are available; "processed", "masked" and "raw".
#' @param slot Which slot to pull expression data from? [default: 'data']
#' @param sample.label Should the sample label be included in the image? [default: TRUE]
#' @param ... Extra parameters passed on to \code{\link{ST.ImagePlot}}
#'
#' @inheritParams ST.ImagePlot
#' @inheritParams ST.FeaturePlot
#' @inheritParams HSVPlot
#' @importFrom cowplot plot_grid
#'
#' @return A ggplot object
#'
spatial_hsv_plot <- function (
  object,
  features,
  split.hsv = FALSE,
  sample.index = 1,
  rescale = TRUE,
  spots = NULL,
  type = NULL,
  min.cutoff = NA,
  max.cutoff = NA,
  slot = "data",
  pt.size = 2,
  pt.alpha = 1,
  pt.border = FALSE,
  add.alpha = FALSE,
  shape.by = NULL,
  palette = NULL,
  cols = NULL,
  grid.ncol = NULL,
  dark.theme = FALSE,
  sample.label = TRUE,
  show.sb = TRUE,
  value.scale = c("samplewise", "all"),
  custom.theme = NULL,
  verbose = FALSE,
  ...
) {
  # Spot coordinates live in the Staffli object; bail out early if it is missing
  if (!"Staffli" %in% names(object@tools)) stop("Staffli object is missing from Seurat object. Cannot plot without coordinates", call. = FALSE)
  st.object <- object@tools$Staffli
  # Default to all spots
  spots <- spots %||% colnames(object)
  # This function draws exactly one tissue section at a time
  if (length(sample.index) > 1) stop(paste0("Only one sample index can be selected."), call. = FALSE)
  # If no image type was given, pick the "most processed" available type
  type <- type %||% {
    if (is.null(rasterlists(st.object))) stop("There are no images present in the Seurat object. Run LoadImages() first.", call. = FALSE)
    choices <- c("processed", "masked", "raw", "processed.masks", "masked.masks")
    match.arg(choices, rasterlists(st.object), several.ok = TRUE)[1]
  }
  # Map each image type to the function that produces it (used in error messages)
  msgs <- c("raw" = "LoadImages()", "masked" = "MaskImages()", "processed" = "WarpImages()", "masked.masks" = "MaskImages()", "processed.masks" = "WarpImages()")
  if (!type %in% names(msgs)) stop(paste0(type, " not a valid type"), call. = FALSE)
  if (!type %in% rasterlists(st.object)) stop(paste0("You need to run ", msgs[type], " before using DimOverlay() on '", type, "' images"), call. = FALSE)
  # Validate the sample index against the images stored in the object
  if (!sample.index %in% names(st.object)) {
    stop(paste0("sample.index ", sample.index, " does not match any of the images present in the Seurat object or is out of range"), call. = FALSE)
  }
  # Collect the HE image for the selected section
  image <- st.object[type][[sample.index]]
  if (dark.theme && type %in% c("masked", "processed")) {
    # Swap the white background for black to match the dark theme
    image[image == "#FFFFFF"] <- "#000000"
  }
  if (sample.label) {
    # Stamp the sample index onto the image
    image <- as.raster(image_annotate(image_read(image), text = paste(sample.index), color = ifelse(dark.theme, "#FFFFFF", "#000000"), size = round(st.object@xdim/10)))
  }
  # Image width/height in pixels
  imdims <- st.object@dims[[sample.index]][2:3] %>% as.numeric()
  # Translate a numeric position into the corresponding sample label.
  # is.numeric() also covers integer indices, which the previous
  # class(sample.index) == "numeric" comparison silently missed.
  if (is.numeric(sample.index)) {
    sample.index <- unique(st.object[[, "sample", drop = TRUE]])[sample.index]
  }
  # Keep only the requested spots that belong to this section
  spots <- intersect(colnames(object)[st.object[[, "sample", drop = TRUE]] == sample.index], spots)
  if (length(spots) == 0) stop(paste0("All selected spots are missing from sample ", sample.index, " ... \n"), call. = FALSE)
  if (verbose) cat(paste0("Selected ", length(spots), " spots matching index ", sample.index))
  data <- FetchData(object = object, vars = c(features), cells = spots, slot = slot)
  data.type <- unique(sapply(data, class))
  # Obtain pixel coordinates: raw/masked images use the original pixel
  # coordinates while warped (processed) images use the warped coordinates
  px.ids <- if (type %in% c("raw", "masked", "masked.masks")) c("pixel_x", "pixel_y") else c("warped_x", "warped_y")
  if (all(px.ids %in% colnames(st.object[[]]))) {
    data <- cbind(data, setNames(st.object[[, px.ids]][spots, ], nm = c("x", "y")))
  } else {
    stop(paste0(paste(px.ids, collapse = " and "), " coordinates are not present in meta data."), call. = FALSE)
  }
  # Raise error if none of the requested features were fetched
  if (ncol(x = data) < 3) {
    stop("None of the requested features were found: ",
         paste(features, collapse = ", "),
         " in slot ",
         slot,
         call. = FALSE)
  }
  # Apply min/max cutoffs to numeric feature values
  if (all(data.type %in% c("numeric", "integer"))) {
    data <- feature.scaler(data, features, min.cutoff, max.cutoff)
  }
  # Add index column
  data[, "sample"] <- sample.index
  # The scalebar needs the pixel resolution of this section
  if (show.sb) {
    pixels.per.um <- st.object@pixels.per.um[sample.index]
  } else {
    pixels.per.um <- NULL
  }
  if (is.null(cols)) {
    # Generate HSV encoded colors: one evenly spaced Hue per feature
    if (verbose) cat(paste0("Defining Hue for ", length(x = features), " features ... \n"))
    hue_breaks <- seq(0, 1, length.out = length(x = features) + 1)[1:length(x = features)]
    hsv.matrix <- t(matrix(c(hue_breaks, rep(1, length(hue_breaks)), rep(1, length(hue_breaks))), ncol = 3))
    rownames(hsv.matrix) <- c("h", "s", "v")
    ann.cols <- apply(hsv.matrix, 2, function(x) hsv(x[1], x[2], x[3]))
  } else {
    if (length(x = features) != length(x = cols)) stop("Length of features and cols must match ...", call. = FALSE)
    warning("Using user defined colors with opacity. HSV scale will not be used ...", call. = FALSE)
    ann.cols <- cols
    names(cols) <- features
  }
  # Add two dummy rows carrying the global value ranges so that rescaling can
  # be made comparable across sections; they are removed again by na.omit below
  if (is.list(value.scale)) {
    data <- rbind(data, setNames(data.frame(cbind(do.call(cbind, value.scale), matrix(NA, ncol = sum(!colnames(data) %in% features), nrow = 2))), nm = colnames(data)))
  }
  # Rescale feature values to [0, 1], either per feature or jointly over all features
  if (rescale) {
    data[, features] <- apply(data[, features], 2, scales::rescale)
  } else {
    data[, features] <- setNames(data.frame(scales::rescale(data[, features] %>% as.matrix() %>% as.numeric()) %>% matrix(ncol = length(x = features))), nm = features)
  }
  # Drop the dummy rows (they contain NA in the non-feature columns)
  data <- na.omit(data)
  # Disable pt.alpha if add.alpha is provided
  if (add.alpha) pt.alpha <- NA
  if (verbose) cat("Plotting features:",
                   ifelse(length(features) == 1, features, paste0(paste(features[1:(length(features) - 1)], collapse = ", "), " and ", features[length(features)])))
  # Encode feature values as HSV channels and pick one color per spot
  d <- create.array.from.feature.vals(data, features, hue_breaks, cols, dark.theme, verbose)
  if (verbose) cat("Selecting HSV colors for each spot ... \n")
  data <- create.cols.from.array(data, d, features, cols, split.hsv, dark.theme, add.alpha)
  # Plot combined HSV
  if (!split.hsv) {
    plot <- ST.ImagePlot(data, data.type, shape.by, NULL, image, dims = imdims,
                         pt.size, pt.alpha, pt.border = pt.border, FALSE, palette = "Reds",
                         cols, NULL, spot.colors = data$cols,
                         FALSE, plot.title = "", FALSE, dark.theme,
                         pixels.per.um, NULL, custom.theme = custom.theme, ...)
    # Add off-canvas points only to generate a per-feature color legend
    plot <- plot +
      geom_point(data = data.frame(x = rep(-1, length(features)), y = rep(-1, length(features)), features), aes(x, y, colour = features)) +
      scale_color_manual(values = setNames(ann.cols, features)) +
      theme_void()
    if (dark.theme) {
      plot <- plot + dark_theme()
    }
    return(plot)
  } else {
    # split.hsv: create.cols.from.array returned one data.frame per feature
    plots <- lapply(seq_along(data), function (i) {
      data <- data[[i]]
      plot <- ST.ImagePlot(data, data.type, shape.by, NULL, image, dims = imdims,
                           pt.size, pt.alpha, pt.border = pt.border, add.alpha = FALSE, palette = "Reds",
                           cols, NULL, spot.colors = data$cols,
                           FALSE, plot.title = features[i], FALSE, dark.theme,
                           pixels.per.um, NULL, custom.theme = custom.theme, ...)
      if (dark.theme) {
        plot <- plot + dark_theme()
      }
      return(plot)
    })
    ncols <- grid.ncol %||% ceiling(sqrt(length(x = features)))
    nrows <- ceiling(length(x = features)/ncols)
    plot <- cowplot::plot_grid(plotlist = plots, ncol = ncols, nrow = nrows)
    if (dark.theme) plot <- plot + dark_theme()
    return(plot)
  }
}
#' Overlay HSV encoded features on HE images
#'
#' Graphs the selected features as a HSVplot on a 2D grid of spots overlaid on top of an HE images.
#' Only numerical features are accepted, e.g. genes or dimensionality reduction output vectors. If you
#' want to draw dimentionality reduction vectors you need to specify the whole names of the vectors, e.g.
#' `features = c("factor_1", "factor_2")` for the two first NMF factors.
#'
#' NOTE that this function draws sample 1 as default, but can take multiple samples as well using the `sampleids argument`.
#'
#' @details It is typically difficult to explore details in the HE image when displaying multiple samples side by side,
#' so we recommend to draw the plots for one sample at the time. If you have higher resolution images,
#' it could also take significant time to draw the plots.
#'
#' @section Arrange plots:
#'
#' The `ncols.features` argument will determine how each subplot called using
#' \code{\link{DimOverlay}} is arranged and will by default put all dims in 1 row, i.e.
#' `ncols.features = length(features)`. The `ncols.samples` argument will determine how these subplots
#' are arranged and will by default use 1 column, meaning that each subplot is put in its own row.
#' The output layout matrix would then have the dimensions `length(samples)xlength(features)`
#'
#' @section Splitting categorical features:
#' If you are plotting a categorical feature, e.g.cluster labels, you have the option to split each label into facets using \code{split.labels=TRUE}.
#' This is very useful if you have many different labels which can make it difficult to distinguish the different colors.
#'
#' @section Arrange plots:
#'
#' The `ncols.features` argument will determine how each subplot is arranged and will by default put all features in 1 row, i.e.
#' `ncols.features = length(features)`. The `ncols.samples` argument will determine how these subplots
#' are arranged and will by default use 1 column, meaning that each subplot is put in its own row.
#' The output layout matrix would then have the dimensions `length(samples)xlength(features)`
#'
#' @param object Seurat object
#' @param sampleids Names of samples to plot
#' @param ncols.features Number of columns passed to \code{\link{FeatureOverlay}}. For example,
#' if you are plotting 4 features, `ncols.features = 2` will arrange the \code{\link{FeatureOverlay}}
#' plots into a 2x2 grid [default: `length(features)`]. (see \emph{Arrange plots*} for a detailed description)
#' @param ncols.samples Number of columns in the layout grid for the samples. For example,
#' if you are plotting 4 samples, `ncols.samples = 2` will arrange the plots obtained
#' from \code{\link{FeatureOverlay}} plots into a 2x2 grid [default: `1`].
#' (see \emph{Arrange plots*} for a detailed description)
#' @param show.sb Should a scalebar be drawn? [default: TRUE]
#' @param ... Parameters passed to DimOverlay
#'
#' @inheritParams spatial_hsv_plot
#' @inheritParams HSVPlot
#'
#' @examples
#' # Load images
#' se <- se %>% SCTransform() %>% LoadImages() %>% RunNMF()
#'
#' # Overlay first two NMF factors on the first two tissue sections
#' HSVPlot(se, features = c("factor_1", "factor_2"), sampleids = 1:2)
#'
#' @export
#'
HSVOverlay <- function (
  object,
  features,
  sampleids = 1,
  rescale = TRUE,
  spots = NULL,
  ncols.features = NULL,
  ncols.samples = NULL,
  type = NULL,
  min.cutoff = NA,
  max.cutoff = NA,
  slot = "data",
  pt.size = 2,
  pt.alpha = 1,
  add.alpha = FALSE,
  shape.by = NULL,
  palette = NULL,
  cols = NULL,
  split.hsv = FALSE,
  dark.theme = FALSE,
  sample.label = TRUE,
  show.sb = TRUE,
  custom.theme = NULL,
  verbose = FALSE,
  ...
) {
  # Spot coordinates are stored in the Staffli object; it must be present
  if (!"Staffli" %in% names(object@tools)) stop("Staffli object is missing from Seurat object. Cannot plot without coordinates", call. = FALSE)
  staffli <- object@tools$Staffli
  # Keep only spots that belong to the requested sections
  section.meta <- subset(staffli[[]], sample %in% paste0(sampleids))
  spots <- spots %||% intersect(colnames(object), rownames(section.meta))
  if (length(spots) == 0) stop(paste0("None of the selected spots are present in samples ", paste(sampleids, collapse = ", "), " ... \n"), call. = FALSE)
  # Warn if the spot selection does not cover every requested section
  spot.meta <- section.meta[spots, ]
  found.samples <- unique(spot.meta$sample)
  kept.samples <- found.samples[which(found.samples %in% sampleids)]
  if (length(x = kept.samples) != length(x = sampleids)) warning(paste0("The selected spots are not present in all samples ", paste(sampleids, collapse = ", "), " ... \n",
                                                                        "Subsetting data to include samples ", paste(kept.samples, collapse = ", "), "... \n"), call. = FALSE)
  # Layout defaults: all features on one row, one section per row
  ncols.features <- ncols.features %||% length(x = features)
  ncols.samples <- ncols.samples %||% 1
  # HSV encoding is only defined for numeric features
  feature.data <- FetchData(object = object, vars = c(features), cells = spots, slot = slot)
  feature.classes <- sapply(feature.data, class)
  if (any(!feature.classes %in% "numeric")) stop("Only numeric features can be plotted with HSVOverlay. \n", call. = FALSE)
  # Per-feature value ranges, shared across sections so that colors are comparable
  value.scale.list <- lapply(feature.data, range)
  # Draw one overlay per remaining section, then arrange them in a grid
  section.plots <- lapply(kept.samples, function(s) {
    spatial_hsv_plot(object = object, features = features, split.hsv = split.hsv,
                     sample.index = s, rescale = rescale, spots = spots, type = type,
                     min.cutoff = min.cutoff, max.cutoff = max.cutoff, slot = slot,
                     pt.size = pt.size, pt.alpha = pt.alpha, pt.border = FALSE, add.alpha = add.alpha, shape.by = shape.by,
                     palette = palette, cols = cols, grid.ncol = ncols.features,
                     dark.theme = dark.theme, sample.label = sample.label, show.sb = show.sb,
                     value.scale = value.scale.list, custom.theme = custom.theme, verbose = verbose)#, ... = ...)
  })
  combined <- cowplot::plot_grid(plotlist = section.plots, ncol = ncols.samples)
  if (dark.theme) combined <- combined + dark_theme()
  return(combined)
}
#' Creates an array of dimensions number_of_spots*3*number_of_features
#'
#' For each feature, a matrix is stored with nSpots number of rows and
#' with the HSV color channels as columns. If dark.theme is set to TRUE,
#' the V channel will be reserved for feature values and the S channel will
#' be set to 1, otherwise the S channel will be reserved for feature values
#' and the V channel will be set to 1. If custom colors are provided, only
#' the raw feature values are stored (one column per feature).
#'
#' @param data data.frame with feature values
#' @param features feature names
#' @param hue_breaks Hue values (same length as features)
#' @param cols Custom colors; when non-NULL no HSV channels are computed
#' @param dark.theme Used to select what channel the feature values should be encoded in
#' @param verbose Print messages
#'
#' @return A numeric array of dimensions nrow(data) x 3 x length(features),
#' or nrow(data) x 1 x length(features) when custom colors are used
create.array.from.feature.vals <- function (
  data,
  features,
  hue_breaks,
  cols,
  dark.theme,
  verbose
) {
  if (is.null(cols)) {
    # One h/s/v matrix per feature: constant hue, feature values in S (light
    # theme) or V (dark theme), and the remaining channel fixed at 1
    d <- array(dim = c(nrow(data), 3, length(x = features)))
    if (verbose) cat("Converting values to HSV colors ... \n")
    for (i in seq_along(features)) {
      vals <- as.numeric(data[, features[i], drop = TRUE])
      if (dark.theme) {
        d[, , i] <- cbind(h = hue_breaks[i], s = 1, v = vals)
      } else {
        d[, , i] <- cbind(h = hue_breaks[i], s = vals, v = 1)
      }
    }
  } else {
    # Custom colors: only keep the raw feature values (used downstream for
    # ranking features per spot and for opacity)
    d <- array(dim = c(nrow(data), 1, length(x = features)))
    if (verbose) cat("Using provided colors ... \n")
    for (i in seq_along(features)) {
      d[, , i] <- as.numeric(data[, features[i], drop = TRUE])
    }
  }
  return(d)
}
#' Creates HSV colors from an array
#'
#' If split.hsv = FALSE, the feature with the highest value in a spot will define the
#' color for that spot. The intensity of the color will depend on if dark.theme is active and
#' the magnitude of the feature value in that spot. If split.hsv = TRUE, a list of
#' data.frames is returned instead, one per feature, each with its own "cols" column.
#'
#' @param data data.frame with feature values
#' @param d array created with \code{create.array.from.feature.vals} function
#' @param features Feature names
#' @param cols Custom colors (named by feature)
#' @param split.hsv Should the features be plotted separately?
#' @param dark.theme Used to select what channel the feature values should be encoded in
#' @param add.alpha Adds opacity to the output colors, defined by the scaled feature values
#'
#' @return \code{data} with a "cols" column added, or a list of such data.frames
#' (one per feature) when \code{split.hsv = TRUE}
create.cols.from.array <- function (
  data,
  d,
  features,
  cols,
  split.hsv,
  dark.theme,
  add.alpha
) {
  if (!split.hsv) {
    # One color per spot: the feature with the highest value wins
    if (is.null(cols)) {
      # Feature values live in the V channel (row 3) in dark theme, otherwise S (row 2)
      ind <- if (dark.theme) 3 else 2
      spot.cols <- apply(d, 1, function (x) {
        top <- max(x[ind, ])
        hsvc <- hsv(h = x[1, which.max(x[ind, ])],
                    s = if (dark.theme) 1 else top,
                    v = if (dark.theme) top else 1)
        if (add.alpha) hsvc <- scales::alpha(hsvc, top)
        return(hsvc)
      })
    } else {
      spot.cols <- unlist(apply(d, 1, function (x) {
        alpha_col <- cols[which.max(x[1, ])]
        if (add.alpha) {
          alpha_col <- scales::alpha(colour = alpha_col, alpha = max(x[1, ]))
        }
        return(alpha_col)
      }))
    }
    data$cols <- spot.cols
    return(data)
  }
  # split.hsv = TRUE: stack one copy of data per feature with a "variable"
  # column, color each row, then split into one data.frame per feature
  n.features <- dim(d)[3]
  if (is.null(cols)) {
    full.data <- do.call(rbind, lapply(seq_len(n.features), function (i) {
      # matrix() keeps the h/s/v layout even when there is a single spot
      cbind(data, setNames(data.frame(matrix(d[, , i], ncol = 3)), nm = c("h", "s", "v")), variable = features[i])
    }))
    # Vectorized hsv() keeps full numeric precision; apply() over a data.frame
    # would first coerce the values to formatted character strings
    spot.cols <- hsv(h = full.data$h, s = full.data$s, v = full.data$v)
    if (add.alpha) spot.cols <- scales::alpha(spot.cols, if (dark.theme) full.data$v else full.data$s)
  } else {
    full.data <- do.call(rbind, lapply(seq_len(n.features), function (i) {
      cbind(data, setNames(data.frame(matrix(d[, , i], ncol = 1)), nm = "v"), variable = features[i])
    }))
    spot.cols <- unname(cols[full.data$variable])
    # Bug fix: in this branch the feature values are stored in "v" only (there
    # is no "s" column), so the opacity must always come from "v"
    if (add.alpha) spot.cols <- scales::alpha(spot.cols, full.data$v)
  }
  full.data$cols <- spot.cols
  full.data.split <- split(full.data, full.data$variable)
  return(full.data.split)
}
|
5e54959429184e68211614013408b831708bff21 | ec8de27b138ee4c85bfc0e6182c88e0afec68225 | /islr/04_lab_logistic_regression.r | 9a49ca6cb545b3c24546f9e75a398cb8a48bf0c3 | [] | no_license | re9ulus/books_std | 64e1bb7a69b7d49a1c2f1ef449d7a3eb00ee6307 | 83f0f48d78868855f450f03f60467acc83e38cc6 | refs/heads/master | 2020-04-15T04:47:41.404887 | 2016-11-19T18:56:56 | 2016-11-19T18:56:56 | 68,093,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,472 | r | 04_lab_logistic_regression.r | library(ISLR)
# ISLR Chapter 4 lab: classification methods on the Smarket (S&P 500) data.
# Explore the data set
names(Smarket)
summary(Smarket)
pairs(Smarket)
# Correlations between the numeric predictors (column 9 is Direction, a factor)
cor(Smarket[, -9])
attach(Smarket)
plot(Volume)
# Logistic regression
# Model market Direction from the five lagged returns and trading volume
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
data=Smarket, family=binomial)
summary(glm.fit)
coef(glm.fit)
summary(glm.fit)$coef
# Fitted probabilities on the training data; contrasts() shows the dummy coding
glm.probs <- predict(glm.fit, type='response')
glm.probs[1:10]
contrasts(Direction)
# Threshold probabilities at 0.5 and compute the training accuracy
glm.pred <- ifelse(glm.probs > 0.5, 'Up', 'Down')
table(glm.pred, Direction)
mean(glm.pred == Direction)
# Hold out 2005 as a test set and keep the earlier years for training
train <- (Year < 2005)
Smarket.2005 <- Smarket[!train, ]
dim(Smarket.2005)
Direction.2005 <- Direction[!train]
# Refit on the training years only and evaluate on the 2005 data
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
data=Smarket, family=binomial, subset=train)
glm.probs <- predict(glm.fit, Smarket.2005, type='response')
glm.pred <- ifelse(glm.probs > 0.5, 'Up', 'Down')
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)
# Smaller model using only Lag1 and Lag2
glm.fit <- glm(Direction ~ Lag1 + Lag2, data=Smarket, family=binomial,
subset=train)
glm.probs <- predict(glm.fit, Smarket.2005, type='response')
glm.pred <- ifelse(glm.probs > 0.5, 'Up', 'Down')
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)
# LDA (linear discriminant analysis)
library(MASS)
lda.fit <- lda(Direction ~ Lag1 + Lag2, data=Smarket, subset=train)
lda.fit
plot(lda.fit)
# Predicted classes for 2005 and the resulting test accuracy
lda.pred <- predict(lda.fit, Smarket.2005)
names(lda.pred)
lda.class <- lda.pred$class
table(lda.class, Direction.2005)
mean(lda.class == Direction.2005)
# Quadratic Discriminant Analysis
qda.fit <- qda(Direction ~ Lag1 + Lag2, data=Smarket, subset=train)
qda.fit
qda.class <- predict(qda.fit, Smarket.2005)$class
table(qda.class, Direction.2005)
mean(qda.class == Direction.2005)
# K-NN (k nearest neighbors)
library(class)
# knn() takes predictor matrices plus the training labels
train.X <- cbind(Lag1, Lag2)[train,]
test.X <- cbind(Lag1, Lag2)[!train,]
train.Direction <- Direction[train]
# Fix the RNG seed for reproducibility
set.seed(1)
knn.pred <- knn(train.X, test.X, train.Direction, k=1)
table(knn.pred, Direction.2005)
mean(knn.pred == Direction.2005)
# Test accuracy for k = 1..5
for (i in 1:5) {
knn.pred <- knn(train.X, test.X, train.Direction, k=i)
print(c(i, mean(knn.pred == Direction.2005)))
}
# Caravan insurance data: KNN on standardized predictors
dim(Caravan)
attach(Caravan)
summary(Purchase)
# Standardize all predictors (column 86 is the Purchase response)
standardized.X = scale(Caravan[, -86])
var(Caravan[,1])
var(standardized.X[,1])
# Use the first 1000 observations as the test set
test <- 1:1000
train.X <- standardized.X[-test,]
test.X <- standardized.X[test,]
train.Y <- Purchase[-test]
test.Y <- Purchase[test]
set.seed(1)
knn.pred <- knn(train.X, test.X, train.Y, k=5)
# Test error rate, compared to the error of always predicting 'No'
mean(test.Y != knn.pred)
mean(test.Y != 'No')
table(knn.pred, test.Y)
|
02255f41e26895df54eb0f27524e584242d261e2 | 9773087c2ea664ab7211ad2393b2a01fdf1cf595 | /Paritala_Sravan_hw1_problem5.R | 6617194fedfed1a3dc83f42635c90bed98723d70 | [] | no_license | sra1dataninja/R | 5e940dbd031b7b1e2d39f25b4ab446d9cbb2e3e8 | 859707fb7fe3a1aa46496193e4e25676dd6e5c93 | refs/heads/master | 2020-03-12T18:43:04.795896 | 2018-04-23T23:19:50 | 2018-04-23T23:19:50 | 130,767,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,509 | r | Paritala_Sravan_hw1_problem5.R | ##Assume a set of the following objects: a<-"7" b<-"10" c<-2017"
##Write a script that converts a, b, and c to "7/10/2017" and stores it as an object called delivery_date.
##Convert delivery_date into a date.##Assume that 7/16/2017 is the delivery deadline.
##Show in R the difference between the delivery_deadline and the delivery_date.
################## Problem 5 script begins ######################################
# Input objects
a <- "7"
b <- "10"
c <- "2017"
# Combining input objcts a,b,c to for the date using paste function
delivery_date <- paste(a,b,c, sep = "/")
# printing the delivery date to the console
print(delivery_date)
# converting the delivery_date to date string
delivery_date <- as.Date(delivery_date, format = "%m/%d/%Y")
# verifying the type and class of delivery_date
typeof(delivery_date)
class(delivery_date)
# assigning delivery_deadline and converting the delivery_deadline to date string
delivery_deadline <- "07/16/2017"
delivery_deadline <- as.Date(delivery_deadline, format = "%m/%d/%Y")
# verifying the type and class of delivery_deadline
typeof(delivery_deadline)
class(delivery_deadline)
# new variable "diff_in_days" to calcualte the difference in days between delivery deadline & delivery date
diff_in_days <- delivery_deadline - delivery_date
#printing the differnce in day to the console. Answer is 6
print(diff_in_days)
######################## Problem 5 scripts ends ################### |
f38384b271a17979409831ea902685ae74478306 | 5f1f6eae7549284d6fb6ce29e4d23052b71d620a | /DataVis_R_Lab06_Shiny_DrawScatter.R | 9a1f68685fa0b834ceb964b68c32351016511ebf | [] | no_license | YangJongWon/DataVisualization | 721225192b90bbdf75311c54b2f4c260f16bf41e | ae2372630cb3a657b75b69aa0539915a7ceee126 | refs/heads/master | 2020-04-02T08:41:22.179165 | 2018-10-22T14:02:31 | 2018-10-22T14:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 48 | r | DataVis_R_Lab06_Shiny_DrawScatter.R | library(shiny)
shiny::runApp('DrawScatterPlot')
|
1e2c6591565f3ec0de5659579dac6b45fa594907 | 4e3d58e1e2165b2fd8a5c5141e9990a70914e8d9 | /man/col2value.Rd | 3f0238b0bca35b5523f50095674cbccde05c386b | [
"MIT"
] | permissive | jokergoo/circlize | d287b72d1e2cb3079068d9c36f253342fde29bf7 | 11ddb741620c44e9c566c992a4e28bb213fab19f | refs/heads/master | 2023-08-23T02:27:35.268033 | 2022-12-09T16:16:41 | 2022-12-09T16:16:41 | 9,529,406 | 884 | 156 | NOASSERTION | 2022-02-03T11:26:44 | 2013-04-18T18:56:04 | R | UTF-8 | R | false | false | 1,328 | rd | col2value.Rd | \name{col2value}
\alias{col2value}
\title{
Transform back from colors to values
}
\description{
Transform back from colors to values
}
\usage{
col2value(r, g, b, col_fun)
}
\arguments{
\item{r}{red channel in \code{\link[colorspace]{sRGB}} color space, value should be between 0 and 1. The \code{r}, \code{g} and \code{b} arguments can be wrapped into one variable which is either a three-column matrix or a vector of colors.}
\item{g}{green channel in \code{\link[colorspace]{sRGB}} color space, value should be between 0 and 1.}
\item{b}{blue channel in \code{\link[colorspace]{sRGB}} color space, value should be between 0 and 1.}
\item{col_fun}{the color mapping function generated by \code{\link{colorRamp2}}.}
}
\details{
\code{\link{colorRamp2}} transforms values to colors and this function performs the reverse transformation.
Note for some color spaces, it cannot transform back to the original value perfectly.
}
\value{
A vector of original numeric values.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
x = seq(0, 1, length.out = 11)
col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
col = col_fun(x)
col2value(col, col_fun = col_fun)
col2value("red", col_fun = col_fun)
col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"), space = "sRGB")
col = col_fun(x)
col2value(col, col_fun = col_fun)
}
|
e280ae0e257108aef0a1d5554f89ee7c7a321e82 | 458d654dbc6c0478fb04153d88c7e7c199948cd5 | /MATH 300 Statistics Code/Section 3.4.R | 68f95eadf785f61a8317710159715a9e79506157 | [] | no_license | htphan16/Math-Stats-class-code | 07634c2bb783506deb29ab81ec67c87e69741e67 | 3cb30dda8176d734e8890547047e82f03fbdd0e9 | refs/heads/master | 2020-04-21T05:48:18.454513 | 2019-02-06T03:34:02 | 2019-02-06T03:34:02 | 169,350,567 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | Section 3.4.R | # 3-4.8 (b)
# the 19 sample observations for exercise 3-4.8
x = c(0.7938,0.8032,0.8089,0.8222,0.8268,0.8383,0.8442,0.8490,0.8528,
0.8572,0.8674,0.8734,0.8786,0.8850,0.8873,0.8920,0.9069,0.9150,0.9243)
# normal probability plot; datax = TRUE puts the sample values on the x-axis
qqnorm(x, datax = TRUE)
# reference line through the sample quartiles, matching the datax orientation
qqline(x, datax = TRUE)
# 3-4.8 (c) The points on the plot seems to fall close to the straight line
# so the data look like observations from normal distribution
|
16b94bd70e1ad4cda09dce52eb6da3e28247cfc2 | 96990ccb6c1ade2506d0060cd64709a6f665e21d | /N-2 Repetition Cost Reliability/code.R | d010b5af38a3225b757ef43b7df16a5ff0f297c0 | [] | no_license | norberello/paperData | d46cfeb69f8ff7d3abfa8f0efb2917b592219497 | 430a4d426d4f4d8ae1dbda46590d7da08e9af869 | refs/heads/master | 2021-06-07T20:38:53.998851 | 2016-11-20T10:34:02 | 2016-11-20T10:34:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,605 | r | code.R | rm(list = ls())
# NOTE(review): hard-coded absolute working directory; the script only runs
# on a machine with this exact path -- consider a project-relative path.
setwd("~/Git/paperData/N-2 Repetition Cost Reliability")
# project helper functions (completeData, accuracyRemoval, rtTrimming,
# splitHalf, splitHalf_acc, ... used below)
source("functions.R")
library(dplyr)
library(ez)
library(ggplot2)
library(moments)
library(tidyr)
library(Hmisc)
library(ppcor)
# make sure the most recent version of trimr is installed
#devtools::install_github("JimGrange/trimr")
library(trimr)
# import the data: one raw trial-level file per paradigm
target <- read.csv("raw_target.csv", stringsAsFactors = FALSE)
visual <- read.csv("raw_visual.csv", stringsAsFactors = FALSE)
numeric <- read.csv("raw_numeric.csv", stringsAsFactors = FALSE)
# give all three data sets the same column names
colnames(target) <- c("participant", "trial", "condition", "accuracy", "rt")
colnames(visual) <- c("participant", "trial", "condition", "accuracy", "rt")
colnames(numeric) <- c("participant", "trial", "condition", "accuracy", "rt")
# add accuracy trimming column to each data set & declare each paradigm
# (accTrim is filled in later by the post-error trimming step)
target <- mutate(target, paradigm = "target", accTrim = 0)
visual <- mutate(visual, paradigm = "visual", accTrim = 0)
numeric <- mutate(numeric, paradigm = "numeric", accTrim = 0)
#------------------------------------------------------------------------------
# for the 'equal trials' analysis
# visual <- subset(visual, trial < 361)
# numeric <- subset (numeric, trial < 361)
#------------------------------------------------------------------------------
### sort the null trials for each paradigm
### (block-initial trials carry no valid n-2 task sequence, so they are
### relabelled "null" and excluded later)
## target data first because of the coding error: participants 1-23 and the
## later participants have different block-initial trial numbers, hence the
## two separate trial lists below
# trials to remove for participants 1-23
n23 <- c(1, 2, 103, 104, 205, 206, 307, 308)
# trials to remove for participants > 23
n24 <- c(1, 2, 121, 122, 241, 242, 361, 362)
# vectorised relabelling; replaces the original row-by-row loop over
# nrow(target) (identical result, but avoids very slow per-element
# data-frame assignment)
target$condition[target$participant <= 23 & target$trial %in% n23] <- "null"
target$condition[target$participant > 23 & target$trial %in% n24] <- "null"
## visual & numeric null trials (both paradigms share one set of
## block-initial trial numbers)
nullTrials <- c(1, 2, 121, 122, 241, 242, 361, 362)
visual$condition[visual$trial %in% nullTrials] <- "null"
numeric$condition[numeric$trial %in% nullTrials] <- "null"
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### removing the first block of each paradigm to conduct analysis for practice
### effect
# to be calculated before individual paradigms data are combined
# (before the null trials are removed)
# target23 <- subset(target, participant < 24)
# target23 <- subset(target23, trial > 102)
# target24 <- subset(target, participant > 23)
# target24 <- subset(target24, trial > 120)
# target <- rbind(target23, target24)
# visual <- subset(visual, trial > 120)
# numeric <- subset(numeric, trial > 120)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### data collation & participant removal checks
# bind all data together into one trial-level data frame
allData <- rbind(target, visual, numeric)
# remove the null trials flagged above
allData <- subset(allData, allData$condition != "null")
# check which participants don't have data for all conditions
# (completeData is defined in functions.R)
incompleteRemoval <- completeData(allData)
# check which participants have accuracy too low (below 90%);
# accuracyRemoval is defined in functions.R
accCriterion <- 90
accRemoval <- accuracyRemoval(allData, accCriterion)
# collate all removal, and do the removal
participantsRemoved <- sort(unique(c(incompleteRemoval, accRemoval)))
allData <- allData[!allData$participant %in% participantsRemoved, ]
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### overall and per-paradigm proportion of trials removed due to accuracy and
### RTs trimming
# Part 1: record trial counts BEFORE any accuracy/RT trimming so the later
# parts can compute percentages removed.
# 2nd part starts after the post-error trimming and the 3rd part after the
# RT trimming (see the matching sections further down)
# take allData, before accuracy trimming (before removing 2 trials after an error),
# and assign the trials length to a vector allTrials
allTrials <- length(allData$trial)
# additionally subset paradigms from the same allData for calculation of the
# percentage of trials removed per paradigm (each allXxx ends up holding the
# pre-trimming trial COUNT for that paradigm)
allTarget <- subset(allData, paradigm == "target")
allTarget <- length(allTarget$trial)
allVisual <- subset(allData, paradigm == "visual")
allVisual <- length(allVisual$trial)
allNumeric <- subset(allData, paradigm == "numeric")
allNumeric <- length(allNumeric$trial)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# accuracy trimming (remove two trials following an error):
# accTrim[i] = accuracy[i-2] * accuracy[i-1], so a trial is kept only when
# BOTH of the two preceding rows were answered correctly.
# Vectorised version of the original row-by-row loop: identical results, but
# avoids the very slow repeated data-frame element assignment, and is guarded
# against data frames with fewer than three rows (where the original
# `for(i in 3:nrow(allData))` loop would mis-index because 3:2 counts down).
# NOTE(review): as in the original, the window runs across participant and
# paradigm boundaries -- confirm this is intended.
nTrialsAll <- nrow(allData)
if (nTrialsAll >= 3) {
  allData$accTrim[3:nTrialsAll] <-
    allData$accuracy[1:(nTrialsAll - 2)] * allData$accuracy[2:(nTrialsAll - 1)]
}
allData <- subset(allData, allData$accTrim == 1)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# proportion of trials removed by the post-error trimming
# number of trials surviving the removal of the two trials after each error
errorTrials <- length(allData$trial)
# overall number of trials removed by the post-error trimming
rmErrorTrials <- allTrials - errorTrials
# per-paradigm trial counts after error removal, and the per-paradigm number
# of removed trials (relative to the pre-trimming counts recorded in Part 1)
errorTarget <- subset(allData, paradigm == "target")
errorTarget <- length(errorTarget$trial)
rmErrorTarget <- allTarget - errorTarget
errorVisual <- subset(allData, paradigm == "visual")
errorVisual <- length(errorVisual$trial)
rmErrorVisual <- allVisual - errorVisual
errorNumeric <- subset(allData, paradigm == "numeric")
errorNumeric <- length(errorNumeric$trial)
rmErrorNumeric <- allNumeric - errorNumeric
# calculate the overall percentage of removed trials
propErrRemoved <- (100 * rmErrorTrials) / allTrials
# BUG FIX: this previously read round(proportionRemoved, 1), but
# proportionRemoved is only defined in the RT-trimming section further down,
# so running the script top-to-bottom stopped here with an
# "object 'proportionRemoved' not found" error.  The intended quantity is
# propErrRemoved, computed on the line above.
round(propErrRemoved, 1)
# calculate the percentage of trials removed per paradigm
propErrTarget <- (100 * rmErrorTarget) / allTarget
round(propErrTarget, 1)
propErrVisual <- (100 * rmErrorVisual) / allVisual
round(propErrVisual, 1)
propErrNumeric <- (100 * rmErrorNumeric) / allNumeric
round(propErrNumeric, 1)
#------------------------------------------------------------------------------
### main accuracy analysis
# per-participant percentage accuracy in every paradigm x condition cell;
# accuracy is coded 0/1 per trial, so sum/length * 100 gives percent correct
accuracy <- allData %>%
  group_by(paradigm, condition, participant) %>%
  summarise(rawAcc = (sum(accuracy) / length(accuracy)) * 100)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### response time analysis
# disable scientific notation
options(scipen = 999)
# trim the slow RTs (2.5 SD criterion, minimum 150 ms; rtTrimming is
# defined in functions.R)
rtData <- rtTrimming(allData, sd = 2.5, minRT = 150)
# get the mean RT for each participant in every paradigm x condition cell
rt <- rtData %>%
  group_by(paradigm, condition, participant) %>%
  summarise(meanRT = mean(rt))
# how many participants?
nparticipants <- length(unique(rt$participant))
# change paradigm & condition to factor so we can do ANOVA on it
rt$paradigm <- as.factor(rt$paradigm)
rt$condition <- as.factor(rt$condition)
# do the 2 (condition: ABA/CBA) x 3 (paradigm) repeated-measures ANOVA
rtANOVA <- ezANOVA(
  data = data.frame(rt),
  dv = .(meanRT),
  wid = .(participant),
  within = .(paradigm, condition),
  between = NULL,
  detailed = FALSE
)
# drop the grouping attributes so subsequent subset() calls behave plainly
rt <- data.frame(rt)
# get the mean RT per cell (with between-participant standard errors)
meanRT <- rt %>%
  group_by(paradigm, condition) %>%
  summarise(rt = round(mean(meanRT), 0), se = round(sd(meanRT) /
                                                      sqrt(nparticipants), 0))
# main effect of condition
# NOTE(review): `seq` masks base::seq, and these means/SEs are computed from
# trial-level RTs (rtData) divided by sqrt(nparticipants) -- confirm this is
# the intended SE definition.
seq <- rtData %>%
  group_by(condition) %>%
  summarise(meanRT = round(mean(rt), 0), se = round(sd(rt) /
                                                      sqrt(nparticipants), 0))
# main effect of paradigm
# get the mean RT per cell (again from trial-level data; see note above)
paradigm <- rtData %>%
  group_by(paradigm) %>%
  summarise(meanRT = round(mean(rt), 0), se = round(sd(rt) /
                                                      sqrt(nparticipants), 0))
## t-tests of each paradigm's n-2 repetition cost (ABA vs. CBA, paired over
## participants)
targetABA <- subset(rt, rt$paradigm == "target" & rt$condition == "ABA")
targetCBA <- subset(rt, rt$paradigm == "target" & rt$condition == "CBA")
targetTtest <- t.test(targetABA$meanRT, targetCBA$meanRT, paired = TRUE)
visualABA <- subset(rt, rt$paradigm == "visual" & rt$condition == "ABA")
visualCBA <- subset(rt, rt$paradigm == "visual" & rt$condition == "CBA")
visualTtest <- t.test(visualABA$meanRT, visualCBA$meanRT, paired = TRUE)
numericABA <- subset(rt, rt$paradigm == "numeric" & rt$condition == "ABA")
numericCBA <- subset(rt, rt$paradigm == "numeric" & rt$condition == "CBA")
numericTtest <- t.test(numericABA$meanRT, numericCBA$meanRT, paired = TRUE)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### Part 3 of calculating proportion of trials removed
# after trimming RTs, assign the length of trimmed data frame to trimmedTrials
trimmedTrials <- length(rtData$trial)
# overall number of trials removed, relative to the pre-trimming counts
# recorded in Part 1
removedTrials <- allTrials - trimmedTrials
# subset paradigms from trimmed rtData for calculation of
# percentage of trials removed per paradigm
# and calculate number of trials removed
trimTarget <- subset(rtData, paradigm == "target")
trimTarget <- length(trimTarget$trial)
removedTarget <- allTarget - trimTarget
trimVisual <- subset(rtData, paradigm =="visual")
trimVisual <- length(trimVisual$trial)
removedVisual <- allVisual - trimVisual
trimNumeric <- subset(rtData, paradigm == "numeric")
trimNumeric <- length(trimNumeric$trial)
removedNumeric <- allNumeric - trimNumeric
# calculate the overall percentage of removed trials
# NOTE(review): the bare round() calls below are only displayed when the
# script is run line-by-line or via Rscript; under source() their results
# are silently discarded.
proportionRemoved <- (100*removedTrials)/allTrials
round(proportionRemoved, 1)
# calculate the percentage of trials removed per paradigm
propRemTarget <- (100*removedTarget)/allTarget
round(propRemTarget,1)
propRemVisual <- (100*removedVisual)/ allVisual
round(propRemVisual,1)
propRemNumeric <- (100*removedNumeric)/ allNumeric
round(propRemNumeric,1)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# change paradigm & condition to factor so we can do ANOVA on it
accuracy$paradigm <- as.factor(accuracy$paradigm)
accuracy$condition <- as.factor(accuracy$condition)
# do the 2 (condition) x 3 (paradigm) repeated-measures ANOVA on accuracy
accuracyANOVA <- ezANOVA(
  data = data.frame(accuracy),
  dv = .(rawAcc),
  wid = .(participant),
  within = .(paradigm, condition),
  between = NULL,
  detailed = FALSE
)
# get the mean accuracy per cell (with between-participant standard errors)
meanAcc <- accuracy %>%
  group_by(paradigm, condition) %>%
  summarise(meanAcc = round(mean(rawAcc), 2),
            se = round(sd(rawAcc) / sqrt(nparticipants), 2))
# main effect of condition
# NOTE(review): `seq` and `paradigm` overwrite the objects of the same names
# created in the RT section above.
seq <- accuracy %>%
  group_by(condition) %>%
  summarise(meanAcc = round(mean(rawAcc), 2),
            se = round(sd(rawAcc) / sqrt(nparticipants), 2))
# main effect of paradigm
paradigm <- accuracy %>%
  group_by(paradigm) %>%
  summarise(meanAcc = round(mean(rawAcc), 2),
            se = round(sd(rawAcc) / sqrt(nparticipants), 2))
## t-tests of each paradigm's n-2 repetition cost in accuracy (ABA vs. CBA,
## paired over participants)
targetABA <- subset(accuracy, accuracy$paradigm == "target" &
                      accuracy$condition == "ABA")
targetCBA <- subset(accuracy, accuracy$paradigm == "target" &
                      accuracy$condition == "CBA")
targetTtest <- t.test(targetABA$rawAcc, targetCBA$rawAcc, paired = TRUE)
visualABA <- subset(accuracy, accuracy$paradigm == "visual" &
                      accuracy$condition == "ABA")
visualCBA <- subset(accuracy, accuracy$paradigm == "visual" &
                      accuracy$condition == "CBA")
visualTtest <- t.test(visualABA$rawAcc, visualCBA$rawAcc, paired = TRUE)
numericABA <- subset(accuracy, accuracy$paradigm == "numeric" &
                       accuracy$condition == "ABA")
numericCBA <- subset(accuracy, accuracy$paradigm == "numeric" &
                       accuracy$condition == "CBA")
numericTtest <- t.test(numericABA$rawAcc, numericCBA$rawAcc, paired = TRUE)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### look at individual differences in the n-2 repetition cost
#---- Response Time
# get a data frame with n-2 repetition cost as the DV: reshape so ABA and CBA
# become columns, then take their difference per participant and paradigm
wideRt <- spread(rt, condition, meanRT)
n2Cost <- wideRt %>%
  group_by(paradigm,participant) %>%
  summarise(n2Cost=ABA-CBA)
n2Cost$n2Cost <- round(n2Cost$n2Cost, 0)
# load individual differences data (rumination, processing speed, ...)
indData <- read.csv("ind_data.csv", stringsAsFactors = FALSE)
# one column of n-2 costs per paradigm, merged with the individual differences
wideN2Cost <- spread(n2Cost, paradigm, n2Cost)
corData <- merge(wideN2Cost, indData, by = "participant")
# impute the missing data point for subject 68 (in position 51)
# NOTE(review): hard-coded row index -- breaks silently if the merge order or
# sample changes; safer to index by participant id.
corData$processing[51] <- mean(corData$processing, na.rm = TRUE)
# draw overlapping density functions of n-2 repetition costs
# NOTE(review): bare top-level ggplot objects are auto-printed by Rscript /
# interactive use but NOT under source(); wrap in print() if sourcing.
pdf("biDistributions_rt.pdf", width = 8, height = 8)
ggplot(n2Cost, aes(x = n2Cost, colour = paradigm, linetype = paradigm)) +
  geom_line(stat = "density", size = 1.3) +
  scale_linetype_manual(values = c("solid", "dashed", "dotdash")) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 16),
        panel.background = element_rect(fill = "grey86")) +
  scale_x_continuous(name = "N-2 Repetition Cost (ms)") +
  scale_y_continuous(name = "Density") +
  theme(legend.text=element_text(size = 14),
        legend.title=element_text(size = 16))
dev.off()
# same plot, but save as PNG
png("biDistributions_rt.png", width = 8, height = 8, units = "in", res = 500)
ggplot(n2Cost, aes(x = n2Cost, colour = paradigm, linetype = paradigm)) +
  geom_line(stat = "density", size = 1.3) +
  scale_linetype_manual(values = c("solid", "dashed", "dotdash")) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 16),
        panel.background = element_rect(fill = "grey86")) +
  scale_x_continuous(name = "N-2 Repetition Cost (ms)") +
  scale_y_continuous(name = "Density") +
  theme(legend.text=element_text(size = 14),
        legend.title=element_text(size = 16))
dev.off()
# get summary of distributions for each paradigm (range, spread, shape and a
# Shapiro-Wilk normality p-value)
RtDistributions <- n2Cost %>%
  group_by(paradigm) %>%
  summarise(min = min(n2Cost),
            max = max(n2Cost),
            sd = sd(n2Cost),
            skew = skewness(n2Cost),
            kurtosis = kurtosis(n2Cost),
            normality = shapiro.test(n2Cost)$p.value,
            mean = mean(n2Cost))
#---- Accuracy
# re-calculate mean accuracy per participant/ condition/ paradigm
trimmedAcc <- allData %>%
  group_by(paradigm, condition, participant) %>%
  summarise(rawAcc = (sum(accuracy) / length(accuracy)) * 100)
# change the data frame format to wide (one column per condition)
wideTrimmedAcc <- spread(trimmedAcc, condition, rawAcc)
# calculate n-2 repetition cost for accuracy (ABA minus CBA)
AccN2Cost <- wideTrimmedAcc %>%
  group_by(paradigm, participant) %>%
  summarise(AccN2Cost = ABA - CBA)
# round to 2 decimal places
AccN2Cost$AccN2Cost <- round(AccN2Cost$AccN2Cost, 2)
# draw overlapping density functions of n-2 repetition costs
# NOTE(review): as with the RT plots, bare ggplot objects are not printed
# under source(); wrap in print() if sourcing.
pdf("biDistributions_acc.pdf", width = 8, height = 8)
ggplot(AccN2Cost, aes(x = AccN2Cost, colour = paradigm, linetype = paradigm)) +
  geom_line(stat = "density", size = 1.3) +
  scale_linetype_manual(values = c("solid", "dashed", "dotdash")) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 16),
        panel.background = element_rect(fill = "grey86")) +
  scale_x_continuous(name = "N-2 Repetition Cost (Accuracy)") +
  scale_y_continuous(name = "Density") +
  theme(legend.text=element_text(size = 14),
        legend.title=element_text(size = 16))
dev.off()
# same plot, in PNG
png("biDistributions_acc.png", width = 8, height = 8, unit = "in", res = 500)
ggplot(AccN2Cost, aes(x = AccN2Cost, colour = paradigm, linetype = paradigm)) +
  geom_line(stat = "density", size = 1.3) +
  scale_linetype_manual(values = c("solid", "dashed", "dotdash")) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 16),
        panel.background = element_rect(fill = "grey86")) +
  scale_x_continuous(name = "N-2 Repetition Cost (Accuracy)") +
  scale_y_continuous(name = "Density") +
  theme(legend.text=element_text(size = 14),
        legend.title=element_text(size = 16))
dev.off()
# get summary of n-2 repetition costs for accuracy distributions for each paradigm
AccDistributions <- AccN2Cost %>%
  group_by(paradigm) %>%
  summarise(min = min(AccN2Cost),
            max = max(AccN2Cost),
            sd = sd(AccN2Cost),
            skew = skewness(AccN2Cost),
            kurtosis = kurtosis(AccN2Cost),
            normality = shapiro.test(AccN2Cost)$p.value,
            mean = mean(AccN2Cost))
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### Sequencing effect analysis
# load a .csv file with data of the possible order of paradigms
# (per-participant counterbalancing information)
order <- read.csv("paradigms_order.csv", stringsAsFactors = FALSE)
# change names of columns
colnames(order) <- c("participant", "sixOrders", "threeOrders")
# remove column with data of order of experiment components
# which include processing speed and RRS order
order <- order[,-2]
# combine the data frame for n-2 repetition cost, the n2Cost and order.csv,
# matched on participant id
orderData <- merge(n2Cost, order, by = "participant")
orderData$threeOrders <- as.numeric(orderData$threeOrders)
# subset orderData based on counterbalancing order 1, 2, and 3, then recode
# threeOrders to the position (1st/2nd/3rd) at which each paradigm was run.
# The three counterbalancing orders rotate through the paradigms:
#   order 1: target 1st, visual 2nd, numeric 3rd
#   order 2: visual 1st, numeric 2nd, target 3rd
#   order 3: numeric 1st, target 2nd, visual 3rd
# (vectorised relabelling; replaces the original row-by-row loops and
# produces identical results)
# order1: target 1st, visual 2nd, numeric 3rd
order1 <- subset(orderData, orderData$threeOrders == 1)
# target is already coded as 1st
order1$threeOrders[order1$paradigm == "visual"] <- 2
order1$threeOrders[order1$paradigm == "numeric"] <- 3
# order2: visual 1st, numeric 2nd, target 3rd
order2 <- subset(orderData, orderData$threeOrders == 2)
order2$threeOrders[order2$paradigm == "visual"] <- 1
# numeric is already coded as 2nd
order2$threeOrders[order2$paradigm == "target"] <- 3
# order3: numeric 1st, target 2nd, visual 3rd
order3 <- subset(orderData, orderData$threeOrders == 3)
order3$threeOrders[order3$paradigm == "numeric"] <- 1
order3$threeOrders[order3$paradigm == "target"] <- 2
# visual is already coded as 3rd
# combine the order1, order2, and order3 subsets, which now carry the
# correctly coded administration position
n2CostOrder <- rbind(order1, order2, order3)
# anova: does the n-2 repetition cost depend on administration position?
n2CostOrder$paradigm <- as.factor(n2CostOrder$paradigm)
n2CostOrder$threeOrders <- as.factor(n2CostOrder$threeOrders)
# ANOVA for n2cost as DV and threeOrders (1st/2nd/3rd paradigm run) as IV
orderANOVA <- ezANOVA(
  data = data.frame(n2CostOrder),
  dv = .(n2Cost),
  wid = .(participant),
  within = .(threeOrders),
  between = NULL,
  detailed = FALSE
)
# mean n-2 cost (and between-participant SE) per administration position
meanN2CostOrder <- n2CostOrder %>%
  group_by(threeOrders) %>%
  summarise(meanN2Cost= round(mean(n2Cost), 0),
            se = round(sd(n2Cost) / sqrt(nparticipants), 0))
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### correlations
#--- Response time
# Overall (mean) response times per participant and paradigm
rtCor <- rtData %>%
  group_by(paradigm, participant) %>%
  summarise(meanRT = mean(rt))
wideRtCor <- spread(rtCor, paradigm, meanRT)
wideRtCor <- merge(wideRtCor, indData, by = "participant")
# pairwise correlation matrix (r and p) across mean RTs and the individual
# differences measures
indRtCor <- rcorr(as.matrix(wideRtCor))
# n-2 repetition cost and Ind Diff correlations
indCor <- rcorr(as.matrix(corData))
# partial correlations, controlling for processing speed (as requested
# by reviewer)
partial_target_visual <- pcor.test(corData$target, corData$visual,
                                   corData$processing)
partial_target_numeric <- pcor.test(corData$target, corData$numeric,
                                    corData$processing)
partial_visual_numeric <- pcor.test(corData$visual, corData$numeric,
                                    corData$processing)
#--- Accuracy
# Overall (mean) accuracy per participant and paradigm
accAve <- allData %>%
  group_by(paradigm, participant) %>%
  summarise(rawAcc = (sum(accuracy) / length(accuracy)) * 100)
wideAcc <- spread(accAve, paradigm, rawAcc)
wideAcc <- merge(wideAcc, indData, by = "participant")
accCor <- rcorr(as.matrix(wideAcc))
# change data frame format to wide (one accuracy n-2 cost column per paradigm)
wideAccN2Cost <- spread(AccN2Cost, paradigm, AccN2Cost)
wideAccN2Cor <- merge(wideAccN2Cost, indData, by = "participant")
# impute the missing data point for subject 68 (in position 51)
# NOTE(review): hard-coded row index, as in the RT section above -- safer to
# index by participant id.
wideAccN2Cor$processing[51] <- mean(wideAccN2Cor$processing, na.rm = TRUE)
# calculate correlations
indAccN2Cor <- rcorr(as.matrix(wideAccN2Cor))
round(indAccN2Cor$r, 2)
round(indAccN2Cor$P, 3)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# multiple regressions (just for RT)
# normalise Ind Diff scores for regression (z-scores via scale())
corData$rumination <- scale(corData$rumination)
corData$processing <- scale(corData$processing)
nIndCor <- rcorr(as.matrix(corData))
# predict each paradigm's n-2 cost from rumination and processing speed
visualReg <- lm(visual ~ rumination + processing, data = corData)
targetReg <- lm(target ~ rumination + processing, data = corData)
numericReg <- lm(numeric ~ rumination + processing, data = corData)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### do the reliability checks
# run the reliability function for response times: splitHalf (functions.R)
# returns one bootstrap split-half correlation per paradigm per split
set.seed(200)
correlations_rt <- splitHalf(rtData, splitType = "random", nSplits = 500)
colnames(correlations_rt) <- c("Target Detection", "Visual Judgment",
                               "Numeric Judgment")
# violin plot of the reliability bootstrap
library(vioplot)
pdf("violin Reliability_rt.pdf", width = 8, height = 8)
vioplot(correlations_rt[, 1], correlations_rt[, 2], correlations_rt[, 3],
        col = "skyblue", names = c("Target Detection", "Visual Judgment",
                                   "Numeric Judgment"), lwd = 1.5,
        ylim = c(-0.2, 1))
title(ylab = "Correlation (r)", xlab = "Paradigm")
# NOTE(review): 0.5385 is presumably a benchmark/critical r value for this
# sample size -- confirm and consider naming it as a constant.
abline(h = 0.5385, lwd = 2, lty = 2)
dev.off()
# PNG version of the same figure
png("violin Reliability_rt.png", width = 8, height = 8, units = "in",
    res = 500)
vioplot(correlations_rt[, 1], correlations_rt[, 2], correlations_rt[, 3],
        col = "skyblue", names = c("Target Detection", "Visual Judgment",
                                   "Numeric Judgment"), lwd = 1.5,
        ylim = c(-0.2, 1))
title(ylab = "Correlation (r)", xlab = "Paradigm")
abline(h = 0.5385, lwd = 2, lty = 2)
dev.off()
# run the reliability function for accuracy (same seed so splits match)
set.seed(200)
correlations_acc <- splitHalf_acc(allData, splitType = "random", nSplits = 500)
colnames(correlations_acc) <- c("Target Detection", "Visual Judgment",
                                "Numeric Judgment")
# violin plot of the reliability bootstrap
library(vioplot)
pdf("violin Reliability_accuracy.pdf", width = 8, height = 8)
vioplot(correlations_acc[, 1], correlations_acc[, 2], correlations_acc[, 3],
        col = "skyblue", names = c("Target Detection", "Visual Judgment",
                                   "Numeric Judgment"), lwd = 1.5,
        ylim = c(-0.2, 1))
title(ylab = "Correlation (r)", xlab = "Paradigm")
abline(h = 0.5385, lwd = 2, lty = 2)
dev.off()
# PNG version of the same figure
png("violin Reliability_accuracy.png", width = 8, height = 8, units = "in",
    res = 500)
vioplot(correlations_acc[, 1], correlations_acc[, 2], correlations_acc[, 3],
        col = "skyblue", names = c("Target Detection", "Visual Judgment",
                                   "Numeric Judgment"), lwd = 1.5,
        ylim = c(-0.2, 1))
title(ylab = "Correlation (r)", xlab = "Paradigm")
abline(h = 0.5385, lwd = 2, lty = 2)
dev.off()
#------------------------------------------------------------------------------ |
ec5fc1ea32a9e30c58da9e75f931c7d97a1efe29 | 1f22aa2008d18e61f008412621472a972d8760f4 | /ATACseq/src/find_differential_peak_or_gene.r | 16f572d0a64725d9fe448e449d4aa46f0a402f71 | [] | no_license | youna2/mouse_epigenome_analysis | 2966afbfc9ab81727fb728dcef8e9cc0da2b96b7 | 5b4bbf1135210429546c96e84b7dd64186363d59 | refs/heads/master | 2020-03-26T04:54:26.037035 | 2019-04-24T19:52:14 | 2019-04-24T19:52:14 | 144,527,883 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,560 | r | find_differential_peak_or_gene.r |
# FDR threshold used throughout for calling differential peaks/genes
p.cutoff <- 0.05
# accumulators for the per-tissue/per-sex FDR and log-fold-change columns;
# filled in by cbind() inside the tissue loop below
p.mat <- NULL
fc.mat <- NULL
#### remove samples of age 26.75 months ######
# keep only samples of the selected strain, dropping the 26.75-month cohort
strain.sel <- if (selB6) "B6" else "NZO"
selectsample <- TYPE == strain.sel & AGE != 26.75
y0 <- Y[, selectsample]
y0forheatmap <- bed[, selectsample]
age0 <- AGE[selectsample]
gender0 <- GENDER[selectsample]
tissue0 <- TISSUE[selectsample]
type0 <- TYPE[selectsample]
samplemouseid0 <- SAMPLEMOUSEID[selectsample]
## Do timeseries analysis separately for each tissue type and gender #######
# all figures/tables are written into the results directory
setwd("../results")
# tissues/cell types analysed, in plotting order
utissue <- c("spleen", "BM", "memory", "naive", "PBL")
# open one PDF per strain ("diffpeakB6.pdf" / "diffpeakNZO.pdf") and remember
# the per-strain prefix used for the top-gene output files
strain.tag <- if (selB6) "B6" else "NZO"
pdf(file = paste("diffpeak", strain.tag, ".pdf", sep = ""))
topgene <- paste(strain.tag, "topgene", sep = "")
# accumulators for the two-way barplot of up/down counts, female and male
twoway.barplot.argF1 <- NULL
twoway.barplot.argF2 <- NULL
twoway.barplot.argF3 <- NULL
twoway.barplot.argM1 <- NULL
twoway.barplot.argM2 <- NULL
twoway.barplot.argM3 <- NULL
# For each tissue, fit the age time-series model separately for males and
# females (edgeRfit is a project helper), report counts of significantly
# age-increasing/decreasing peaks or genes, accumulate barplot inputs, and
# draw per-cell heatmaps of the significant features.
for(i in 1:length(utissue))
{
# male samples of the current tissue
y=y0[,tissue0==utissue[i]& gender0=="M"]
age=age0[tissue0==utissue[i]& gender0=="M"]
gender=gender0[tissue0==utissue[i]& gender0=="M"]
type=type0[tissue0==utissue[i]& gender0=="M"]
### Do timeseries analysis within male
atac.glmtopM=edgeRfit(y,age,gender,type)
# female samples of the current tissue
y=y0[,tissue0==utissue[i]& gender0=="F"]
age=age0[tissue0==utissue[i]& gender0=="F"]
gender=gender0[tissue0==utissue[i]& gender0=="F"]
type=type0[tissue0==utissue[i]& gender0=="F"]
### Do timeseries analysis within female
atac.glmtopF=edgeRfit(y,age,gender,type)
#### print number of age-increasing/decreasing peaks or genes #################
print("In Tissue")
print(utissue[i])
print("In Female, significantly changing peaks/genes")
print( sum(atac.glmtopF[,"FDR"]<p.cutoff))
print("In Female, significantly increasing peaks/genes")
print( sum(atac.glmtopF[,"FDR"]<p.cutoff & atac.glmtopF[,"logFC"]>0))
print("In Female, significantly decreasing peaks/genes")
print( sum(atac.glmtopF[,"FDR"]<p.cutoff & atac.glmtopF[,"logFC"]<0))
# accumulate female barplot inputs: tissue label, +count / -count, direction
twoway.barplot.argF1=c(twoway.barplot.argF1,rep(paste(utissue[i]),2))
twoway.barplot.argF2=c(twoway.barplot.argF2,c( sum(atac.glmtopF[,"FDR"]<p.cutoff & atac.glmtopF[,"logFC"]>0), -sum(atac.glmtopF[,"FDR"]<p.cutoff & atac.glmtopF[,"logFC"]<0)))
twoway.barplot.argF3=c(twoway.barplot.argF3,c("+","-"))
print("In Male, significantly changing peaks/genes")
print( sum(atac.glmtopM[,"FDR"]<p.cutoff))
print("In Male, significantly increasing peaks/genes")
print( sum(atac.glmtopM[,"FDR"]<p.cutoff & atac.glmtopM[,"logFC"]>0))
print("In Male, significantly decreasing peaks/genes")
print( sum(atac.glmtopM[,"FDR"]<p.cutoff & atac.glmtopM[,"logFC"]<0))
# accumulate male barplot inputs in the same layout
twoway.barplot.argM1=c(twoway.barplot.argM1,rep(paste(utissue[i]),2))
twoway.barplot.argM2=c(twoway.barplot.argM2,c( sum(atac.glmtopM[,"FDR"]<p.cutoff & atac.glmtopM[,"logFC"]>0), -sum(atac.glmtopM[,"FDR"]<p.cutoff & atac.glmtopM[,"logFC"]<0)))
twoway.barplot.argM3=c(twoway.barplot.argM3,c("+","-"))
print("peaks/genes that are significantly changing in both male and female")
print( sum(atac.glmtopM[,"FDR"]<p.cutoff & atac.glmtopF[,"FDR"]<p.cutoff))
#### draw heatmap of age-increasing/decreasing peaks or genes #################
for(sex in c("F","M"))
{
if(sex=="M") atac.glmtop=atac.glmtopM
if(sex=="F") atac.glmtop=atac.glmtopF
# grow the global FDR / logFC matrices: one column per tissue x sex cell
# (column order must match the colnames assignment after this loop)
p.mat=cbind(p.mat,atac.glmtop[,"FDR"])
fc.mat=cbind(fc.mat,atac.glmtop[,"logFC"])
# only draw a heatmap when there are more than 2 significant features
if(sum(atac.glmtop[,"FDR"]<p.cutoff)>2)
{
heatmapmat=y0forheatmap[atac.glmtop[,"FDR"]<p.cutoff,tissue0==utissue[i]& gender0==sex]
# row annotation: direction of the age effect per feature
annot.row=annot=atac.glmtop[atac.glmtop[,"FDR"]<p.cutoff,"logFC"]
annot[annot.row>0]="opening"
annot[annot.row<0]="closing"
# label columns as mouseid_age and order them by age
colnames(heatmapmat)=paste( samplemouseid0[tissue0==utissue[i]& gender0==sex] ,"_",age0[tissue0==utissue[i] & gender0==sex ],sep="")
heatmapmat=heatmapmat[,order(age0[tissue0==utissue[i] & gender0==sex])]
# cap the heatmap at 40000 randomly sampled rows to keep plotting feasible
if(nrow(heatmapmat)>40000)
{
tmpsample=sample(1:nrow(heatmapmat),40000)
heatmapmat=heatmapmat[tmpsample,]
annot=annot[tmpsample]
}
annot=as.data.frame(annot)
rownames(annot)=rownames(heatmapmat)
# add a pseudocount so log() below is defined when zeros are present
if(min(heatmapmat)==0) heatmapmat=heatmapmat+1
# row-scaled log heatmap; title reports the opening/closing counts
pheatmap(log(heatmapmat),scale="row",cluster_cols = FALSE,main=paste(utissue[i],sex, sum(atac.glmtop[,"FDR"]<p.cutoff & atac.glmtop[,"logFC"]>0),"opening", sum(atac.glmtop[,"FDR"]<p.cutoff & atac.glmtop[,"logFC"]<0),"closing"),annotation_row=annot,show_rownames=F,color=colorRampPalette(c("blue","white","red"))(100))
}
}
}
# label the accumulated columns "tissue F"/"tissue M" in loop order (the
# inner sex loop runs F then M for each tissue)
colnames(fc.mat)=colnames(p.mat)=paste(rep(utissue,each=2),rep(c("F","M"),length(utissue)))
save(p.mat,fc.mat,file=paste("pmat_fcmat_B6_",selB6,".Rdata",sep=""))
### heatmap of p-values of peaks/genes across tissues and gender
global.heatmap(p.mat,fc.mat)
### peaks/genes that are commonly increasing/decreasing across tissues and gender
### (common.peaks writes the <topgene>_*_increasing/decreasing .txt files
### that are read back in just below; `annotation` is defined elsewhere)
common.peaks(p.mat,fc.mat,TRUE,topgene,annotation)
# counts of commonly increasing/decreasing features per sex
f.increasing=nrow(read.delim(paste(topgene,"_F_increasing",".txt",sep="")))
f.decreasing=nrow(read.delim(paste(topgene,"_F_decreasing",".txt",sep="")))
m.increasing=nrow(read.delim(paste(topgene,"_M_increasing",".txt",sep="")))
m.decreasing=nrow(read.delim(paste(topgene,"_M_decreasing",".txt",sep="")))
# append a "common" category to the barplot inputs accumulated in the loop
twoway.barplot.argF1=c(twoway.barplot.argF1,rep("common",2))
twoway.barplot.argF2=c(twoway.barplot.argF2,c(f.increasing,-f.decreasing))
twoway.barplot.argF3=c(twoway.barplot.argF3,c("+","-"))
twoway.barplot.argM1=c(twoway.barplot.argM1,rep("common",2))
twoway.barplot.argM2=c(twoway.barplot.argM2,c(m.increasing,-m.decreasing))
twoway.barplot.argM3=c(twoway.barplot.argM3,c("+","-"))
# NOTE(review): the data-driven ylimmax computed on the next line is dead
# code -- it is immediately overwritten by the hard-coded 40000.
ylimmax=max(abs(c(twoway.barplot.argM2,twoway.barplot.argF2)))
ylimmax=40000;YLIM=c(-ylimmax,ylimmax)
# two-way (up/down) barplots of differential counts, female then male
q1=twoway.barplot(twoway.barplot.argF1,twoway.barplot.argF2,twoway.barplot.argF3,(-10):10*1000,(-10):10*1000,"Tissue","no. differential peaks/genes",paste(type0[1]," F",sep=""),YLIM)
q2=twoway.barplot(twoway.barplot.argM1,twoway.barplot.argM2,twoway.barplot.argM3,(-10):10*1000,(-10):10*1000,"Tissue","no. differential peaks/genes",paste(type0[1]," M",sep=""),YLIM)
multiplot(q1,NA,q2,NA,cols=2)
tissue.gender.type <- c(colnames(p.mat),"_F_increasing","_F_decreasing","_M_increasing","_M_decreasing","_all_increasing","_all_decreasing")
### save differential peaks/genes of each tissue as a txt file ####
diff.peaks(p.mat,fc.mat,topgene)
### See if the age-related pattern is common across tissues
### For increasing (jj==1) and decreasing (jj==2) features separately, test
### whether the significant sets overlap across tissue/sex columns more than
### expected by chance (one-sided Fisher / hypergeometric test).
for(jj in 1:2)
{
# tt: logical matrix, TRUE where a feature is significant in the given direction.
if(jj==1) tt=((p.mat<p.cutoff & fc.mat>0)) else tt=((p.mat<p.cutoff & fc.mat<0))
# Pairwise result matrices: manual hypergeometric p-value (fisher.p0),
# fisher.test p-value (fisher.p), and odds-ratio estimate (fisher.stat).
# (Spelled out nrow=/ncol= instead of relying on partial matching nr=/nc=.)
fisher.p=fisher.p0=fisher.stat=matrix(NA,nrow=ncol(p.mat),ncol=ncol(p.mat))
rownames(fisher.p)=colnames(fisher.p)=colnames(p.mat)
rownames(fisher.stat)=colnames(fisher.stat)=colnames(p.mat)
for(i in seq_len(ncol(tt)))
for(j in seq_len(ncol(tt)))
{
print(c(colnames(tt)[i],colnames(tt)[j]));
print(table(tt[,i],tt[,j]));
temp=table(tt[,i],tt[,j])
# Both columns need TRUE and FALSE entries to form a full 2x2 table.
if(ncol(temp)>1 && nrow(temp)>1)
{
total=sum(temp)
black=sum(temp[1,])   # features not significant in column i
white=sum(temp[2,])   # features significant in column i
pick=sum(temp[,2])    # features significant in column j
whitepick=temp[2,2]-1
# Manual one-sided hypergeometric p-value, kept as a cross-check.
fisher.p0[i,j]= 1-phyper(whitepick,white,black,pick)
# Run fisher.test once per pair (it was previously computed twice).
ft=fisher.test(temp,alternative="greater")
fisher.p[i,j]= ft$"p.value"
fisher.stat[i,j]= ft$"estimate"
}
}
# Sanity check: the manual hypergeometric p should agree with fisher.test.
# BUG FIX: `na.rm=T` was passed to print() instead of mean(), so the check
# printed NA whenever any cell of the matrices was NA.
print(mean(abs(fisher.p-fisher.p0),na.rm=TRUE)) ## to check if my calculation is correct
fisher.p=signif(fisher.p,2)
# Blank the redundant upper triangle; this coerces the matrix to character,
# which is fine because it is only written to CSV below.
fisher.p[upper.tri(fisher.p,diag=TRUE)]="*"
fisher.stat[upper.tri(fisher.stat,diag=TRUE)]= 0
if(jj==1)
{
write.csv(fisher.p,file=paste("fisher_pvalue_increasing_B6_",selB6,".csv",sep=""),quote=FALSE)
pheatmap(fisher.stat,scale="none",cluster_cols = FALSE,cluster_rows=FALSE,main=paste("overlap of increasing peaks in",TYPE,"(odds ratio)"))
} else
{
write.csv(fisher.p,file=paste("fisher_pvalue_decreasing_B6_",selB6,".csv",sep=""),quote=FALSE)
pheatmap(fisher.stat,scale="none",cluster_cols = FALSE,cluster_rows=FALSE,main=paste("overlap of decreasing peaks in ",TYPE,"(odds ratio)"))
}
}
# Close the figure device opened earlier in the script.
dev.off()
#### Do pathway analysis using immune genes ####
# Hypergeometric enrichment of the differential genes' human orthologs against
# two gene-set collections: (1) PBMC cell-type signatures, (2) immune modules.
library("biomaRt")
load("../../ATACseq/data/biomaRt_human_mouse.Rdata")
load("../../ATACseq/data/mousehumangene_annotation.Rdata")
# all.path.res[[1]] = cell-type annotation results, [[2]] = immune-module results.
all.path.res=vector("list",2)
for(pathwaytype in 1:2)
{
# Two mutually exclusive flags select which gene-set collection is used below.
if(pathwaytype==1)
{
immunemodule=FALSE
celltype.annotation=TRUE
}else{
immunemodule=TRUE
celltype.annotation=FALSE
}
# enrichpath[[N]][[k]]: N = direction (1 all, 2 increasing-only, 3
# decreasing-only), k = tissue/gender/type category.
enrichpath=vector("list",3)
for(i in 1:3) enrichpath[[i]]=vector("list",length(tissue.gender.type))
for(N in 1:3)
{
directionsel=N
for(k in 1:length(tissue.gender.type))
{
temptissue=tissue.gender.type[k]
# Peek at the file first; only proceed when it has more than 2 values.
scanfile=scan(paste(topgene,temptissue,".txt",sep=""))
if(length(scanfile)>2)
{
diff.gene=as.matrix(read.table(paste(topgene,temptissue,".txt",sep=""),header=F))### differential gene
# Column 3 is presumably the logFC; filter by sign for direction 2/3.
if(directionsel==2) diff.gene=rbind(diff.gene[diff.gene[,3]>0,])
if(directionsel==3) diff.gene=rbind(diff.gene[diff.gene[,3]<0,])
if(nrow(diff.gene)>1)
{
# Map mouse Entrez IDs to human HGNC symbols via biomaRt.
genesV2 = getLDS(attributes = c("entrezgene"), filters = "entrezgene", values = diff.gene[,1] , mart = mouse, attributesL = c("hgnc_symbol"), martL = human, uniqueRows=T)
}
# NOTE(review): if nrow(diff.gene)<=1, genesV2 is not reassigned and the
# value left over from a previous iteration is reused here -- likely a bug.
if(nrow(genesV2)>1)
{
genesV2[,2]=toupper(genesV2[, 2])
gene.and.CA=diff.gene[match(genesV2[,1],diff.gene[,1]),]
# NOTE(review): the comparison below is evaluated and discarded (no-op);
# presumably a leftover interactive consistency check.
gene.and.CA[,1]==genesV2[,1]
human.diff.gene <- unique(genesV2[, 2]) ### human ortholog of the differential gene
write.table(human.diff.gene,file=paste(topgene,temptissue,"human.txt",sep=""),quote=F,row.names=F,col.names=F)
allpath=NULL
# NOTE(review): result discarded; another leftover interactive check.
mean(human.diff.gene %in% gene.universe)
if(immunemodule)
{
version="2008"#"2015"
all.gene=as.matrix(read.table(paste("../../ATACseq/data/immunemodule/VP",version,"_Modules_genes.txt",sep=""),header=T))
path.annotation=as.matrix(read.csv(paste("../../ATACseq/data/immunemodule/VP",version,"_Modules_annotations.csv",sep="")))
}
if(celltype.annotation)
{
# Provides geneset.genes.scrnaseq_pbmc_specific and
# geneset.names.scrnaseq_pbmc_specific.
load("../../ATACseq/data/pbmc_specific_genes.annotations_April.2018.EJM_10x.RData")
all.gene= geneset.genes.scrnaseq_pbmc_specific
path.annotation= geneset.names.scrnaseq_pbmc_specific[,c(2,1)]
}
# One-sided hypergeometric enrichment p-value per pathway.
# NOTE(review): the loop index `i` shadows the earlier enrichpath index use.
pathid=unique(all.gene[,1])
pathp=rep(NA,length(pathid))
for(i in 1:length(pathid))
{
path.gene=toupper(all.gene[all.gene[,1]==pathid[i],2])### all genes in pathway i
total=length(unique(gene.universe)) ### all human ortholog genes
white=length(unique(intersect(path.gene,gene.universe))) ### all genes in pathway i in universe
black=total-white
pick=length(human.diff.gene)
intersection=intersect(human.diff.gene,path.gene)
lintersection= length(intersection )
whitepick=lintersection-1
pathp[i]= 1-phyper(whitepick,white,black,pick)
# NOTE(review): `temp` is computed but never used afterwards.
temp=match(intersection,genesV2[,2])
}
# Pair each pathway's human-readable name with its p-value.
allpath=rbind(allpath,cbind(path.annotation[ match(pathid,path.annotation[,1]), 2],pathp))
# Drop unnamed/null pathways (note: `pick` is reused here for a different
# purpose than the hypergeometric `pick` above).
pick=rep(NA,nrow(allpath))
for(i in 1:length(pick))
pick[i]=pick_null_pathway(allpath[i,1])
allpath=allpath[!pick,]
print(paste("all pathways that are named with fdr<0.05 for",temptissue))
# Keep pathways passing BH-FDR < 0.05 and with a known annotation.
enrichpath[[N]][[k]]=rbind(allpath[p.adjust(as.numeric(allpath[,2]),"fdr")<0.05 & allpath[,1]!="Unknown",])
print(enrichpath[[N]][[k]])
}
}
}
}
all.path.res[[pathwaytype]]=enrichpath
}
#### draw plots of pathway analysis results ####
# Save the enrichment results and open a PDF device; file names depend on the
# strain selected by the global flag `selB6` (B6 vs NZO).
if(selB6)
{
save(all.path.res,file="enrichpathwayB6.Rdata")
pdf(file="pathwayplotB6.pdf")
}else{
save(all.path.res,file="enrichpathwayNZO.Rdata")
pdf(file="pathwayplotNZO.pdf")
}
# The sourced script is expected to draw its figures into the open PDF device.
source("../../ATACseq/src/plot.r")
dev.off()
|
9767d07af36d574e28731c9336eabe85fb05a738 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /multivariance/inst/testfiles/doubleCenterBiasCorrectedUpperLower/libFuzzer_doubleCenterBiasCorrectedUpperLower/doubleCenterBiasCorrectedUpperLower_valgrind_files/1612796396-test.R | 946d12d2b02ccdf8a1c3491f369907327cad59d6 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 505 | r | 1612796396-test.R | testlist <- list(n = 0L, x = structure(c(2.84809454419421e-306, 1.30294416220416e-284, 8.17853591442822e-227, 1.19601978825194e-304, 1.4916681464354e-154, 5.41108927834472e-312, 3.22057684190183e-231, 1.41131393662151e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 9L)))
# Invoke the internal compiled routine under test with the fuzzer-generated
# argument list defined above.
result <- do.call(multivariance:::doubleCenterBiasCorrectedUpperLower,testlist)
# Print the structure of the result for the (valgrind) harness log.
str(result)
e1927013a093595edb15e389747f1ab4ef5e3756 | 4bc92e9b2ef6c43b1da7cb76621baa8fec73c66a | /exploratory-data-analysis/week1/plot1.R | 35165fe767fc59ac5152b3abb9e934f3f44522b9 | [] | no_license | aditya1kismatrao/datasciencecoursera-2 | f6f0ea6a8c87c7d0b00d97b893893e889e876d8a | 6fb76479bf8d012019420fe145e9af9ed7ab207c | refs/heads/master | 2020-03-29T13:31:37.846353 | 2016-07-13T08:20:52 | 2016-07-13T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 588 | r | plot1.R | data <- read.table("exdata_data_household_power_consumption/
household_power_consumption.txt",
sep=';', header=T, nrows=2075259)
# Parse the d/m/Y date strings into Date objects so they can be compared.
data$Date <- as.Date(data$Date, '%d/%m/%Y')
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
data <- data[data$Date >= '2007-02-01' & data$Date <= '2007-02-02',]
# Plot a histogram of global active power and save it as plot1.png (480x480).
#
# Args:
#   data: data.frame with a `Global_active_power` column; values may be
#         character (the source file uses "?" for missing readings).
# Returns: invisibly, the result of closing the png device.
plot1 <- function(data){
    # Coerce via character -> numeric; unparseable entries become NA.
    x <- as.numeric(as.character(data$Global_active_power))
    png(filename = 'plot1.png', width = 480, height = 480,
        units = 'px')
    # BUG FIX: the histogram previously referenced an undefined object `GAP`,
    # so calling plot1() failed; plot the converted vector `x` instead.
    # Labels corrected as well: this column is Global *Active* Power.
    hist(x, col='red', xlab = 'Global Active Power (kilowatts)',
         main = 'Global Active Power')
    invisible(dev.off())
}
# Render and save the histogram for the filtered two-day subset.
plot1(data)
|
94050183d27982ad370e854c97872955d36fd33f | 995b7197ebed2e02ed6d1db376b9c48cc17aeb7a | /cardRA.R | 81d0023d29da6862efde144488864f6b73abf7fb | [] | no_license | lucasRemera/Cartographie | 87a70058be8f486e374cfbc6acfd8907e7c6d565 | 17fbafacaa19c49e407f4db4e506c9ce109585eb | refs/heads/master | 2020-03-11T15:13:34.172648 | 2018-05-29T14:29:17 | 2018-05-29T14:29:17 | 130,077,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,400 | r | cardRA.R | library(ggplot2)
# Load the saved department boundary objects; verbose=T prints the object
# name(s) each .RData file provides.
load("rhone.RData",verbose = T)
load("isere.RData",verbose = T)
load("loire.RData",verbose = T)
load("ain.RData",verbose = T)
# Flatten each spatial object into a plain data.frame of polygon vertices
# (long/lat/group columns) that ggplot2's geom_polygon can draw.
ggrhone=fortify(rhone)
ggloire=fortify(loire)
ggisere=fortify(isere)
ggain=fortify(a2) #departement in polygon format (the Ain file provides `a2`)
# Superseded by plotRA() below; kept for reference.
# AR=1/cos(mean(ggisere$lat)*pi/180)
# g2=ggplot()+ geom_polygon(data=ggrhone, aes(long, lat, group = group),colour = alpha("black", 1/2), size = 0.7, fill = 'grey', alpha = .3)+
#   geom_polygon(data=ggisere, aes(long, lat, group = group), colour = alpha("black", 1/2), size = 0.7, fill = 'grey', alpha = .3)+
#   geom_polygon(data=ggain, aes(long, lat, group = group), colour = alpha("black", 1/2), size = 0.7, fill = 'grey', alpha = .3)+
#   geom_polygon(data=ggloire, aes(long, lat, group = group), colour = alpha("black", 1/2), size = 0.7, fill = 'grey', alpha = .3)+
#   coord_fixed(ratio=AR) #the card of Rhone-Alpes region
# Get the map of the Rhone-Alpes region (see plotRA below).
# Draw the Rhone-Alpes region as stacked grey polygons, one layer per
# department, optionally with a latitude-corrected aspect ratio.
#
# region: list of fortified polygon data.frames (long/lat/group columns).
# ar: when TRUE, fix the lon/lat aspect ratio so distances look true.
# transparency: fill alpha of each department polygon.
# Returns the assembled ggplot object.
plotRA=function(region=list(ggrhone,ggisere,ggloire,ggain),ar=TRUE,transparency=0.3){
  # One polygon layer per department, stacked onto a blank ggplot.
  dept_layers <- lapply(region, function(dept) {
    geom_polygon(
      data = dept, aes(long, lat, group = group),
      colour = alpha("black", 1/2), size = 0.7,
      fill = 'grey', alpha = transparency
    )
  })
  plot_obj <- Reduce(`+`, dept_layers, ggplot())
  if (ar) {
    # 1/cos(mean latitude) approximates an equal-distance lon/lat ratio.
    lat_ratio <- 1 / cos(mean(region[[1]]$lat) * pi / 180)
    plot_obj <- plot_obj + coord_fixed(ratio = lat_ratio)
  }
  plot_obj
}
# ril=rbind(ggrhone,ggisere,ggloire,ggain)
# prec=200
# mx=seq(min(ril[,1])-0.05,max(ril[,1]),length.out = prec)
# my=seq(min(ril[,2])-0.05,max(ril[,2]),length.out = prec)
# mm=expand.grid(mx,my)
# mIn=expand.grid(mx,my)
# library(sp)
# inRegion=which( point.in.polygon(mIn[,1],mIn[,2],ggrhone[,1],ggrhone[,2])>0 |
# point.in.polygon(mIn[,1],mIn[,2],ggisere[,1],ggisere[,2])>0|
# point.in.polygon(mIn[,1],mIn[,2],ggain[,1],ggain[,2])>0|
# point.in.polygon(mIn[,1],mIn[,2],ggloire[,1],ggloire[,2])>0)
#
#
# mIn=mIn[inRegion,]
# colnames(mIn)=c("x","y")
# #mIn
# Discretize one or more department polygons onto a regular lon/lat grid.
#
# Builds a `precision` x `precision` grid spanning the bounding box of all
# supplied fortified polygons (with a small 0.05-degree margin on the lower
# edges, matching the original), then keeps only the grid points falling
# inside at least one polygon.
#
# region: list of fortified polygons; columns 1-2 are long/lat.
# precision: number of grid steps per axis.
# Returns a data.frame with columns "x" (longitude) and "y" (latitude).
discreteRegion=function(region=list(ggrhone,ggisere,ggloire,ggain),precision=200){
  all_pts <- do.call(rbind, region)
  grid_x <- seq(min(all_pts[, 1]) - 0.05, max(all_pts[, 1]), length.out = precision)
  grid_y <- seq(min(all_pts[, 2]) - 0.05, max(all_pts[, 2]), length.out = precision)
  # (Removed a duplicate, unused expand.grid() result named `mm`.)
  grid_pts <- expand.grid(grid_x, grid_y)
  in_region <- rep(FALSE, nrow(grid_pts))
  for (poly in region) {
    # point.in.polygon() comes from the sp package; call it with an explicit
    # namespace because the library(sp) line above is commented out.
    in_region <- in_region | sp::point.in.polygon(grid_pts[, 1], grid_pts[, 2], poly[, 1], poly[, 2]) > 0
  }
  grid_pts <- grid_pts[in_region, ]
  colnames(grid_pts) <- c("x", "y")
  grid_pts
}
|
dd007d0099ca4c060b34127a96e91c218ea8dc3e | 3e5d8d362b3367e4ff0e152b0242b7a285d8484f | /R/resample_cifti.R | 0cd0f3a9d570be174761b2041a3f32a2b7c24a40 | [] | no_license | mandymejia/ciftiTools | d591a6e8732dd9df17dd62d959a7a808eee16bef | 7becc99a6301c47541c883739f7fb2f0f3413e60 | refs/heads/master | 2023-08-17T06:07:35.229385 | 2023-01-23T20:00:17 | 2023-01-23T20:00:17 | 241,136,369 | 30 | 10 | null | 2023-08-21T21:47:22 | 2020-02-17T15:06:01 | HTML | UTF-8 | R | false | false | 16,954 | r | resample_cifti.R | #' \code{resample_cifti} wrapper
#'
#' Calls \code{resample_cifti} using the original file names
#' listed in the \code{original_fnames} argument and the target file names
#' listed in the \code{resamp_fnames} argument.
#'
#' Currently used by read_cifti and resample_cifti.
#'
#' @inheritParams original_fnames_Param_resampled
#' @param resamp_fnames Where to write the resampled files. This is a named list
#' where each entry's name is a file type label, and each entry's value
#' is a file name indicating where to write the corresponding resampled file.
#' The recognized file type labels are: "cortexL", "cortexR",
#' "ROIcortexL", "ROIcortexR", "validROIcortexL", and "validROIcortexR".
#'
#' Entry values can be \code{NULL}, in which case a default file name will be
#' used: see \code{\link{resample_cifti_default_fname}}. Default file names
#' will also be used for files that need to be resampled/written but without a
#' corresponding entry in \code{resamp_fnames}.
#'
#' Entries in \code{resamp_fnames} will be ignored if they are not needed
#' based on \code{[ROI_]brainstructures}. For example, if
#' \code{brainstructures="left"}, then \code{resamp_fnames$cortexR} will be
#' ignored if specified.
#'
#' The \code{write_dir} argument can be used to place each resampled file in
#' the same directory.
#' @param original_res The original resolution(s) of the CIFTI cortical surface(s).
#' @inheritParams resamp_res_Param_required
#' @inheritParams surfL_fname_Param
#' @inheritParams surfR_fname_Param
#' @param surfL_target_fname,surfR_target_fname (Optional) File path for
#' the resampled GIFTI surface geometry file representing the left/right
#' cortex. If NULL (default),
#' @inheritParams read_dir_Param_separated
#' @inheritParams write_dir_Param_generic
#'
#' @return The return value of the \code{resample_cifti} call
#'
#' @keywords internal
#'
resample_cifti_wrapper <- function(
  original_fnames, resamp_fnames=NULL,
  original_res,
  resamp_res, resamp_method=c("barycentric", "adaptive"),
  areaL_original_fname=NULL, areaR_original_fname=NULL,
  surfL_original_fname=NULL, surfR_original_fname=NULL,
  surfL_target_fname=NULL, surfR_target_fname=NULL,
  read_dir=NULL, write_dir=NULL) {
  # Start from the non-fname arguments, which are forwarded verbatim.
  args <- list(
    original_res=original_res,
    resamp_res=resamp_res, resamp_method=resamp_method,
    areaL_original_fname=areaL_original_fname,
    areaR_original_fname=areaR_original_fname,
    surfL_original_fname=surfL_original_fname,
    surfR_original_fname=surfR_original_fname,
    surfL_target_fname=surfL_target_fname,
    surfR_target_fname=surfR_target_fname,
    read_dir=read_dir, write_dir=write_dir
  )
  # File-type labels recognized by resample_cifti_components: the prefix
  # (text before the first underscore) of each of its "*fname*" arguments.
  fname_args <- get_kwargs(resample_cifti_components)
  fname_args <- fname_args[grepl("fname", fname_args, fixed=TRUE)]
  fname_labs <- unique(gsub("_.*", "", fname_args))
  # Validate the labels of the original files, then map each entry onto the
  # corresponding "<label>_original_fname" argument.
  if (!is.null(original_fnames)) {
    match_input(names(original_fnames), fname_labs,
      user_value_label="original_fnames")
    args[paste0(names(original_fnames), "_original_fname")] <- original_fnames
  }
  # Same for the resampled/target files ("<label>_target_fname").
  if (!is.null(resamp_fnames)) {
    match_input(names(resamp_fnames), fname_labs,
      user_value_label="resamp_fnames")
    args[paste0(names(resamp_fnames), "_target_fname")] <- resamp_fnames
  }
  # Drop unset (NULL) arguments and delegate the actual work.
  args <- Filter(Negate(is.null), args)
  do.call(resample_cifti_components, args)
}
#' Resample CIFTI data
#'
#' Performs spatial resampling of CIFTI data on the cortical surface
#' by separating it into GIFTI and NIFTI files, resampling the GIFTIs, and then
#' putting them together. (The subcortex is not resampled.)
#'
#' Can accept a \code{"xifti"} object as well as a path to a CIFTI-file.
#'
#' @param x The CIFTI file name or \code{"xifti"} object to resample. If
#' \code{NULL}, the result will be a \code{"xifti"} with resampled surfaces
#' given by \code{surfL_original_fname} and \code{surfR_original_fname}.
#' @param cifti_target_fname File name for the resampled CIFTI. Will be placed
#' in \code{write_dir}. If \code{NULL}, will be written to "resampled.d*.nii".
#' \code{write_dir} will be appended to the beginning of the path.
#' @param surfL_original_fname,surfR_original_fname (Optional) Path to a GIFTI
#' surface geometry file representing the left/right cortex. One or both can be
#' provided. These will be resampled too, and are convenient for visualizing
#' the resampled data.
#'
#' If \code{x} is a \code{"xifti"} object with surfaces, these arguments
#' will override the surfaces in the \code{"xifti"}.
#' @param surfL_target_fname,surfR_target_fname (Optional) File names for the
#' resampled GIFTI surface geometry files. Will be placed in \code{write_dir}.
#' If \code{NULL} (default), will use default names created by
#' \code{\link{resample_cifti_default_fname}}.
#' @inheritParams resamp_res_Param_required
#' @inheritParams resamp_method_Param
#' @inheritParams resamp_area_Param
#' @param write_dir Where to write the resampled CIFTI (and surfaces if present.)
#' If \code{NULL} (default), will use the current working directory if \code{x}
#' was a CIFTI file, and a temporary directory if \code{x} was a \code{"xifti"}
#' object.
#' @param mwall_values If the medial wall locations are not indicated in the
#' CIFTI, use these values to infer the medial wall mask. Default:
#' \code{c(NA, NaN)}. If \code{NULL}, do not attempt to infer the medial wall.
#'
#' Correctly indicating the medial wall locations is important for resampling,
#' because the medial wall mask is taken into account during resampling
#' calculations.
#' @inheritParams verbose_Param_TRUE
#'
#' @return A named character vector of written files: \code{"cifti"} and
#' potentially \code{"surfL"} (if \code{surfL_original_fname} was provided)
#' and/or \code{"surfR"} (if \code{surfR_original_fname} was provided).
#'
#' @family common
#' @export
#'
#' @section Connectome Workbench:
#' This function interfaces with the \code{"-metric-resample"}, \code{"-label-resample"},
#' and/or \code{"-surface-resample"} Workbench commands, depending on the input.
#'
resample_cifti <- function(
  x=NULL, cifti_target_fname=NULL,
  surfL_original_fname=NULL, surfR_original_fname=NULL,
  surfL_target_fname=NULL, surfR_target_fname=NULL,
  resamp_res, resamp_method=c("barycentric", "adaptive"),
  areaL_original_fname=NULL, areaR_original_fname=NULL,
  write_dir=NULL, mwall_values=c(NA, NaN), verbose=TRUE) {
  # Handle if no data ----------------------------------------------------------
  # With no CIFTI input, either there is nothing to do (warn + NULL), or only
  # the provided GIFTI surfaces are read in and resampled.
  if (is.null(x)) {
    if (is.null(surfL_original_fname) && is.null(surfR_original_fname)) {
      warning("`x`, `surfL_original_fname` and `surfR_original_fname` were all NULL: Nothing to resample!\n")
      return(NULL)
    }
    return(read_cifti(
      surfL_fname=surfL_original_fname,
      surfR_fname=surfR_original_fname,
      resamp_res=resamp_res
    ))
  }
  input_is_xifti <- is.xifti(x, messages=FALSE)
  # A "xifti" with no data components: just (attach and) resample its surfaces
  # in memory, without any file round-trip.
  if (input_is_xifti && all(vapply(x$data, is.null, FALSE))) {
    x <- add_surf(x, surfL=surfL_original_fname, surfR=surfR_original_fname)
    if (!is.null(x$surf$cortex_left)) {
      x$surf$cortex_left <- resample_surf(x$surf$cortex_left, resamp_res, "left")
    }
    if (!is.null(x$surf$cortex_right)) {
      x$surf$cortex_right <- resample_surf(x$surf$cortex_right, resamp_res, "right")
    }
    return(x)
  }
  # Args check -----------------------------------------------------------------
  # NOTE(review): scalar context -- `&&` would be the conventional operator here.
  if (is.null(write_dir) & input_is_xifti) { write_dir <- tempdir() }
  stopifnot(resamp_res > 0)
  # Track whether surfaces were pulled out of the "xifti" (and so should be
  # attached to the returned object at the end).
  surfL_return <- surfR_return <- FALSE
  if (verbose) { exec_time <- Sys.time() }
  # Setup ----------------------------------------------------------------------
  if (input_is_xifti) {
    # Check intent. Treat unknown intents as dscalar.
    x_intent <- x$meta$cifti$intent
    if (!is.null(x_intent) && (x_intent %in% supported_intents()$value)) {
      x_extn <- supported_intents()$extension[supported_intents()$value == x_intent]
    } else {
      warning("The CIFTI intent was unknown, so resampling as a dscalar.")
      x_extn <- "dscalar.nii"
    }
    # Write out the CIFTI (to tempdir) so the Workbench tools can operate on it.
    cifti_original_fname <- file.path(tempdir(), paste0("to_resample.", x_extn))
    write_cifti(x, cifti_original_fname, verbose=FALSE)
    # Set the target CIFTI file name.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- basename(gsub(
        "to_resample.", "resampled.", cifti_original_fname, fixed=TRUE
      ))
    }
    # Get the surfaces present: write each out as a GIFTI so it can be
    # resampled alongside the data, and remember to re-attach it later.
    if (is.null(surfL_original_fname) && !is.null(x$surf$cortex_left)) {
      surfL_return <- TRUE
      surfL_original_fname <- file.path(tempdir(), "left.surf.gii")
      write_surf_gifti(x$surf$cortex_left, surfL_original_fname, hemisphere="left")
    }
    if (is.null(surfR_original_fname) && !is.null(x$surf$cortex_right)) {
      surfR_return <- TRUE
      surfR_original_fname <- file.path(tempdir(), "right.surf.gii")
      write_surf_gifti(x$surf$cortex_right, surfR_original_fname, hemisphere="right")
    }
    # Derive the brain structures to process from the data components present.
    cifti_info <- x$meta
    brainstructures <- vector("character")
    if (!is.null(x$data$cortex_left)) { brainstructures <- c(brainstructures, "left") }
    if (!is.null(x$data$cortex_right)) { brainstructures <- c(brainstructures, "right") }
    if (!is.null(x$data$subcort)) { brainstructures <- c(brainstructures, "subcortical") }
    ROI_brainstructures <- brainstructures
    original_res <- infer_resolution(x)
    if (!is.null(original_res) && any(original_res < 2 & original_res > 0)) {
      warning("The CIFTI resolution is already too low (< 2 vertices). Skipping resampling.")
      return(x)
    }
  } else {
    # Check that the original file is valid.
    cifti_original_fname <- x
    stopifnot(file.exists(cifti_original_fname))
    cifti_info <- info_cifti(cifti_original_fname)
    brainstructures <- ROI_brainstructures <- cifti_info$cifti$brainstructures
    # Check that the resolutions match
    # Set the target CIFTI file name.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- paste0("resampled.", get_cifti_extn(cifti_original_fname))
    }
    original_res <- infer_resolution(cifti_info)
    if (!is.null(original_res) && any(original_res < 2 & original_res > 0)) {
      warning("The CIFTI resolution is already too low (< 2 vertices). Skipping resampling.")
      return(NULL)
    }
  }
  cifti_target_fname <- format_path(cifti_target_fname, write_dir, mode=2)
  # Check that at least one surface is present.
  if (!("left" %in% brainstructures || "right" %in% brainstructures)) {
    warning("The CIFTI does not have cortical data, so there's nothing to resample.")
    if (input_is_xifti) { return(x) } else { return(NULL) }
  }
  # Separate the CIFTI ---------------------------------------------------------
  # Split the CIFTI into per-structure GIFTI/NIFTI files (in tempdir); to_cif
  # is a named vector of the written file paths.
  if (verbose) { cat("Separating CIFTI file.\n") }
  to_cif <- separate_cifti_wrapper(
    cifti_fname=cifti_original_fname,
    brainstructures=brainstructures, ROI_brainstructures=ROI_brainstructures,
    sep_fnames=NULL, write_dir=tempdir()
  )
  if (verbose) {
    print(Sys.time() - exec_time)
    exec_time <- Sys.time()
  }
  # Handle medial wall values --------------------------------------------------
  # Infer/repair the medial wall ROI in-place (input and output paths are the
  # same file) so resampling accounts for the medial wall mask.
  if (!is.null(mwall_values)) {
    if ("left" %in% brainstructures) {
      fix_gifti_mwall(
        to_cif["cortexL"], to_cif["cortexL"],
        to_cif["ROIcortexL"], to_cif["ROIcortexL"],
        mwall_values
      )
    }
    if ("right" %in% brainstructures) {
      fix_gifti_mwall(
        to_cif["cortexR"], to_cif["cortexR"],
        to_cif["ROIcortexR"], to_cif["ROIcortexR"],
        mwall_values
      )
    }
  }
  # resample_cifti_components() ------------------------------------------------
  # Do not resample the subcortical data.
  to_resample <- to_cif[!grepl("subcort", names(to_cif))]
  if (verbose) { cat("Resampling CIFTI file.\n") }
  # Do resample_cifti_components.
  resamp_result <- resample_cifti_wrapper(
    original_res=original_res,
    resamp_res=resamp_res, resamp_method=resamp_method,
    areaL_original_fname=areaL_original_fname,
    areaR_original_fname=areaR_original_fname,
    original_fnames=to_resample, resamp_fnames=NULL,
    surfL_original_fname=surfL_original_fname,
    surfR_original_fname=surfR_original_fname,
    surfL_target_fname=surfL_target_fname,
    surfR_target_fname=surfR_target_fname,
    read_dir=NULL, write_dir=tempdir()
  )
  # Replace resampled files: swap in the resampled path for every component
  # that was resampled, keeping the (unresampled) subcortical entries.
  to_cif[names(to_cif) %in% names(resamp_result)] <- resamp_result[names(to_cif)[names(to_cif) %in% names(resamp_result)]]
  # Copy resampled surface files to desired file paths.
  if (!is.null(surfL_original_fname)) {
    surfL_target_fname_old <- resamp_result["surfL"]
    surfL_target_fname <- format_path(basename(surfL_target_fname_old), write_dir, mode=2)
    file.copy(surfL_target_fname_old, surfL_target_fname)
  }
  if (!is.null(surfR_original_fname)) {
    surfR_target_fname_old <- resamp_result["surfR"]
    surfR_target_fname <- format_path(basename(surfR_target_fname_old), write_dir, mode=2)
    file.copy(surfR_target_fname_old, surfR_target_fname)
  }
  if (verbose) {
    print(Sys.time() - exec_time)
    exec_time <- Sys.time()
  }
  # Put together ---------------------------------------------------------------
  # Create target CIFTI dense timeseries from the separated components.
  if (verbose) cat("Merging components into a CIFTI file... \n")
  to_cif <- to_cif[names(to_cif) != "ROIsubcortVol"]
  wcfs_kwargs <- c(list(cifti_fname=cifti_target_fname), as.list(to_cif))
  do.call(write_cifti_from_separate, wcfs_kwargs)
  if (verbose) {
    print(Sys.time() - exec_time)
    exec_time <- Sys.time()
  }
  # Return results -------------------------------------------------------------
  # For "xifti" input, read the resampled CIFTI back into a "xifti" (attaching
  # resampled surfaces if they came from the input); for file input, return the
  # written file paths.
  if (input_is_xifti) {
    read_xifti_args <- list(
      cifti_fname = cifti_target_fname,
      brainstructures = brainstructures
    )
    if (surfL_return) { read_xifti_args$surfL_fname <- surfL_target_fname }
    if (surfR_return) { read_xifti_args$surfR_fname <- surfR_target_fname }
    return(do.call(read_xifti, read_xifti_args))
  } else {
    return(unlist(list(
      cifti=cifti_target_fname,
      surfL=surfL_target_fname, surfR=surfR_target_fname
    )))
  }
}
#' @rdname resample_cifti
#' @export
# Alias for resample_cifti() (camelCase naming); forwards every argument
# unchanged.
resampleCIfTI <- function(
  x=NULL, cifti_target_fname=NULL,
  surfL_original_fname=NULL, surfR_original_fname=NULL,
  surfL_target_fname=NULL, surfR_target_fname=NULL,
  resamp_res, resamp_method=c("barycentric", "adaptive"),
  areaL_original_fname=NULL, areaR_original_fname=NULL,
  write_dir=NULL, mwall_values=c(NA, NaN), verbose=TRUE) {
  resample_cifti(
    x=x, cifti_target_fname=cifti_target_fname,
    surfL_original_fname=surfL_original_fname, surfR_original_fname=surfR_original_fname,
    surfL_target_fname=surfL_target_fname, surfR_target_fname=surfR_target_fname,
    resamp_res=resamp_res, resamp_method=resamp_method,
    areaL_original_fname=areaL_original_fname, areaR_original_fname=areaR_original_fname,
    write_dir=write_dir, mwall_values=mwall_values, verbose=verbose
  )
}
#' @rdname resample_cifti
#' @export
# Alias for resample_cifti() (short "cii" naming); forwards every argument
# unchanged.
resamplecii <- function(
  x=NULL, cifti_target_fname=NULL,
  surfL_original_fname=NULL, surfR_original_fname=NULL,
  surfL_target_fname=NULL, surfR_target_fname=NULL,
  resamp_res, resamp_method=c("barycentric", "adaptive"),
  areaL_original_fname=NULL, areaR_original_fname=NULL,
  write_dir=NULL, mwall_values=c(NA, NaN), verbose=TRUE) {
  resample_cifti(
    x=x, cifti_target_fname=cifti_target_fname,
    surfL_original_fname=surfL_original_fname, surfR_original_fname=surfR_original_fname,
    surfL_target_fname=surfL_target_fname, surfR_target_fname=surfR_target_fname,
    resamp_res=resamp_res, resamp_method=resamp_method,
    areaL_original_fname=areaL_original_fname, areaR_original_fname=areaR_original_fname,
    write_dir=write_dir, mwall_values=mwall_values, verbose=verbose
  )
}
#' @rdname resample_cifti
#' @export
# Alias for resample_cifti() ("xifti" naming); forwards every argument
# unchanged.
resample_xifti <- function(
  x=NULL, cifti_target_fname=NULL,
  surfL_original_fname=NULL, surfR_original_fname=NULL,
  surfL_target_fname=NULL, surfR_target_fname=NULL,
  resamp_res, resamp_method=c("barycentric", "adaptive"),
  areaL_original_fname=NULL, areaR_original_fname=NULL,
  write_dir=NULL, mwall_values=c(NA, NaN), verbose=TRUE) {
  resample_cifti(
    x=x, cifti_target_fname=cifti_target_fname,
    surfL_original_fname=surfL_original_fname, surfR_original_fname=surfR_original_fname,
    surfL_target_fname=surfL_target_fname, surfR_target_fname=surfR_target_fname,
    resamp_res=resamp_res, resamp_method=resamp_method,
    areaL_original_fname=areaL_original_fname, areaR_original_fname=areaR_original_fname,
    write_dir=write_dir, mwall_values=mwall_values, verbose=verbose
  )
}
|
05470e3c1c099482aac48b41a4920472dcd1ce6b | 7d18e60a7da4b47b43bf9a90f0e7b47903e26c68 | /task1/submitReview/ATTEvaluation.R | 4396d0efa43653d4f19a2b5220ec421054ae8776 | [] | no_license | danranyiyu123456/Black-Swan | 74149e7667b3495b44507cac66382e637afcbd27 | 571839ff218b4b7f6dd999842b67f6230aa0c98c | refs/heads/master | 2022-11-23T03:49:52.482740 | 2020-08-01T05:52:34 | 2020-08-01T05:52:34 | 284,000,634 | 0 | 0 | null | 2020-07-31T09:53:42 | 2020-07-31T09:53:41 | null | UTF-8 | R | false | false | 793 | r | ATTEvaluation.R | ###################################################
#####The model evaluation
###################################################
# Mean absolute percentage error between observed y and predictions yhat.
# (Undefined when any y == 0, same as the original one-liner.)
mapeFunc <- function(y, yhat) {
  pct_err <- (y - yhat) / y
  mean(abs(pct_err))
}
### The function for caret tuning
# Summary function in the caret API shape (for trainControl(summaryFunction=)).
# caret passes a data.frame holding observed ("obs") and predicted ("pred")
# values; "lev" and "model" are required by the API but unused here.
# Returns a length-1 numeric named "MAPE".
mapeSummary <- function(data, lev = NULL, model = NULL) {
  ape <- abs((data$obs - data$pred) / data$obs)
  setNames(mean(ape), "MAPE")
}
### The function for xgboost
# MAPE-style evaluation metric in the xgboost custom-feval format.
mapeXgb <- function(preds, dtrain) {
  # The observed labels are stored on the xgb.DMatrix handle.
  obs <- getinfo(dtrain, "label")
  score <- mean(abs((obs - preds) / obs))
  # NOTE(review): the metric is reported under the historical name "madNew"
  # even though the value is a MAPE; keep the name, callers may match on it.
  list(metric = "madNew", value = score)
}
### The function for lightgbm
# MAPE-style evaluation metric in the lightgbm custom-feval format.
mapelgb <- function(preds, dtrain) {
  obs <- getinfo(dtrain, "label")
  score <- mean(abs((obs - preds) / obs))
  # lightgbm expects name/value plus whether larger values are better.
  list(name = "error", value = score, higher_better=FALSE)
}
|
598fe5aa6ddb3af7d0efe826ca992861b2bbc67f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rfml/examples/ml.load.sample.data.Rd.R | 3d556de7f6cd5930cadcdee530cbb0a5dea66a1f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | r | ml.load.sample.data.Rd.R | library(rfml)
### Name: ml.load.sample.data
### Title: Load sample data set into MarkLogic server
### Aliases: ml.load.sample.data
### ** Examples
## Not run:
##D locConn <- ml.connect()
##D mlBaskets <- ml.load.sample.data(locConn, "baskets")
## End(Not run)
|
30402d9ed9a8219e821f7516b391451bdf9f9ab2 | 631c05e0a714621d4753129d9670ad4c54f3d664 | /0507_outlier.R | 6304245ffc01b545a319e76b60fe9dd33e8a661e | [] | no_license | convin305/Hankyung_academy_R | 3358fc601f3828c7e938498ec7f6e168071be507 | 4b57d37d9209d7e3a12f5086963f0dbe68a3ea61 | refs/heads/main | 2023-05-08T17:52:35.635207 | 2021-06-02T00:32:30 | 2021-06-02T00:32:30 | 372,992,880 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,609 | r | 0507_outlier.R | library(dplyr)
# Load the three-sample data set (Windows path; header row present).
data <- read.csv('c:/ken/data/three_sample.csv',header=T)
data %>% head()
data %>% summary()
# Data cleaning / preprocessing: drop rows with a missing score and keep only
# the method and score columns.
data <- subset(data , !is.na(score),c(method,score))
data
# Inspect outliers visually with simple charts.
plot(data$score)
barplot(data$score)
mean(data$score) # The mean is ~14, so treat scores above 14 as outliers and remove them.
boxplot(data$score)
# Remove the outliers.
length(data$score)
data2 <- subset(data,score <= 14)
data2$score %>% length()
# Look at the cleaned data.
boxplot(data2$score)
#_____________________________________________________
# Simple linear regression demo over the years 2016-2021.
# NOTE: 60 is far larger than the other values -- presumably an intentional
# outlier to show how one extreme point pulls the fitted line.
val_1 <- c(2.5,3.2,5.7,4.6,5.8,60)
year_1 <- c(2016:2021)
fit_2 <- lm(val_1 ~ year_1)
plot(year_1,val_1)
abline(fit_2,col="blue")
summary(fit_2)
#__________________________________________________
# As an alternative to histograms -- whose shape depends on the chosen start
# and end points (bins) -- let's use the kernel <<density function>>.
# (Fixed: the two explanatory sentences above were bare Korean text without
# a leading '#', which is a syntax error in R; they are now comments.)
# Relevant functions: density(), plot()
# (Fixed: the bare no-argument `density()` / `plot()` calls below would error
# at run time, so they are commented out as notes.)
# density()
# plot()
str(iris)
hist(iris$Sepal.Width)
ds_iris <- density(iris$Petal.Width)
plot(ds_iris) # basic density curve
# To color the interior, start again from the original data.
iris
ds_iris <- density(iris$Petal.Width)
plot(ds_iris,main="확률 밀도") # completed density curve (Korean title kept: "probability density")
polygon(ds_iris,col='red',border = "blue") # fill the interior and draw the border
rug(iris$Sepal.Width,col="brown")
#______________________
x <- iris$Sepal.Length
par(mfrow=c(1,2))
qqnorm(x)
qqline(x,col='red',lwd=2)
# Fixed: TRUE instead of T, and removed a stray trailing comma in the call.
hist(x,breaks = 15,probability = TRUE)
b269fd100024e04f3a4c6f2ea90ac033abef68b8 | 6b3215ae22fb53df23457105f4249e2a7e56bd2e | /man/convergence_precheck.Rd | 314c98c0211b37cbedd26f29ee87ccbac6f31f1e | [] | no_license | cran/lmvar | 26f45be9ebeb93467ae389ec00f034bb8054c239 | a40d38d4d227aa3aade534bae73fb4625d4ae96d | refs/heads/master | 2021-01-22T14:15:23.807165 | 2019-05-16T09:10:10 | 2019-05-16T09:10:10 | 82,301,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,266 | rd | convergence_precheck.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convergence_precheck.R
\name{convergence_precheck}
\alias{convergence_precheck}
\title{Pre-check model matrices for convergence issues}
\usage{
convergence_precheck(y, X_mu, X_sigma)
}
\arguments{
\item{y}{Numeric, response vector y}
\item{X_mu}{Model matrix for the expected values}
\item{X_sigma}{Model matrix for the standard deviations. This must be a full-rank matrix.}
}
\value{
A list with the following members:
\itemize{
\item \code{column_numbers} The numbers of the columns of \code{X_sigma} that can be kept
\item \code{column_names} The names of the columns of \code{X_sigma} that can be kept
}
Numbers and names refer to the same columns. They are supplied both for convenience.
}
\description{
The model matrices \eqn{X_\mu} and \eqn{X_\sigma} are checked to see if problems
with the convergence of the fit can be anticipated. If so, it is determined which columns
must be removed from \eqn{X_\sigma} to attempt to avoid convergence issues.
}
\details{
A model matrix can be of class 'matrix',
'Matrix' or 'numeric' (the latter when the matrix has only one column).
If the model includes an intercept, an intercept term must be present in the model matrices.
}
|
1587f57f223f53d4c08fe28fee47988302f2fd81 | 3d63d1ec4d25fabcb91300a205ab7a5642399c59 | /man/croston_fit_impl.Rd | f8ad0f6930e8e9e3fa31a41e6ea05a1fbb8e6fe4 | [
"MIT"
] | permissive | silverf62/modeltime | c98e702fc0af63c814119393b7c701886d474614 | 61eed2bfc996191cb91c0416bbfeaa62587573e1 | refs/heads/master | 2023-09-02T05:50:26.266276 | 2021-10-27T15:35:32 | 2021-10-27T15:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 614 | rd | croston_fit_impl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parsnip-exp_smoothing.R
\name{croston_fit_impl}
\alias{croston_fit_impl}
\title{Low-Level Exponential Smoothing function for translating modeltime to forecast}
\usage{
croston_fit_impl(x, y, alpha = 0.1, ...)
}
\arguments{
\item{x}{A dataframe of xreg (exogenous regressors)}
\item{y}{A numeric vector of values to fit}
\item{alpha}{Value of alpha. Default value is 0.1.}
\item{...}{Additional arguments passed to \code{forecast::ets}}
}
\description{
Low-Level Exponential Smoothing function for translating modeltime to forecast
}
|
24abc8a921b1ec1dd0f9ec0d3402a887b41d3f0c | 01ecfe640427e47369c344d56d05390736b75c55 | /R-Projects/R-Logical Operations.r | 7df0a21accb4634a353e92e4cb6ae2b2e30180cb | [] | no_license | ysfkymz/R-Projects | 247790a9f5acdecb743ccfa31bdc90f8b431f359 | d1c899d217db2c05c9e40ff2e6ec5e70b7cc63d7 | refs/heads/master | 2020-06-06T11:08:47.599354 | 2019-06-19T12:04:26 | 2019-06-19T12:04:26 | 192,723,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,231 | r | R-Logical Operations.r | #R programming
#16.06.2019
#by Yusuf Kaymaz
#Logical Operators
> attach(cancer)
> sex==2
[1] FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE TRUE
[13] TRUE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE
[25] FALSE TRUE FALSE FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE TRUE
[37] FALSE TRUE FALSE TRUE FALSE TRUE TRUE TRUE FALSE TRUE FALSE FALSE
[49] FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE TRUE FALSE TRUE TRUE
[61] TRUE FALSE FALSE TRUE FALSE FALSE TRUE TRUE FALSE FALSE FALSE TRUE
[73] FALSE FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE TRUE
[85] FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE FALSE TRUE TRUE FALSE
[97] FALSE FALSE FALSE TRUE TRUE TRUE FALSE FALSE FALSE FALSE TRUE FALSE
[109] FALSE TRUE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE
[121] FALSE TRUE TRUE FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE FALSE
[133] FALSE TRUE FALSE TRUE TRUE FALSE FALSE FALSE TRUE FALSE FALSE TRUE
[145] FALSE TRUE FALSE FALSE FALSE TRUE FALSE FALSE TRUE TRUE FALSE FALSE
[157] TRUE FALSE FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE TRUE FALSE
[169] FALSE FALSE FALSE TRUE FALSE TRUE FALSE TRUE FALSE TRUE TRUE TRUE
[181] FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE
[193] FALSE FALSE FALSE FALSE TRUE FALSE TRUE FALSE TRUE FALSE TRUE TRUE
[205] TRUE FALSE TRUE TRUE FALSE TRUE TRUE FALSE FALSE TRUE FALSE FALSE
[217] TRUE FALSE TRUE TRUE FALSE TRUE FALSE FALSE FALSE TRUE FALSE TRUE
> age[sex==2]
[1] 68 71 68 68 56 49 70 69 60 62 66 64 73 59 60 76 50 72 65 58 64 75 65 67 64
[26] 48 53 71 51 56 44 62 44 57 69 70 58 69 54 75 75 54 60 69 77 48 59 55 74 58
[51] 73 65 53 59 62 53 68 56 62 44 57 60 58 43 59 55 53 74 66 65 51 45 72 63 52
[76] 64 63 50 63 55 50 59 60 64 41 70 57 71 75 58
> mean(age[sex==2])
[1] 61.07778
> mean(age[age>60])
[1] 68.68657
> bayan<-cancer[sex==2,]
> bayan
inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss
7 7 310 2 68 2 2 70 60 384 10
8 11 361 2 71 2 2 60 80 538 1
12 16 654 2 68 2 2 70 70 NA 23
13 11 728 2 68 2 1 90 90 NA 5
19 1 61 2 56 2 2 60 60 238 10
22 6 81 2 49 2 0 100 70 1175 -8
26 12 520 2 70 2 1 90 80 825 6
31 12 473 2 69 2 1 90 90 1025 -1
34 16 107 2 60 2 2 50 60 925 -15
> erkek<-cancer[sex==1]
Error in `[.data.frame`(cancer, sex == 1) : undefined columns selected
> erkek<-cancer[sex==1,]
> erkek
inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss
1 3 306 2 74 1 1 90 100 1175 NA
2 3 455 2 68 1 0 90 90 1225 15
3 3 1010 1 56 1 0 90 90 NA 15
4 5 210 2 57 1 1 90 60 1150 11
5 1 883 2 60 1 0 100 90 NA 0
> dim(erkek)
[1] 138 10
> bayan[bayan$age>65,]
inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss
7 7 310 2 68 2 2 70 60 384 10
8 11 361 2 71 2 2 60 80 538 1
12 16 654 2 68 2 2 70 70 NA 23
13 11 728 2 68 2 1 90 90 NA 5
26 12 520 2 70 2 1 90 80 825 6
31 12 473 2 69 2 1 90 90 1025 -1
38 15 965 1 66 2 1 70 90 875 4
42 11 153 2 73 2 2 60 70 1075 11
46 7 95 2 76 2 2 60 60 625 -24
51 3 735 2 72 2 1 90 90 NA 9
> bayan$age>65
[1] TRUE TRUE TRUE TRUE FALSE FALSE TRUE TRUE FALSE FALSE TRUE FALSE
[13] TRUE FALSE FALSE TRUE FALSE TRUE FALSE FALSE FALSE TRUE FALSE TRUE
[25] FALSE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE
[37] FALSE TRUE FALSE TRUE TRUE FALSE FALSE TRUE TRUE FALSE FALSE FALSE
[49] TRUE FALSE TRUE FALSE FALSE FALSE FALSE FALSE TRUE FALSE FALSE FALSE
[61] FALSE FALSE FALSE FALSE FALSE FALSE FALSE TRUE TRUE FALSE FALSE FALSE
[73] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE
[85] FALSE TRUE FALSE TRUE TRUE FALSE
> summary(cancer)
inst time status age
Min. : 1.00 Min. : 5.0 Min. :1.000 Min. :39.00
1st Qu.: 3.00 1st Qu.: 166.8 1st Qu.:1.000 1st Qu.:56.00
Median :11.00 Median : 255.5 Median :2.000 Median :63.00
Mean :11.09 Mean : 305.2 Mean :1.724 Mean :62.45
3rd Qu.:16.00 3rd Qu.: 396.5 3rd Qu.:2.000 3rd Qu.:69.00
Max. :33.00 Max. :1022.0 Max. :2.000 Max. :82.00
NA's :1
sex ph.ecog ph.karno pat.karno
Min. :1.000 Min. :0.0000 Min. : 50.00 Min. : 30.00
1st Qu.:1.000 1st Qu.:0.0000 1st Qu.: 75.00 1st Qu.: 70.00
Median :1.000 Median :1.0000 Median : 80.00 Median : 80.00
Mean :1.395 Mean :0.9515 Mean : 81.94 Mean : 79.96
3rd Qu.:2.000 3rd Qu.:1.0000 3rd Qu.: 90.00 3rd Qu.: 90.00
Max. :2.000 Max. :3.0000 Max. :100.00 Max. :100.00
NA's :1 NA's :1 NA's :3
meal.cal wt.loss
Min. : 96.0 Min. :-24.000
1st Qu.: 635.0 1st Qu.: 0.000
Median : 975.0 Median : 7.000
Mean : 928.8 Mean : 9.832
3rd Qu.:1150.0 3rd Qu.: 15.750
Max. :2600.0 Max. : 68.000
NA's :47 NA's :14
> sustu<-cancer[age>65 & sex==2,]
> sustu
inst time status age sex ph.ecog ph.karno pat.karno meal.cal wt.loss
7 7 310 2 68 2 2 70 60 384 10
8 11 361 2 71 2 2 60 80 538 1
12 16 654 2 68 2 2 70 70 NA 23
13 11 728 2 68 2 1 90 90 NA 5
26 12 520 2 70 2 1 90 80 825 6
31 12 473 2 69 2 1 90 90 1025 -1
38 15 965 1 66 2 1 70 90 875 4
42 11 153 2 73 2 2 60 70 1075 11
46 7 95 2 76 2 2 60 60 625 -24
51 3 735 2 72 2 1 90 90 NA 9
61 22 444 2 75 2 2 70 70 438 8
67 16 208 2 67 2 2 70 NA 538 2
76 12 426 2 71 2 1 90 90 1075 19
95 1 588 1 69 2 0 100 90 NA 13 |
c21a22ab7598059d90944fbb532763c36d85f73a | ba7639872ae549ba254b32d64291c92657fab4aa | /NewPCAMCMC/ShortPlayDiags.R | be1ef8f9dcbe92017e4ae7e2d05f53934bd4d7d5 | [] | no_license | gregvirus2/EcoEvoModelDwyerEtAl2021AmNat | 0bff9ca50c229f9a26d2634223436ae6e040992c | 38a5bad2c76089fe0942cc17e007b6f19066f4e7 | refs/heads/main | 2023-03-25T00:41:07.934531 | 2021-03-20T22:55:18 | 2021-03-20T22:55:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,054 | r | ShortPlayDiags.R |
# Candidate input files; exactly one read.table() call is active at a time.
#DataTemp = read.table("AllGA8P38YA.dat");
#DataTemp = read.table("AllGA8O29YC.dat");
#DataTemp = read.table("AllGA8O1YC.dat");
#DataTemp = read.table("AllGA8O42YC.dat");
#DataTemp = read.table("AllGA8P0YC.dat");
#DataTemp = read.table("AllGA8O5YC.dat");
#DataTemp = read.table("AllGA8O42ZC.dat");
DataTemp <- read.table("AllGA8O15ZC.dat")
#DataTemp = read.table("AllGA8O15ZD.dat");

# Row(s) attaining the maximum of column 10 (the likelihood/posterior column),
# and the corresponding value of column 2 at that optimum.
Index <- which(DataTemp[, 10] == max(DataTemp[, 10]))
x <- c(DataTemp[Index, 2])
#write(as.numeric(DataTemp[Index,]),file="BestParamsGA8P23ZA.dat",ncolumns=ncol(DataTemp));

# Record whether each parameter varied over the chain: a column with a single
# unique value means that parameter was held fixed (flag = 1).
mFixed <- length(unique(DataTemp[, 8]))
mFix <- if (mFixed > 1) 0 else 1

deltaFixed <- length(unique(DataTemp[, 7]))
deltaFix <- if (deltaFixed > 1) 0 else 1

ratioFixed <- length(unique(DataTemp[, 6]))
khtg <- length(unique(DataTemp[, 2]))

# Thinning/jump interval depends on whether column 5 was sampled (> 0).
Jump <- if (DataTemp[1, 5] > 0) 100 else 1000
library(coda)  # was require(); library() fails loudly if the package is missing

# Assemble DataAll: the (log-transformed) parameter columns fed to the MCMC
# diagnostics.  Which columns are included depends on which parameters were
# estimated in this run, detected from sentinel values in the first row:
#   column 2 > 1e4  -> k not estimated;  column 5 <= 0 -> sigma not estimated;
#   column 6        -> the ratio parameter (ratioFixed / mFixed / deltaFixed
#                      record whether the corresponding column varied).
# NOTE(review): the first branch uses log10 while all others use log --
# confirm this asymmetry is intentional.

#No k, no sigma:
if((DataTemp[1,2]>1e4)&&(DataTemp[1,5]<=0))
DataAll = cbind(log10(DataTemp[,3]),log10(DataTemp[,4]),DataTemp[,6],DataTemp[,7],DataTemp[,8],DataTemp[,9]);
#k and sigma:
#GM:
if((DataTemp[1,2]<=1e4)&&(DataTemp[1,5]>=0)){
cat("k and sigma...\n");
if(mFixed>1){
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,6]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}else{
if(deltaFixed>1){
cat("inside if statement...\n");
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,6]),log(DataTemp[,7]),log(DataTemp[,9]));
}else{
# Bug fix: this branch previously called printf(), which does not exist in R
# (base R has cat()/sprintf()); reaching this branch crashed the script.
cat("mFixed is 1, deltaFixed is 0\n");
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,6]),log(DataTemp[,9]));
}
}
}
#no k, sigma
if((DataTemp[1,2]>=1e4)&&(DataTemp[1,5]>=0)){
cat("sigma, no k...\n");
if(mFixed>1){
cat("m not Fixed, big k...\n");
if(ratioFixed==1){
cat("k big, ratio fixed, m fixed...\n");
DataAll = cbind(log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}else{
DataAll = cbind(log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,6]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}
}else{
if(deltaFixed==1){
if(ratioFixed==1){
DataAll = cbind(log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,9]));
}
} else{
if(ratioFixed==1){
cat("k big, ratio fixed, m fixed...\n");
DataAll = cbind(log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,7]),log(DataTemp[,9]));
}else{
DataAll = cbind(log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,5]),log(DataTemp[,6]),log(DataTemp[,7]),log(DataTemp[,9]));
}
}
}
}
#OS:
#DataAll = cbind(log(DataTemp[,2]),DataTemp[,3],DataTemp[,4],log(DataTemp[,5]),DataTemp[,6],DataTemp[,7],DataTemp[,8]);
#k, no sigma
#GM:
if((DataTemp[1,2]<=1e3)&&(DataTemp[1,5]<=0)){
cat("k, no sigma\n");  # duplicated cat() after the cbind removed
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,6]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}
#k, no sigma, no ratio
if(((DataTemp[1,2]<=1e3)&&(DataTemp[1,5]<=0))&&(DataTemp[1,6]<=0)){
cat("k, no sigma, no ratio\n");
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}
#k, no sigma, ratio fixed at 1
#GM:
if(((DataTemp[1,2]<=1e3)&&(DataTemp[1,5]<=0))&&(DataTemp[1,6]==1)){
cat("k, no sigma, ratio equals 1\n");
DataAll = cbind(log(DataTemp[,2]),log(DataTemp[,3]),log(DataTemp[,4]),log(DataTemp[,7]),log(DataTemp[,8]),log(DataTemp[,9]));
}
library(coda)  # was require(); library() fails loudly if the package is missing

# Split the concatenated chain file into its component MCMC runs: a new run
# starts wherever the iteration counter in column 1 resets (decreases).
NumFiles <- 1        # number of chains found so far
xLast <- numeric()   # last row index of each chain
Diff <- numeric()    # row count between successive chain boundaries
String <- 1          # index of the chain currently being scanned
ColLength <- ncol(DataTemp)
# seq_len() is safe when nrow(DataTemp) <= 1, unlike 1:(nrow-1) which would
# produce c(1, 0).  A disabled if(0) clamping/NA-scan block was removed here.
for (i in seq_len(nrow(DataTemp) - 1)) {
  if (DataTemp[i, 1] > DataTemp[i + 1, 1]) {
    cat("String:", String, " i:", i, " i+1:", i + 1, "\n")
    xLast[String] <- i
    String <- String + 1
    NumFiles <- NumFiles + 1
    cat("i:", i, "NumFiles:", NumFiles, "xLast:", xLast[String - 1], "\n")
  }
}
xLast[NumFiles] <- nrow(DataTemp)

# Lengths between chain boundaries; Last is the shortest, so every chain can
# later be truncated to a common length for the Gelman-Rubin diagnostic.
# NOTE(review): the first chain's own length (xLast[1]) is not included in
# Diff -- confirm all chains are the same length, otherwise chain 1 could be
# indexed past its end downstream.
for (i in seq_len(NumFiles - 1)) {   # empty when only one chain was found
  Diff[i] <- xLast[i + 1] - xLast[i]
  cat("i:", i, " xLast:", xLast[i], " xLast:", xLast[i + 1], " Diff:", Diff, "\n")
}
# Guard: with a single chain, Diff is empty and min() would return Inf (with a
# warning); fall back to the full length of the data in that case.
Last <- if (NumFiles > 1) min(Diff) else nrow(DataTemp)
cat("NumFiles:", NumFiles, "\n")
#xLast = 1e6*c(1:10);
# Build one coda::mcmc object per chain (x1..x25) by slicing DataAll into
# consecutive windows of Last rows; xNb holds the raw (un-classed) matrix for
# the same window.  'dummy' always points at the first row of the next chain.
# For chains 1-10, -Inf entries (from log of zero) are replaced by -13 before
# the diagnostics.
thinVal = 1;
x1 = mcmc(DataAll[1:Last,],thin=thinVal);
Index = which(x1==-Inf);
x1[Index] = -13;
#x1b = DataAll[seq(1,Last,thinVal),];
x1b = DataAll[seq(1,Last,thinVal),];
temp1 = seq(1,Last,thinVal);
cat("temp1:",length(temp1),"\n");
dummy = xLast[1]+1;
x2 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x2==-Inf);
x2[Index] = -13;
temp2 = seq(dummy,dummy+Last-1,thinVal);
cat("temp2:",length(temp2),"\n");
x2b = DataAll[seq(dummy,(dummy+Last-1),thinVal),];
dummy = xLast[2]+1;
if(NumFiles>2){
x3 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x3==-Inf);
x3[Index] = -13;
temp3 = seq(dummy,dummy+Last-1,thinVal);
cat("temp3:",length(temp3),"\n");
x3b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[3]+1;
}
if(NumFiles>3){
cat("just before mcmc...\n");
x4 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x4==-Inf);
x4[Index] = -13;
x4b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[4]+1;
}
if(NumFiles>4){
x5 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x5==-Inf);
x5[Index] = -13;
x5b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[5]+1;
}
if(NumFiles>5){
x6 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x6==-Inf);
x6[Index] = -13;
x6b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[6]+1;
}
if(NumFiles>6){
x7 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x7==-Inf);
x7[Index] = -13;
x7b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[7]+1;
}
if(NumFiles>7){
x8 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x8==-Inf);
x8[Index] = -13;
x8b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[8]+1;
}
if(NumFiles>8){
x9 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x9==-Inf);
x9[Index] = -13;
x9b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[9]+1;
}
if(NumFiles>9){
x10 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
Index = which(x10==-Inf);
x10[Index] = -13;
x10b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[10]+1;
}
# NOTE(review): from chain 11 onward the -Inf -> -13 replacement is NOT
# applied -- confirm this is intentional and not a copy-paste oversight.
if(NumFiles>10){
x11 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x11b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[11]+1;
}
if(NumFiles>11){
x12 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x12b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[12]+1;
}
if(NumFiles>12){
x13 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x13b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[13]+1;
}
if(NumFiles>13){
x14 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x14b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[14]+1;
}
if(NumFiles>14){
x15 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x15b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[15]+1;
}
if(NumFiles>15){
x16 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x16b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[16]+1;
}
if(NumFiles>16){
x17 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x17b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[17]+1;
}
if(NumFiles>17){
x18 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x18b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[18]+1;
}
if(NumFiles>18){
#cat("19 Files...\n");
x19 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x19b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[19]+1;
}
if(NumFiles>19){
x20 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x20b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[20]+1;
}
if(NumFiles>20){
x21 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x21b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[21]+1;
}
if(NumFiles>21){
x22 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x22b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[22]+1;
}
if(NumFiles>22){
x23 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x23b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[23]+1;
}
if(NumFiles>23){
x24 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x24b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[24]+1;
}
if(NumFiles>24){
x25 = mcmc(DataAll[(dummy):(dummy+Last-1),],thin=thinVal);
x25b = DataAll[seq(dummy,dummy+Last-1,thinVal),];
dummy = xLast[25]+1;
}
# Combine the per-chain mcmc objects into an mcmc.list for the Gelman-Rubin
# diagnostic.  NOTE(review): only these specific chain counts are handled;
# for NumFiles in {14, 15, 17, 18, 22, 23, 24} no branch matches, so xAll is
# left undefined (or stale from a previous session) -- confirm the expected
# chain counts, or add the missing branches.
if(NumFiles==2) xAll = mcmc.list(x1,x2);
if(NumFiles==3) xAll = mcmc.list(x1,x2,x3);
if(NumFiles==4) xAll = mcmc.list(x1,x2,x3,x4);
if(NumFiles==5) xAll = mcmc.list(x1,x2,x3,x4,x5);
if(NumFiles==6) xAll = mcmc.list(x1,x2,x3,x4,x5,x6);
if(NumFiles==7) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7);
if(NumFiles==8) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8);
if(NumFiles==9) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9);
if(NumFiles==10) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10);
if(NumFiles==11) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11);
if(NumFiles==12) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12);
if(NumFiles==13) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13);
if(NumFiles==16) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16);
if(NumFiles==19) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19);
if(NumFiles==20){
xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20);
#xAll = mcmc.list(x3,x4,x5);
}
if(NumFiles==21) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21);
if(NumFiles==25) xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25);
# Alternative chain selections kept for reference -- used when specific chains
# misbehaved in earlier runs (see the annotations on each line).
#xAll = mcmc.list(x1,x2,x4,x5,x6,x7,x8,x9,x10); #x3 is messed up for 5O1C, x10 for 5P2A, x10 for 5O18XA)
#xAll = mcmc.list(x2,x3,x4,x5,x6,x8,x9,x10); #x6 and x7 messed up in OAO1C
#xAll = mcmc.list(x1,x2,x3,x4,x5,x6,x7,x9,x10);
#gelman.diag(xAll);
# Dead code (if(0)): a variant that rebuilds the chains from the raw xNb
# matrices without the -Inf -> -13 replacement.  Never executed.
if(0){ #why is this next bit here?
par(ask="TRUE");
x1c = mcmc(x1b);
x2c = mcmc(x2b);
if(NumFiles>2) x3c = mcmc(x3b);
if(NumFiles>3) x4c = mcmc(x4b);
if(NumFiles>4) x5c = mcmc(x5b);
if(NumFiles>5) x6c = mcmc(x6b);
if(NumFiles>6) x7c = mcmc(x7b);
if(NumFiles==3) xAllc = mcmc.list(x1c,x2c,x3c);
if(NumFiles==4) xAllc = mcmc.list(x1c,x2c,x3c,x4c);
if(NumFiles==5) xAllc = mcmc.list(x1c,x2c,x3c,x4c,x5c);
if(NumFiles==6) xAllc = mcmc.list(x1c,x2c,x3c,x4c,x5c,x6c);
if(NumFiles==7) xAllc = mcmc.list(x1c,x2c,x3c,x4c,x5c,x6c,x7c);
}
# Gelman-Rubin convergence diagnostic, trace/density plots, and summary output.
print(gelman.diag(xAll));
diagOut = gelman.diag(xAll);
par(mai=c(0.25,0.25,0.25,0.25));
# NOTE(review): par(ask="TRUE") passes a character string; the logical TRUE is
# presumably intended -- confirm this works on the target R version.
par(ask="TRUE");
plot(xAll);
# Fraction of draws whose first DataAll column is non-positive.
Index = which(DataAll[,1]<=0);
print(length(Index)/length(DataAll[,1]));
# Write posterior means and SDs for the active input file; earlier runs'
# equivalents are kept commented out above.
#GA8O5YCStats = summary(xAll)
#write(t(GA8O5YCStats$statistics[,1:2]),file="GA8O5YCStats.dat",ncol=2);
#GA8O42ZCStats = summary(xAll)
#write(t(GA8O42ZCStats$statistics[,1:2]),file="GA8O42ZCStats.dat",ncol=2);
GA8O15ZCStats = summary(xAll)
write(t(GA8O15ZCStats$statistics[,1:2]),file="GA8O15ZCStats.dat",ncol=2);
7a3c2c6a38caabee5ac1a755a0fb84ab207c5d7e | 9f92822dd2bfef54670e7563e015fe75959ab4eb | /man/CMIP5_example_timeseries.Rd | 033c2ce2df86118d89e2067c2de5d57f9a1d448a | [] | no_license | cran/wux | 6ad444d96a487e36006368eb2cb54c91be5cbd53 | 77c246fbd25db73bfe318625e5dddde9bd06df3e | refs/heads/master | 2020-12-24T13:29:12.414854 | 2016-12-14T16:23:42 | 2016-12-14T16:23:42 | 27,069,676 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,035 | rd | CMIP5_example_timeseries.Rd | \name{CMIP5_example_timeseries}
\alias{CMIP5_example_timeseries}
\docType{data}
\title{
Climate change signals of example userinput for models2wux
}
\description{
This example of a WUX data.frame is the result of running
\code{userinput_CMIP5_timeseries} with \code{\link{models2wux}}.
}
\usage{data(CMIP5_example_timeseries)}
\details{
You can download the NetCDF files from ESGF using {\code{\link{CMIP5fromESGF}}}.
}
\seealso{\code{\link{models2wux}}}
\examples{
## That's what CMIP5_example_timeseries looks like
data("CMIP5_example_timeseries")
head(CMIP5_example_timeseries)
## You can run models2wux to get the same result as
## above.
data(userinput_CMIP5_timeseries)
data(modelinput_test)
\dontrun{
## You must have downloaded the example NetCDF files according to
## "modelinput_test" in order to run "models2wux". See the examples of
## ?CMIP5fromESGF or ?modelinput_test.
CMIP5_example_timeseries <- models2wux(userinput_CMIP5_timeseries,
modelinput = modelinput_test)}
}
\keyword{datasets}
|
0bc1bcddc8fd93f9aac1bc63fc062e1959399277 | d48e34adc6063a5ca3dbfd772ad186fb93922f50 | /package/clinDataReview/man/getJsDepClinDataReview.Rd | 5e9074349c6813c23203cdd29c26576dba84938d | [] | no_license | Lion666/clinDataReview | 080832a95b74bebb595d59796758b9e8b4cf4e18 | 2876140f36c6bfe94d8626038d32b2f3f9477697 | refs/heads/master | 2023-08-05T18:55:26.658847 | 2021-10-01T16:55:27 | 2021-10-02T10:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,073 | rd | getJsDepClinDataReview.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miscellaneous.R
\name{getJsDepClinDataReview}
\alias{getJsDepClinDataReview}
\title{Get Javascript custom scripts required for specific
clinical data functionalities.}
\usage{
getJsDepClinDataReview(
type = c("collapsibleButton", "patientProfiles"),
dep = NULL
)
}
\arguments{
\item{type}{(optional) Character vector with type of dependencies,
either: 'collapsibleButton' or 'patientProfiles'.}
\item{dep}{(optional) Character vector with names of Javascript dependencies
By default, all dependencies are included.}
}
\value{
List of \code{\link[htmltools]{htmlDependency}}.
To include this dependency in a report e.g. generated with rmarkdown,
these can be passed to the: \code{extra_dependencies} parameter
of the \code{output_format} specific function, e.g.:
\code{rmarkdown::render(...,
output_format = rmarkdown::html_document(extra_dependencies = dep))
}
}
\description{
Get Javascript custom scripts required for specific
clinical data functionalities.
}
\author{
Laure Cougnaud
}
|
c6c6b2961adf243bca5210db5a933020df797077 | 77b7481be4a3d80c8d44e3fc77730d177a386154 | /man/mc_state_equilibrium.Rd | 7c4e19e0adfad3607cdac83c7315af9b910888e4 | [] | no_license | HydrosystemsGroup/Weather-Generator | 05655075e66992e86e0a92d6965cb091a7a6ddc6 | 23250d2b1244a2c3c928fe301fb65197613fb201 | refs/heads/master | 2021-01-20T07:52:27.770546 | 2016-03-31T17:11:48 | 2016-03-31T17:11:48 | 90,059,517 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 571 | rd | mc_state_equilibrium.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc_state_equilibrium.R
\name{mc_state_equilibrium}
\alias{mc_state_equilibrium}
\title{Compute State Equilibrium Vector of Transition Matrix}
\usage{
mc_state_equilibrium(m)
}
\arguments{
\item{m}{markov transition matrix}
}
\description{
Compute State Equilibrium Vector of Transition Matrix
}
\examples{
transitions <- mc_fit(x=sample(c('d', 'w', 'e'), size=720, replace=TRUE, prob=c(0.5, 0.3, 0.2)), months=rep(rep(seq(1, 12), each=30), times=2))
mc_state_equilibrium(transitions[[1]])
}
|
4e9805448cd0ef3a300b0e6a96ca4011afe96a71 | 9e8ac937c14981f229c4dfe72182ec8a6b62b461 | /profiling/2.cj_data/2.A.2_vsb_parentage.R | 5e2fac4afe060dd481d98422821b32fee9574a6f | [] | no_license | rossibarra/phasing_tests | 80da4f02ebfa85f0a84df5ba7f00ac74f67b3f7d | b7b7327a7485880e826528a993332e86177a5b1f | refs/heads/master | 2021-01-01T19:42:25.344718 | 2015-08-29T01:21:53 | 2015-08-29T01:21:53 | 39,414,352 | 0 | 3 | null | 2015-08-29T01:21:53 | 2015-07-21T00:02:05 | R | UTF-8 | R | false | false | 2,479 | r | 2.A.2_vsb_parentage.R | ### Jinliang Yang modified from VSB
### July 30th, 2015
## phasing.R
library(parallel)
library(devtools)
options(mc.cores=NULL)
load_all("~/bin/tasselr")
load_all("~/bin/ProgenyArray")
ob <- load("largedata/cj_data.Rdata")
# load sequence lengths from chromosome
sl <- read.table("largedata/refgen2-lengths.txt", col.names=c("chrom", "length"),
stringsAsFactors = FALSE)
sl <- setNames(sl$length, sl$chrom)
chrs <- ifelse(names(sl) == "UNKNOWN", "0", names(sl))
names(sl) <- chrs
seqlengths(teo@ranges) <- sl[names(seqlengths(teo@ranges))]
## load in parent
parents <- read.delim("largedata/parent_taxa.txt", header=TRUE, stringsAsFactors=FALSE)
progeny <- read.delim("largedata/progeny_merged.txt", header=TRUE, stringsAsFactors=FALSE)
# all IDs found?
stopifnot(all(progeny$mother %in% parents$shorthand))
# all parent and progeny IDs in genotypes?
sample_names <- colnames(geno(teo))
stopifnot(all(parents$taxa %in% sample_names))
stopifnot(all(progeny$taxa %in% sample_names))
# stricter:
#length(setdiff(c(parents$taxa, progeny$taxa), sample_names))
#length(setdiff(sample_names, c(parents$taxa, progeny$taxa)))
## Load into ProgenyArray object
# mothers is given as an index to which column in parent genotype. Note that
# this is in the same order as the genotype columns (below) are ordered.
mothers <- match(progeny$mother, parents$shorthand)
pa <- ProgenyArray(geno(teo)[, progeny$taxa],
geno(teo)[, parents$taxa],
mothers,
loci=teo@ranges)
#201511 loci are fixed
## Infer parentage
# calculate allele frequencies
pa <- calcFreqs(pa)
# infer parents
pa <- inferParents(pa, ehet=0.6, ehom=0.1, verbose=TRUE)
#inferring parents for 4805 progeny
#4804/4805 progeny completed
#Warning message:
# In inferParents(pa, ehet = 0.6, ehom = 0.1, verbose = TRUE) :
# found 45 mothers that are inconsistent
save(pa, file= "largedata/cj_parentage.RData")
#ProgenyArray object: 598043 loci, 70 parents, 4805 progeny
#number of chromosomes: 11
#object size: 11640.408 Mb
#number of progeny: 4805
#number of parents: 70
#proportion missing:
# progeny: 0.503
#parents: 0.393
#number of complete parental loci: 9202
###########################################################################
map <- as.data.frame(pa@ranges)
geno1 <- pa@parents_geno
geno1[is.na(geno1)] <- 3
geno1 <- t(geno1)
genodf <- data.frame(fid=1:70, iid=row.names(geno1), pid=0, mid=0, as.data.frame(geno1))
|
141909eee8806e54848d19023d40d78cf244397a | df762ff9f6fa60e0657140218fdbd604b9d0d669 | /server.R | 69bb289e9069e842201371d74f3ea5a4725d3c13 | [] | no_license | sunzibinfa/DevelopingDataProductsWeek4Assigment | fe99ec7b1939c124f9b411c77bcce4a14009bf00 | 8bcec51e97bb30369dd45e9fd9cf2936f0055e29 | refs/heads/master | 2022-08-01T19:40:19.174701 | 2020-05-15T20:39:17 | 2020-05-15T20:39:17 | 264,269,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,284 | r | server.R | library(shiny)
library(dplyr)
# Read in data set and do process data in order to get into right format
data(EuStockMarkets)
EuStockMarkets2 <- diff(EuStockMarkets)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$indexPlot <- renderPlot({
# Subset data based on user inputs
if (input$type == 'level') {
EuStockMarkets3 <- EuStockMarkets
}
else if (input$type == 'diff') {
EuStockMarkets3 <- EuStockMarkets2
}
if (input$index == 'DAX') {
EuStockMarkets3 <- EuStockMarkets3[,1]
}
else if (input$index == 'SMI') {
EuStockMarkets3 <- EuStockMarkets3[,2]
}
else if (input$index == 'CAC') {
EuStockMarkets3 <- EuStockMarkets3[,3]
}
else if (input$index == 'FTSE') {
EuStockMarkets3 <- EuStockMarkets3[,4]
}
# draw the graph
if (input$type == 'level') {
plot(EuStockMarkets3, main='Plot of Stock Index Level', xlab='Date', ylab='Value')
}
else {
plot(EuStockMarkets3, main=paste('Plot of Daily Differences', xlab='Date', ylab='Value'))
}
})
})
|
7d501ea07b4f2cb2ca4c95f84b1d1da088cf1c3f | ef5654db62bce3446ef12053c6149542fc1c6e66 | /man/rawMeanInterval.Rd | 8d1b73512f5656f3db3981e83e538a560fe7e37d | [] | no_license | DanielEverland/smartstats | e6392dbf7f433d84bc8faab29fe4fe6e62cd170a | f0be82545c7f3744ef069c8a43a476ee00ad98e6 | refs/heads/master | 2023-05-06T04:45:07.824212 | 2021-05-30T07:21:31 | 2021-05-30T07:21:31 | 370,746,254 | 0 | 0 | null | 2021-05-29T09:35:01 | 2021-05-25T15:43:50 | R | UTF-8 | R | false | true | 511 | rd | rawMeanInterval.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConfidenceInterval.R
\name{rawMeanInterval}
\alias{rawMeanInterval}
\title{Calculates the mean confidence interval without a vector}
\usage{
rawMeanInterval(mu, sd, len, alpha = 0.05)
}
\arguments{
\item{mu}{Given mean}
\item{sd}{Standard deviation}
\item{len}{Length of the vector}
\item{alpha}{Significance level}
}
\value{
The mean confidence interval
}
\description{
Calculates the mean confidence interval without a vector
}
|
0869a470ba3aa3db8f3836083cb0199a34991247 | 7f4d3010da25ef1e82afebfd8430d9d3e05df34e | /code/waterfall.r | 996ef414479c1d5bb50c8d13d3cfff8535ee4c15 | [
"Apache-2.0"
] | permissive | jkeirstead/hygem-b | b382a796047e8c6bb1d4d95347c7835f038af72e | cada87b0f0dade6ac7b8b5a4d5b1d0bd1bbd6fa3 | refs/heads/master | 2021-01-22T11:37:52.382498 | 2013-12-19T17:08:11 | 2013-12-19T17:08:11 | 11,445,520 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,831 | r | waterfall.r | ## Make a waterfall plot
## @param df a dataframe with columns labelled category (an ordered factor, such that 1 + 2 + ... + n-1 = n), value, and an additional sector column for a further split
waterfall <- function(df) {
df <- transform(df, order=as.numeric(category))
df <- arrange(df, order)
ids <- which(df$order==max(df$order))
df$value[ids] <- -df$value[ids]
## Calculate the cumulative sums
df <- ddply(df, .(order, category, sector, value), summarize, cs1=cumsum(value))
df <- mutate(df, cs2=cumsum(cs1))
## Calculate the max and mins for each category and sector
df <- transform(df, min.val=c(0, head(cs2, -1)),
max.val=c(head(cs2, -1), 0))
df <- ddply(df, .(order, category, sector, value), summarize, min=min(min.val, max.val), max=max(min.val, max.val))
## Make the plot
offset <- 0.3
df <- mutate(df, offset=offset)
## Create the lines data frame
cs <- cumsum(ddply(df, .(order), summarize, value=sum(value))$value)
lines <- data.frame(x=df$order,
y=c(0, head(rep(cs, each=2), -2), 0))
require(scales)
gg <- ggplot() +
geom_line(data=lines, aes(x=x, y=y), linetype="dashed") +
geom_rect(data=df, aes(xmin=order - offset,
xmax=order + offset,
ymin=min,
ymax=max, fill=sector)) +
scale_x_continuous(breaks=df$order, labels=df$category)
return(gg)
}
debug <- FALSE
if (debug) {
raw <- data.frame(category=c("A", "B", "C", "D"),
value=c(100, -20, 10, 90))
df1 <- transform(raw, category=factor(category))
gg1 <- waterfall(df1) + theme_bw() + labs(x="", y="Value")
df2 <- transform(raw, category=factor(category, levels=c("A", "C", "B", "D")))
gg2 <- waterfall(df2) + theme_bw() + labs(x="", y="Value")
}
|
e0e8132a4f438b7c92067dd1dfad107f4a4a6726 | c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d | /Biostrings/doc/MultipleAlignments.R | 90dd31a0f3114d169d0db14ddca9ed2c7f7b13a9 | [
"MIT"
] | permissive | solgenomics/R_libs | bcf34e00bf2edef54894f6295c4f38f1e480b3fc | e8cdf30fd5f32babf39c76a01df5f5544062224e | refs/heads/master | 2023-07-08T10:06:04.304775 | 2022-05-09T15:41:26 | 2022-05-09T15:41:26 | 186,859,606 | 0 | 2 | MIT | 2023-03-07T08:59:16 | 2019-05-15T15:57:13 | C++ | UTF-8 | R | false | false | 5,259 | r | MultipleAlignments.R | ### R code from vignette source 'MultipleAlignments.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: objectCreation
###################################################
library(Biostrings)
origMAlign <-
readDNAMultipleAlignment(filepath =
system.file("extdata",
"msx2_mRNA.aln",
package="Biostrings"),
format="clustal")
phylipMAlign <-
readAAMultipleAlignment(filepath =
system.file("extdata",
"Phylip.txt",
package="Biostrings"),
format="phylip")
###################################################
### code chunk number 2: renameRows
###################################################
rownames(origMAlign)
rownames(origMAlign) <- c("Human","Chimp","Cow","Mouse","Rat",
"Dog","Chicken","Salmon")
origMAlign
###################################################
### code chunk number 3: detail (eval = FALSE)
###################################################
## detail(origMAlign)
###################################################
### code chunk number 4: usingMasks
###################################################
maskTest <- origMAlign
rowmask(maskTest) <- IRanges(start=1,end=3)
rowmask(maskTest)
maskTest
colmask(maskTest) <- IRanges(start=c(1,1000),end=c(500,2343))
colmask(maskTest)
maskTest
###################################################
### code chunk number 5: nullOut masks
###################################################
rowmask(maskTest) <- NULL
rowmask(maskTest)
colmask(maskTest) <- NULL
colmask(maskTest)
maskTest
###################################################
### code chunk number 6: invertMask
###################################################
rowmask(maskTest, invert=TRUE) <- IRanges(start=4,end=8)
rowmask(maskTest)
maskTest
colmask(maskTest, invert=TRUE) <- IRanges(start=501,end=999)
colmask(maskTest)
maskTest
###################################################
### code chunk number 7: setup
###################################################
## 1st lets null out the masks so we can have a fresh start.
colmask(maskTest) <- NULL
rowmask(maskTest) <- NULL
###################################################
### code chunk number 8: appendMask
###################################################
## Then we can demonstrate how the append argument works
rowmask(maskTest) <- IRanges(start=1,end=3)
maskTest
rowmask(maskTest,append="intersect") <- IRanges(start=2,end=5)
maskTest
rowmask(maskTest,append="replace") <- IRanges(start=5,end=8)
maskTest
rowmask(maskTest,append="replace",invert=TRUE) <- IRanges(start=5,end=8)
maskTest
rowmask(maskTest,append="union") <- IRanges(start=7,end=8)
maskTest
###################################################
### code chunk number 9: maskMotif
###################################################
tataMasked <- maskMotif(origMAlign, "TATA")
colmask(tataMasked)
###################################################
### code chunk number 10: maskGaps
###################################################
autoMasked <- maskGaps(origMAlign, min.fraction=0.5, min.block.width=4)
autoMasked
###################################################
### code chunk number 11: asmatrix
###################################################
full = as.matrix(origMAlign)
dim(full)
partial = as.matrix(autoMasked)
dim(partial)
###################################################
### code chunk number 12: alphabetFreq
###################################################
alphabetFrequency(autoMasked)
###################################################
### code chunk number 13: consensus
###################################################
consensusMatrix(autoMasked, baseOnly=TRUE)[, 84:90]
substr(consensusString(autoMasked),80,130)
consensusViews(autoMasked)
###################################################
### code chunk number 14: cluster
###################################################
sdist <- stringDist(as(origMAlign,"DNAStringSet"), method="hamming")
clust <- hclust(sdist, method = "single")
pdf(file="badTree.pdf")
plot(clust)
dev.off()
###################################################
### code chunk number 15: cluster2
###################################################
sdist <- stringDist(as(autoMasked,"DNAStringSet"), method="hamming")
clust <- hclust(sdist, method = "single")
pdf(file="goodTree.pdf")
plot(clust)
dev.off()
fourgroups <- cutree(clust, 4)
fourgroups
###################################################
### code chunk number 16: fastaExample (eval = FALSE)
###################################################
## DNAStr = as(origMAlign, "DNAStringSet")
## writeXStringSet(DNAStr, file="myFile.fa")
###################################################
### code chunk number 17: write.phylip (eval = FALSE)
###################################################
## write.phylip(phylipMAlign, filepath="myFile.txt")
###################################################
### code chunk number 18: sessinfo
###################################################
sessionInfo()
|
8e4f237f897a7cf418ff911e84609f35e4513a2b | 20fb140c414c9d20b12643f074f336f6d22d1432 | /man/NISTpeckTOliter.Rd | d8740fd121df00a136b025ab3c6894626f1dfe5b | [] | no_license | cran/NISTunits | cb9dda97bafb8a1a6a198f41016eb36a30dda046 | 4a4f4fa5b39546f5af5dd123c09377d3053d27cf | refs/heads/master | 2021-03-13T00:01:12.221467 | 2016-08-11T13:47:23 | 2016-08-11T13:47:23 | 27,615,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 718 | rd | NISTpeckTOliter.Rd | \name{NISTpeckTOliter}
\alias{NISTpeckTOliter}
\title{Convert peck to liter }
\usage{NISTpeckTOliter(peck)}
\description{\code{NISTpeckTOliter} converts from peck (U.S.) (pk) to liter (L) }
\arguments{
\item{peck}{peck (U.S.) (pk) }
}
\value{liter (L) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTpeckTOliter(10)
}
\keyword{programming} |
3235c9089f35ddddf713321f164a1cbd1a1c0906 | 0f64ac5e3d3cf43124dcb4917a4154829e7bb535 | /uncert/fm3.uncertqpV.R | b2248a563dbb480d3d5452724570022e5cae2902 | [] | no_license | wactbprot/r4vl | 8e1d6b920dfd91d22a01c8e270d8810f02cea27c | a34b1fa9951926796186189202750c71e7883f8d | refs/heads/master | 2016-09-11T02:22:39.828280 | 2014-10-07T14:37:55 | 2014-10-07T14:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,757 | r | fm3.uncertqpV.R | fm3.uncertqpV <- function(ccc){
msg <- "Calculated by fm3.uncertqpV()"
a <- abbrevList(ccc)
pfill <- getConstVal(a$cav, "fill")
uncertRes <- rep(0,length(pfill))
uDPfillList <- getSubList(a$cav, "uncertDPfill")
uPfillList <- getSubList(a$cav, "uncertPfill")
uDVList <- getSubList(a$cav, "uncertDeltaV")
uDtList <- getSubList(a$cav, "uncertDeltat")
uDVDtList <- getSubList(a$cav, "uncertDeltaVDeltat")
uPresList <- getSubList(a$cav, "uncertPres")
uConstLwList <- getSubList(a$cav, "uncertConstC")
if((uPfillList$Unit == uDPfillList$Unit) &
(uPfillList$Unit == uDVList$Unit) &
(uPfillList$Unit == uDVDtList$Unit) &
(uPfillList$Unit == uDtList$Unit) &
(uPfillList$Unit == uConstLwList$Unit)&
(uPfillList$Unit == uPresList$Unit)&
(uPfillList$Unit == "1")){
uDPfill <- getConstVal( NA,NA, uDPfillList )
uPfill <- getConstVal( NA,NA, uPfillList )
uDV <- getConstVal( NA,NA, uDVList )
uDt <- getConstVal( NA,NA, uDtList )
uDVDt <- getConstVal( NA,NA, uDVDtList )
uConstLw <- getConstVal( NA,NA, uConstLwList)
uPres <- getConstVal( NA,NA, uPresList)
uncertRes <- sqrt(uDPfill^2 +
uPfill^2 +
uDV^2 +
uDt^2 +
uDVDt^2 +
uPres^2 +
uConstLw^2)
}
ccc$Calibration$Analysis$Values$Uncertainty <-
setCcl(ccc$Calibration$Analysis$Values$Uncertainty,
"uncertqpV",
"1",
uncertRes,
msg)
return(ccc)
}
|
e328ba51591c800abc86a5a60ffc85480fdf54e2 | 29585dff702209dd446c0ab52ceea046c58e384e | /SemiCompRisks/R/methods.R | 8ac6ce107e3011910b3b8be6fb909588eb6b2ecd | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 102,217 | r | methods.R |
####
## PRINT METHOD
####
##
print.Freq <- function(x, digits=3, ...)
{
obj <- x
##
logEst <- obj$estimate
logSE <- sqrt(diag(obj$Finv))
value <- cbind(logEst, logSE, logEst - 1.96*logSE, logEst + 1.96*logSE)
##
dimnames(value) <- list(obj$myLabels, c( "Estimate", "SE", "LL", "UL"))
##
if(class(obj)[2] == "Surv")
{
##
cat("\nAnalysis of independent univariate time-to-event data \n")
##
#cat("\nBaseline hazard function components:\n")
#print(round(value[c(1:2),], digits=digits))
##
cat("\nRegression coefficients:\n")
print(round(value[-c(1:2),], digits=digits))
}
##
if(class(obj)[2] == "ID")
{
##
cat("\nAnalysis of independent semi-competing risks data \n")
cat(class(obj)[5], "assumption for h3\n")
##
#cat("\nBaseline hazard function components:\n")
#print(round(value[c(1:6),], digits=digits))
##
value_theta <- matrix(exp(value[7,]), ncol = 4)
dimnames(value_theta) <- list("", c( "Estimate", "SE", "LL", "UL"))
value_theta[1,2] <- value[7,2] * exp(value[7,1])
cat("\nVariance of frailties, theta:\n")
if(obj$frailty == TRUE) print(round(value_theta, digits=digits))
if(obj$frailty == FALSE) print("NA")
##
cat("\nRegression coefficients:\n")
if(obj$frailty == TRUE) print(round(value[-c(1:7),], digits=digits))
if(obj$frailty == FALSE) print(round(value[-c(1:6),], digits=digits))
}
##
invisible()
}
print.Bayes <- function(x, digits=3, ...)
{
nChain = x$setup$nChain
if(class(x)[2] == "ID")
{
if(class(x)[3] == "Cor")
{
##
cat("\nAnalysis of cluster-correlated semi-competing risks data \n")
}
if(class(x)[3] == "Ind")
{
##
cat("\nAnalysis of independent semi-competing risks data \n")
}
##
cat(x$setup$model, "assumption for h3\n")
}
if(class(x)[2] == "Surv")
{
if(class(x)[3] == "Cor")
{
##
cat("\nAnalysis of cluster-correlated univariate time-to-event data \n")
}
if(class(x)[3] == "Ind")
{
##
cat("\nAnalysis of independent univariate time-to-event data \n")
}
}
##
cat("\nNumber of chains: ", nChain,"\n")
##
cat("Number of scans: ", x$setup$numReps,"\n")
##
cat("Thinning: ", x$setup$thin,"\n")
##
cat("Percentage of burnin: ", x$setup$burninPerc*100, "%\n", sep = "")
# convergence diagnostics
if(nChain > 1){
cat("\n######\n")
cat("Potential Scale Reduction Factor\n")
if(class(x)[2] == "ID")
{
theta <- x$chain1$theta.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
theta <- cbind(theta, x[[nam]]$theta.p)
}
psrftheta <- matrix(calcPSR(theta), 1, 1)
dimnames(psrftheta) <- list("", "")
cat("\nVariance of frailties, theta:")
print(round(psrftheta, digits=digits))
beta.names <- unique(c(x$chain1$covNames1, x$chain1$covNames2, x$chain1$covNames3))
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=3)
dimnames(output) <- list(beta.names, c("beta1", "beta2", "beta3"))
if(length(x$chain1$beta1.p) != 0){
#beta1
p1 = dim(x$chain1$beta1.p)[2]
psrfBeta1 <- rep(NA, p1)
for(j in 1:p1){
#namPara = paste("beta_", j, sep = "")
beta1 <- x$chain1$beta1[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta1 <- cbind(beta1, x[[nam]]$beta1[,j])
}
psrfBeta1[j] <- calcPSR(beta1)
}
for(i in 1:nP)
{
for(k in 1:p1) if(x$chain1$covNames1[k] == beta.names[i]) output[i,1] <- psrfBeta1[k]
}
}
if(length(x$chain1$beta2.p) != 0){
#beta2
p2 = dim(x$chain1$beta2.p)[2]
psrfBeta2 <- rep(NA, p2)
for(j in 1:p2){
#namPara = paste("beta_", j, sep = "")
beta2 <- x$chain1$beta2[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta2 <- cbind(beta2, x[[nam]]$beta2[,j])
}
psrfBeta2[j] <- calcPSR(beta2)
}
for(i in 1:nP)
{
for(k in 1:p2) if(x$chain1$covNames2[k] == beta.names[i]) output[i,2] <- psrfBeta2[k]
}
}
if(length(x$chain1$beta3.p) != 0){
#beta3
p3 = dim(x$chain1$beta3.p)[2]
psrfBeta3 <- rep(NA, p3)
for(j in 1:p3){
#namPara = paste("beta_", j, sep = "")
beta3 <- x$chain1$beta3[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta3 <- cbind(beta3, x[[nam]]$beta3[,j])
}
psrfBeta3[j] <- calcPSR(beta3)
}
for(i in 1:nP)
{
for(k in 1:p3) if(x$chain1$covNames3[k] == beta.names[i]) output[i,3] <- psrfBeta3[k]
}
}
if(nP > 0)
{
cat("\nRegression coefficients:\n")
output.coef <- output
print(round(output.coef, digits=digits))
}
##
cat("\nBaseline hazard function components:\n")
if(class(x)[4] == "WB")
{
##
# alpha
alpha <- x$chain1$alpha1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha1.p)
}
psrfAlpha1 <- calcPSR(alpha)
alpha <- x$chain1$alpha2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha2.p)
}
psrfAlpha2 <- calcPSR(alpha)
alpha <- x$chain1$alpha3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha3.p)
}
psrfAlpha3 <- calcPSR(alpha)
# kappa
kappa <- x$chain1$kappa1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa1.p)
}
psrfKappa1 <- calcPSR(kappa)
kappa <- x$chain1$kappa2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa2.p)
}
psrfKappa2 <- calcPSR(kappa)
kappa <- x$chain1$kappa3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa3.p)
}
psrfKappa3 <- calcPSR(kappa)
bh_WB <- matrix(c(psrfKappa1, psrfKappa2, psrfKappa3, psrfAlpha1, psrfAlpha2, psrfAlpha3), 2, 3, byrow = T)
dimnames(bh_WB) <- list(c("kappa", "alpha"), c("h1", "h2", "h3"))
print(round(bh_WB, digits=digits))
}
if(class(x)[4] == "PEM")
{
##
ntime1 = length(x$chain1$time_lambda1)
ntime2 = length(x$chain1$time_lambda2)
ntime3 = length(x$chain1$time_lambda3)
# lambda's
psrfLam <- rep(NA, ntime1)
for(j in 1:ntime1){
lambda1 <- x$chain1$lambda1.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda1 <- cbind(lambda1, x[[nam]]$lambda1.fin[,j])
}
psrfLam[j] <- calcPSR(lambda1)
}
cat("\nlambda1: summary statistics", "\n")
print(round(summary(psrfLam), digits=digits))
psrfLam <- rep(NA, ntime2)
for(j in 1:ntime2){
lambda2 <- x$chain1$lambda2.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda2 <- cbind(lambda2, x[[nam]]$lambda2.fin[,j])
}
psrfLam[j] <- calcPSR(lambda2)
}
cat("\nlambda2: summary statistics", "\n")
print(round(summary(psrfLam), digits=digits))
psrfLam <- rep(NA, ntime3)
for(j in 1:ntime3){
lambda3 <- x$chain1$lambda3.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda3 <- cbind(lambda3, x[[nam]]$lambda3.fin[,j])
}
psrfLam[j] <- calcPSR(lambda3)
}
cat("\nlambda3: summary statistics", "\n")
print(round(summary(psrfLam), digits=digits))
# mu_lam
mu <- x$chain1$mu_lam1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam1.p)
}
psrfMu1 <- calcPSR(mu)
mu <- x$chain1$mu_lam2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam2.p)
}
psrfMu2 <- calcPSR(mu)
mu <- x$chain1$mu_lam3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam3.p)
}
psrfMu3 <- calcPSR(mu)
# sigSq_lam
sig <- x$chain1$sigSq_lam1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam1.p)
}
psrfSig1 <- calcPSR(sig)
sig <- x$chain1$sigSq_lam2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam2.p)
}
psrfSig2 <- calcPSR(sig)
sig <- x$chain1$sigSq_lam3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam3.p)
}
psrfSig3 <- calcPSR(sig)
# J
J <- x$chain1$K1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K1.p)
}
psrfJ1 <- calcPSR(J)
J <- x$chain1$K2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K2.p)
}
psrfJ2 <- calcPSR(J)
J <- x$chain1$K3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K3.p)
}
psrfJ3 <- calcPSR(J)
bh_PEM <- matrix(c(psrfMu1, psrfMu2, psrfMu3, psrfSig1, psrfSig2, psrfSig3, psrfJ1, psrfJ2, psrfJ3), 3, 3, byrow = T)
dimnames(bh_PEM) <- list(c("mu", "sigmaSq", "K"), c("h1", "h2", "h3"))
cat("\n")
print(round(bh_PEM, digits=digits))
}
}
if(class(x)[2] == "Surv")
{
beta.names <- c(x$chain1$covNames)
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=1)
dimnames(output) <- list(beta.names, c("beta"))
if(length(x$chain1$beta.p) != 0){
#beta
p = dim(x$chain1$beta.p)[2]
psrfBeta <- rep(NA, p)
for(j in 1:p){
beta <- x$chain1$beta[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta <- cbind(beta, x[[nam]]$beta[,j])
}
psrfBeta[j] <- calcPSR(beta)
}
for(i in 1:nP)
{
for(k in 1:p) if(x$chain1$covNames[k] == beta.names[i]) output[i,1] <- psrfBeta[k]
}
}
if(nP > 0)
{
cat("\nRegression coefficients:\n")
output.coef <- output
print(round(output.coef, digits=digits))
}
if(class(x)[4] == "WB")
{
##
# alpha
alpha <- x$chain1$alpha.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha.p)
}
psrfAlpha <- calcPSR(alpha)
# kappa
kappa <- x$chain1$kappa.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa.p)
}
psrfKappa <- calcPSR(kappa)
bh_WB <- matrix(c(psrfKappa, psrfAlpha), 2, 1, byrow = T)
dimnames(bh_WB) <- list(c("kappa", "alpha"), c("h"))
print(round(bh_WB, digits=digits))
}
if(class(x)[4] == "PEM")
{
##
ntime = length(x$chain1$time_lambda)
# lambda
psrfLam <- rep(NA, ntime)
for(j in 1:ntime){
namPara = paste("beta_", j, sep = "")
lambda <- x$chain1$lambda.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda <- cbind(lambda, x[[nam]]$lambda.fin[,j])
}
psrfLam[j] <- calcPSR(lambda)
}
cat("\n lambda: summary statistics", "\n")
print(round(summary(psrfLam), 2))
# mu_lam
mu <- x$chain1$mu_lam.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam.p)
}
psrfMu <- calcPSR(mu)
# sigSq_lam
sig <- x$chain1$sigSq_lam.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam.p)
}
psrfSig <- calcPSR(sig)
# J
J <- x$chain1$K.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K.p)
}
psrfJ <- calcPSR(J)
bh_PEM <- matrix(c(psrfMu, psrfSig, psrfJ), 3, 1, byrow = T)
dimnames(bh_PEM) <- list(c("mu", "sigmaSq", "K"), c("h"))
cat("\n")
print(round(bh_PEM, digits=digits))
}
}
}
else if(nChain == 1)
{
cat("Potential scale reduction factor cannot be calculated. \n")
cat("The number of chains must be larger than 1. \n")
}
cat("\n######\n")
cat("Estimates\n")
if(class(x)[2] == "ID")
{
##
cat("\nVariance of frailties, theta:\n")
theta.p <- x$chain1$theta.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
theta.p <- rbind(theta.p, x[[nam]]$theta.p)
}
}
theta.pMed <- apply(theta.p, 2, median)
theta.pSd <- apply(theta.p, 2, sd)
theta.pUb <- apply(theta.p, 2, quantile, prob = 0.975)
theta.pLb <- apply(theta.p, 2, quantile, prob = 0.025)
tbl <- matrix(NA, 1, 4)
dimnames(tbl) <- list("", c( "Estimate", "SD", "LL", "UL"))
tbl[,1] <- theta.pMed
tbl[,2] <- theta.pSd
tbl[,3] <- theta.pLb
tbl[,4] <- theta.pUb
print(round(tbl, digits=digits))
##
tbl_beta <- NULL
if(length(x$chain1$beta1.p) != 0){
p1 = dim(x$chain1$beta1.p)[2]
beta.p <- x$chain1$beta1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta1.p)
}
}
beta.pMed <- apply(beta.p, 2, median)
beta.pSd <- apply(beta.p, 2, sd)
beta.pUb <- apply(beta.p, 2, quantile, prob = 0.975)
beta.pLb <- apply(beta.p, 2, quantile, prob = 0.025)
tbl1 <- matrix(NA, p1, 4)
rownames(tbl1) <- x$chain1$covNames1
tbl1[,1] <- beta.pMed
tbl1[,2] <- beta.pSd
tbl1[,3] <- exp(beta.pLb)
tbl1[,4] <- exp(beta.pUb)
tbl_beta <- tbl1
}
if(length(x$chain1$beta2.p) != 0){
p2 = dim(x$chain1$beta2.p)[2]
beta.p <- x$chain1$beta2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta2.p)
}
}
beta.pMed <- apply(beta.p, 2, median)
beta.pSd <- apply(beta.p, 2, sd)
beta.pUb <- apply(beta.p, 2, quantile, prob = 0.975)
beta.pLb <- apply(beta.p, 2, quantile, prob = 0.025)
tbl2 <- matrix(NA, p2, 4)
rownames(tbl2) <- x$chain1$covNames2
tbl2[,1] <- beta.pMed
tbl2[,2] <- beta.pSd
tbl2[,3] <- exp(beta.pLb)
tbl2[,4] <- exp(beta.pUb)
tbl_beta <- rbind(tbl_beta, tbl2)
}
if(length(x$chain1$beta3.p) != 0){
p3 = dim(x$chain1$beta3.p)[2]
beta.p <- x$chain1$beta3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta3.p)
}
}
beta.pMed <- apply(beta.p, 2, median)
beta.pSd <- apply(beta.p, 2, sd)
beta.pUb <- apply(beta.p, 2, quantile, prob = 0.975)
beta.pLb <- apply(beta.p, 2, quantile, prob = 0.025)
tbl3 <- matrix(NA, p3, 4)
rownames(tbl3) <- x$chain1$covNames3
tbl3[,1] <- beta.pMed
tbl3[,2] <- beta.pSd
tbl3[,3] <- exp(beta.pLb)
tbl3[,4] <- exp(beta.pUb)
tbl_beta <- rbind(tbl_beta, tbl3)
}
}
if(class(x)[2] == "Surv")
{
if(length(x$chain1$beta.p) != 0){
p = dim(x$chain1$beta.p)[2]
beta.p <- x$chain1$beta.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta.p)
}
}
beta.pMed <- apply(beta.p, 2, median)
beta.pSd <- apply(beta.p, 2, sd)
beta.pUb <- apply(beta.p, 2, quantile, prob = 0.975)
beta.pLb <- apply(beta.p, 2, quantile, prob = 0.025)
tbl_beta <- matrix(NA, p, 4)
rownames(tbl_beta) <- x$chain1$covNames
tbl_beta[,1] <- beta.pMed
tbl_beta[,2] <- beta.pSd
tbl_beta[,3] <- exp(beta.pLb)
tbl_beta[,4] <- exp(beta.pUb)
}
}
if(!is.null(tbl_beta))
{
cat("\nRegression coefficients:\n")
colnames(tbl_beta) <- c( "Estimate", "SD", "LL", "UL")
print(round(tbl_beta, digits=digits))
}
invisible()
}
####
## SUMMARY METHOD
####
##
summary.Freq <- function(object, digits=3, ...)
{
obj <- object
##
logEst <- obj$estimate
logSE <- sqrt(diag(obj$Finv))
results <- cbind(logEst, logEst - 1.96*logSE, logEst + 1.96*logSE)
##
if(class(obj)[2] == "Surv")
{
##
#cat("\nRegression coefficients:\n")
output.coef <- results[-c(1:2),]
dimnames(output.coef) <- list(unique(obj$myLabels[-c(1:2)]), c("beta", "LL", "UL"))
##
#cat("\nBaseline hazard function components:\n")
output.h0 <- results[c(1:2),]
dimnames(output.h0) <- list(c("Weibull: log-kappa", "Weibull: log-alpha"), c("beta", "LL", "UL"))
##
value <- list(coef=output.coef, h0=output.h0, code=obj$code, logLike=obj$logLike, nP=nrow(results))
class(value) <- c("summ.Freq", "Surv")
}
##
if(class(obj)[2] == "ID")
{
##
nP.0 <- ifelse(obj$frailty, 7, 6)
nP.1 <- obj$nP[1]
nP.2 <- obj$nP[2]
nP.3 <- obj$nP[3]
##
beta.names <- unique(obj$myLabels[-c(1:nP.0)])
nP <- length(beta.names)
##
#cat("\nRegression coefficients:\n")
output <- matrix(NA, nrow=nP, ncol=9)
dimnames(output) <- list(beta.names, c("beta1", "LL", "UL", "beta2", "LL", "UL", "beta3", "LL", "UL"))
for(i in 1:nP)
{
for(j in 1:nP.1) if(obj$myLabels[nP.0+j] == beta.names[i]) output[i,1:3] <- results[nP.0+j,]
for(j in 1:nP.2) if(obj$myLabels[nP.0+nP.1+j] == beta.names[i]) output[i,4:6] <- results[nP.0+nP.1+j,]
for(j in 1:nP.3) if(obj$myLabels[nP.0+nP.1+nP.2+j] == beta.names[i]) output[i,7:9] <- results[nP.0+nP.1+nP.2+j,]
}
output.coef <- output
##
#cat("\nVariance of frailties:\n")
output <- matrix(NA, nrow=1, ncol=3)
dimnames(output) <- list(c("theta"), c("Estimate", "LL", "UL"))
if(obj$frailty == TRUE) output[1,] <- exp(results[7,])
if(obj$frailty == FALSE) output[1,] <- rep(NA, 3)
output.theta <- output
##
#cat("\nBaseline hazard function components:\n")
output <- matrix(NA, nrow=2, ncol=9)
dimnames(output) <- list(c("Weibull: log-kappa", "Weibull: log-alpha"), c("h1-PM", "LL", "UL", "h2-PM", "LL", "UL", "h3-PM", "LL", "UL"))
output[1,1:3] <- results[1,]
output[1,4:6] <- results[3,]
output[1,7:9] <- results[5,]
output[2,1:3] <- results[2,]
output[2,4:6] <- results[4,]
output[2,7:9] <- results[6,]
output.h0 <- output
##
value <- list(coef=output.coef, theta=output.theta, h0=output.h0, code=obj$code, logLike=obj$logLike, nP=nrow(results))
if(class(obj)[5] == "semi-Markov")
{
class(value) <- c("summ.Freq", "ID", "semi-Markov")
}
if(class(obj)[5] == "Markov")
{
class(value) <- c("summ.Freq", "ID", "Markov")
}
}
##
return(value)
}
## Summary method for Bayesian fits ("Bayes" objects).
## Pools MCMC draws across chains to produce:
##  (i)  Gelman-Rubin potential scale reduction factors (PSRF, via calcPSR())
##       for convergence diagnostics, only when nChain > 1;
##  (ii) posterior medians with 95% credible intervals for the regression
##       coefficients (reported as hazard ratios exp(beta)), the frailty
##       variance theta (illness-death "ID" fits), the baseline hazard
##       components (Weibull "WB": log-kappa/log-alpha; piecewise exponential
##       "PEM": mu, sigmaSq, K and the lambda step functions), and the
##       cluster-level random-effects variance ("Cor" fits).
## The fitted model type is read from the class vector of `object`:
## class[2] in {"ID","Surv"}, class[3] in {"Cor","Ind"}, class[4] in
## {"WB","PEM"}, class[5] the random-effects distribution ("MVN","DPM",
## "Normal") for correlated fits.
## NOTE(review): depends on helpers calcPSR(), calVar_DPM_MVN() and
## calVar_DPM_Normal() defined elsewhere in this package.
## FIX(review): in the estimate section the Weibull baseline-hazard matrices
## were filled alpha-first while the row labels are kappa-first (the PSRF
## section uses kappa-first with the same labels); the fill order is now
## kappa-first so the labels match the values.
summary.Bayes <- function(object, digits=3, ...)
{
x <- object
nChain = x$setup$nChain
# convergence diagnostics
## PSRF needs at least two chains; psrf stays NULL otherwise.
psrf <- NULL
if(nChain > 1){
## --- PSRF: illness-death (semi-competing risks) fits ---
if(class(x)[2] == "ID")
{
## pool frailty-variance draws column-wise across chains
theta <- x$chain1$theta.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
theta <- cbind(theta, x[[nam]]$theta.p)
}
psrftheta <- matrix(calcPSR(theta), 1, 1)
dimnames(psrftheta) <- list("", "")
## union of covariate names across the three transition hazards; the PSRF
## table has one row per name, NA where a hazard does not use the covariate
beta.names <- unique(c(x$chain1$covNames1, x$chain1$covNames2, x$chain1$covNames3))
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=3)
dimnames(output) <- list(beta.names, c("beta1", "beta2", "beta3"))
if(length(x$chain1$beta1.p) != 0){
#beta1
p1 = dim(x$chain1$beta1.p)[2]
psrfBeta1 <- rep(NA, p1)
for(j in 1:p1){
#namPara = paste("beta_", j, sep = "")
## NOTE(review): reads $beta1 here but $beta1.p elsewhere -- confirm both
## names exist on the chain object
beta1 <- x$chain1$beta1[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta1 <- cbind(beta1, x[[nam]]$beta1[,j])
}
psrfBeta1[j] <- calcPSR(beta1)
}
for(i in 1:nP)
{
for(k in 1:p1) if(x$chain1$covNames1[k] == beta.names[i]) output[i,1] <- psrfBeta1[k]
}
}
if(length(x$chain1$beta2.p) != 0){
#beta2
p2 = dim(x$chain1$beta2.p)[2]
psrfBeta2 <- rep(NA, p2)
for(j in 1:p2){
#namPara = paste("beta_", j, sep = "")
beta2 <- x$chain1$beta2[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta2 <- cbind(beta2, x[[nam]]$beta2[,j])
}
psrfBeta2[j] <- calcPSR(beta2)
}
for(i in 1:nP)
{
for(k in 1:p2) if(x$chain1$covNames2[k] == beta.names[i]) output[i,2] <- psrfBeta2[k]
}
}
if(length(x$chain1$beta3.p) != 0){
#beta3
p3 = dim(x$chain1$beta3.p)[2]
psrfBeta3 <- rep(NA, p3)
for(j in 1:p3){
#namPara = paste("beta_", j, sep = "")
beta3 <- x$chain1$beta3[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta3 <- cbind(beta3, x[[nam]]$beta3[,j])
}
psrfBeta3[j] <- calcPSR(beta3)
}
for(i in 1:nP)
{
for(k in 1:p3) if(x$chain1$covNames3[k] == beta.names[i]) output[i,3] <- psrfBeta3[k]
}
}
psrfcoef <- NULL
if(nP > 0)
{
psrfcoef <- output
}
##
## PSRF for the Weibull baseline-hazard parameters (one alpha/kappa pair
## per transition hazard h1, h2, h3)
if(class(x)[4] == "WB")
{
##
# alpha
alpha <- x$chain1$alpha1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha1.p)
}
psrfAlpha1 <- calcPSR(alpha)
alpha <- x$chain1$alpha2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha2.p)
}
psrfAlpha2 <- calcPSR(alpha)
alpha <- x$chain1$alpha3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha3.p)
}
psrfAlpha3 <- calcPSR(alpha)
# kappa
kappa <- x$chain1$kappa1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa1.p)
}
psrfKappa1 <- calcPSR(kappa)
kappa <- x$chain1$kappa2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa2.p)
}
psrfKappa2 <- calcPSR(kappa)
kappa <- x$chain1$kappa3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa3.p)
}
psrfKappa3 <- calcPSR(kappa)
bh <- matrix(c(psrfKappa1, psrfKappa2, psrfKappa3, psrfAlpha1, psrfAlpha2, psrfAlpha3), 2, 3, byrow = T)
dimnames(bh) <- list(c("kappa", "alpha"), c("h1", "h2", "h3"))
psrf <- list(theta=psrftheta, coef=psrfcoef, h0=bh)
}
## PSRF for the piecewise-exponential baseline-hazard parameters: the
## per-interval log-hazards lambda, their mean mu and variance sigSq, and
## the number of split points K, for each of the three hazards
if(class(x)[4] == "PEM")
{
##
ntime1 = length(x$chain1$time_lambda1)
ntime2 = length(x$chain1$time_lambda2)
ntime3 = length(x$chain1$time_lambda3)
# lambda's
psrfLam1 <- rep(NA, ntime1)
for(j in 1:ntime1){
lambda1 <- x$chain1$lambda1.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda1 <- cbind(lambda1, x[[nam]]$lambda1.fin[,j])
}
psrfLam1[j] <- calcPSR(lambda1)
}
psrfLam2 <- rep(NA, ntime2)
for(j in 1:ntime2){
lambda2 <- x$chain1$lambda2.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda2 <- cbind(lambda2, x[[nam]]$lambda2.fin[,j])
}
psrfLam2[j] <- calcPSR(lambda2)
}
psrfLam3 <- rep(NA, ntime3)
for(j in 1:ntime3){
lambda3 <- x$chain1$lambda3.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda3 <- cbind(lambda3, x[[nam]]$lambda3.fin[,j])
}
psrfLam3[j] <- calcPSR(lambda3)
}
# mu_lam
mu <- x$chain1$mu_lam1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam1.p)
}
psrfMu1 <- calcPSR(mu)
mu <- x$chain1$mu_lam2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam2.p)
}
psrfMu2 <- calcPSR(mu)
mu <- x$chain1$mu_lam3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam3.p)
}
psrfMu3 <- calcPSR(mu)
# sigSq_lam
sig <- x$chain1$sigSq_lam1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam1.p)
}
psrfSig1 <- calcPSR(sig)
sig <- x$chain1$sigSq_lam2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam2.p)
}
psrfSig2 <- calcPSR(sig)
sig <- x$chain1$sigSq_lam3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam3.p)
}
psrfSig3 <- calcPSR(sig)
# J
J <- x$chain1$K1.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K1.p)
}
psrfJ1 <- calcPSR(J)
J <- x$chain1$K2.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K2.p)
}
psrfJ2 <- calcPSR(J)
J <- x$chain1$K3.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K3.p)
}
psrfJ3 <- calcPSR(J)
bh <- matrix(c(psrfMu1, psrfMu2, psrfMu3, psrfSig1, psrfSig2, psrfSig3, psrfJ1, psrfJ2, psrfJ3), 3, 3, byrow = T)
dimnames(bh) <- list(c("mu", "sigmaSq", "K"), c("h1", "h2", "h3"))
psrf <- list(theta=psrftheta, coef=psrfcoef, h0=bh, lambda1=psrfLam1, lambda2=psrfLam2, lambda3=psrfLam3)
}
}
## --- PSRF: univariate survival fits (single hazard) ---
if(class(x)[2] == "Surv")
{
beta.names <- c(x$chain1$covNames)
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=1)
dimnames(output) <- list(beta.names, c("beta"))
if(length(x$chain1$beta.p) != 0){
#beta
p = dim(x$chain1$beta.p)[2]
psrfBeta <- rep(NA, p)
for(j in 1:p){
beta <- x$chain1$beta[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
beta <- cbind(beta, x[[nam]]$beta[,j])
}
psrfBeta[j] <- calcPSR(beta)
}
for(i in 1:nP)
{
for(k in 1:p) if(x$chain1$covNames[k] == beta.names[i]) output[i,1] <- psrfBeta[k]
}
}
psrfcoef <- NULL
if(nP > 0)
{
psrfcoef <- output
}
if(class(x)[4] == "WB")
{
##
# alpha
alpha <- x$chain1$alpha.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
alpha <- cbind(alpha, x[[nam]]$alpha.p)
}
psrfAlpha <- calcPSR(alpha)
# kappa
kappa <- x$chain1$kappa.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
kappa <- cbind(kappa, x[[nam]]$kappa.p)
}
psrfKappa <- calcPSR(kappa)
bh <- matrix(c(psrfKappa, psrfAlpha), 2, 1, byrow = T)
dimnames(bh) <- list(c("kappa", "alpha"), c("h"))
psrf <- list(coef=psrfcoef, h0=bh)
}
if(class(x)[4] == "PEM")
{
##
ntime = length(x$chain1$time_lambda)
# lambda
psrfLam <- rep(NA, ntime)
for(j in 1:ntime){
namPara = paste("beta_", j, sep = "")
lambda <- x$chain1$lambda.fin[,j]
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
lambda <- cbind(lambda, x[[nam]]$lambda.fin[,j])
}
psrfLam[j] <- calcPSR(lambda)
}
# mu_lam
mu <- x$chain1$mu_lam.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
mu <- cbind(mu, x[[nam]]$mu_lam.p)
}
psrfMu <- calcPSR(mu)
# sigSq_lam
sig <- x$chain1$sigSq_lam.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
sig <- cbind(sig, x[[nam]]$sigSq_lam.p)
}
psrfSig <- calcPSR(sig)
# J
J <- x$chain1$K.p
for(i in 2:nChain){
nam <- paste("chain", i, sep = "")
J <- cbind(J, x[[nam]]$K.p)
}
psrfJ <- calcPSR(J)
bh <- matrix(c(psrfMu, psrfSig, psrfJ), 3, 1, byrow = T)
dimnames(bh) <- list(c("mu", "sigmaSq", "K"), c("h"))
psrf <- list(coef=psrfcoef, h0=bh, lambda=psrfLam)
}
}
}
# estimates
## Posterior summaries: draws are stacked row-wise across chains, then the
## median and the 2.5%/97.5% quantiles of each column are reported.
if(class(x)[2] == "ID")
{
## frailty variance theta
theta.p <- x$chain1$theta.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
theta.p <- rbind(theta.p, x[[nam]]$theta.p)
}
}
theta.pMed <- apply(theta.p, 2, median)
theta.pUb <- apply(theta.p, 2, quantile, prob = 0.975)
theta.pLb <- apply(theta.p, 2, quantile, prob = 0.025)
tbl_theta <- matrix(NA, 1, 3)
dimnames(tbl_theta) <- list("", c( "theta", "LL", "UL"))
tbl_theta[,1] <- theta.pMed
tbl_theta[,2] <- theta.pLb
tbl_theta[,3] <- theta.pUb
##
## regression coefficients, summarized on the hazard-ratio scale exp(beta);
## one row per covariate name, NA where a hazard does not use the covariate
beta.names <- unique(c(x$chain1$covNames1, x$chain1$covNames2, x$chain1$covNames3))
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=9)
dimnames(output) <- list(beta.names, c("exp(beta1)", "LL", "UL", "exp(beta2)", "LL", "UL", "exp(beta3)", "LL", "UL"))
if(length(x$chain1$beta1.p) != 0){
#beta1
p1 = dim(x$chain1$beta1.p)[2]
beta.p <- x$chain1$beta1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta1.p)
}
}
beta.pMed <- apply(exp(beta.p), 2, median)
beta.pSd <- apply(exp(beta.p), 2, sd)
beta.pUb <- apply(exp(beta.p), 2, quantile, prob = 0.975)
beta.pLb <- apply(exp(beta.p), 2, quantile, prob = 0.025)
tbl1 <- matrix(NA, p1, 3)
rownames(tbl1) <- x$chain1$covNames1
tbl1[,1] <- beta.pMed
tbl1[,2] <- beta.pLb
tbl1[,3] <- beta.pUb
for(i in 1:nP)
{
for(k in 1:p1) if(x$chain1$covNames1[k] == beta.names[i]) output[i,1:3] <- tbl1[k,]
}
}
if(length(x$chain1$beta2.p) != 0){
#beta2
p2 = dim(x$chain1$beta2.p)[2]
beta.p <- x$chain1$beta2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta2.p)
}
}
beta.pMed <- apply(exp(beta.p), 2, median)
beta.pSd <- apply(exp(beta.p), 2, sd)
beta.pUb <- apply(exp(beta.p), 2, quantile, prob = 0.975)
beta.pLb <- apply(exp(beta.p), 2, quantile, prob = 0.025)
tbl2 <- matrix(NA, p2, 3)
rownames(tbl2) <- x$chain1$covNames2
tbl2[,1] <- beta.pMed
tbl2[,2] <- beta.pLb
tbl2[,3] <- beta.pUb
for(i in 1:nP)
{
for(k in 1:p2) if(x$chain1$covNames2[k] == beta.names[i]) output[i,4:6] <- tbl2[k,]
}
}
if(length(x$chain1$beta3.p) != 0){
#beta3
p3 = dim(x$chain1$beta3.p)[2]
beta.p <- x$chain1$beta3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta3.p)
}
}
beta.pMed <- apply(exp(beta.p), 2, median)
beta.pSd <- apply(exp(beta.p), 2, sd)
beta.pUb <- apply(exp(beta.p), 2, quantile, prob = 0.975)
beta.pLb <- apply(exp(beta.p), 2, quantile, prob = 0.025)
tbl3 <- matrix(NA, p3, 3)
rownames(tbl3) <- x$chain1$covNames3
tbl3[,1] <- beta.pMed
tbl3[,2] <- beta.pLb
tbl3[,3] <- beta.pUb
for(i in 1:nP)
{
for(k in 1:p3) if(x$chain1$covNames3[k] == beta.names[i]) output[i,7:9] <- tbl3[k,]
}
}
output.coef <- NULL
if(nP > 0)
{
output.coef <- output
}
## Weibull baseline hazards, summarized on the log scale
if(class(x)[4] == "WB")
{
##
alpha.p <- x$chain1$alpha1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha.p <- rbind(alpha.p, x[[nam]]$alpha1.p)
}
}
alpha.pMed <- apply(log(alpha.p), 2, median)
alpha.pUb <- apply(log(alpha.p), 2, quantile, prob = 0.975)
alpha.pLb <- apply(log(alpha.p), 2, quantile, prob = 0.025)
tbl_a1 <- c(alpha.pMed,alpha.pLb, alpha.pUb)
##
alpha.p <- x$chain1$alpha2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha.p <- rbind(alpha.p, x[[nam]]$alpha2.p)
}
}
alpha.pMed <- apply(log(alpha.p), 2, median)
alpha.pUb <- apply(log(alpha.p), 2, quantile, prob = 0.975)
alpha.pLb <- apply(log(alpha.p), 2, quantile, prob = 0.025)
tbl_a2 <- c(alpha.pMed,alpha.pLb, alpha.pUb)
##
alpha.p <- x$chain1$alpha3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha.p <- rbind(alpha.p, x[[nam]]$alpha3.p)
}
}
alpha.pMed <- apply(log(alpha.p), 2, median)
alpha.pUb <- apply(log(alpha.p), 2, quantile, prob = 0.975)
alpha.pLb <- apply(log(alpha.p), 2, quantile, prob = 0.025)
tbl_a3 <- c(alpha.pMed,alpha.pLb, alpha.pUb)
##
kappa.p <- x$chain1$kappa1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
kappa.p <- rbind(kappa.p, x[[nam]]$kappa1.p)
}
}
kappa.pMed <- apply(log(kappa.p), 2, median)
kappa.pUb <- apply(log(kappa.p), 2, quantile, prob = 0.975)
kappa.pLb <- apply(log(kappa.p), 2, quantile, prob = 0.025)
tbl_k1 <- c(kappa.pMed, kappa.pLb, kappa.pUb)
##
kappa.p <- x$chain1$kappa2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
kappa.p <- rbind(kappa.p, x[[nam]]$kappa2.p)
}
}
kappa.pMed <- apply(log(kappa.p), 2, median)
kappa.pUb <- apply(log(kappa.p), 2, quantile, prob = 0.975)
kappa.pLb <- apply(log(kappa.p), 2, quantile, prob = 0.025)
tbl_k2 <- c(kappa.pMed, kappa.pLb, kappa.pUb)
##
kappa.p <- x$chain1$kappa3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
kappa.p <- rbind(kappa.p, x[[nam]]$kappa3.p)
}
}
kappa.pMed <- apply(log(kappa.p), 2, median)
kappa.pUb <- apply(log(kappa.p), 2, quantile, prob = 0.975)
kappa.pLb <- apply(log(kappa.p), 2, quantile, prob = 0.025)
tbl_k3 <- c(kappa.pMed, kappa.pLb, kappa.pUb)
## kappa summaries fill row 1 ("log-kappa"), alpha summaries row 2
## ("log-alpha"), consistent with the PSRF table above
bh <- matrix(c(tbl_k1, tbl_k2, tbl_k3, tbl_a1, tbl_a2, tbl_a3), 2, 9, byrow = T)
dimnames(bh) <- list(c("Weibull: log-kappa", "Weibull: log-alpha"), c("h1-PM", "LL", "UL", "h2-PM", "LL", "UL", "h3-PM", "LL", "UL"))
value <- list(classFit=class(x), psrf=psrf, theta=tbl_theta, coef=output.coef, h0=bh)
}
## piecewise-exponential baseline hazards
if(class(x)[4] == "PEM")
{
##
mu_lam.p <- x$chain1$mu_lam1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
mu_lam.p <- rbind(mu_lam.p, x[[nam]]$mu_lam1.p)
}
}
mu_lam.pMed <- apply(mu_lam.p, 2, median)
mu_lam.pUb <- apply(mu_lam.p, 2, quantile, prob = 0.975)
mu_lam.pLb <- apply(mu_lam.p, 2, quantile, prob = 0.025)
tbl_m1 <- c(mu_lam.pMed, mu_lam.pLb, mu_lam.pUb)
##
mu_lam.p <- x$chain1$mu_lam2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
mu_lam.p <- rbind(mu_lam.p, x[[nam]]$mu_lam2.p)
}
}
mu_lam.pMed <- apply(mu_lam.p, 2, median)
mu_lam.pUb <- apply(mu_lam.p, 2, quantile, prob = 0.975)
mu_lam.pLb <- apply(mu_lam.p, 2, quantile, prob = 0.025)
tbl_m2 <- c(mu_lam.pMed, mu_lam.pLb, mu_lam.pUb)
##
mu_lam.p <- x$chain1$mu_lam3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
mu_lam.p <- rbind(mu_lam.p, x[[nam]]$mu_lam3.p)
}
}
mu_lam.pMed <- apply(mu_lam.p, 2, median)
mu_lam.pUb <- apply(mu_lam.p, 2, quantile, prob = 0.975)
mu_lam.pLb <- apply(mu_lam.p, 2, quantile, prob = 0.025)
tbl_m3 <- c(mu_lam.pMed, mu_lam.pLb, mu_lam.pUb)
##
sigSq_lam.p <- x$chain1$sigSq_lam1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigSq_lam.p <- rbind(sigSq_lam.p, x[[nam]]$sigSq_lam1.p)
}
}
sigSq_lam.pMed <- apply(sigSq_lam.p, 2, median)
sigSq_lam.pUb <- apply(sigSq_lam.p, 2, quantile, prob = 0.975)
sigSq_lam.pLb <- apply(sigSq_lam.p, 2, quantile, prob = 0.025)
tbl_s1 <- c(sigSq_lam.pMed, sigSq_lam.pLb, sigSq_lam.pUb)
##
sigSq_lam.p <- x$chain1$sigSq_lam2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigSq_lam.p <- rbind(sigSq_lam.p, x[[nam]]$sigSq_lam2.p)
}
}
sigSq_lam.pMed <- apply(sigSq_lam.p, 2, median)
sigSq_lam.pUb <- apply(sigSq_lam.p, 2, quantile, prob = 0.975)
sigSq_lam.pLb <- apply(sigSq_lam.p, 2, quantile, prob = 0.025)
tbl_s2 <- c(sigSq_lam.pMed, sigSq_lam.pLb, sigSq_lam.pUb)
##
sigSq_lam.p <- x$chain1$sigSq_lam3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigSq_lam.p <- rbind(sigSq_lam.p, x[[nam]]$sigSq_lam3.p)
}
}
sigSq_lam.pMed <- apply(sigSq_lam.p, 2, median)
sigSq_lam.pUb <- apply(sigSq_lam.p, 2, quantile, prob = 0.975)
sigSq_lam.pLb <- apply(sigSq_lam.p, 2, quantile, prob = 0.025)
tbl_s3 <- c(sigSq_lam.pMed, sigSq_lam.pLb, sigSq_lam.pUb)
##
J.p <- x$chain1$K1.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
J.p <- rbind(J.p, x[[nam]]$K1.p)
}
}
J.pMed <- apply(J.p, 2, median)
J.pUb <- apply(J.p, 2, quantile, prob = 0.975)
J.pLb <- apply(J.p, 2, quantile, prob = 0.025)
tbl_j1 <- c(J.pMed, J.pLb, J.pUb)
##
J.p <- x$chain1$K2.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
J.p <- rbind(J.p, x[[nam]]$K2.p)
}
}
J.pMed <- apply(J.p, 2, median)
J.pUb <- apply(J.p, 2, quantile, prob = 0.975)
J.pLb <- apply(J.p, 2, quantile, prob = 0.025)
tbl_j2 <- c(J.pMed, J.pLb, J.pUb)
##
J.p <- x$chain1$K3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
J.p <- rbind(J.p, x[[nam]]$K3.p)
}
}
J.pMed <- apply(J.p, 2, median)
J.pUb <- apply(J.p, 2, quantile, prob = 0.975)
J.pLb <- apply(J.p, 2, quantile, prob = 0.025)
tbl_j3 <- c(J.pMed, J.pLb, J.pUb)
bh <- matrix(c(tbl_m1, tbl_m2, tbl_m3, tbl_s1, tbl_s2, tbl_s3, tbl_j1, tbl_j2, tbl_j3), 3, 9, byrow = T)
dimnames(bh) <- list(c("mu", "sigmaSq", "K"), c("h1-PM", "LL", "UL", "h2-PM", "LL", "UL", "h3-PM", "LL", "UL"))
##
## per-interval hazard step functions, one table per transition hazard
lambda.p <- x$chain1$lambda1.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda.p <- rbind(lambda.p, x[[nam]]$lambda1.fin)
}
}
lambda.pMed <- apply(lambda.p, 2, median)
lambda.pUb <- apply(lambda.p, 2, quantile, prob = 0.975)
lambda.pLb <- apply(lambda.p, 2, quantile, prob = 0.025)
lambda1 <- cbind(x$chain1$time_lambda1, lambda.pMed, lambda.pLb, lambda.pUb)
dimnames(lambda1) <- list(rep("", length(x$chain1$time_lambda1)), c("time", "lambda1-PM", "LL", "UL"))
##
lambda.p <- x$chain1$lambda2.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda.p <- rbind(lambda.p, x[[nam]]$lambda2.fin)
}
}
lambda.pMed <- apply(lambda.p, 2, median)
lambda.pUb <- apply(lambda.p, 2, quantile, prob = 0.975)
lambda.pLb <- apply(lambda.p, 2, quantile, prob = 0.025)
lambda2 <- cbind(x$chain1$time_lambda2, lambda.pMed, lambda.pLb, lambda.pUb)
dimnames(lambda2) <- list(rep("", length(x$chain1$time_lambda2)), c("time", "lambda2-PM", "LL", "UL"))
##
lambda.p <- x$chain1$lambda3.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda.p <- rbind(lambda.p, x[[nam]]$lambda3.fin)
}
}
lambda.pMed <- apply(lambda.p, 2, median)
lambda.pUb <- apply(lambda.p, 2, quantile, prob = 0.975)
lambda.pLb <- apply(lambda.p, 2, quantile, prob = 0.025)
lambda3 <- cbind(x$chain1$time_lambda3, lambda.pMed, lambda.pLb, lambda.pUb)
dimnames(lambda3) <- list(rep("", length(x$chain1$time_lambda3)), c("time", "lambda3-PM", "LL", "UL"))
value <- list(classFit=class(x), psrf=psrf, theta=tbl_theta, coef=output.coef, h0=bh, lambda1=lambda1, lambda2=lambda2, lambda3=lambda3)
}
## cluster-correlated fits: add random-effects variance summaries to the
## `value` list created in the WB/PEM branch above
if(class(x)[3] == "Cor")
{
if(class(x)[5] == "MVN")
{
nS <- dim(x$chain1$Sigma_V.p)[3]
Sigma <- array(NA, c(3,3, nS*nChain))
Sigma[,,1:nS] <- x$chain1$Sigma_V.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
Sigma[,,(nS*(i-1)+1):(nS*i)] <- x[[nam]]$Sigma_V.p
}
}
}
if(class(x)[5] == "DPM")
{
##
tau.p <- x$chain1$tau.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
tau.p <- rbind(tau.p, x[[nam]]$tau.p)
}
}
tau.pMed <- apply(tau.p, 2, median)
tau.pUb <- apply(tau.p, 2, quantile, prob = 0.975)
tau.pLb <- apply(tau.p, 2, quantile, prob = 0.025)
tbl_tau <- matrix(NA, 1, 3)
dimnames(tbl_tau) <- list("", c( "tau", "LL", "UL"))
tbl_tau[,1] <- tau.pMed
tbl_tau[,2] <- tau.pLb
tbl_tau[,3] <- tau.pUb
## NOTE(review): nS is read from Sigma.p here while the MVN branch uses
## Sigma_V.p -- confirm the chain object carries both components
nS <- dim(x$chain1$Sigma.p)[3]
Sigma <- array(NA, c(3,3, nS*nChain))
Sigma[,,1:nS] <- calVar_DPM_MVN(x$chain1)
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
Sigma[,,(nS*(i-1)+1):(nS*i)] <- calVar_DPM_MVN(x[[nam]])
}
}
value$tau <- tbl_tau
}
Sigma.Med <- apply(Sigma, c(1,2), median)
Sigma.Sd <- apply(Sigma, c(1,2), sd)
Sigma.Ub <- apply(Sigma, c(1,2), quantile, prob = 0.975)
Sigma.Lb <- apply(Sigma, c(1,2), quantile, prob = 0.025)
dimnames(Sigma.Med) <- list(c("", "", ""), c("Sigma_V-PM", "", ""))
dimnames(Sigma.Sd) <- list(c("", "", ""), c("Sigma_V-SD", "", ""))
dimnames(Sigma.Lb) <- list(c("", "", ""), c("Sigma_V-LL", "", ""))
dimnames(Sigma.Ub) <- list(c("", "", ""), c("Sigma_V-UL", "", ""))
value$Sigma.PM <- Sigma.Med
value$Sigma.SD <- Sigma.Sd
value$Sigma.UL <- Sigma.Ub
value$Sigma.LL <- Sigma.Lb
}
}
## --- estimates: univariate survival fits ---
if(class(x)[2] == "Surv")
{
##
beta.names <- c(x$chain1$covNames)
nP <- length(beta.names)
output <- matrix(NA, nrow=nP, ncol=3)
dimnames(output) <- list(beta.names, c("exp(beta)", "LL", "UL"))
if(length(x$chain1$beta.p) != 0){
#beta
p = dim(x$chain1$beta.p)[2]
beta.p <- x$chain1$beta.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
beta.p <- rbind(beta.p, x[[nam]]$beta.p)
}
}
beta.pMed <- apply(exp(beta.p), 2, median)
beta.pSd <- apply(exp(beta.p), 2, sd)
beta.pUb <- apply(exp(beta.p), 2, quantile, prob = 0.975)
beta.pLb <- apply(exp(beta.p), 2, quantile, prob = 0.025)
tbl <- matrix(NA, p, 3)
rownames(tbl) <- x$chain1$covNames
tbl[,1] <- beta.pMed
tbl[,2] <- beta.pLb
tbl[,3] <- beta.pUb
for(i in 1:nP)
{
for(k in 1:p) if(x$chain1$covNames[k] == beta.names[i]) output[i,1:3] <- tbl[k,]
}
}
output.coef <- NULL
if(nP > 0)
{
output.coef <- output
}
if(class(x)[4] == "WB")
{
##
alpha.p <- x$chain1$alpha.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha.p <- rbind(alpha.p, x[[nam]]$alpha.p)
}
}
alpha.pMed <- apply(log(alpha.p), 2, median)
alpha.pUb <- apply(log(alpha.p), 2, quantile, prob = 0.975)
alpha.pLb <- apply(log(alpha.p), 2, quantile, prob = 0.025)
tbl_a <- c(alpha.pMed,alpha.pLb, alpha.pUb)
##
kappa.p <- x$chain1$kappa.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
kappa.p <- rbind(kappa.p, x[[nam]]$kappa.p)
}
}
kappa.pMed <- apply(log(kappa.p), 2, median)
kappa.pUb <- apply(log(kappa.p), 2, quantile, prob = 0.975)
kappa.pLb <- apply(log(kappa.p), 2, quantile, prob = 0.025)
tbl_k <- c(kappa.pMed, kappa.pLb, kappa.pUb)
## kappa row first to match the row labels (see FIX note in the header)
bh <- matrix(c(tbl_k, tbl_a), 2, 3, byrow = T)
dimnames(bh) <- list(c("Weibull: log-kappa", "Weibull: log-alpha"), c("h-PM", "LL", "UL"))
value <- list(coef=output.coef, h0=bh, psrf=psrf, classFit=class(x))
}
if(class(x)[4] == "PEM")
{
##
mu_lam.p <- x$chain1$mu_lam.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
mu_lam.p <- rbind(mu_lam.p, x[[nam]]$mu_lam.p)
}
}
mu_lam.pMed <- apply(mu_lam.p, 2, median)
mu_lam.pUb <- apply(mu_lam.p, 2, quantile, prob = 0.975)
mu_lam.pLb <- apply(mu_lam.p, 2, quantile, prob = 0.025)
tbl_m <- c(mu_lam.pMed, mu_lam.pLb, mu_lam.pUb)
##
sigSq_lam.p <- x$chain1$sigSq_lam.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigSq_lam.p <- rbind(sigSq_lam.p, x[[nam]]$sigSq_lam.p)
}
}
sigSq_lam.pMed <- apply(sigSq_lam.p, 2, median)
sigSq_lam.pUb <- apply(sigSq_lam.p, 2, quantile, prob = 0.975)
sigSq_lam.pLb <- apply(sigSq_lam.p, 2, quantile, prob = 0.025)
tbl_s <- c(sigSq_lam.pMed, sigSq_lam.pLb, sigSq_lam.pUb)
##
J.p <- x$chain1$K.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
J.p <- rbind(J.p, x[[nam]]$K.p)
}
}
J.pMed <- apply(J.p, 2, median)
J.pUb <- apply(J.p, 2, quantile, prob = 0.975)
J.pLb <- apply(J.p, 2, quantile, prob = 0.025)
tbl_j <- c(J.pMed, J.pLb, J.pUb)
bh <- matrix(c(tbl_m, tbl_s, tbl_j), 3, 3, byrow = T)
dimnames(bh) <- list(c("mu", "sigmaSq", "K"), c("h-PM", "LL", "UL"))
##
lambda.p <- x$chain1$lambda.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda.p <- rbind(lambda.p, x[[nam]]$lambda.fin)
}
}
lambda.pMed <- apply(lambda.p, 2, median)
lambda.pUb <- apply(lambda.p, 2, quantile, prob = 0.975)
lambda.pLb <- apply(lambda.p, 2, quantile, prob = 0.025)
lambda <- cbind(x$chain1$time_lambda, lambda.pMed, lambda.pLb, lambda.pUb)
dimnames(lambda) <- list(rep("", length(x$chain1$time_lambda)), c("time", "lambda-PM", "LL", "UL"))
value <- list(coef=output.coef, h0=bh, psrf=psrf, lambda=lambda, classFit=class(x))
}
## cluster-correlated univariate fits: scalar random-effects variance
if(class(x)[3] == "Cor")
{
if(class(x)[5] == "Normal")
{
#sigmaV
## zeta is a precision; invert to the variance scale
sigV <- 1/x$chain1$zeta.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigV <- rbind(sigV, 1/x[[nam]]$zeta.p)
}
}
}
if(class(x)[5] == "DPM")
{
##
tau.p <- x$chain1$tau.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
tau.p <- rbind(tau.p, x[[nam]]$tau.p)
}
}
tau.pMed <- apply(tau.p, 2, median)
tau.pUb <- apply(tau.p, 2, quantile, prob = 0.975)
tau.pLb <- apply(tau.p, 2, quantile, prob = 0.025)
tbl_tau <- matrix(NA, 1, 3)
dimnames(tbl_tau) <- list("", c( "tau", "LL", "UL"))
tbl_tau[,1] <- tau.pMed
tbl_tau[,2] <- tau.pLb
tbl_tau[,3] <- tau.pUb
nS <- dim(x$chain1$zeta.p)[1]
sigV <- rep(NA, nS*nChain)
sigV[1:nS] <- calVar_DPM_Normal(x$chain1)
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
sigV[(nS*(i-1)+1):(nS*i)] <- calVar_DPM_Normal(x[[nam]])
}
}
value$tau <- tbl_tau
}
sigVMed <- median(sigV)
sigVSd <- sd(sigV)
sigVUb <- quantile(sigV, prob = 0.975)
sigVLb <- quantile(sigV, prob = 0.025)
tbl_sigV <- matrix(NA, nrow=1, ncol=3)
tbl_sigV[,1] <- sigVMed
tbl_sigV[,2] <- sigVLb
tbl_sigV[,3] <- sigVUb
dimnames(tbl_sigV) <- list("", c("sigma_V-PM", "LL", "UL"))
value$sigma_V <- tbl_sigV
}
}
value$setup <- x$setup
# if(class(x)[3] == "Cor")
# {
# class(value) <- c("summ.Bayes", as.vector(class(x)[2]), "Cor", as.vector(class(x)[4]), as.vector(class(x)[5]))
# }
# if(class(x)[3] == "Ind")
# {
# class(value) <- c("summ.Bayes", as.vector(class(x)[2]), "Ind", as.vector(class(x)[4]))
# }
class(value) <- "summ.Bayes"
return(value)
}
####
## PRINT.SUMMARY METHOD
####
##
## Print method for "summ.Freq" objects: writes a headline for the fitted
## model type, the hazard ratios exp(beta), the frailty variance (illness-
## death fits only), and the baseline hazard components to the console.
## All tables are rounded to `digits` decimal places; returns invisibly.
print.summ.Freq <- function(x, digits=3, ...)
{
  ## the fitted-model type sits in position 2 of the class vector
  fit_type <- class(x)[2]
  if (fit_type == "Surv") {
    cat("\nAnalysis of independent univariate time-to-event data \n")
  }
  if (fit_type == "ID") {
    cat("\nAnalysis of independent semi-competing risks data \n")
    ## position 3 carries the Markov / semi-Markov label for h3
    cat(class(x)[3], "assumption for h3\n")
  }
  ## coefficients are stored on the log scale; report hazard ratios
  cat("\nHazard ratios:\n")
  print(round(exp(x$coef), digits=digits))
  if (fit_type == "ID") {
    cat("\nVariance of frailties:\n")
    print(round(x$theta, digits=digits))
  }
  cat("\nBaseline hazard function components:\n")
  print(round(x$h0, digits=digits))
  invisible()
}
## Print method for "summ.Bayes" objects: writes a headline for the fitted
## model type, the hazard-ratio table, the frailty variance (illness-death
## fits), the baseline hazard components, and -- for cluster-correlated
## fits -- the random-effects variance summaries. The class vector of the
## original fit is stored in x$classFit and drives the dispatch. Returns
## invisibly.
print.summ.Bayes <- function(x, digits=3, ...)
{
  nChain <- x$setup$nChain
  fit <- x$classFit
  if (fit[2] == "ID") {
    if (fit[3] == "Cor") {
      cat("\nAnalysis of cluster-correlated semi-competing risks data \n")
    }
    if (fit[3] == "Ind") {
      cat("\nAnalysis of independent semi-competing risks data \n")
    }
    ## Markov / semi-Markov label for the h3 transition
    cat(x$setup$model, "assumption for h3\n")
  }
  if (fit[2] == "Surv") {
    if (fit[3] == "Cor") {
      cat("\nAnalysis of cluster-correlated univariate time-to-event data \n")
    }
    if (fit[3] == "Ind") {
      cat("\nAnalysis of independent univariate time-to-event data \n")
    }
  }
  cat("\n#####\n")
  cat("\nHazard ratios:\n")
  print(round(x$coef, digits=digits))
  if (fit[2] == "ID") {
    cat("\nVariance of frailties:\n")
    print(round(x$theta, digits=digits))
  }
  cat("\nBaseline hazard function components:\n")
  print(round(x$h0, digits=digits))
  ## correlated fits additionally report the random-effects hyperparameters
  if (fit[3] == "Cor") {
    if (fit[5] == "DPM") {
      cat("\nPrecision parameter of DPM prior:\n")
      print(round(x$tau, digits=digits))
    }
    if (fit[2] == "ID") {
      cat("\nVariance-covariance matrix of cluster-specific random effects:\n")
      print(round(x$Sigma.PM, digits=digits))
    }
    if (fit[2] == "Surv") {
      cat("\nVariance of cluster-specific random effects:\n")
      print(round(x$sigma_V, digits=digits))
    }
  }
  invisible()
}
####
## PLOT METHOD
####
##
## Plot method for "Freq" (Weibull MLE) fits: draws -- or, when plot=FALSE,
## returns as tables -- the estimated baseline survival ("BS") or baseline
## hazard ("BH") function(s) with pointwise 95% confidence bands.
## - x        : fitted "Freq" object; x$estimate holds (log-kappa, log-alpha)
##              pairs per hazard and x$Finv the inverse information matrix.
## - tseq     : axis tick positions; curves are evaluated on a 100-point
##              grid spanning range(tseq).
## - plot     : if FALSE, a list of time/estimate/LL/UL matrices is returned.
## - plot.est : "BS" (baseline survival) or "BH" (baseline hazard).
## Survival CIs use the delta method applied to log(-log S0); hazard CIs are
## symmetric on the hazard scale and truncated below at 0.
## FIX(review): (1) the hazard-scale variances for transitions 2 and 3 of the
## illness-death model previously reused obj$Finv[1:2,1:2]; they now use the
## matching Finv blocks ([3:4,3:4] and [5:6,5:6]), consistent with the
## survival-scale computations. (2) The hazard bounds were sign-swapped
## (qnorm(0.025) is negative, so "LL" was the upper band and the >=0 clamp
## hit the wrong curve); they are now labelled and clamped correctly.
## (3) The NA guard on the survival-scale SE (needed at t = 0, where
## log(t) = -Inf) is now applied in the illness-death branch as well.
plot.Freq <- function(x, tseq=c(0, 5, 10), plot=TRUE, plot.est="BS", xlab=NULL, ylab=NULL, ...)
{
  obj <- x
  T2seq <- tseq
  yLim <- NULL
  ## evaluation grid over the requested time range
  T2 <- seq(from=min(T2seq), to=max(T2seq), length=100)
  z <- qnorm(0.975)   # two-sided 95% normal quantile (~1.96)

  ## Baseline survival S0(t) = exp(-kappa t^alpha) with a 95% CI from the
  ## delta method on log(-log S0). `idx` selects the (log-kappa, log-alpha)
  ## entries of obj$estimate and the matching 2x2 block of obj$Finv.
  wb.surv <- function(idx)
  {
    kappa <- exp(obj$estimate[idx[1]])
    alpha <- exp(obj$estimate[idx[2]])
    S0 <- exp(-kappa*(T2)^alpha)
    J <- cbind(1, alpha*log(T2))
    se <- sqrt(diag(J %*% obj$Finv[idx, idx] %*% t(J)))
    se[is.na(se)] <- 0  # t = 0 gives log(0) = -Inf -> NaN variance
    list(S0=S0, LL=S0^exp(z*se), UL=S0^exp(-z*se))
  }

  ## Baseline hazard h0(t) = alpha kappa t^(alpha-1) with a symmetric 95% CI
  ## on the hazard scale, lower band truncated at zero.
  wb.haz <- function(idx)
  {
    kappa <- exp(obj$estimate[idx[1]])
    alpha <- exp(obj$estimate[idx[2]])
    h0 <- alpha*kappa*(T2)^(alpha-1)
    J <- cbind(h0, h0*(1+alpha*log(T2)))
    se <- sqrt(diag(J %*% obj$Finv[idx, idx] %*% t(J)))
    se[is.nan(se)] <- 0
    LL <- h0 - z*se
    UL <- h0 + z*se
    LL[LL < 0] <- 0
    list(h0=h0, LL=LL, UL=UL)
  }

  if(class(obj)[2] == "Surv")
  {
    ## single hazard: parameters occupy estimate[1:2] / Finv[1:2, 1:2]
    s <- wb.surv(1:2)
    BS_tbl <- cbind(T2, s$S0, s$LL, s$UL)
    dimnames(BS_tbl) <- list(rep("", length(T2)), c("time", "S0", "LL", "UL"))
    hz <- wb.haz(1:2)
    T2h <- T2
    h0 <- hz$h0
    LLh0 <- hz$LL
    ULh0 <- hz$UL
    ## drop the t = 0 point from the hazard curve (h0 can be 0 or Inf there)
    if(T2[1] == 0)
    {
      T2h <- T2h[-1]
      h0 <- h0[-1]
      LLh0 <- LLh0[-1]
      ULh0 <- ULh0[-1]
    }
    BH_tbl <- cbind(T2h, h0, LLh0, ULh0)
    dimnames(BH_tbl) <- list(rep("", length(T2h)), c("time", "h0", "LL", "UL"))
    value <- list(h0=BH_tbl, S0=BS_tbl)
    ## default y-axis ticks and labels per plotted quantity
    if(is.null(yLim))
    {
      if(plot.est=="BS")
      {
        yLim <- seq(from=0, to=1, by=0.2)
      }
      if(plot.est=="BH")
      {
        grid <- (max(ULh0) - min(LLh0))/5
        yLim <- seq(from=min(LLh0), to=max(ULh0), by=grid)
      }
    }
    if(is.null(ylab))
    {
      if(plot.est=="BS") ylab <- "Baseline survival"
      if(plot.est=="BH") ylab <- "Baseline hazard"
    }
    if(is.null(xlab)) xlab <- "Time"
    if(plot == TRUE){
      if(plot.est == "BS")
      {
        plot(range(T2seq), range(yLim), xlab=xlab, ylab=ylab, type="n", main = expression(paste("Estimated ", S[0](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=yLim)
        lines(T2, s$S0, col="red", lwd=3)
        lines(T2, s$LL, col="red", lwd=3, lty=3)
        lines(T2, s$UL, col="red", lwd=3, lty=3)
      }
      if(plot.est == "BH")
      {
        plot(range(T2seq), range(yLim), xlab=xlab, ylab=ylab, type="n", main = expression(paste("Estimated ", h[0](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=round(yLim, 4))
        lines(T2h, h0, col="red", lwd=3)
        lines(T2h, LLh0, col="red", lwd=3, lty=3)
        lines(T2h, ULh0, col="red", lwd=3, lty=3)
      }
    }
    if(plot == FALSE) return(value)
  }
  ##
  if(class(obj)[2] == "ID")
  {
    ## illness-death model: three transition hazards, with (log-kappa,
    ## log-alpha) pairs in estimate[1:2], [3:4], [5:6] and matching Finv
    ## blocks used for BOTH the survival- and hazard-scale variances
    s1 <- wb.surv(1:2)
    s2 <- wb.surv(3:4)
    s3 <- wb.surv(5:6)
    hz1 <- wb.haz(1:2)
    hz2 <- wb.haz(3:4)
    hz3 <- wb.haz(5:6)
    S0.1 <- s1$S0; LL.1 <- s1$LL; UL.1 <- s1$UL
    S0.2 <- s2$S0; LL.2 <- s2$LL; UL.2 <- s2$UL
    S0.3 <- s3$S0; LL.3 <- s3$LL; UL.3 <- s3$UL
    h0.1 <- hz1$h0; LLh0.1 <- hz1$LL; ULh0.1 <- hz1$UL
    h0.2 <- hz2$h0; LLh0.2 <- hz2$LL; ULh0.2 <- hz2$UL
    h0.3 <- hz3$h0; LLh0.3 <- hz3$LL; ULh0.3 <- hz3$UL
    T2h <- T2
    ## drop the t = 0 point from all three hazard curves
    if(T2[1] == 0)
    {
      T2h <- T2h[-1]
      h0.1 <- h0.1[-1]; LLh0.1 <- LLh0.1[-1]; ULh0.1 <- ULh0.1[-1]
      h0.2 <- h0.2[-1]; LLh0.2 <- LLh0.2[-1]; ULh0.2 <- ULh0.2[-1]
      h0.3 <- h0.3[-1]; LLh0.3 <- LLh0.3[-1]; ULh0.3 <- ULh0.3[-1]
    }
    BH1_tbl <- cbind(T2h, h0.1, LLh0.1, ULh0.1)
    dimnames(BH1_tbl) <- list(rep("", length(T2h)), c("time", "h0.1", "LL.1", "UL.1"))
    BH2_tbl <- cbind(T2h, h0.2, LLh0.2, ULh0.2)
    dimnames(BH2_tbl) <- list(rep("", length(T2h)), c("time", "h0.2", "LL.2", "UL.2"))
    BH3_tbl <- cbind(T2h, h0.3, LLh0.3, ULh0.3)
    dimnames(BH3_tbl) <- list(rep("", length(T2h)), c("time", "h0.3", "LL.3", "UL.3"))
    BS1_tbl <- cbind(T2, S0.1, LL.1, UL.1)
    dimnames(BS1_tbl) <- list(rep("", length(T2)), c("time", "S0.1", "LL.1", "UL.1"))
    BS2_tbl <- cbind(T2, S0.2, LL.2, UL.2)
    dimnames(BS2_tbl) <- list(rep("", length(T2)), c("time", "S0.2", "LL.2", "UL.2"))
    BS3_tbl <- cbind(T2, S0.3, LL.3, UL.3)
    dimnames(BS3_tbl) <- list(rep("", length(T2)), c("time", "S0.3", "LL.3", "UL.3"))
    value <- list(h0.1=BH1_tbl, h0.2=BH2_tbl, h0.3=BH3_tbl, S0.1=BS1_tbl, S0.2=BS2_tbl, S0.3=BS3_tbl)
    ## shared y-axis ticks across the three panels
    if(is.null(yLim))
    {
      if(plot.est=="BS")
      {
        yLim <- seq(from=0, to=1, by=0.2)
      }
      if(plot.est=="BH")
      {
        grid <- (max(ULh0.1, ULh0.2, ULh0.3) - min(LLh0.1, LLh0.2, LLh0.3))/5
        yLim <- seq(from=min(LLh0.1, LLh0.2, LLh0.3), to=max(ULh0.1, ULh0.2, ULh0.3), by=grid)
      }
    }
    if(is.null(ylab))
    {
      if(plot.est=="BS") ylab <- "Baseline survival"
      if(plot.est=="BH") ylab <- "Baseline hazard"
    }
    if(is.null(xlab))
    {
      xlab <- c("Time", "Time", "Time")
      ## under the semi-Markov model h3 runs on the sojourn-time clock
      if(class(obj)[5] == "semi-Markov")
      {
        xlab[3] <- "Time since non-terminal event"
      }
    }
    if(plot == TRUE){
      if(plot.est == "BS")
      {
        par(mfrow=c(1,3))
        plot(range(T2seq), range(yLim), xlab=xlab[1], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][1](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=yLim)
        lines(T2, S0.1, col="blue", lwd=3)
        lines(T2, LL.1, col="blue", lwd=3, lty=3)
        lines(T2, UL.1, col="blue", lwd=3, lty=3)
        plot(range(T2seq), range(yLim), xlab=xlab[2], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][2](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=yLim)
        lines(T2, S0.2, col="red", lwd=3)
        lines(T2, LL.2, col="red", lwd=3, lty=3)
        lines(T2, UL.2, col="red", lwd=3, lty=3)
        plot(range(T2seq), range(yLim), xlab=xlab[3], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][3](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=yLim)
        lines(T2, S0.3, col="red", lwd=3)
        lines(T2, LL.3, col="red", lwd=3, lty=3)
        lines(T2, UL.3, col="red", lwd=3, lty=3)
      }
      if(plot.est == "BH")
      {
        par(mfrow=c(1,3))
        plot(range(T2seq), range(yLim), xlab=xlab[1], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][1](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=round(yLim, 4))
        lines(T2h, h0.1, col="blue", lwd=3)
        lines(T2h, LLh0.1, col="blue", lwd=3, lty=3)
        lines(T2h, ULh0.1, col="blue", lwd=3, lty=3)
        plot(range(T2seq), range(yLim), xlab=xlab[2], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][2](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=round(yLim, 4))
        lines(T2h, h0.2, col="red", lwd=3)
        lines(T2h, LLh0.2, col="red", lwd=3, lty=3)
        lines(T2h, ULh0.2, col="red", lwd=3, lty=3)
        plot(range(T2seq), range(yLim), xlab=xlab[3], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][3](t), "")), axes=FALSE)
        axis(1, at=T2seq)
        axis(2, at=round(yLim, 4))
        lines(T2h, h0.3, col="red", lwd=3)
        lines(T2h, LLh0.3, col="red", lwd=3, lty=3)
        lines(T2h, ULh0.3, col="red", lwd=3, lty=3)
      }
    }
    if(plot == FALSE) return(value)
  }
  ##
  invisible()
}
## S3 plot method for Bayesian survival model fits.
##
## The model type is read off the class vector of `x`:
##   class(x)[2] -- "ID" (three-transition, presumably illness-death) vs.
##                  "Surv" (single univariate survival model)
##   class(x)[4] -- "PEM" (piecewise exponential baseline) vs. "WB" (Weibull)
##
## Posterior draws are stored per MCMC chain in x$chain1, x$chain2, ... and
## the number of chains in x$setup$nChain; draws are pooled across chains
## before summarising.
##
## Arguments:
##   x        - fitted Bayes object (structure described above)
##   tseq     - time points; for "WB" the baselines are evaluated on a
##              100-point grid over range(tseq), and tseq supplies x-axis ticks
##   plot     - if TRUE draw the estimates; if FALSE return the tables
##   plot.est - "BS" for baseline survival, "BH" for baseline hazard
##   xlab, ylab - optional axis labels (defaults filled in below)
##   ...      - unused (kept for S3 plot-generic compatibility)
##
## Returns (only when plot == FALSE): a list of matrices -- h0.1/h0.2/h0.3 and
## S0.1/S0.2/S0.3 for "ID", or h0 and S0 for "Surv" -- each with columns
## time, posterior median, and 2.5%/97.5% credible limits.
plot.Bayes <- function(x, tseq=c(0, 5, 10), plot=TRUE, plot.est="BS", xlab=NULL, ylab=NULL, ...)
{
nChain = x$setup$nChain
## ---- Three-transition ("ID") model -----------------------------------
if(class(x)[2] == "ID")
{
## Piecewise exponential baseline: log-hazards lambda*.fin are draws on the
## stored time grids time_lambda*; pool the draws from all chains row-wise.
if(class(x)[4] == "PEM")
{
time1 <- x$chain1$time_lambda1
time2 <- x$chain1$time_lambda2
time3 <- x$chain1$time_lambda3
time1hz <- time1
time2hz <- time2
time3hz <- time3
lambda1.fin <- x$chain1$lambda1.fin
lambda2.fin <- x$chain1$lambda2.fin
lambda3.fin <- x$chain1$lambda3.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda1.fin <- rbind(lambda1.fin, x[[nam]]$lambda1.fin)
lambda2.fin <- rbind(lambda2.fin, x[[nam]]$lambda2.fin)
lambda3.fin <- rbind(lambda3.fin, x[[nam]]$lambda3.fin)
}
}
## Pointwise posterior median and 95% credible band of each baseline hazard
## (hazard = exp(log-hazard draw)).
BH1Med <- apply(exp(lambda1.fin), 2, median)
BH1Ub <- apply(exp(lambda1.fin), 2, quantile, prob = 0.975)
BH1Lb <- apply(exp(lambda1.fin), 2, quantile, prob = 0.025)
BH2Med <- apply(exp(lambda2.fin), 2, median)
BH2Ub <- apply(exp(lambda2.fin), 2, quantile, prob = 0.975)
BH2Lb <- apply(exp(lambda2.fin), 2, quantile, prob = 0.025)
BH3Med <- apply(exp(lambda3.fin), 2, median)
BH3Ub <- apply(exp(lambda3.fin), 2, quantile, prob = 0.975)
BH3Lb <- apply(exp(lambda3.fin), 2, quantile, prob = 0.025)
## Interval widths of the piecewise grid (first interval starts at 0); the
## baseline survival per draw is S(t) = exp(-cumsum(hazard * width)).
dif1 <- diff(c(0, time1hz))
dif2 <- diff(c(0, time2hz))
dif3 <- diff(c(0, time3hz))
BS1 <- matrix(NA, dim(lambda1.fin)[1], dim(lambda1.fin)[2])
for(i in 1:dim(lambda1.fin)[1])
{
BS1[i,] <- exp(-cumsum(exp(lambda1.fin[i,])* dif1) )
}
BS2 <- matrix(NA, dim(lambda2.fin)[1], dim(lambda2.fin)[2])
for(i in 1:dim(lambda2.fin)[1])
{
BS2[i,] <- exp(-cumsum(exp(lambda2.fin[i,])* dif2) )
}
BS3 <- matrix(NA, dim(lambda3.fin)[1], dim(lambda3.fin)[2])
for(i in 1:dim(lambda3.fin)[1])
{
BS3[i,] <- exp(-cumsum(exp(lambda3.fin[i,])* dif3) )
}
## Pointwise posterior summaries of the baseline survival curves.
BS1Med <- apply(BS1, 2, median)
BS1Ub <- apply(BS1, 2, quantile, prob = 0.975)
BS1Lb <- apply(BS1, 2, quantile, prob = 0.025)
BS2Med <- apply(BS2, 2, median)
BS2Ub <- apply(BS2, 2, quantile, prob = 0.975)
BS2Lb <- apply(BS2, 2, quantile, prob = 0.025)
BS3Med <- apply(BS3, 2, median)
BS3Ub <- apply(BS3, 2, quantile, prob = 0.975)
BS3Lb <- apply(BS3, 2, quantile, prob = 0.025)
}
## Weibull baseline: closed forms h(t) = alpha*kappa*t^(alpha-1) and
## S(t) = exp(-kappa*t^alpha) evaluated per pooled (alpha, kappa) draw on a
## 100-point grid over range(tseq).
if(class(x)[4] == "WB")
{
time1 <- time2 <- time3 <- seq(from=min(tseq), to=max(tseq), length=100)
nStore <- length(x$chain1$alpha1.p)
numSpl <- nStore * nChain
basehaz1 <- matrix(NA, numSpl, length(time1))
basehaz2 <- matrix(NA, numSpl, length(time2))
basehaz3 <- matrix(NA, numSpl, length(time3))
basesurv1 <- matrix(NA, numSpl, length(time1))
basesurv2 <- matrix(NA, numSpl, length(time2))
basesurv3 <- matrix(NA, numSpl, length(time3))
alpha1.p <- x$chain1$alpha1.p
alpha2.p <- x$chain1$alpha2.p
alpha3.p <- x$chain1$alpha3.p
kappa1.p <- x$chain1$kappa1.p
kappa2.p <- x$chain1$kappa2.p
kappa3.p <- x$chain1$kappa3.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha1.p <- c(alpha1.p, x[[nam]]$alpha1.p)
alpha2.p <- c(alpha2.p, x[[nam]]$alpha2.p)
alpha3.p <- c(alpha3.p, x[[nam]]$alpha3.p)
kappa1.p <- c(kappa1.p, x[[nam]]$kappa1.p)
kappa2.p <- c(kappa2.p, x[[nam]]$kappa2.p)
kappa3.p <- c(kappa3.p, x[[nam]]$kappa3.p)
}
}
for(i in 1:numSpl){
basehaz1[i, ] <- alpha1.p[i] * kappa1.p[i] * time1^(alpha1.p[i] - 1)
basehaz2[i, ] <- alpha2.p[i] * kappa2.p[i] * time2^(alpha2.p[i] - 1)
basehaz3[i, ] <- alpha3.p[i] * kappa3.p[i] * time3^(alpha3.p[i] - 1)
basesurv1[i, ] <- exp(-kappa1.p[i] * time1^(alpha1.p[i]))
basesurv2[i, ] <- exp(-kappa2.p[i] * time2^(alpha2.p[i]))
basesurv3[i, ] <- exp(-kappa3.p[i] * time3^(alpha3.p[i]))
}
## Drop t = 0 from the hazard grid when present (the Weibull hazard is
## 0 or infinite at t = 0 unless alpha == 1); survival keeps the full grid.
time1hz <- time1
time2hz <- time2
time3hz <- time3
if(tseq[1] == 0){
time1hz <- time1[-1]
time2hz <- time2[-1]
time3hz <- time3[-1]
basehaz1 <- basehaz1[,-1]
basehaz2 <- basehaz2[,-1]
basehaz3 <- basehaz3[,-1]
}
BH1Med <- apply(basehaz1, 2, median)
BH1Ub <- apply(basehaz1, 2, quantile, prob = 0.975)
BH1Lb <- apply(basehaz1, 2, quantile, prob = 0.025)
BH2Med <- apply(basehaz2, 2, median)
BH2Ub <- apply(basehaz2, 2, quantile, prob = 0.975)
BH2Lb <- apply(basehaz2, 2, quantile, prob = 0.025)
BH3Med <- apply(basehaz3, 2, median)
BH3Ub <- apply(basehaz3, 2, quantile, prob = 0.975)
BH3Lb <- apply(basehaz3, 2, quantile, prob = 0.025)
BS1Med <- apply(basesurv1, 2, median)
BS1Ub <- apply(basesurv1, 2, quantile, prob = 0.975)
BS1Lb <- apply(basesurv1, 2, quantile, prob = 0.025)
BS2Med <- apply(basesurv2, 2, median)
BS2Ub <- apply(basesurv2, 2, quantile, prob = 0.975)
BS2Lb <- apply(basesurv2, 2, quantile, prob = 0.025)
BS3Med <- apply(basesurv3, 2, median)
BS3Ub <- apply(basesurv3, 2, quantile, prob = 0.975)
BS3Lb <- apply(basesurv3, 2, quantile, prob = 0.025)
}
## Assemble one summary table per transition/quantity (rows unnamed).
BH1_tbl <- cbind(time1hz, BH1Med, BH1Lb, BH1Ub)
dimnames(BH1_tbl) <- list(rep("", length(time1hz)), c("time", "h0.1", "LL.1", "UL.1"))
BH2_tbl <- cbind(time2hz, BH2Med, BH2Lb, BH2Ub)
dimnames(BH2_tbl) <- list(rep("", length(time2hz)), c("time", "h0.2", "LL.2", "UL.2"))
BH3_tbl <- cbind(time3hz, BH3Med, BH3Lb, BH3Ub)
dimnames(BH3_tbl) <- list(rep("", length(time3hz)), c("time", "h0.3", "LL.3", "UL.3"))
BS1_tbl <- cbind(time1, BS1Med, BS1Lb, BS1Ub)
dimnames(BS1_tbl) <- list(rep("", length(time1)), c("time", "S0.1", "LL.1", "UL.1"))
BS2_tbl <- cbind(time2, BS2Med, BS2Lb, BS2Ub)
dimnames(BS2_tbl) <- list(rep("", length(time2)), c("time", "S0.2", "LL.2", "UL.2"))
BS3_tbl <- cbind(time3, BS3Med, BS3Lb, BS3Ub)
dimnames(BS3_tbl) <- list(rep("", length(time3)), c("time", "S0.3", "LL.3", "UL.3"))
value <- list(h0.1=BH1_tbl, h0.2=BH2_tbl, h0.3=BH3_tbl, S0.1=BS1_tbl, S0.2=BS2_tbl, S0.3=BS3_tbl)
## ---- Plotting (ID): one 1x3 panel row, median solid / 95% CI dotted ----
## NOTE(review): transition 1 is drawn in blue but transitions 2 and 3 in
## red -- looks intentional but the colour scheme is inconsistent; confirm.
if(plot == TRUE)
{
if(is.null(xlab))
{
xlab <- c("Time", "Time", "Time")
## Under a semi-Markov model the third transition runs on the sojourn
## clock, hence the different x label.
if(x$setup$model == "semi-Markov")
{
xlab[3] <- "Time since non-terminal event"
}
}
if(plot.est == "BH")
{
if(is.null(ylab))
{
ylab <- "Baseline hazard"
}
## Five equal y-axis ticks from 0 to the largest upper credible limit.
ygrid <- (max(BH1Ub, BH2Ub, BH3Ub) - 0)/5
ylim <- seq(from=0, to=max(BH1Ub, BH2Ub, BH3Ub), by=ygrid)
##
par(mfrow=c(1,3))
##
plot(c(0, max(time1)), range(ylim), xlab=xlab[1], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][1](t), "")), axes=FALSE)
## PEM: only endpoints are labelled; WB: label the user-supplied tseq.
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time1)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=round(ylim, 4))
#if(time1hz[1] == 0)
#{
#	lines(time1hz, BH1Med, col="blue", lwd=3)
#	lines(time1hz, BH1Ub, col="blue", lwd=3, lty=3)
#	lines(time1hz, BH1Lb, col="blue", lwd=3, lty=3)
#}else
#{
#	lines(unique(c(0, time1hz)), c(0, BH1Med), col="red", lwd=3)
#	lines(unique(c(0, time1hz)), c(0, BH1Ub), col="red", lwd=3, lty=3)
#	lines(unique(c(0, time1hz)), c(0, BH1Lb), col="red", lwd=3, lty=3)
#}
lines(time1hz, BH1Med, col="blue", lwd=3)
lines(time1hz, BH1Ub, col="blue", lwd=3, lty=3)
lines(time1hz, BH1Lb, col="blue", lwd=3, lty=3)
##
plot(c(0, max(time2)), range(ylim), xlab=xlab[2], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][2](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time2)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=round(ylim, 4))
#if(time2hz[1] == 0)
#{
#	lines(time2hz, BH2Med, col="blue", lwd=3)
#	lines(time2hz, BH2Ub, col="blue", lwd=3, lty=3)
#	lines(time2hz, BH2Lb, col="blue", lwd=3, lty=3)
#}else
#{
#	lines(unique(c(0, time2hz)), c(0, BH2Med), col="red", lwd=3)
#	lines(unique(c(0, time2hz)), c(0, BH2Ub), col="red", lwd=3, lty=3)
#	lines(unique(c(0, time2hz)), c(0, BH2Lb), col="red", lwd=3, lty=3)
#}
lines(time2hz, BH2Med, col="red", lwd=3)
lines(time2hz, BH2Ub, col="red", lwd=3, lty=3)
lines(time2hz, BH2Lb, col="red", lwd=3, lty=3)
##
plot(c(0, max(time3)), range(ylim), xlab=xlab[3], ylab=ylab, type="n", main = expression(paste("Estimated ", h[0][3](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time3)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=round(ylim, 4))
#if(time3hz[1] == 0)
#{
#	lines(time3hz, BH3Med, col="blue", lwd=3)
#	lines(time3hz, BH3Ub, col="blue", lwd=3, lty=3)
#	lines(time3hz, BH3Lb, col="blue", lwd=3, lty=3)
#}else
#{
#	lines(unique(c(0, time3hz)), c(0, BH3Med), col="red", lwd=3)
#	lines(unique(c(0, time3hz)), c(0, BH3Ub), col="red", lwd=3, lty=3)
#	lines(unique(c(0, time3hz)), c(0, BH3Lb), col="red", lwd=3, lty=3)
#}
lines(time3hz, BH3Med, col="red", lwd=3)
lines(time3hz, BH3Ub, col="red", lwd=3, lty=3)
lines(time3hz, BH3Lb, col="red", lwd=3, lty=3)
}
if(plot.est == "BS")
{
if(is.null(ylab))
{
ylab <- "Baseline survival"
}
## Survival is always plotted on [0, 1].
ylim <- seq(from=0, to=1, by=0.2)
##
par(mfrow=c(1,3))
##
plot(c(0, max(time1)), range(ylim), xlab=xlab[1], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][1](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time1)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=ylim)
## If the grid does not start at 0, anchor the curves at S(0) = 1.
if(time1[1] == 0)
{
lines(time1, BS1Med, col="blue", lwd=3)
lines(time1, BS1Ub, col="blue", lwd=3, lty=3)
lines(time1, BS1Lb, col="blue", lwd=3, lty=3)
}else
{
lines(unique(c(0, time1)), c(1, BS1Med), col="red", lwd=3)
lines(unique(c(0, time1)), c(1, BS1Ub), col="red", lwd=3, lty=3)
lines(unique(c(0, time1)), c(1, BS1Lb), col="red", lwd=3, lty=3)
}
##
plot(c(0, max(time2)), range(ylim), xlab=xlab[2], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][2](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time2)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=ylim)
if(time2[1] == 0)
{
lines(time2, BS2Med, col="blue", lwd=3)
lines(time2, BS2Ub, col="blue", lwd=3, lty=3)
lines(time2, BS2Lb, col="blue", lwd=3, lty=3)
}else
{
lines(unique(c(0, time2)), c(1, BS2Med), col="red", lwd=3)
lines(unique(c(0, time2)), c(1, BS2Ub), col="red", lwd=3, lty=3)
lines(unique(c(0, time2)), c(1, BS2Lb), col="red", lwd=3, lty=3)
}
##
plot(c(0, max(time3)), range(ylim), xlab=xlab[3], ylab=ylab, type="n", main = expression(paste("Estimated ", S[0][3](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time3)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=ylim)
if(time3[1] == 0)
{
lines(time3, BS3Med, col="blue", lwd=3)
lines(time3, BS3Ub, col="blue", lwd=3, lty=3)
lines(time3, BS3Lb, col="blue", lwd=3, lty=3)
}else
{
lines(unique(c(0, time3)), c(1, BS3Med), col="red", lwd=3)
lines(unique(c(0, time3)), c(1, BS3Ub), col="red", lwd=3, lty=3)
lines(unique(c(0, time3)), c(1, BS3Lb), col="red", lwd=3, lty=3)
}
}
}
if(plot == FALSE)
{
return(value)
}
}
## ---- Univariate ("Surv") model: single hazard/survival curve ----------
if(class(x)[2] == "Surv")
{
## PEM baseline: pool log-hazard draws over chains, then summarise exactly
## as in the ID branch (single transition).
if(class(x)[4] == "PEM")
{
time <- x$chain1$time_lambda
timehz <- time
lambda.fin <- x$chain1$lambda.fin
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
lambda.fin <- rbind(lambda.fin, x[[nam]]$lambda.fin)
}
}
BHMed <- apply(exp(lambda.fin), 2, median)
BHUb <- apply(exp(lambda.fin), 2, quantile, prob = 0.975)
BHLb <- apply(exp(lambda.fin), 2, quantile, prob = 0.025)
dif <- diff(c(0, timehz))
BS <- matrix(NA, dim(lambda.fin)[1], dim(lambda.fin)[2])
for(i in 1:dim(lambda.fin)[1])
{
BS[i,] <- exp(-cumsum(exp(lambda.fin[i,])* dif) )
}
BSMed <- apply(BS, 2, median)
BSUb <- apply(BS, 2, quantile, prob = 0.975)
BSLb <- apply(BS, 2, quantile, prob = 0.025)
}
## Weibull baseline: closed-form hazard/survival per pooled draw on a
## 100-point grid; t = 0 is dropped from the hazard grid when present.
if(class(x)[4] == "WB")
{
time <- seq(from=min(tseq), to=max(tseq), length=100)
nStore <- length(x$chain1$alpha.p)
numSpl <- nStore * nChain
basehaz <- matrix(NA, numSpl, length(time))
basesurv <- matrix(NA, numSpl, length(time))
alpha.p <- x$chain1$alpha.p
kappa.p <- x$chain1$kappa.p
if(nChain > 1){
for(i in 2:nChain){
nam <- paste("chain", i, sep="")
alpha.p <- c(alpha.p, x[[nam]]$alpha.p)
kappa.p <- c(kappa.p, x[[nam]]$kappa.p)
}
}
for(i in 1:numSpl){
basehaz[i, ] <- alpha.p[i] * kappa.p[i] * time^(alpha.p[i] - 1)
basesurv[i, ] <- exp(-kappa.p[i] * time^(alpha.p[i]))
}
timehz <- time
if(tseq[1] == 0){
timehz <- time[-1]
basehaz <- basehaz[,-1]
}
BHMed <- apply(basehaz, 2, median)
BHUb <- apply(basehaz, 2, quantile, prob = 0.975)
BHLb <- apply(basehaz, 2, quantile, prob = 0.025)
BSMed <- apply(basesurv, 2, median)
BSUb <- apply(basesurv, 2, quantile, prob = 0.975)
BSLb <- apply(basesurv, 2, quantile, prob = 0.025)
}
BH_tbl <- cbind(timehz, BHMed, BHLb, BHUb)
dimnames(BH_tbl) <- list(rep("", length(timehz)), c("time", "h0", "LL", "UL"))
BS_tbl <- cbind(time, BSMed, BSLb, BSUb)
dimnames(BS_tbl) <- list(rep("", length(time)), c("time", "S0", "LL", "UL"))
value <- list(h0=BH_tbl, S0=BS_tbl)
## Single-panel plotting, mirroring the ID branch conventions.
if(plot == TRUE)
{
if(is.null(xlab))
{
xlab <- "Time"
}
if(plot.est == "BH")
{
if(is.null(ylab))
{
ylab <- "Baseline hazard"
}
ygrid <- (max(BHUb) - 0)/5
ylim <- seq(from=0, to=max(BHUb), by=ygrid)
##
plot(c(0, max(time)), range(ylim), xlab=xlab, ylab=ylab, type="n", main = expression(paste("Estimated ", h[0](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=round(ylim, 4))
#if(timehz[1] == 0)
#{
#	lines(timehz, BHMed, col="red", lwd=3)
#	lines(timehz, BHUb, col="red", lwd=3, lty=3)
#	lines(timehz, BHLb, col="red", lwd=3, lty=3)
#}else
#{
#	lines(unique(c(0, timehz)), c(0, BHMed), col="red", lwd=3)
#	lines(unique(c(0, timehz)), c(0, BHUb), col="red", lwd=3, lty=3)
#	lines(unique(c(0, timehz)), c(0, BHLb), col="red", lwd=3, lty=3)
#}
lines(timehz, BHMed, col="red", lwd=3)
lines(timehz, BHUb, col="red", lwd=3, lty=3)
lines(timehz, BHLb, col="red", lwd=3, lty=3)
}
if(plot.est == "BS")
{
if(is.null(ylab))
{
ylab <- "Baseline survival"
}
ylim <- seq(from=0, to=1, by=0.2)
##
plot(c(0, max(time)), range(ylim), xlab=xlab, ylab=ylab, type="n", main = expression(paste("Estimated ", S[0](t), "")), axes=FALSE)
if(class(x)[4] == "PEM")
{
axis(1, at=c(0, max(time)))
}
if(class(x)[4] == "WB")
{
axis(1, at=tseq)
}
axis(2, at=ylim)
## Anchor at S(0) = 1 when the grid does not include t = 0.
if(time[1] == 0)
{
lines(time, BSMed, col="red", lwd=3)
lines(time, BSUb, col="red", lwd=3, lty=3)
lines(time, BSLb, col="red", lwd=3, lty=3)
}else
{
lines(unique(c(0, time)), c(1, BSMed), col="red", lwd=3)
lines(unique(c(0, time)), c(1, BSUb), col="red", lwd=3, lty=3)
lines(unique(c(0, time)), c(1, BSLb), col="red", lwd=3, lty=3)
}
}
}
if(plot == FALSE)
{
return(value)
}
}
## When plot == TRUE nothing is returned (plots are the side effect).
invisible()
}
|
4d9c5a1384689e1be1c1a934755bf47b0e06a6a6 | 8679542ac6fbea927c8b5d9240238e36f708d229 | /Scripts/budget_cw.R | c7336290da033799768887c3f1c55e1222a33148 | [
"MIT"
] | permissive | tessam30/2018_Kenya | efe3e89761d43dd18feb78abec060c281818624f | 69a4c5b99f8919df13869fd52af1dc8b175cf0d5 | refs/heads/master | 2020-03-25T05:57:27.019978 | 2019-12-02T02:22:48 | 2019-12-02T02:22:48 | 143,475,397 | 0 | 0 | MIT | 2018-10-05T19:40:46 | 2018-08-03T21:37:08 | R | UTF-8 | R | false | false | 954 | r | budget_cw.R | budget_cw <-
# Crosswalk from numeric county budget category codes (0-13) to
# human-readable budget titles; code 0 is a catch-all and code 13
# denotes the "Total" row.
tibble::tribble(
~`Category Code`, ~Budget_title,
0, "Uncategorized",
1, "County Assembly",
2, "Governer or County Executive",
3, "Treasury, Finance or Administration",
4, "Transport and Infrastructure",
5, "Economic Growth, Commerce or Tourism",
6, "Education, Sports or Arts",
7, "Health",
8, "Land, Housing and Physical Planning",
9, "Agriculture",
10, "Youth, Gender, and Culture",
11, "Water and Natural Resources",
12, "Public Service Boards and Public Service",
13, "Total"
)
|
9b45bb432d5dbaf81b72a9405b0f827f9462ed2d | 5c65c461a02973a2710d0913ac101a18be4c8a12 | /plot4.R | 3fe6b3f6b70e428a8254354d4a1c22f3e64c83b0 | [] | no_license | learn4bcd/ExData_Plotting1 | 17776c0fe962db535c7f995677b3ed0ef1485093 | 0a73c165856d832f9edfea1d96ce328c112a220a | refs/heads/master | 2021-01-18T18:17:55.137665 | 2014-06-08T09:44:35 | 2014-06-08T09:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,065 | r | plot4.R | ## This scripts is for plotting plot4.png,
## which shows the distributions of "Global Active Power", "Voltage",
## three different "Energy sub meterings" as well as "Global Reactive Power"
## against "datetime" from 2007-02-01 00:00:00 to 2007-02-02 23:59:00
# This function is for reading data from raw file in a memory-efficient
# manner. It only loads lines matched to pattern assigned by the 'pattern'
# parameter, and returns a matrix containing 9 columns corresponding to:
# Date Time Global_active_power Global_reactive_power Voltage
# Global_intensity Sub_metering_1 Sub_metering_2 Sub_metering_3
# Read the household power consumption file and return the records whose
# lines match `pattern` (by default: dates 1/2/2007 and 2/2/2007).
#
# Returns a character matrix with one row per matching record and 9 columns
# named after the file header:
#   Date Time Global_active_power Global_reactive_power Voltage
#   Global_intensity Sub_metering_1 Sub_metering_2 Sub_metering_3
# Malformed records (e.g. "22/2/2007;22:58:00;?;?;?;?;?;?;") are padded
# with "?" and truncated to exactly 9 fields, as before.
#
# Rewritten from the original line-at-a-time loop:
#  - readLines() + grep() replaces the O(n^2) rbind-in-a-while(T) loop;
#  - all matching lines are kept, not just the first contiguous run
#    (the old `tag` early-exit silently dropped later matches).
readData <- function(infile = "household_power_consumption.txt",
                     pattern = "^([1-2])/2/2007",
                     splitstring = ";"){
	# Read the whole file once; first line is the header.
	all_lines <- readLines(infile)
	header <- strsplit(all_lines[1], split = splitstring)[[1]]
	body <- all_lines[-1]
	matched <- grep(pattern, body, value = TRUE)
	# No matches: return an empty matrix that still carries the column names.
	if (length(matched) == 0) {
		data <- matrix(character(0), nrow = 0, ncol = length(header))
		dimnames(data) <- list(NULL, header)
		return(data)
	}
	fields <- strsplit(matched, split = splitstring)
	# Pad short records with "?" and truncate to 9 fields (matches the old
	# c(values, rep("?", 9))[1:9] behaviour for malformed rows).
	fields <- lapply(fields, function(v) c(v, rep("?", 9))[1:9])
	data <- do.call(rbind, fields)
	dimnames(data) <- list(NULL, header)
	return(data)
}
data <- readData()
## data processing ...
## Convert the character matrix to a data.frame and coerce the seven
## measurement columns (3-9) to numeric; "?" placeholders become NA
## (as.numeric emits a coercion warning for them).
data <- as.data.frame(data,stringsAsFactors = F)
data[,3] <- as.numeric(data[,3])
data[,4] <- as.numeric(data[,4])
data[,5] <- as.numeric(data[,5])
data[,6] <- as.numeric(data[,6])
data[,7] <- as.numeric(data[,7])
data[,8] <- as.numeric(data[,8])
data[,9] <- as.numeric(data[,9])
## Combine Date (col 1) and Time (col 2) into a POSIXlt "datetime" column.
data <- cbind(data, datetime = strptime(paste(data[,1],data[,2],sep=" "),
                                        "%d/%m/%Y %H:%M:%S")
)
## plotting ...
## 2x2 panel figure written to plot4.png (480x480 px, transparent bg).
png("plot4.png",width = 480, height = 480, units = "px",bg=NA)
par(mfrow = c(2, 2))
with(data,{
    # top-left
    plot(datetime,Global_active_power,type="l",xlab="",
         ylab="Global Active Power")
    # top-right
    plot(datetime,Voltage,type="l")
    # bottom-left
    plot(datetime,Sub_metering_1,col="black",type="l",xlab="",
         ylab="Energy sub metering")
    lines(datetime,Sub_metering_2,col="red",type="l")
    lines(datetime,Sub_metering_3,col="blue",type="l")
    legend("topright", lty=1, box.lty = 0,
           col = c("black","red","blue"),
           legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
    # bottom-right
    plot(datetime,Global_reactive_power,type="l")
}
)
dev.off() |
ea7a70a5ebd2a70a1b0b27cc5349b715cd32200f | affb0b24f1fe0e922b9543f74f440adeed57d1e4 | /src/R/app/teamsTableDT.R | bc8e3248313832b200800e4a4dc7a4a93f83721b | [] | no_license | dnegrey/nflFamilyPicks | a09d6f8e64740cc8a4e1d83a8e38f915189c0552 | 1e9f9d9a1c8f9e9ecc4dd109d4a12e56ca696a65 | refs/heads/master | 2022-11-01T05:13:27.887200 | 2022-01-12T00:42:40 | 2022-01-12T00:42:40 | 204,978,085 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | teamsTableDT.R | teamsTableDT <- function(x) {
# Render the five-column teams data frame as a static DT widget:
# all rows on one page, no controls, every column centered.
header_labels <- c(
    "Logo",
    "Team",
    "Name",
    "Conference",
    "Division"
)
# dom = "t" shows the table only (no search box, pager or info line);
# row ordering is disabled and all rows fit on a single page.
table_options <- list(
    dom = "t",
    pageLength = nrow(x),
    ordering = FALSE,
    columnDefs = list(
        list(
            targets = c(0:4),
            className = "dt-center"
        )
    )
)
widget <- datatable(
    data = x,
    escape = FALSE,
    selection = "none",
    extensions = "Responsive",
    options = table_options,
    rownames = FALSE,
    colnames = header_labels
)
return(widget)
}
|
d6eb95d89505a9eece4dfd2fd822241ac588b914 | 29585dff702209dd446c0ab52ceea046c58e384e | /SAFD/R/DShistogram.R | 22b91d2af7a73d8a1707cd9ebac35ed0587f665d | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,666 | r | DShistogram.R | DShistogram <-
function(XX,limx=NA,npart=10,nl=101,pic=TRUE,pdf=FALSE){
#function makes a partition of the interval stated in xlim of nbreaks elements
#XX...fuzzy sample (list as always)
#xlim...limits of the histrogram - if NA then the max and min of the supps will be taken
#nl...number of levels
#make use of frequency function
#construct 2 dim matrix and use 3d plot via persp and contour plot
# npart...number of histogram bins; pic...draw to screen; pdf...write histo.pdf
# Invisibly returns list(gridx, gridy, M, breaks) -- the plotting grids,
# the per-bin membership matrix and the bin boundaries.
#construct limits
# Fall back to the union of the sample supports when limx is NA or invalid.
if(length(limx)<=1|limx[2]<=limx[1]){
 a<-XX[[1]]$x[1]
 b<-XX[[1]]$x[nrow(XX[[1]])]
 if(length(XX)>1){
  for (i in 2:length(XX)){
   a<-min(a,XX[[i]]$x[1])
   b<-max(b,XX[[i]]$x[nrow(XX[[i]])])
   }
  }
 limx<-c(a,b)
 }
# Evaluation grid on [0,1] for the (fuzzy) relative frequencies; for small
# samples include the exact multiples of 1/k so the step values are hit.
k<-length(XX)
if(k>500){
 ygrid<-seq(0,1,length=501)
 }
if(k<=500){
 ygrid<-sort(union(seq(0,1,length=(k+1)),seq(0,1,length=101)))
 }
breaks<-seq(limx[1],limx[2],length=npart+1)
FR<-vector("list",length=npart)
FR2<-vector("list",length=npart)
# For every bin compute the fuzzy frequency (DSfrequency returns the left
# branch in rows 1:nl and the right branch in rows (nl+1):(2*nl)), then
# re-express its membership as a step function of the frequency value on
# ygrid; the membership at a frequency is the min of both branch values.
for (i in 1:npart){
 FR[[i]]<-DSfrequency(XX,breaks[i:(i+1)],0,nl)
 print(i)
 R<-FR[[i]][(nl+1):(2*nl),]
 a<-approx(R$x,R$alpha,xout=ygrid,yleft=R$alpha[1],yright=R$alpha[nl],
           method="constant",f=1,ties="max")
 L<-FR[[i]][1:nl,]
 b<-approx(L$x,L$alpha,xout=ygrid,yleft=L$alpha[1],yright=L$alpha[nl],
           method="constant",f=0,ties="max")
 value<-ifelse(a$y>=b$y,b$y,a$y)
 FR2[[i]]<-data.frame(x=ygrid,y=value)
 }
#construct grid for y-coordinate in plotting
# Duplicate each break slightly left/right so the surface shows vertical
# walls at the bin boundaries (step look in persp/image).
grid1<-breaks+(breaks[2]-breaks[1])/1000
grid2<-breaks-(breaks[2]-breaks[1])/1000
grid3<-c(grid1,grid2)
grid3<-sort(subset(grid3,grid3>=min(breaks)&grid3<=max(breaks)))
gridx<-grid3
gridy<-ygrid
# M: one row per bin, membership over ygrid; M2 duplicates each row to
# match the doubled x-grid.
M<-matrix(numeric(npart*length(gridy)),ncol=length(gridy))
for (i in 1:npart){
 M[i,]<-FR2[[i]]$y
 }
M2<-M[rep(1:npart, rep(2,npart)),]
k<-length(XX)
lower<-rep(0,k)
upper<-lower
for (j in 1:k){
  lower[j]<-min(XX[[j]]$x)
  upper[j]<-max(XX[[j]]$x)
 }
lim_temp<-c(min(lower),max(upper))
# PDF output: same three views (two persp angles + level/contour view)
# written to "histo.pdf" in the working directory.
if(pdf==TRUE){
 pdf(file="histo.pdf",width=12,height=8)
 #BBreaks<-list(length=length(breaks))
 #for (m in 1:length(breaks)){
 #	BBreaks[[m]]<-data.frame(x=rep(breaks[m],2),alpha=c(-0.05,1.05))
 #	}
 #plot(XX[[1]],type="l", xlim=lim_temp,lwd=0.3,xlab=" ", ylab=" ",cex.main=1, col="gray50",
 #	   main=paste("Sample",sep=""))
 #for (j in 2:min(k,200)){
 #  lines(XX[[j]],type="l",lwd=0.3,col="gray50")
 #	}
 #for (m in 1:length(breaks)){
 #	 lines(BBreaks[[m]],type="l",col="red",lwd=2)
 #  }
 color<-rainbow(100,start=.7,end=.17)
 # Compute the z-value at the facet centres
 zfacet <- M2[-1, -1] + M2[-1, -ncol(M2)] + M2[-nrow(M2), -1] + M2[-nrow(M2), -ncol(M2)]
 facetcol <- cut(zfacet, 100)
 M<-M2
 #calculate plot limit for y-coordinate
 # Find the largest frequency with non-zero membership (sampled every 10th
 # grid column) and extend the displayed y-range 25% beyond it (capped at 1).
 colmax<-rep(0,trunc(length(gridy)/10))
 for (i in 1:trunc(length(gridy)/10)){
  colmax[i]<-max(M[,10*i])
  }
 Cut<-data.frame(nr=seq(1,length(colmax),by=1),colmax=colmax)
 Cut<-subset(Cut,Cut$colmax>0)
 cutindex<-min(round(10*Cut$nr[nrow(Cut)]*1.25,0),length(gridy))
 ym<-min(gridy[10*Cut$nr[nrow(Cut)]]*1.25,1)
 #print(ym)
 Mp<-M[,1:cutindex]
 gridyp<-gridy[1:cutindex]
 persp(gridx,gridyp,Mp, xlab="x", ylab="upper/lower frequency", zlab=expression(alpha),
       xlim=limx, main=paste("Histogram 3d",sep=""),cex.main=1,
       theta = -45, phi = 35, expand = 0.35, col=color[facetcol],
       shade = 0.25, ticktype = "detailed",border=NA)
 persp(gridx,gridyp,Mp, xlab="x", ylab="upper/lower frequency", zlab=expression(alpha),
       xlim=limx, main=paste("Histogram 3d",sep=""),cex.main=1,
       theta = 45, phi = 35, expand = 0.35, col=color[facetcol],
       shade = 0.25, ticktype = "detailed",border=NA)
 image(gridx,gridyp,Mp, xlab="x", ylab="upper/lower frequency", xlim=limx,
       col=rainbow(100,start=.7,end=.17),cex.axis=1,
       main=paste("Histogram level view","\n",
       "(black lines denote 1-cut, white lines 0.5-cut)",sep=""),cex.main=1)
 contour(gridx,gridyp,Mp, xlab=NA, ylab=NA, xlim=limx,lwd=c(1.5,1.5),
         levels = seq(0.5,1,by=0.5), add = TRUE, col = c("white","black"),
         lty = c(1,1), drawlabels=FALSE)
 dev.off()
 }
# Screen output: one persp view plus the image/contour level view in a
# second device (same computations as the pdf branch).
if(pic==TRUE){
 color<-rainbow(100,start=.7,end=.17)
 # Compute the z-value at the facet centres
 zfacet <- M2[-1, -1] + M2[-1, -ncol(M2)] + M2[-nrow(M2), -1] + M2[-nrow(M2), -ncol(M2)]
 facetcol <- cut(zfacet, 100)
 M<-M2
 #calculate plot limit for y-coordinate
 colmax<-rep(0,trunc(length(gridy)/10))
 for (i in 1:trunc(length(gridy)/10)){
  colmax[i]<-max(M[,10*i])
  }
 Cut<-data.frame(nr=seq(1,length(colmax),by=1),colmax=colmax)
 Cut<-subset(Cut,Cut$colmax>0)
 cutindex<-min(round(10*Cut$nr[nrow(Cut)]*1.25,0),length(gridy))
 ym<-min(gridy[10*Cut$nr[nrow(Cut)]]*1.25,1)
 #print(ym)
 Mp<-M[,1:cutindex]
 gridyp<-gridy[1:cutindex]
 persp(gridx,gridyp,Mp, xlab="x", ylab="upper/lower frequency", zlab=expression(alpha),
       xlim=limx, main=paste("Histogram 3d",sep=""),cex.main=1,
       theta = -45, phi = 35, expand = 0.35, col=color[facetcol],
       shade = 0.25, ticktype = "detailed",border=NA)
 dev.new()
 image(gridx,gridyp,Mp, xlab="x", ylab="upper/lower frequency", xlim=limx,
       col=rainbow(100,start=.7,end=.17),cex.axis=1,
       main=paste("Histogram level view","\n",
       "(black lines denote 1-cut, white lines 0.5-cut)",sep=""),cex.main=1)
 contour(gridx,gridyp,Mp, xlab="", ylab="", xlim=limx,lwd=c(1.5,1.5),
         levels = seq(0.5,1,by=0.5), add = TRUE, col = c("white","black"),
         lty = c(1,1), drawlabels=FALSE)
 }
H<-list(gridx=gridx,gridy=gridy,M=M,breaks=breaks)
invisible(H)
}
27ae1c29e12370c1e2f13b022918d3cdc43aba52 | 1ceae37e7dee1bfa332742b8c0f0a593a26d89ea | /scripts/seurat_analysis_combined_timesteps.R | aff3f96a5045b5b5764b860ce368cfc9887411d2 | [] | no_license | decarlin/ChiLab_10x_mouseCardiac | 250e2047f244ef9c625060f47412dc74e21d1111 | 6fbf8dc29f8341d7990619953148f662e1d994e1 | refs/heads/main | 2023-04-29T23:56:19.924910 | 2021-05-13T18:12:56 | 2021-05-13T18:12:56 | 367,120,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,234 | r | seurat_analysis_combined_timesteps.R | #load some libraries that we will need
library(Seurat)
library(Matrix)
library(stringr)
library(entropy)
library(cluster)
library(RColorBrewer)
library(ggplot2)
library('monocle')
#load the data, /Users/Dan/projects/Chi_10x/age_E825/data/ is where the e8.25 data lives
# Load the four 10x timepoints (E7.2, E7.5, E7.75, E8.25) and merge them
# into one Seurat object, tagging cells with a timepoint prefix.
pbmc.data <- Read10X("/Users/Dan/projects/Chi_10x/age_E825/data/")
pbmc_E825 <- CreateSeuratObject(pbmc.data)
pbmc.data <- Read10X("/Users/Dan/projects/Chi_10x/age_E775/data/")
pbmc_E775 <- CreateSeuratObject(pbmc.data)
pbmc.data <- Read10X("/Users/Dan/projects/Chi_10x/age_E750/data/")
pbmc_E750 <- CreateSeuratObject(pbmc.data)
pbmc.data <- Read10X("/Users/Dan/projects/Chi_10x/age_E720/data/")
pbmc_E720 <- CreateSeuratObject(pbmc.data)
pbmc.combined <- MergeSeurat(object1 = pbmc_E825, object2 = pbmc_E775, add.cell.id1 = "E825", add.cell.id2 = "E775", project = "Mesp1")
pbmc.combined <- MergeSeurat(object1 = pbmc_E750, object2 = pbmc.combined, add.cell.id1 = "E750", project = "Mesp1")
pbmc.combined <- MergeSeurat(object1 = pbmc_E720, object2 = pbmc.combined, add.cell.id1 = "E720", project = "Mesp1")
pbmc<-pbmc.combined
#rm(pbmc.combined,pbmc.data,pbmc_E825,pbmc_E775,pbmc_E750,pbmc_E720)
#find the mito genes
# Per-cell mitochondrial fraction from genes prefixed "mt-" (mouse naming).
mito.genes <- grep("^mt-", rownames(pbmc@data), value = T)
percent.mito <- Matrix::colSums(expm1(pbmc@data[mito.genes, ])) / Matrix::colSums(expm1(pbmc@data))
#AddMetaData adds columns to object@data.info, and is a great place to stash QC stats
pbmc <- AddMetaData(pbmc, percent.mito, "percent.mito")
#if you want a violin plot of the stats, uncomment this
#VlnPlot(pbmc, c("nGene", "nUMI", "percent.mito"), nCol = 3)
#get the batch info
# The timepoint prefix added at merge (e.g. "E825_...") doubles as batch.
cell_names<-pbmc@cell.names
time_batch<-factor(str_extract(cell_names,"E[:digit:]+"))
pbmc@meta.data$batch<-time_batch
#normalize, find variable genes
pbmc <- NormalizeData(object = pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- FindVariableGenes(object = pbmc, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)
#Read in cell cycle genes
s.genes<-readLines(con='/Users/Dan/Data/mouse_cell_cycle/s_phase.txt')
g2m.genes<-readLines(con='/Users/Dan/Data/mouse_cell_cycle/G2M_phase.txt')
#We filter out cells that have > 5% mitochondrial percentage, nUMI < 25000
pbmc <- SubsetData(pbmc, subset.name = "percent.mito", accept.high = 0.05)
pbmc <- SubsetData(pbmc, subset.name='nUMI', accept.low = 25000)
#cell cycle scoring
pbmc<-CellCycleScoring(pbmc,s.genes=s.genes,g2m.genes=g2m.genes)
#here we split the batch correction versus not
# Two parallel objects: one regressing out batch during scaling, one not;
# both are carried through PCA/tSNE so the correction can be compared.
pbmc_scaled_batch <- ScaleData(pbmc,vars.to.regress = c("percent.mito", "nUMI","batch"))
pbmc_scaled_noBatch <- ScaleData(pbmc,vars.to.regress = c("percent.mito", "nUMI"))
pbmc_scaled_batch <- RunPCA(pbmc_scaled_batch, pc.genes = pbmc@var.genes)
pbmc_scaled_noBatch<- RunPCA(object = pbmc_scaled_noBatch, pc.genes = pbmc@var.genes)
PCAPlot(object = pbmc_scaled_batch, group.by='batch')
PCAPlot(object = pbmc_scaled_noBatch, group.by='batch')
pbmc_scaled_batch <- RunTSNE(object = pbmc_scaled_batch, dims.use = 1:10, do.fast = TRUE)
pbmc_scaled_noBatch <- RunTSNE(object = pbmc_scaled_noBatch, dims.use = 1:10, do.fast = TRUE)
#attempt to justify batch correction using k means
# Mutual information between k-means cluster labels and batch: lower MI
# means clusters are less driven by batch (recorded values below).
pbmc_scaled_noBatch<-KClustDimension(pbmc_scaled_noBatch, dims.use = 1:10, reduction.use = "pca", k.use = 10, set.ident = TRUE, seed.use = 1)
pbmc_scaled_batch<-KClustDimension(pbmc_scaled_batch, dims.use = 1:10, reduction.use = "pca", k.use = 10, set.ident = TRUE, seed.use = 1)
noBatch_forMI<-mi.plugin(table(c(pbmc_scaled_noBatch@meta.data$kdimension.ident),c(pbmc_scaled_noBatch@meta.data$batch)))
batch_forMI<-mi.plugin(table(c(pbmc_scaled_batch@meta.data$kdimension.ident),c(pbmc_scaled_batch@meta.data$batch)))
#> batch_forMI
#[1] 0.3745987
#> noBatch_forMI
#[1] 0.4041218
#how many k? use silhouette
# Scan k = 8..25, recording the mean silhouette width in PCA space (1:10);
# s_means is plotted below to pick the final k.
x = GetCellEmbeddings(object = pbmc_scaled_noBatch, reduction.type = "pca", dims.use = 1:10)
i=1
for (k in 8:25){
pbmc_scaled_noBatch<-KClustDimension(pbmc_scaled_noBatch, dims.use = 1:10, reduction.use = "pca", k.use = k, set.ident = TRUE, seed.use = 1)
s<-silhouette(pbmc_scaled_noBatch@meta.data$kdimension.ident,dist(x,method = 'euclidean'))
s_mean<-mean(s[,'sil_width'])
if (i==1){
s_means<-c(k,s_mean)
} else{
s_means<-rbind(s_means,c(k,s_mean))
}
i=i+1
}
plot(s_means, type='l')
#final k-means here, k=15
pbmc_scaled_noBatch<-KClustDimension(pbmc_scaled_noBatch, dims.use = 1:10, reduction.use = "pca", k.use = 15, set.ident = TRUE, seed.use = 1)
#pbmc_scaled_noBatch<-KClustDimension(pbmc_scaled_batch, dims.use = 1:10, reduction.use = "pca", k.use = 13, set.ident = TRUE, seed.use = 1)
# cluster tsne plot
#set the colors
# 15 fixed cluster colours (one per k-means cluster).
#darkcols <- c(brewer.pal(9, "Set1"),brewer.pal(6,"Set2"))
darkcols <- c('#E41A1C','#377EB8','#4DAF4A','#984EA3','#FF7F00',
'#FFFF33','#A65628','#F781BF','#999999','#66C2A5',
'#FC8D62','#8DA0CB','#000000','#A6D854','#FFD92F')
TSNEPlot(pbmc_scaled_noBatch,do.label=TRUE, colors.use=darkcols)
#3D
library(scatterplot3d)
tsne_1 <- pbmc_scaled_noBatch@dr$tsne@cell.embeddings[,1]
tsne_2 <- pbmc_scaled_noBatch@dr$tsne@cell.embeddings[,2]
tsne_3 <- pbmc_scaled_noBatch@dr$tsne@cell.embeddings[,3]
scatterplot3d(x = tsne_1, y = tsne_2, z = tsne_3, col=pbmc_scaled_noBatch@ident)
#get the markers
# Positive cluster markers (>=25% of cells, logFC threshold 0.25) exported
# to a tab-separated file.
pbmc.markers <- FindAllMarkers(object = pbmc_scaled_batch, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
write.table(pbmc.markers,file='combined_cluster_markers.txt', quote=FALSE, sep='\t',col.names=NA)
#hierarchical clustering, x is the first ten PCA reduction
x = GetCellEmbeddings(object = pbmc_scaled_batch, reduction.type = "pca", dims.use = 1:10)
clusters<-hclust(dist(x,method = 'euclidean'))
cluster_colors<-sapply(pbmc_scaled_batch@meta.data$kdimension.ident,function(x)darkcols[x])
#save.image('combined.Rdata')
#get the top 100 most variable genes for the heatmap
top_var<-sort(apply(as.matrix(pbmc_scaled_batch@data[pbmc@var.genes,]),1,var),decreasing=TRUE)
most_variable_genes<-names(top_var)[1:100]
pdf('E825_heatmap.pdf', height=9, pointsize=9)
heatmap.2(as.matrix(pbmc_scaled_batch@data[most_variable_genes,]),Colv=as.dendrogram(clusters), ColSideColors=cluster_colors, trace='none',labCol = FALSE)
dev.off()
#You can look at any set of genes on the tsne plot
markers_from_josh_clustering<-c('Tbx4','Meox1','Lefty2','Tbx18','Trim10','Hba-x','Six2','Tbx1')
FeaturePlot(object = pbmc_scaled_batch, features.plot = markers_from_josh_clustering, cols.use = c("grey", "blue"), reduction.use = "tsne")
#here is the code for marking the gene set PC scoring
#this returns a per-gene score on the first PC
## Score every cell for the activity of a gene set, using the loadings of
## the first principal component computed over the gene set's expression.
##
## obj:      object holding an expression matrix in its @data slot
##           (genes x cells, e.g. a Seurat v2 object).
## genelist: path to a text file with one gene symbol per line.
##
## Returns a named numeric vector with one score per cell (column of
## obj@data).  Cells in which no gene of the set is detected receive the
## worst observed loading, and PC1 is sign-flipped when most per-gene PC1
## scores are negative, so higher always means "more of the signature".
pcGenesetSignal <- function(obj, genelist)
{
  geneset <- readLines(genelist)
  ## Keep only genes that exist in the expression matrix.
  present <- geneset[geneset %in% rownames(obj@data)]
  ## Genes of the set with any counts, and cells expressing the set at all.
  expressed_genes <- present[rowSums(as.matrix(obj@data[present, ])) != 0]
  expressing_cells <- colSums(as.matrix(obj@data[present, ])) != 0
  ## PC1 over the (expressed gene) x (expressing cell) submatrix; genes are
  ## the observations, so the rotation gives one loading per cell.
  pca_fit <- prcomp(obj@data[expressed_genes, expressing_cells], scale. = TRUE)
  ## Did the majority of per-gene scores land on the positive side of PC1?
  majority_positive <- sum(pca_fit$x[, "PC1"] > 0) > (length(pca_fit$x[, "PC1"]) / 2)
  outscore <- rep(0, length(colnames(obj@data)))
  names(outscore) <- colnames(obj@data)
  loadings <- pca_fit$rotation[, "PC1"]
  if (majority_positive) {
    outscore[expressing_cells] <- loadings
    outscore[!expressing_cells] <- min(loadings)
  } else {
    ## Flip the sign so the score still increases with signature activity.
    outscore[expressing_cells] <- -loadings
    outscore[!expressing_cells] <- -max(loadings)
  }
  outscore
}
#so then you can use this to read in gene lists and score them, adding a vector in pbmc_scaled_batch@meta.data
#that you can visualize on the tsne plot
## Each gene list file lives under GenesetsFromJosh/; the resulting
## per-cell score vector is stored in @meta.data under the file name.
prefix<-'GenesetsFromJosh'
gene_lists<-c('YSendoderm.txt','YSmesoderm.txt','branchial_arch.txt','cardiac.txt','somites.txt')
for (gl in gene_lists){
  path_file<-paste(prefix,gl, sep='/')
  pbmc_scaled_batch@meta.data[[gl]]<-pcGenesetSignal(pbmc_scaled_batch,path_file)
}
## ---- Pseudotime ordering with monocle ----
library('monocle')
#beta tools
#install.packages("devtools")
#devtools::install_github("cole-trapnell-lab/monocle-release@develop")
cds<-importCDS(pbmc_scaled_noBatch)
#possible re normalize?
cds <- estimateSizeFactors(cds)
cds <- estimateDispersions(cds)
disp_table <- dispersionTable(cds)
## NOTE(review): setOrderingFilter() normally expects a vector of gene ids,
## but `ordering_genes` is a whole subset data frame here (monocle docs use
## ordering_genes$gene_id) -- confirm this works as intended.
ordering_genes <- subset(disp_table, mean_expression >= 0.1)
cds <- setOrderingFilter(cds, ordering_genes)
cds <- reduceDimension(cds)
cds <- orderCells(cds) |
77275c1a67dfb50b7c9d875364cc7eccbb69babf | b45c84bc8bc8a2d4d4589eb5e531875131492301 | /RScripts/bcfwa_streamline_sreach.R | d7edab18e5976d3f58cafefa67b43f1cfea04582 | [] | no_license | mattjbayly/samplecode_tmp | dc36bb92389785c3ce830fbb88cffa5016dcf069 | ea9ce8c379cc7d66c6d357fd37dbf04564637e08 | refs/heads/master | 2020-07-29T02:50:50.502613 | 2019-09-20T08:53:41 | 2019-09-20T08:53:41 | 209,638,529 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,622 | r | bcfwa_streamline_sreach.R | #=======================================================
# BCFWA Streamline Networking
#=======================================================
# Search upstream and downstream with the BCFWA
## These prints fire when the script is sourced by the ArcGIS toolbox,
## not when the tool itself runs (tool_exec below is only defined here).
print("Starting........................")
print("bcfwaStreamlineUSDS........................")
## ArcGIS script-tool entry point: given a set of target stream reaches,
## pull the corresponding upstream (or downstream) reaches out of the BC
## Freshwater Atlas stream network using a precomputed line-id index table,
## and write them to a new feature class.
##
## in_params:
##   [[1]] target reach feature class (must carry LINEAR_FEATURE_ID)
##   [[2]] full BCFWA stream-network feature class
##   [[3]] CSV index table mapping upstream line id (usid) -> target id (id)
##   [[4]] direction: "Upstream" or "Downstream"
## out_params:
##   [[1]] path of the output feature class
##
## Returns out_params (the arcgisbinding tool contract).
tool_exec <- function(in_params, out_params)
{
  #### Load library for analysis ####
  if (!requireNamespace("dplyr", quietly = TRUE))
    install.packages("dplyr")
  ## library() rather than require(): a failed load should stop the tool
  ## immediately instead of returning FALSE and failing later, cryptically.
  library(dplyr)

  #### Input parameters ####
  input_reaches     <- in_params[[1]]
  input_strmnetwork <- in_params[[2]]
  input_indextable  <- in_params[[3]]
  input_usds        <- in_params[[4]]

  #### Output parameters ####
  output_features <- out_params[[1]]

  #### Linear feature ids of the target reaches ####
  tr <- arc.open(input_reaches)
  tr_df <- arc.select(tr)
  ## Direct column extraction replaces the old data.frame/unlist/pipe dance.
  tlfids <- as.numeric(tr_df[["LINEAR_FEATURE_ID"]])
  print(tlfids)

  #### Upstream/downstream reach ids from the index table ####
  print(input_indextable)
  index_tab <- read.csv(input_indextable)
  print(paste0("nrow table: ", nrow(index_tab)))
  ## The index table is built in the upstream orientation; for downstream
  ## searches, swap the roles of the two id columns.
  if(input_usds == "Downstream"){
    print("Working Downstream")
    colnames(index_tab) <- c("usid", "id")
  }
  index_tab_sub <- index_tab[which(index_tab$id %in% tlfids), ]

  #### Crop the stream network to the matched reaches ####
  strm <- arc.open(input_strmnetwork)
  ## Select everything and filter in R: a where-clause listing thousands of
  ## ids exceeds the SQL statement limit (see the abandoned biglist below).
  #biglist <- paste0("LINEAR_FEATURE_ID IN(", paste(as.character(index_tab_sub$usid), collapse = ","), ")")
  strm_sub <- arc.select(strm)
  strm_sub <- strm_sub[which(strm_sub$LINEAR_FEATURE_ID %in% index_tab_sub$usid), ]
  print(paste0("Upstream feature count:", nrow(strm_sub)))

  ### Tag each selected reach with the target reach it indexes to ###
  strm_sub$target_id <- index_tab_sub$id[match(strm_sub$LINEAR_FEATURE_ID, index_tab_sub$usid)]

  arc.write(output_features, strm_sub, overwrite = TRUE)
  return(out_params)
}
## Printed at source time (when the toolbox loads the script), not after a
## tool run completes.
print("Completed........................")
# For testing in R only - Skip this
## Manual-testing scaffold: never executes (if(FALSE)); copy the body into
## an interactive session to run tool_exec() outside ArcGIS.
if(FALSE){
library(arcgisbinding)
arc.check_product()
in_params <- list()
in_params[[2]] <- "F:/spatial_data_raw/BCFWA/FWA_STREAM_NETWORKS_SP.gdb/SQAM"
in_params[[1]] <- "F:/delete/Output.gdb/targsrtm"
in_params[[3]] <- "F:/FWA Network/1_bcfwa_attributes/1_index_upstream_line_to_line_id_tables/SQAM_us_lfid.csv"
in_params[[4]] <- "Upstream"
out_params <- list()
out_params[[1]] <- "F:/delete/Output.gdb/myoutput"
} # end of testing section
#======================================================
|
95d0f968e5a7e95b6dced24831afec1f31159b4e | d316d28886285962dfe7201038bc17b724321288 | /man/infer_bootnum.Rd | 3b4518fe71cf0316e39c221a913b0cf785004c14 | [
"MIT"
] | permissive | bartongroup/RATS | 606d4923651fc325d1d7fc806d0a0eb07f13ce99 | a356a5a199b52431cef2c8eb0ce6821151a8ef96 | refs/heads/master | 2022-06-13T22:55:19.835931 | 2022-06-03T08:59:26 | 2022-06-03T08:59:26 | 55,973,542 | 30 | 3 | MIT | 2022-06-02T12:48:09 | 2016-04-11T13:11:59 | R | UTF-8 | R | false | true | 467 | rd | infer_bootnum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func.R
\name{infer_bootnum}
\alias{infer_bootnum}
\title{Rule-of-thumb number of iterations.}
\usage{
infer_bootnum(boot_data_A, boot_data_B)
}
\arguments{
\item{boot_data_A}{List of tables of bootstrapped counts.}
\item{boot_data_B}{List of tables of bootstrapped counts.}
}
\value{
The least number of iterations seen in the input.
}
\description{
Rule-of-thumb number of iterations.
}
|
bf5a300bb7b5eb530af8242254866d5ddd0f8478 | 16dcba576362af261592e4a94bab06f23c9f9b6e | /analysis/scripts/simOutbreak.R | dfee1b41f518895b5019c36dd1067a050429d746 | [] | no_license | confunguido/prioritizing_interventions_basic_training | 1bcaa53a20774a4a47d2ca73faa505f623c661b8 | 19911fb4f251bac67ee42c50ffb4e11137f28c6e | refs/heads/master | 2023-06-26T05:49:01.596517 | 2021-07-12T14:32:02 | 2021-07-12T14:32:02 | 384,942,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,647 | r | simOutbreak.R | ## simOutbreak.R
##
## Simulates SARS-CoV-2 transmission and interventions in a basic training setting, tracks a single
## cohort of recruits through a 12-week basic training session
##
## INPUTS: contact network built by network_BT.R, parameter values for infection processes,
## parameter values for control processes(testing, isolation/quarantine/contact tracing, masks),
## number of replicates to simulate
##
## OUTPUTS: output contains summary and time series data on number of infections, number of symptomatic
## infections, number in isolation and quarantine, number of tests given, who infected who, and individual
## level dates of infection/symptoms/test
##
## FEATURES:
##
## staggered arrival dates
## quarantine with coccoon from day 4 to day 14
## random contacts allowed throughout
## test symptomatics
## test return delay
## isolate positive cases
## contact tracing and quarantine test-positive cases
## test quarantine after 3 days and release if test-negative
## individual compliance
## recent pre-arrival infection
## clinical and test-based criteria to leave isolation
## https://www.who.int/news-room/commentaries/detail/criteria-for-releasing-covid-19-patients-from-isolation
## https://www.journalofinfection.com/article/S0163-4453(20)30119-5/fulltext#seccesectitle0014
## https://www.cdc.gov/coronavirus/2019-ncov/hcp/duration-isolation.html
## Contact with drill sergeants/staff
## TO DO LIST:
##
## [x] Test trainers
## [x] Implement TOST and incubation period
## [x] Implement a more thorough sensitivity
## simOutbreak: run one stochastic outbreak replicate for a basic-training
## cohort over the days in `time`.
##
## NOTE(review): this function takes no arguments -- every parameter
## (R0.prob, genint, k_inc, symp.mag, symp.mean, asymp.adjust, compliance,
## numrecruits/numdrillserg/numStaff, arrivebase, the contacts.* lists, the
## pcr.* test characteristics, testdates/testdates_type, the BOOL_* switches,
## isolation/quarantine durations, etc.) is read from the calling
## environment.  Confirm the driver script defines all of them first.
##
## Returns a list of four elements:
##   [[1]] summary vector: initial infections, arrival-test positives,
##         later-test positives, total infections, total symptomatic,
##         peak isolated+quarantined, total tests administered
##   [[2]] per-day time-series data.frame
##   [[3]] per-individual dates (symptom onset / isolation / quarantine)
##   [[4]] who-infected-whom edge list (source 0 = initial or imported)
simOutbreak = function(){
    ## specify secondary infection distribution
    R0 = R0.prob * genint
    ## specify delay between infection and symptom onset
    ## Replacing this line with the incub function
    ##prop.inf.symp = symp.mag * dpois(1:28, symp.tim) / sum(dpois(1:28, symp.tim))
    ## INCUBATION PERIOD
    ## Daily probability mass of symptom onset, gamma incubation period
    ## discretised to days 1..21.
    incub = pgamma(1:21,shape=k_inc,scale=0.948) - pgamma(0:20,shape=k_inc,scale=0.948)
    prop.inf.symp = incub / sum(incub)
    ## previously infected and immune
    immune = rep(0, numrecruits+numdrillserg+numStaff)
    immune[sample(numrecruits+numdrillserg+numStaff, rbinom(1, numrecruits+numdrillserg+numStaff, initial.immune), replace = F)] = 1
    ## infected status
    ## which(immune > 0, replace = F)
    infected = rep(0, numrecruits+numdrillserg+numStaff)
    ## Currently-infected individuals are drawn from the immune pool, so
    ## initial.infected can never exceed initial.immune.
    if(initial.immune < initial.infected){
        initial.immune = initial.infected
    }
    if(length(which(immune > 0)) > 0){
        infected[sample(which(immune > 0), rbinom(1, sum(immune), initial.infected/initial.immune), replace = F)] = 1
    }
    init.infect = sum(infected)
    ## simulate day of infection of initial infecteds
    ## We don't need the foor loop
    ## arrivebase[something] - sample(1:21, init.infect)
    ## R0_init = R0.prob * dpois(1:21, R0.tim/2) / sum(dpois(1:21, R0.tim/2)) # specify secondary infection distribution for initial infecteds to force some recent infections
    date.infected = rep(NA, numrecruits+numdrillserg+numStaff)
    ## NOTE(review): secondary.infected is allocated but never written or
    ## returned below -- dead state, confirm it can be removed.
    secondary.infected = rep(NA, numrecruits+numdrillserg+numStaff)
    incub.period.inf = rep(NA, numrecruits+numdrillserg+numStaff)
    ## currently infected
    date.infected[which(infected > 0)] = arrivebase[which(infected > 0)] - sample(1:39, init.infect, replace = T)
    ## already immune
    ## Sentinel -1e3 puts recovered individuals far outside every
    ## infectiousness/testing window used below.
    date.infected[immune > 0 & infected == 0] = -1e3
    infected[immune > 0] = 1
    ## simulate date of symptom onset and duration
    date.symptoms = rep(NA, numrecruits+numdrillserg+numStaff)
    symptom.duration = rep(NA, numrecruits+numdrillserg+numStaff)
    for(ii in which(infected > 0)){
        if(rbinom(1, 1, symp.mag) == 1){
            date.symptoms[ii] =
                date.infected[ii] +
                sample(1:length(prop.inf.symp), 1, prob = prop.inf.symp)
            symptom.duration[ii] = rpois(1,symp.mean)
            incub.period.inf[ii] = date.symptoms[ii]
        }else{
            ## Asymptomatic: still draw a latent "would-be onset" day.
            incub.period.inf[ii] = date.infected[ii] +
                sample(1:length(prop.inf.symp),1,replace = T,prob = prop.inf.symp)
        }
    }
    ## individual status storage
    tested.date = rep(NA, numrecruits+numdrillserg+numStaff)
    isolate.date = rep(NA, numrecruits+numdrillserg+numStaff)
    quarantine.date = rep(NA, numrecruits+numdrillserg+numStaff)
    testneg.date = rep(NA, numrecruits+numdrillserg+numStaff)
    init.pos = 0
    retest.pos = 0
    ## aggregate numbers over time
    numIsolated = rep(0, length(time))
    numQuarantine = rep(0, length(time))
    numTested = rep(0, length(time))
    numInfected = rep(0, length(time))
    numSymptoms = rep(0, length(time))
    numImported = rep(0, length(time))
    numTestPositive = rep(0, length(time))
    ## keep track of who infected whom
    edges = cbind(rep(0, sum(infected)), which(infected > 0))
    ## loop over each day of basic training
    for(tt in time){
        ## Optional linear ramp of compliance from compliance.avg to
        ## compliance.final.avg over the training period.
        compliance.mult = 1
        if(BOOL_compliance_time == 1){
            compliance.mult = ((compliance.final.avg - compliance.avg)/(length(time) - 1) * (tt-1) + compliance.avg) / compliance.avg
        }
        ## test on arrival
        if(tt %in% unique(arrivebase) & BOOL_testOnArrival == 1){
            needtesting = which(arrivebase == tt)
            trueneg = needtesting[is.na(date.infected[needtesting])]
            truepos = setdiff(needtesting, trueneg)
            testpos = rep(NA, length(needtesting))
            if(length(trueneg) > 0){ # false pos
                testpos[which(needtesting %in% trueneg)] =
                    rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
            }
            ## Upon arrival, always test with PCR
            if(length(truepos) > 0){ # true pos
                testpos[which(needtesting %in% truepos)] =
                    rbinom(length(truepos), 1,
                           pcr.sens.commercial(tt - date.infected[truepos] + 1))
            }
            tested.date[needtesting] = tt
            needtracing = needtesting[which(testpos == 1)] # all pos
            isolate.date[needtracing] = tt + testReturn
            testneg.date[setdiff(needtesting,needtracing)] = tt
            numTested[tt] = numTested[tt] + length(needtesting)
            init.pos = init.pos + length(needtracing)
        }
        ## Only if want to test everyday, for bookkeeping
        ## (counts hypothetical daily positives without changing any state).
        if(BOOL_testDaily == TRUE){
            needtesting = 1:numrecruits
            truepos = needtesting[!is.na(date.infected[needtesting])]
            trueneg = numrecruits - length(truepos)
            testpos = 0
            if(trueneg > 0){
                testpos = sum(rbinom(trueneg, 1, 1 - pcr.spec.commercial))
            }
            if(length(truepos) > 0){
                testpos = testpos + sum(
                    rbinom(length(truepos), 1,
                           pcr.sens.commercial(tt - date.infected[truepos] + 1)
                           ))
            }
            numTestPositive[tt] = testpos
        }
        ## test on specified days
        if(tt %in% testdates){
            needtesting = 1:numrecruits
            trueneg = needtesting[is.na(date.infected[needtesting])]
            truepos = setdiff(needtesting, trueneg)
            testpos = rep(NA, length(needtesting))
            ind_testdates = which(testdates == tt)
            tmp_test_type = 'pcr'
            if(ind_testdates <= length(testdates_type)){
                tmp_test_type = testdates_type[ind_testdates]
            }
            ## can be antigen or PCR, if not antigen, assume pcr
            if(tmp_test_type == "antigen"){
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.screen)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos), 1, pcr.sens.screen(tt - date.infected[truepos] +
                                                                   1))
                }
            }else{
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos), 1, pcr.sens.commercial(tt - date.infected[truepos] +
                                                                       1))
                }
            }
            tested.date[needtesting] = tt
            needtracing = needtesting[which(testpos == 1)]
            isolate.date[needtracing] = tt + testReturn
            testneg.date[setdiff(needtesting,needtracing)] = tt
            numTested[tt] = numTested[tt] + length(needtesting)
            retest.pos = retest.pos + length(needtracing)
            ## Contact tracing of test positives: sample a Poisson number of
            ## contacts, test them, quarantine those who test positive.
            for(ii in needtracing){
                if(ii <= numrecruits){ # recruits
                    if(tt <= 14){
                        contacts.all = unique(c(contacts.random[[ii]],contacts.cocoon[[ii]]))
                    } else {
                        contacts.all = unique(c(contacts.random[[ii]],contacts.company[[ii]]))
                    }
                } else if(ii > numrecruits + numdrillserg){ # staff
                    contacts.all = contacts.staff.recruit[[ii - numrecruits - numdrillserg]]
                }else { # drill sergeants
                    contacts.all = contacts.drillserg.recruit[[ii - numrecruits]]
                }
                contacts.all = contacts.all[contacts.all != ii]
                contacts.all = setdiff(contacts.all, which(!is.na(quarantine.date)))
                ## CONTACT TRACING!! quarantine.contacts and set date to isolate if positive
                contacts.needtest = sample(contacts.all,
                                           min(
                                               rpois(1, quarantine.contacts),
                                               length(contacts.all)
                                           ), replace = F)
                trueneg = contacts.needtest[is.na(date.infected[contacts.needtest])]
                truepos = setdiff(contacts.needtest, trueneg)
                testpos = rep(NA, length(contacts.needtest))
                ## can be antigen or PCR, if not antigen, assume pcr
                if(tmp_test_type == "antigen"){
                    if(length(trueneg) > 0){
                        testpos[which(contacts.needtest %in% trueneg)] =
                            rbinom(length(trueneg), 1, 1 - pcr.spec.screen)
                    }
                    if(length(truepos) > 0){
                        testpos[which(contacts.needtest %in% truepos)] =
                            rbinom(length(truepos), 1, pcr.sens.screen(tt - date.infected[truepos] +
                                                                       1))
                    }
                }else{
                    if(length(trueneg) > 0){
                        testpos[which(contacts.needtest %in% trueneg)] =
                            rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
                    }
                    if(length(truepos) > 0){
                        testpos[which(contacts.needtest %in% truepos)] =
                            rbinom(length(truepos), 1, pcr.sens.commercial(tt - date.infected[truepos] +
                                                                           1))
                    }
                }
                tested.date[contacts.needtest] = tt
                quarantine.date[contacts.needtest[testpos == 1]] = tt + testReturn # Can also be isolate.date
                testneg.date[contacts.needtest[testpos != 1]] = tt
                numTested[tt] = numTested[tt] + length(contacts.needtest)
                retest.pos = retest.pos + length(which(testpos == 1))
            }
        }
        ## test staff on specified frequencies
        ## (applies to everyone after the recruits: drill sergeants + staff)
        if((testStaffFreq > 0) && ((tt - 1) %% testStaffFreq == 0)){
            needtesting = (numrecruits + 1):length(infected) # Is this for staff or drill sergants too?
            trueneg = needtesting[is.na(date.infected[needtesting])]
            truepos = setdiff(needtesting, trueneg)
            testpos = rep(NA, length(needtesting))
            ## can be antigen or PCR, if not antigen, assume pcr
            if(testStaffType == "antigen"){
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.screen)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos), 1, pcr.sens.screen(tt - date.infected[truepos] +
                                                                   1))
                }
            }else{
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos), 1, pcr.sens.commercial(tt - date.infected[truepos] +
                                                                       1))
                }
            }
            tested.date[needtesting] = tt
            needtracing = needtesting[which(testpos == 1)]
            isolate.date[needtracing] = tt + testReturn
            testneg.date[setdiff(needtesting,needtracing)] = tt
            numTested[tt] = numTested[tt] + length(needtesting)
            retest.pos = retest.pos + length(needtracing)
        }
        ## importation from staff
        ## Off-base exposure: anyone can be infected from the community,
        ## damped by their (time-varying) compliance.
        infected.offcampus = which(rbinom(numrecruits+numdrillserg+numStaff, 1,(1-compliance*compliance.mult)*importation) == 1)
        numImported[tt] = length(infected.offcampus)
        if(length(infected.offcampus) > 0){
            if(sum(is.na(date.infected[infected.offcampus])) > 0){
                edges = rbind(
                    edges,
                    cbind(0,infected.offcampus[which(is.na(date.infected[infected.offcampus]))]))
            }
            infected[infected.offcampus] = 1
            ## Only individuals not already infected get today's date.
            date.infected[infected.offcampus] = ifelse(
                is.na(date.infected[infected.offcampus]),
                tt,
                date.infected[infected.offcampus])
            date.symptoms[infected.offcampus] =
                ifelse(
                    is.na(date.symptoms[infected.offcampus]),
                    ifelse(
                        rbinom(length(infected.offcampus), 1, symp.mag) == 1,
                        date.infected[infected.offcampus] +
                        sample(
                            1:length(prop.inf.symp),
                            length(infected.offcampus),
                            prob = prop.inf.symp,
                            replace = T),
                        NA),
                    date.symptoms[infected.offcampus])
            incub.period.inf[infected.offcampus] = date.symptoms[infected.offcampus]
            incub.period.inf[infected.offcampus[is.na(date.symptoms[infected.offcampus])]] = date.infected[infected.offcampus[is.na(date.symptoms[infected.offcampus])]] +
                sample(1:length(prop.inf.symp),
                       length(infected.offcampus[is.na(date.symptoms[infected.offcampus])]),
                       replace = T, prob = prop.inf.symp)
        }
        ## loop through those who are capable of infecting others
        max_infectious_period = 21
        infectious = which(infected > 0 &
                           date.infected < tt &
                           date.infected > (tt - max_infectious_period) &
                           arrivebase <= tt)
        for(ii in infectious){
            ## Per-contact transmission probability today: generation-interval
            ## weight, asymptomatic discount, and the infector's compliance.
            infect.today =
                R0[(tt - date.infected[ii]) + 1] *
                ifelse(is.na(date.symptoms[ii]), asymp.adjust, 1) * (1 - compliance[ii]*compliance.mult)
            ## We need to prevent immune people from becoming infected again
            ## look up this person's contacts today
            ## Isolated/quarantined recruits only mix with other isolated or
            ## quarantined individuals; non-recruits in isolation mix with no one.
            if(!is.na(isolate.date[ii]) | !is.na(quarantine.date[ii])){
                if(ii <= numrecruits){
                    which.in.isolation = unique(c(which(!is.na(isolate.date)), which(!is.na(quarantine.date))))
                    contacts.all = which.in.isolation
                }else{
                    contacts.all = numeric()
                }
            } else {
                if(ii <= numrecruits){ # recruits
                    if(tt <= 14){
                        contacts.all = unique(c(contacts.random[[ii]],contacts.cocoon[[ii]]))
                    } else {
                        contacts.all = unique(c(contacts.random[[ii]],contacts.company[[ii]]))
                    }
                } else if(ii > numrecruits + numdrillserg){ # staff
                    contacts.all = unique(c(contacts.staff.recruit[[ii - numrecruits - numdrillserg]]))
                }else { # drill sergeants
                    contacts.all = unique(c(contacts.drillserg.recruit[[ii - numrecruits]]))
                }
            }
            contacts.all = contacts.all[contacts.all != ii]
            ## determine who becomes newly infected
            infect.who = rbinom(length(contacts.all),
                                1,
                                infect.today *(1 - compliance[contacts.all]*compliance.mult))
            ## Guard: an NA draw means a malformed probability; dump the
            ## offending state and abort the replicate.
            if(sum(is.na(infect.who)) > 0){
                print(infect.today)
                print(date.infected[ii])
                print(incub.period.inf[ii])
                print((tt - date.infected[ii]) + 1)
                stop()
            }
            if(sum(infect.who) > 0){
                infect.who = contacts.all[which(infect.who == 1)]
                ## update their status if they're not already infected
                if(sum(infected[infect.who] == 0) > 0){
                    infect.new = infect.who[which(infected[infect.who] == 0)]
                    infected[infect.new] = 1
                    date.infected[infect.new] = tt
                    date.symptoms[infect.new] =
                        ifelse(
                            rbinom(length(infect.new), 1, symp.mag) == 1,
                            date.infected[infect.new] +
                            sample(
                                1:length(prop.inf.symp),
                                length(infect.new),
                                prob = prop.inf.symp,
                                replace = T
                            ),
                            NA
                        )
                    incub.period.inf[infect.new] = date.symptoms[infect.new]
                    incub.period.inf[infect.new[is.na(date.symptoms[infect.new])]] = date.infected[infect.new[is.na(date.symptoms[infect.new])]] +
                        sample(1:length(prop.inf.symp),
                               length(infect.new[is.na(date.symptoms[infect.new])]),
                               replace = T,
                               prob = prop.inf.symp)
                    symptom.duration[infect.new] = rpois(1,symp.mean)
                    edges = rbind(edges, cbind(ii, infect.new))
                }
            }
        } # end infectious loop
        ## test symptomatics, and perform isolation and quarantine accordingly
        needtesting = which(date.symptoms == tt)
        needtesting = needtesting[which(arrivebase[needtesting] < tt)]
        needtesting = needtesting[which(is.na(isolate.date[needtesting]))]
        needtesting = needtesting[which(is.na(quarantine.date[needtesting]))]
        needtesting = needtesting[which(tested.date[needtesting] != tt)]
        numTested[tt] = numTested[tt] + length(needtesting)
        if(length(needtesting) > 0){
            trueneg = needtesting[is.na(date.infected[needtesting])]
            truepos = setdiff(needtesting, trueneg)
            testpos = rep(NA, length(needtesting))
            if(length(trueneg) > 0){
                testpos[which(needtesting %in% trueneg)] =
                    rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
            }
            if(length(truepos) > 0){
                testpos[which(needtesting %in% truepos)] =
                    rbinom(length(truepos), 1, pcr.sens.commercial(tt - date.infected[truepos] +
                                                                   1))
            }
            tested.date[needtesting] = tt
            needtracing = needtesting[which(testpos == 1)]
            isolate.date[needtracing] = tt # isolate on test date
            testneg.date[setdiff(needtesting,needtracing)] = tt
            for(ii in needtracing){
                if(ii <= numrecruits){ # recruits
                    if(tt <= 14){
                        contacts.all = unique(c(contacts.random[[ii]],contacts.cocoon[[ii]]))
                    } else {
                        contacts.all = unique(c(contacts.random[[ii]],contacts.company[[ii]]))
                    }
                } else if(ii > numrecruits + numdrillserg){ # staff
                    contacts.all = contacts.staff.recruit[[ii - numrecruits - numdrillserg]]
                }else { # drill sergeants
                    contacts.all = contacts.drillserg.recruit[[ii - numrecruits]]
                }
                contacts.all = contacts.all[contacts.all != ii]
                contacts.all = setdiff(contacts.all, which(!is.na(quarantine.date)))
                ## Traced contacts of a symptomatic positive enter quarantine
                ## tomorrow without being tested first.
                quarantine.date[sample(contacts.all, min(
                                                        rpois(1, quarantine.contacts),
                                                        length(contacts.all)
                                                    ), replace = F)] =
                    tt + 1
            }
        }
        ## test those who were recently quarantined and release if negative
        if(tt > testDelayQuarantine){
            needtesting = which(quarantine.date ==(tt - testDelayQuarantine))
            needtesting = needtesting[which(tested.date[needtesting] != tt)]
            numTested[tt] = numTested[tt] + length(needtesting)
            if(length(needtesting) > 0){
                trueneg = needtesting[is.na(date.infected[needtesting] + 1)]
                truepos = setdiff(needtesting, trueneg)
                testpos = rep(NA, length(needtesting))
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos),
                               1,
                               pcr.sens.commercial(tt - date.infected[truepos] + 1))
                }
                tested.date[needtesting] = tt
                release = which(testpos == 0)
                if(length(release) > 0){
                    quarantine.date[needtesting[release]] = NA
                }
                needtracing = needtesting[which(testpos == 1)]
                for(ii in needtracing){
                    if(ii <= numrecruits){ # recruits
                        if( tt <= 14){
                            contacts.all = unique(c(contacts.random[[ii]],contacts.cocoon[[ii]]))
                        } else {
                            contacts.all = unique(c(contacts.random[[ii]],contacts.company[[ii]]))
                        }
                    } else if(ii > numrecruits + numdrillserg){ # staff
                        contacts.all = contacts.staff.recruit[[ii - numrecruits - numdrillserg]]
                    }else { # drill sergeants
                        contacts.all = contacts.drillserg.recruit[[ii - numrecruits]]
                    }
                    contacts.all = contacts.all[contacts.all != ii]
                    contacts.all = setdiff(contacts.all, which(!is.na(quarantine.date)))
                    quarantine.date[sample(contacts.all, min(
                                                            rpois(1, quarantine.contacts),
                                                            length(contacts.all)
                                                        ), replace = F)] =
                        tt + 1
                }
            }
        }
        ## record numbers for the day
        numIsolated[tt] = sum(!is.na(isolate.date))
        numQuarantine[tt] = sum(!is.na(quarantine.date))
        numInfected[tt] = sum(date.infected == tt, na.rm = T)
        numSymptoms[tt] = sum(date.symptoms == tt, na.rm = T)
        ## release from isolation
        ## NOTE(review): max() here collapses both vectors to a single value
        ## (NA whenever any date is NA), so this which() is almost always
        ## empty -- pmax() looks like the intended elementwise operation;
        ## confirm against the release criteria in the file header.
        if(BOOL_clinicalrelease){ # release based on clinical criteria
            release = c(which(max(date.symptoms+isolate.length, # 10 days from symptom onset
                                  date.symptoms+symptom.duration+isolate.nosymp) == tt), # delay from last day of fever
                        which(tested.date[which(is.na(date.symptoms))]+isolate.length == tt), # 10 days from test date for asymptomatics
                        which(testneg.date == tt - testReturn)) # negative tests returned
        } else { # release based on testing
            needtesting = which(isolate.date <=(tt - isolate.length))
            numTested[tt] = numTested[tt] + length(needtesting)
            if(length(needtesting) > 0){
                trueneg = needtesting[is.na(tt - date.infected[needtesting] + 1)]
                truepos = setdiff(needtesting, trueneg)
                testpos = rep(NA, length(needtesting))
                if(length(trueneg) > 0){
                    testpos[which(needtesting %in% trueneg)] =
                        rbinom(length(trueneg), 1, 1 - pcr.spec.commercial)
                }
                if(length(truepos) > 0){
                    testpos[which(needtesting %in% truepos)] =
                        rbinom(length(truepos),
                               1,
                               pcr.sens.commercial(tt - date.infected[truepos] + 1))
                }
                tested.date[needtesting] = tt
                ## NOTE(review): `release` indexes into testpos/needtesting
                ## here but is applied directly to isolate.date below --
                ## should it be needtesting[release] (as in the quarantine
                ## retest block above)?  Verify before relying on results.
                release = which(testpos == 0)
            } else {
                release = numeric()
            }
        }
        if(length(release) > 0){
            isolate.date[release] = NA
        }
        ## release from quarantine if time up
        release = which(quarantine.date ==(tt - quarantine.max))
        if(length(release) > 0){
            quarantine.date[release] = NA
        }
    } # end time loop
    ## outputs
    summary_out = c(init.infect,
                    init.pos,
                    retest.pos,
                    sum(numInfected),
                    sum(numSymptoms),
                    max(numIsolated + numQuarantine),
                    sum(numTested))
    timeseries_out = data.frame(cbind(numInfected,numSymptoms,numIsolated,numQuarantine,numTested, numImported, numTestPositive))
    indivudal_out = data.frame(cbind(date.symptoms,isolate.date,quarantine.date))
    return(list(summary_out,timeseries_out,indivudal_out,edges))
} # end function definition
|
b4f78817dfaa22d3ac80e3a306afda4196bc6317 | d28ae57776ef0a10c831f43cde2015b8a0e2e8dd | /R/GSEAutoAnalysis.R | a6a3d28e36890ae65c8271f927d15c189add807d | [] | no_license | Shicheng-Guo/GEO | 502db3680c9c08ea7a959e0d5b04a8119f67df8c | 22b4039d7862dc2554436c9526a67e09d49775e4 | refs/heads/master | 2021-08-28T13:16:54.268270 | 2021-08-01T01:12:29 | 2021-08-01T01:12:29 | 64,424,455 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,123 | r | GSEAutoAnalysis.R | #!/usr/bin/env Rscript
# For PCA Analysis to methylation 450K dataset
# for ips methylatin 450K analysis
## Downloads a GEO series by accession, caches it as <GSEID>.RData, and
## extracts the expression matrix plus the first two phenotype fields.
## Usage:  Rscript GSEAutoAnalysis.R -i GSExxxxx
# setwd("G:\\geo")
source("http://bioconductor.org/biocLite.R")
biocLite("ggfortify")
library("optparse")

## BUG FIX: optparse short flags must be a hyphen plus a single character;
## the previous "-id" made make_option() error before parsing could start.
option_list = list(
  make_option(c("-i", "--input"), type="character", default=NULL, help="GSE ID", metavar="character")
);
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);
if (is.null(opt$input)){
  print_help(opt_parser)
  stop("At least one argument must be supplied (input file).\n", call.=FALSE)
}

library("GEOquery")
GSEID <- opt$input
## Download the series, cache it, then reload from the cached copy so
## repeated runs can be resumed from disk (duplicate library() call removed).
GEO <- getGEO(GSEID, destdir = getwd())
save(GEO, file = paste(GSEID, ".RData", sep = ""))
load(paste(GSEID, ".RData", sep = ""))

## Expression (beta-value) matrix and per-sample phenotype table.
data <- as.data.frame(exprs(GEO[[1]]))
phen <- pData(phenoData(GEO[[1]]))

## Phenotype columns are "label: value" strings; keep only the value part.
phen1 <- sapply(strsplit(as.character(phen$characteristics_ch1), ": "), function(x) unlist(x)[2])
phen2 <- sapply(strsplit(as.character(phen$characteristics_ch1.1), ": "), function(x) unlist(x)[2])
# phen3<-sapply(strsplit(as.character(phen$characteristics_ch1.3),": "),function(x) unlist(x)[2]) # age
# phen4<-sapply(strsplit(as.character(phen$characteristics_ch1.4),": "),function(x) unlist(x)[2]) # gender
# phen3[phen3=="f"]<-"Female"
# phen3[phen3=="m"]<-"Male"
# phen1[phen1=="rheumatoid arthritis"]<-"Rheumatoid Arthritis"
## Draw a four-panel PCA summary of a sample-by-feature matrix into a PDF.
##
## data:        numeric matrix/data frame, rows = individuals, cols = variables.
## pheno:       one phenotype label per row of `data`; drives plot symbols
##              and colours.
## output:      output file stem; ".pdf" is appended.
## multifigure: if TRUE, lay the four panels out on a single 2x2 page.
##
## Panels: scree plot of the first 10 PCs, cumulative variance explained,
## and PC1-vs-PC2 / PC1-vs-PC3 scatter plots coloured by phenotype.
## Called for its side effect (the PDF); returns the file path invisibly.
PCAPlot <- function(data, pheno, output, multifigure = TRUE){
  pca <- prcomp(data, center = TRUE, scale = FALSE)
  outputfile = paste(output, ".pdf", sep = "")
  pdf(outputfile)
  ## Close the device even if plotting fails part-way, so no dangling
  ## graphics device / truncated PDF handle is left behind.
  on.exit(dev.off(), add = TRUE)
  if(multifigure){
    par(mfrow = c(2, 2), mar = c(4, 4, 4, 4))
  }
  ## Panel 1: scree plot of the first ten principal components.
  plot((pca$sdev[1:10])^2, type = "o", xaxt = "n", ylab = "Variances",
       xlab = "Principle Components", col = "red", lwd = 2)
  axis(1, at = 0:10, labels = paste("PC", 0:10, sep = ""))
  ## Panel 2: cumulative fraction of total variance (vectorised cumsum
  ## replaces the old element-by-element growth loop; same values).
  var <- cumsum(pca$sdev^2) / sum(pca$sdev^2)
  plot(var, ylab = "total variance", xlab = "number of principle components",
       lwd = 2, type = "l")
  abline(h = 0.8, col = "grey", lty = 2)
  abline(v = which(var > 0.8)[1], col = "grey", lty = 2)
  ## Panels 3-4: PC1 vs PC2 and PC1 vs PC3, one symbol/colour per phenotype
  ## level (renamed `grp` so the graphics parameter `col` is not shadowed).
  scores <- data.frame(pheno, pca$x[, 1:3])
  grp <- as.numeric(as.factor(pheno))
  plot(x = scores$PC1, y = scores$PC2,
       xlim = c(min(scores$PC1), max(scores$PC1)),
       ylim = c(min(scores$PC2), max(scores$PC2)),
       type = "n", xlab = "PC1", ylab = "PC2")
  points(scores$PC1, scores$PC2, pch = grp, col = grp, cex = 0.8, lwd = 2)
  ## (a stray empty argument in the original legend() call is removed here)
  legend("topleft", legend = names(table(pheno)),
         pch = 1:length(table(pheno)), col = 1:length(table(pheno)),
         bty = "n", pt.lwd = 2, cex = 0.5)
  plot(x = scores$PC1, y = scores$PC3,
       xlim = c(min(scores$PC1), max(scores$PC1)),
       ylim = c(min(scores$PC3), max(scores$PC3)),
       type = "n", xlab = "PC1", ylab = "PC3")
  points(scores$PC1, scores$PC3, pch = grp, col = grp, cex = 0.9, lwd = 2)
  legend("bottomleft", legend = names(table(pheno)),
         pch = 1:length(table(pheno)), col = 1:length(table(pheno)),
         bty = "n", pt.lwd = 2, cex = 0.5)
  invisible(outputfile)
}
## Drop probes with any missing values before PCA (prcomp cannot take NAs).
data1 = na.omit(data)
## BUG FIX: PCAPlot() appends ".pdf" to its `output` argument itself, so
## pass an extension-free stem; the old calls produced "*.pca.pdf.pdf".
## Samples must be rows for prcomp, hence the transpose.
PCAPlot(t(data1), phen1, output = paste(GSEID, "_phen1.pca", sep = ""), multifigure = T)
PCAPlot(t(data1), phen2, output = paste(GSEID, "_phen2.pca", sep = ""), multifigure = T)
|
7a53e9e12b06b8a51c6c7853846cb3d63d17098e | aee6a655ff4c3006364e02d65782528cd9cdc46c | /winningtimetest.R | cc326fa6c402cbb439a9e9ac105fdca89a8d368a | [] | no_license | tobycrisford/parkrunanalysis | 5a5f88c0bcb91123607d65baa5fd714a4c04d102 | ce28482e68078d7c152d88d7ff500b15c4ec17d7 | refs/heads/master | 2020-12-02T08:58:25.621040 | 2020-02-25T20:20:00 | 2020-02-25T20:20:00 | 230,953,865 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 808 | r | winningtimetest.R | library(DBI)
## Exploratory check: are the gaps between parkrun events whose winning
## time exceeded 1000 geometrically distributed?
## NOTE(review): the >1000 cutoff presumably selects slow winning times;
## confirm the units (seconds?) against the database schema.
con = dbConnect(RSQLite::SQLite(), "cambridgedata.db")
dbListTables(con)
results = dbReadTable(con, "results")
dbListFields(con, "results")

## First-place finishers only.
winners = results[results[["position"]] == 1,]
print(winners[1:5,])
print(length(winners[,1]))

wintime = winners[winners[["time"]] > 1000,]
print(wintime[1:5,])
print(length(wintime[,1]))

## Order by event id and compute gaps between consecutive qualifying events.
sortwintime = wintime[order(wintime[["runid"]]),]
gaps = array(0, dim = length(sortwintime[,1]) - 1)
## seq_along() instead of 1:length(): safe if there are zero gaps.
for (i in seq_along(gaps)) {
  gaps[i] = sortwintime[i+1,"runid"] - sortwintime[i,"runid"]
}
hist(gaps)

## Fit a geometric distribution to the gaps (shifted so 0 is a valid value).
library(MASS)
f = fitdistr(gaps - 1, "geometric")
print(f["estimate"])

## Observed counts over a deliberately over-wide support so no gap falls
## outside the factor levels.
ct = table(factor(gaps - 1, levels = 0:(10*max(gaps))))
print(ct)

param = as.double(f["estimate"])
## BUG FIX: geometric probabilities over a finite support sum to slightly
## less than 1, which makes chisq.test() stop with "probabilities must sum
## to 1"; rescale.p = TRUE normalises them as intended.
chisq.test(ct, p = param*(1-param)^(0:(10*max(gaps))),
           simulate.p.value = TRUE, rescale.p = TRUE)

dbDisconnect(con)
b670a0d10a84ea730b4f106c666f99f7dfe2ed61 | a3aa573ccaee0f38e9d846509aa89599616051ab | /man/data.glass.Rd | 0a00decd838439e5e61d855c1b5e11517500ac0c | [] | no_license | toppu/PLRank | 4986818372c72a32c838c3ff6292d67eb21263f6 | a45dcb5cab61b1468be04e7b6d8e9da87de2a107 | refs/heads/master | 2021-01-13T02:03:47.113600 | 2015-07-06T08:59:54 | 2015-07-06T08:59:54 | 22,955,186 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 336 | rd | data.glass.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{data.glass}
\alias{data.glass}
\title{glass dataset}
\format{A data frame with 214 observations, 9 attributes and 6 labels}
\source{
UCI respository and Statlog collection
}
\usage{
data(data.glass)
}
\description{
glass dataset
}
\examples{
data(data.glass)
}
|
afcb84ef2f09127151b17a46c082d168eda5b04b | ed2629e6745e247aaa43ffef85a777ac0ee1f158 | /Code to Clean Data.R | f399cad243d03837e25ca71a284d633a23a7cee9 | [] | no_license | MeganFantes/Midterm-Project_MA-415 | ee768ce4bb1587bea6c33036057f169efebad95c | 8a2420b6387dcd585fbb266aa580e477d9b61f24 | refs/heads/master | 2021-01-12T15:26:40.700570 | 2016-10-25T02:18:33 | 2016-10-25T02:18:33 | 71,783,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,430 | r | Code to Clean Data.R | library(stringr)
library(reshape2)
library(plyr)
# read the csv file, import it into the environment
vehicles <- read.csv("Original Data_Vehicles.csv")
# There are many variables that are given for both Fuel Type 1 and Fuel Type 2.
# We want to melt all variables given for both fuel types, so that there is only 1 colvar
# indicating fuel type, and one colvar indicating the specific variable for each fuel type
# melt annual petroleum consumption in barrels for each fuel type
to_melt <- vehicles[c("id", "barrels08", "barrelsA08")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Annual_Petrol_Consumption")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "barrels08", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "barrelsA08", 2)
Fuel_Type_Properties <- to_melt
Fuel_Type_Properties <- arrange(Fuel_Type_Properties, id, Fuel_Type)
# melt city MPG for each fuel type
to_melt <- vehicles[c("id", "city08", "cityA08")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "City_MPG")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "city08", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "cityA08", 2)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# melt tailpipe CO2 for each fuel type
to_melt <- vehicles[c("id", "co2TailpipeGpm", "co2TailpipeAGpm")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Tailpipe_CO2")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "co2TailpipeGpm", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "co2TailpipeAGpm", 2)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# melt combined MPG for each fuel type
to_melt <- vehicles[c("id", "comb08", "combA08")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Combined_MPG")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "comb08", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "combA08", 2)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# melt annual fuel cost for each fuel type
to_melt <- vehicles[c("id", "fuelCost08", "fuelCostA08")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Fuel_Cost")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "fuelCost08", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "fuelCostA08", 2)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# melt highway MPG for each fuel type
to_melt <- vehicles[c("id", "highway08", "highwayA08")]
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Highway_MPG")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "highway08", 1)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "highwayA08", 2)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# melt string values for fuel type
to_melt <- vehicles[c("id", "fuelType1", "fuelType2")]
to_melt$fuelType1 <- as.character(to_melt$fuelType1) # convert to character vector for melting
to_melt$fuelType2 <- as.character(to_melt$fuelType2) # convert to character vector for melting
to_melt <- melt(to_melt, id = "id", na.rm = FALSE)
names(to_melt) <- c("id", "Fuel_Type", "Fuel_Type_Name")
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "fuelType2", 2)
to_melt$Fuel_Type <- str_replace(to_melt$Fuel_Type, "fuelType1", 1)
Fuel_Type_Properties <- join(Fuel_Type_Properties, to_melt, by = c("id", "Fuel_Type"))
# rearrange Fuel_Type_Properties data table so that Fuel_Type and Fuel_Type Name are next to each other
names(Fuel_Type_Properties)
Fuel_Type_Properties <- Fuel_Type_Properties[c("id","Fuel_Type","Fuel_Type_Name","Annual_Petrol_Consumption","City_MPG",
"Highway_MPG","Combined_MPG","Tailpipe_CO2","Fuel_Cost")]
# Convert the fuel-type columns to factors and recode "missing" markers
# (empty names, zero measurements) to NA.
Fuel_Type_Properties$Fuel_Type <- as.factor(Fuel_Type_Properties$Fuel_Type)
Fuel_Type_Properties$Fuel_Type_Name <- as.factor(Fuel_Type_Properties$Fuel_Type_Name)

# An empty-string name means the vehicle has no second fuel type.
# BUG FIX: the previous x[-which(...)] idiom assumed "" was always the
# first factor level, and x[-integer(0)] selects nothing (not
# everything) when which() is empty; test the value directly instead.
blank_name <- !is.na(Fuel_Type_Properties$Fuel_Type_Name) &
  Fuel_Type_Properties$Fuel_Type_Name == ""
Fuel_Type_Properties$Fuel_Type_Name[blank_name] <- NA

# A value of 0 in any measurement column is a missing-data marker
measure_cols <- c("Annual_Petrol_Consumption", "City_MPG", "Highway_MPG",
                  "Combined_MPG", "Tailpipe_CO2", "Fuel_Cost")
for (col in measure_cols) {
  is_zero <- !is.na(Fuel_Type_Properties[[col]]) & Fuel_Type_Properties[[col]] == 0
  Fuel_Type_Properties[[col]][is_zero] <- NA
}
# Now build the per-vehicle (fuel-independent) properties table.
keep_cols <- c("id", "year", "make", "model", "atvType", "cylinders",
               "charge120", "charge240", "cityCD", "cityE", "cityUF",
               "combE", "combinedCD", "combinedUF", "displ", "drive",
               "eng_dscr", "evMotor", "highwayUF", "hlv", "hpv", "lv2",
               "lv4", "phevBlended", "pv2", "pv4", "trany", "youSaveSpend",
               "sCharger", "tCharger", "c240bDscr", "startStop",
               "phevCity", "phevHwy", "phevComb")
Vehicle_Properties <- vehicles[keep_cols]
Vehicle_Properties <- arrange(Vehicle_Properties, id)

# Coerce each column to its appropriate type in one pass per type
# (replaces 35 individual assignment statements).
numeric_cols <- c("id", "year", "cylinders", "charge120", "charge240",
                  "cityCD", "cityE", "cityUF", "combE", "combinedCD",
                  "combinedUF", "displ", "evMotor", "highwayUF", "hlv",
                  "hpv", "lv2", "lv4", "pv2", "pv4", "youSaveSpend",
                  "phevCity", "phevHwy", "phevComb")
factor_cols <- c("make", "atvType", "drive", "eng_dscr", "phevBlended",
                 "trany", "sCharger", "tCharger", "c240bDscr", "startStop")
# NOTE(review): evMotor looks like a text description; as.numeric() on a
# factor returns level codes, not parsed numbers.  Kept in numeric_cols
# for behavioral parity with the original -- verify this is intended.
Vehicle_Properties[numeric_cols] <- lapply(Vehicle_Properties[numeric_cols], as.numeric)
Vehicle_Properties[factor_cols] <- lapply(Vehicle_Properties[factor_cols], as.factor)
Vehicle_Properties$model <- as.character(Vehicle_Properties$model)
# Write the two tidy tables to disk; row names are suppressed so the
# files round-trip cleanly through read.csv()
write.csv(Fuel_Type_Properties, file = "Cleaned Data_Fuel Type Properties.csv", row.names = FALSE)
write.csv(Vehicle_Properties, file = "Cleaned Data_Vehicle Properties.csv",row.names = FALSE)
6043b0efcc4c128f7cf245188ebac247c244d085 | 8ab1591ac6fb170a7bd1a421f775488061a04ebc | /s1.R | d86f25a74b0834be22a504d166e208e60c473d32 | [] | no_license | hly89/smalldata | 7e12c5eb5bce1624cd60f4fe4a49d9ca040f5def | 0c69d76b49c1687a0675bdae7de19ec80e7d3956 | refs/heads/master | 2021-01-21T10:13:16.109544 | 2014-03-04T13:08:41 | 2014-03-04T13:08:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,705 | r | s1.R | # each group: 10 docs, 17 groups
totaldocs<-c(1:170)
group<-split(totaldocs,cut(totaldocs,17))
s1_training<-rbind(dtm_training[g[[1]][1:10],],dtm_training[g[[2]][1:10],])
for(groupidx in 3:length(g)){
s1_training<-rbind(s1_training,dtm_training[g[[groupidx]][1:10],])
}
s1_test<-rbind(dtm_test[g[[1]][1:10],],dtm_test[g[[2]][1:10],])
for(groupidx in 3:length(g)){
s1_test<-rbind(s1_test,dtm_test[g[[groupidx]][1:10],])
}
k<-15; # number of topics
seed<-2000;
gibbs_s1<-LDA(s1_training, k=k, method="Gibbs", control=list(seed=seed, burnin=1000, thin=100, iter=2000));
slda_s1<-list() # the results of the gibbs sampling
# set the number of topics for each week is 13
for(t in 1:length(g)){
#slda[[t]]<-LDA(dtm[g[[t]],], k=30, method="Gibbs", control=list(seed=as.integer(Sys.time()), burnin=2000, thin=100, iter=2000));
slda_s1[[t]]<-LDA(s1_training[group[[t]],], k=k, method="Gibbs", control=list(seed=seed, burnin=2000, thin=100, iter=2000));
}
per_sldas1<-vector("numeric",length(g))
for(per_i in 1:length(g)){
per_sldas1[per_i]<-perplexity(slda_s1[[per_i]],s1_test[group[[per_i]],])
}
per_ldas1<-vector("numeric",length(g))
for(per_i in 1:length(g)){
per_ldas1[per_i]<-perplexity(gibbs_s1,s1_test[group[[per_i]],])
}
per_ldas1<-cbind(per_ldas1,c(1:17),rep("lda",17))
per_ldas1<-data.frame(per_ldas1,stringsAsFactors=FALSE)
names(per_ldas1)<-c("perplexity","time","type")
per_sldas1<-cbind(per_sldas1,c(1:17),rep("slda",17))
per_sldas1<-data.frame(per_sldas1,stringsAsFactors=FALSE)
names(per_sldas1)<-c("perplexity","time","type")
pers1<-rbind(per_ldas1,per_sldas1)
perplexity_plots1<-ggplot(data=pers1, aes(x=time,y=perplexity, group=type, colour=type))+geom_line()+geom_point()
|
db68a6df095448f1e578d0fa6343665c49ef1c00 | 4df591b93824f09fbf5bb09bf8c948b797c5c00c | /plot4.r | 13f0b48d2fcd276314655350f168c01b5f80bad6 | [] | no_license | sanchal/ExData_Plotting1 | 3d231623f989897c195a05e27a73f7aa8db60ca8 | 01834cbfcdfe6b9cf26d209dd9e5451c8bac08b4 | refs/heads/master | 2020-12-31T01:47:24.502721 | 2016-04-10T20:45:00 | 2016-04-10T20:45:00 | 55,920,326 | 0 | 0 | null | 2016-04-10T20:15:22 | 2016-04-10T20:15:22 | null | UTF-8 | R | false | false | 1,921 | r | plot4.r | create_plot4 <- function()
{
#read the file with the data downloaded
dtPower <- read.table("household_power_consumption.txt", header=T, sep=";" , stringsAsFactors = FALSE)
#we are only interested in data between 2007-02-01 and 2007-02-02 , subset this data.
dtPowerSs <- subset(dtPower, as.Date(as.character(Date),"%d/%m/%Y") >= as.Date("2007-02-01") & as.Date(as.character(Date),"%d/%m/%Y") <= as.Date("2007-02-02") )
#add a new column with the date and time values combined
dtPowerSs$DateTime <- strptime(paste(dtPowerSs$Date, dtPowerSs$Time), format = "%d/%m/%Y %H:%M:%S")
#open the png device driver with the filename and dimensions set to 480 X 480
png(file="plot4.png",width=480,height=480)
#make the plotting area 2 rows and 2 columns
par(mfrow = c(2,2))
#plot the first graph
with(dtPowerSs,plot(DateTime,Global_active_power, type = "l" , main = "" , ylab = "Global Active Power" , xlab = ""))
#plot the second graph
with(dtPowerSs,plot(DateTime,Voltage, type = "l" , main = "" , ylab = "Voltage" , xlab = "datetime"))
#plot the third graph and then add the lines and the legend
plot(dtPowerSs$DateTime,dtPowerSs$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(dtPowerSs$DateTime,dtPowerSs$Sub_metering_2,col="red")
lines(dtPowerSs$DateTime,dtPowerSs$Sub_metering_3,col="blue")
#set box.lwd = 0 to remove the box to match what is in the assignment..
legend("topright", col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),lty=c(1,1,1), box.lwd = 0)
#plot the fourth graph
with(dtPowerSs,plot(DateTime,Global_reactive_power, type = "l" , main = "" , xlab = "datetime"))
dev.off()
} |
99a4ee819f9e307cd5169513ed7a4a59719ac257 | a076f2e557a4f5b16892134b2189c9f9303a2a66 | /run_analysis.R | ba2b02560bd3052179b91a55c7b72a38c9d162b2 | [] | no_license | DataAbhi/GettingDataProject | 60176269b14619eae08a478326a37cf3f0497860 | b869a3caafafe2fe2e79b81783d6c98aad13d438 | refs/heads/master | 2020-03-30T16:42:25.696902 | 2016-09-18T18:27:28 | 2016-09-18T18:27:28 | 68,539,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,991 | r | run_analysis.R | ##Coursera - Cleaning Data - Course Project
##Link for data: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
##root-data directory: C:\\Data\\R\\Repo\\cleaningdata\\Cleaningdataproject\\data
##test data directory: C:\\Data\\R\\Repo\\cleaningdata\\Cleaningdataproject\\data\\test
##training data directory: C:\\Data\\R\\Repo\\cleaningdata\\Cleaningdataproject\\data\\train
library(reshape2)
library(dplyr)
library(tidyr)
rm(list = ls())
wd<-"C:\\Data\\R\\Repo\\cleaningdata\\Cleaningdataproject\\data\\"
setwd(wd)
features<-read.table("features.txt", stringsAsFactors=FALSE)
activitylabels<-read.table("activity_labels.txt", stringsAsFactors=FALSE)
##Importing Test datasets
xtest<-read.table(".\\test\\X_test.txt")
ytest<-read.table(".\\test\\y_test.txt")
colnames(ytest)[1]<-"act"
subjecttest<-read.table(".\\test\\subject_test.txt")
colnames(subjecttest)[1]<-"sub"
##merging it all together first for test subjects
test<-cbind(xtest, ytest, subjecttest) ##column 562 is activity id and column 563 is subject id
##Importing traing datasets
xtrain<-read.table(".\\train\\X_train.txt")
ytrain<-read.table(".\\train\\y_train.txt")
colnames(ytrain)[1]<-"act"
subjecttrain<-read.table(".\\train\\subject_train.txt")
colnames(subjecttrain)[1]<-"sub"
##merging it all together first for test subjects
train <-cbind(xtrain, ytrain,subjecttrain) ##column 562 is activity id and column 563 is subject id
##2.Creating a combined data set by merging the two datasets
all<-rbind(test,train)
names(all)[1:561]<-features$V2
##3. Now keeping only the variables that are means and standard deviations
##cleaning up variable names
names(all)<-tolower(gsub("-|)|\\(", "", names(all)))
rvart<-all[grepl("(std|std[x|y|z]|mean[x|y|z]|mean|act|sub)$" , names(all))]
##some additional cleaning required
rvar<-rvart[grepl("^[^(angle)]" , names(rvart))]
rvar<-cbind(rvar, rvart$act)
names(rvar)[68] <-"act"
dim(rvar)
##only 66 relevant variables and 2 more columns for the subject and activity
rm(rvart) ##removing the useless dataset
##4. Uses descriptive activity names to name the activities in the data set
rvar$act <- factor(rvar$act, levels = activitylabels$V1, labels = activitylabels$V2)
##5. Creating a second tidy data set with the average of each variable for each activity and each subject.
##First creating a function called mtab to generate summary tables
mtab<-function(var, type=string){ ##takes name of the variable to summarize as a input
a<-rvar %>% ##using rvar
melt(id=c("sub", "act"), measure.vars=var) %>% ##melting the data with sub and act as identifiers
dcast( sub~act, mean) %>% ##using dcast to generate the summary table but activity gets divided into columns
gather(act, max, -sub) %>% ##using gather to put act in rows
arrange(sub, act) ##sorting according to subject and activity
names(a)[3]<-paste0(var,"avg") ##renaming the varirable to input variable + avg
print(a)
}
##using lapply to run this over all the variables in rvar; lapply gives a list so using as.data.frame to convert into a dataset
data<-as.data.frame(lapply(names(select(rvar, -sub, -act)), mtab))
##data frame has multiple duplicate values of sub and act, removing those
tidydata<-data[,grep("[^0-9]$", names(data))]
##removing unnecassry files
rm(data)
##generating self-explanatory labels for activity
tidydata$act<-gsub("WALKING$", "1.Walking", tidydata$act)
tidydata$act<-gsub("WALKING_UPSTAIRS$", "2.Walking_Upstairs",tidydata$act)
tidydata$act<-gsub("WALKING_DOWNSTAIRS$", "3.Walking_downstairs",tidydata$act)
tidydata$act<-gsub("SITTING$", "4.Sitting",tidydata$act)
tidydata$act<-gsub("STANDING$", "5.Standing",tidydata$act)
tidydata$act<-gsub("LAYING$", "6.Laying",tidydata$act)
##converting into factor
tidydata$act<-factor(tidydata$act)
tidydata<-arrange(tidydata,sub, act)
##tidydata is the final output
write.table(tidydata, file="tidydata.txt", row.name=FALSE)
|
3a47df0b8f8a7def559baa683a258d66df039605 | 386a6faa2bfa03d46b394fdd5c683d60026a8242 | /Week5/Trials.R | 241b46a8b0cc24bd7793459a806a85dfe394666e | [] | no_license | wiflore/Analytics-on-Edge | db090c7e80952bf4958bb0e0edbaefe5dc426e17 | f6c92889eb28f04bdd4a9f56418675d3486785d4 | refs/heads/master | 2021-01-21T21:05:51.116105 | 2017-05-25T01:19:04 | 2017-05-25T01:19:04 | 92,305,423 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,204 | r | Trials.R | setwd("~/Dropbox/MIT Analytics/Week5")
# Install new packages
install.packages("ROCR")
library(randomForest)
library(caTools)
library(rpart)
library(rpart.plot)
library(randomForest)
library(tm)
library(SnowballC)
library(ROCR)
Sys.setlocale("LC_ALL", "C")
# Clinical-trial search results; each row has a title, an abstract, and
# a "trial" outcome column
trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
summary(trials)
str(trials)
# Longest abstract, in characters
max(nchar(trials$abstract))
# How many observations have an empty abstract?
table(nchar(trials$abstract) == 0)
# Row index of the shortest title, then the title itself
which.min(nchar(trials$title))
trials$title[1258]
# Create one corpus for the abstracts and one for the titles
corpusAbstract = Corpus(VectorSource(trials$abstract))
corpusTitle = Corpus(VectorSource(trials$title))
# Look at corpus
corpusTitle
corpusTitle[[2]]
corpusAbstract
corpusAbstract[[2]]
# ---- Text pre-processing (applied identically to titles and abstracts) ----

# Lower-case all text
corpusAbstract = tm_map(corpusAbstract, tolower)
corpusTitle = tm_map(corpusTitle, tolower)

# tolower() returns plain character vectors, so convert the corpora back
# to PlainTextDocument (required by recent versions of the tm package)
corpusTitle = tm_map(corpusTitle, PlainTextDocument)
corpusAbstract = tm_map(corpusAbstract, PlainTextDocument)

# Remove punctuation
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removePunctuation)

# Look at the first few English stop words, then remove them
stopwords("english")[1:10]
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))

# Reduce words to their stems
corpusTitle = tm_map(corpusTitle, stemDocument)
corpusAbstract = tm_map(corpusAbstract, stemDocument)

# Spot-check one document after the full cleaning pipeline
corpusAbstract[[2]]
corpusTitle[[2]]

# BUG FIX: the original called findFreqTerms(corpusAbstract) /
# findFreqTerms(corpusTitle) here, but findFreqTerms() operates on a
# DocumentTermMatrix, not a corpus, so both calls would error.  Frequent
# terms are inspected below, after the DTMs are built.
# ---- Document-term matrices ----
dtmAbstract <- DocumentTermMatrix(corpusAbstract)
dtmTitle <- DocumentTermMatrix(corpusTitle)

# Inspect term frequencies before pruning
findFreqTerms(dtmAbstract, lowfreq = 1)
findFreqTerms(dtmTitle, lowfreq = 10)

# Keep only terms appearing in at least 5% of documents
dtmAbstract <- removeSparseTerms(dtmAbstract, 0.95)
dtmTitle <- removeSparseTerms(dtmTitle, 0.95)

# Convert to data frames with syntactically valid column names
dtmAbstract <- as.data.frame(as.matrix(dtmAbstract))
dtmTitle <- as.data.frame(as.matrix(dtmTitle))
colnames(dtmAbstract) <- make.names(colnames(dtmAbstract))
colnames(dtmTitle) <- make.names(colnames(dtmTitle))

# How many terms survived the pruning in each matrix?
ncol(dtmTitle)
ncol(dtmAbstract)
str(dtmAbstract)

# Most frequent abstract term overall
which.max(colSums(dtmAbstract))

# Prefix the column names (T = title, A = abstract) so the two matrices
# can be combined without name collisions
colnames(dtmTitle) <- paste0("T", colnames(dtmTitle))
colnames(dtmTitle)
colnames(dtmAbstract) <- paste0("A", colnames(dtmAbstract))
colnames(dtmAbstract)

# Combined feature matrix: title terms followed by abstract terms
dtm <- cbind(dtmTitle, dtmAbstract)
str(dtm)
ncol(dtm)
# ---- Train/test split ----
dtm$trial <- trials$trial
set.seed(144)
spl <- sample.split(dtm$trial, SplitRatio = 0.7)
train <- subset(dtm, spl == TRUE)
test <- subset(dtm, spl == FALSE)

# Baseline accuracy: always predict the most frequent class.
# (BUG FIX: removed the original bare table(train), which tried to
# cross-tabulate every term column of the DTM at once.)
baseline <- table(train$trial)
max(baseline) / sum(baseline)

# ---- CART model ----
trialCART <- rpart(trial ~ ., data = train, method = "class")
prp(trialCART)

# Training-set class probabilities; largest predicted probability of
# class 1.  (BUG FIX: the original fused two statements on one line --
# "predTrain= predict(trialCART)max(predTrain[,2])" -- a syntax error.)
predTrain <- predict(trialCART)
max(predTrain[, 2])

# Training-set confusion matrix at a 0.5 cutoff.  (BUG FIX: predict()
# returns a two-column probability matrix, so the cutoff must be applied
# to column 2; comparing the whole matrix made table() fail.)
table(train$trial, predTrain[, 2] >= 0.5)
# accuracy (631+441)/(631+441+99+131), sensitivity 441/(441+131) and specificity 631/(631+99)

# Test-set predictions and confusion matrix
predTest <- predict(trialCART, newdata = test)[, 2]
summary(predTest)
table(test$trial, predTest >= 0.5)

# ---- ROC curve and AUC ----
ROCRpred <- prediction(predTest, test$trial)
ROCRperf <- performance(ROCRpred, "tpr", "fpr")
# Plot ROC curve, then with colors, then with threshold labels
plot(ROCRperf)
plot(ROCRperf, colorize = TRUE)
plot(ROCRperf, colorize = TRUE,
     print.cutoffs.at = seq(0, 1, by = 0.1), text.adj = c(-0.2, 1.7))
auc <- as.numeric(performance(ROCRpred, "auc")@y.values)
auc

# Reference (BUG FIX: a bare URL is not valid R syntax, so keep it in a comment):
# https://rstudio-pubs-static.s3.amazonaws.com/92510_018db285fda546fcb89b53dd2847b5d4.html#separating-spam-from-ham-part-1
73b78b492e06fd3f270f5ff4b3bd15cf2a33b5bc | ff9eb712be2af2fa24b28ecc75341b741d5e0b01 | /R/epoisSinglyCensored.mle.R | ea4e20be673b4d673270b742ea51bc7e35986cdd | [] | no_license | alexkowa/EnvStats | 715c35c196832480ee304af1034ce286e40e46c2 | 166e5445d252aa77e50b2b0316f79dee6d070d14 | refs/heads/master | 2023-06-26T19:27:24.446592 | 2023-06-14T05:48:07 | 2023-06-14T05:48:07 | 140,378,542 | 21 | 6 | null | 2023-05-10T10:27:08 | 2018-07-10T04:49:22 | R | UTF-8 | R | false | false | 2,735 | r | epoisSinglyCensored.mle.R | epoisSinglyCensored.mle <-
function (x, censored, censoring.side, ci, ci.method = "profile.likelihood",
ci.type, conf.level, ci.sample.size = N - n.cen, pivot.statistic = "z")
{
N <- length(x)
x.cen <- x[censored]
T1 <- x.cen[1]
n.cen <- length(x.cen)
x.bar <- mean(x[!censored])
if (censoring.side == "left")
fcn <- function(lambda, x.bar, N, T1, n.cen) {
(x.bar - lambda * (1 + ((n.cen/(N - n.cen)) * dpois(T1 -
1, lambda))/ppois(T1 - 1, lambda)))^2
}
else fcn <- function(lambda, x.bar, N, T1, n.cen) {
(x.bar - lambda * (1 - ((n.cen/(N - n.cen)) * dpois(T1,
lambda))/(1 - ppois(T1, lambda))))^2
}
lambda.hat <- nlminb(start = x.bar, objective = fcn, lower = .Machine$double.eps,
x.bar = x.bar, N = N, T1 = T1, n.cen = n.cen)$par
names(lambda.hat) <- "lambda"
ret.list <- list(parameters = lambda.hat)
if (ci) {
ci.method <- match.arg(ci.method, c("normal.approx",
"profile.likelihood"))
pivot.statistic <- match.arg(pivot.statistic, c("z",
"t"))
n <- N - n.cen
if (censoring.side == "left") {
con1 <- ppois(T1 - 1, lambda.hat)
con2 <- dpois(T1 - 1, lambda.hat)/con1
d2.lnL.wrt.lambda <- (-n * x.bar)/lambda.hat^2 -
n.cen * (dpois(T1 - 2, lambda.hat)/con1 - con2 +
con2^2)
}
else {
con1 <- 1 - ppois(T1, lambda.hat)
con2 <- dpois(T1, lambda.hat)/con1
d2.lnL.wrt.lambda <- (-n * x.bar)/lambda.hat^2 +
n.cen * (dpois(T1 - 1, lambda.hat)/con1 - con2 -
con2^2)
}
var.lambda.hat <- -1/d2.lnL.wrt.lambda
var.cov.params <- var.lambda.hat
names(var.cov.params) <- "lambda"
ci.obj <- ci.normal.approx(theta.hat = lambda.hat, sd.theta.hat = sqrt(var.lambda.hat),
n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
alpha = 1 - conf.level, lb = 0, test.statistic = pivot.statistic)
ci.obj$parameter <- "lambda"
if (ci.method == "profile.likelihood") {
limits <- ci.obj$limits
names(limits) <- NULL
ci.obj <- ci.epoisCensored.profile.likelihood(x = x,
censored = censored, censoring.side = censoring.side,
lambda.mle = lambda.hat, ci.type = ci.type, conf.level = conf.level,
LCL.start = limits[1], UCL.start = limits[2])
}
ret.list <- c(ret.list, list(var.cov.params = var.cov.params,
ci.obj = ci.obj))
}
ret.list
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.