#' Shapes the ddPCR data into a better format for ggplot
#'
#' @param testdata the original data frame after reading in the csv file
#'
#' @export
#'
#' @return Returns a data frame that combines the MT-ND1 and RPPH1 probe information
#'
#' @examples
#' combined.data <- ddpcr_process(testdata)
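#' # Illustrative follow-up plot (a sketch only: assumes ggplot2 is installed and
#' # uses the merged column names produced above):
#' # ggplot2::ggplot(combined.data,
#' #                 ggplot2::aes(x = RPPH_CopiesPer20uLWell, y = ND_CopiesPer20uLWell)) +
#' #   ggplot2::geom_point()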
ddpcr_process <- function(testdata) {
require(plyr)
require(dplyr)
require(magrittr)
# drop samples with not enough droplets
good_data <- subset(testdata, AcceptedDroplets > 8000) # dropped two samples
# select columns to care about
good_data <- dplyr::select(good_data, Well, Sample, Target, Concentration, CopiesPer20uLWell, AcceptedDroplets, Ratio, PoissonRatioMax, PoissonRatioMin)
# assign MT ratio
good_data$MT.ratio <- 1/good_data$Ratio
# combine the data into a good dataframe
rpph1 <- subset(good_data, Target == 'RPPH1')
nd1 <- subset(good_data, Target == 'MT-ND1')
rpph1$RPPH_CopiesPer20uLWell <- rpph1$CopiesPer20uLWell
rpph1 <- dplyr::select(rpph1, Well, RPPH_CopiesPer20uLWell)
nd1$ND_CopiesPer20uLWell <- nd1$CopiesPer20uLWell
nd1$CopiesPer20uLWell <- NULL
combined.data <- merge(nd1, rpph1, by = 'Well')
return(combined.data)
}
## ---- source: /R/ddpcr_process.R (repo syyang93/yangR, no_license) ----
context("Test the present value of a single payment (pv) and annuity payments (spending) made in the future")
test_that("1: test that the start of a time series is set correctly with a time series for inflation but not for the rate", {
fv = -10000
r1=0.02
r2=ts(rep(0.02,30),start = 2000)
nper=30
infl = ts(rep(0.02,30),start = 2000)
res = pv(r1,infl,nper,fv)-pv(r2,infl,nper,fv)
expect_identical(res,ts(rep(0,30),start =2000))
})
test_that("2: test that pv returns the expected values with a time series for inflation but not for the rate", {
fv = -123654
r1=0.02
nper=30
infl = ts(rep(0.02,30),start = 2000)
res = pv(r1,infl,nper,fv)
true= ts(c(118852.364475201830,114237.182309882584,109801.213292851375,105537.498359142046,
101439.348672762455,97500.335133374130,93714.278290440328,90075.238649019928,
86577.507351999157,83215.597224143741,79984.234163921312,76878.348869589885,
73893.068886572350,71023.710963641250,68265.773705921994,65614.930513189145,
63067.022792377109,60618.053433657347,58264.180539847519,56001.711399315180,
53827.096692921165,51736.924925914223,49727.917076042118,47796.921449482994,
45940.908736527294,44156.967259253455,42442.298403742265,40794.212229663841,
39210.123250349716,37687.546376729828),start=2000)
expect_identical(res,true)
})
test_that("3: test that the start of a time series is set correctly with a time series for pmt", {
fv = -123654
pmt=ts(rep(-1000,30),start = 2000)
r1=0.04
nper=30
infl = ts(rep(0.02,30),start = 2000)
res = pv(r1,infl,nper,fv,pmt)
true= ts(c(117528.280542986424, 111771.786681954356, 106362.658913504405, 101280.317434171331,
96505.387762685205, 92019.630664880766, 87805.876133121885, 83847.961186388420,
80130.671270632956, 76639.685051700159, 73361.522405059630, 70283.495417871411,
67393.662229526177, 64680.783546812636, 62134.281679300271, 59744.201949418959,
57501.176340098071, 55396.389250727923, 53421.545239651459, 51568.838638411369,
49830.924929591558, 48200.893786324697, 46672.243677412698, 45238.857947543125,
43894.982287303043, 42635.203512609405, 41454.429577810282, 40347.870751079048,
39311.021884840709, 38339.645717849293),start=2000)
expect_identical(res,true)
})
test_that("4: test both pmt and fv", {
fv = -123654
pmt=-1000
r1=0.04
nper=30
infl = ts(rep(0.02,30),start = 2000)
res = pv(r1,infl,nper,fv,pmt)
true= ts(c(117528.280542986424, 111771.786681954356, 106362.658913504405, 101280.317434171331,
96505.387762685205, 92019.630664880766, 87805.876133121885, 83847.961186388420,
80130.671270632956, 76639.685051700159, 73361.522405059630, 70283.495417871411,
67393.662229526177, 64680.783546812636, 62134.281679300271, 59744.201949418959,
57501.176340098071, 55396.389250727923, 53421.545239651459, 51568.838638411369,
49830.924929591558, 48200.893786324697, 46672.243677412698, 45238.857947543125,
43894.982287303043, 42635.203512609405, 41454.429577810282, 40347.870751079048,
39311.021884840709, 38339.645717849293),start=2000)
expect_identical(res,true)
})
## ---- source: /tests/testthat/testpv.R (repo eaoestergaard/UNPIE, no_license) ----
## Estimate Rt using epinow2
## Test the model given uniformly distributed times to observation, using the true mean.
## Load dependencies and set parameters ------------------------------------------------------
rm(list = ls())
source('../00-load_packages.R')
source('../00-util.R')
source('../00-run_test.R')
ggplot2::theme_set(theme_bw())
## Check if synthetic data already exists.
## If so, load from cache
## If not, make the data
parlist <- load_parlist()
# Synthetic data is loaded using:
# get_sim_df()
## Set parameters for EpiNow2 test
testpars <- list(
last_obs_time = 150,
output_folder = 'misspec-delay-distribution',
## True delays
true_mean_case_delay = 5,
true_sd_case_delay = 1.7,
true_mean_death_delay = 15,
true_sd_death_delay = 1.5,
true_mean_inc = exp(EpiNow2::covid_incubation_period[1, ]$mean),
true_sd_inc = exp(EpiNow2::covid_incubation_period[1, ]$sd))
## Delays specified in model
testpars$input_mean_case_delay = testpars$true_mean_case_delay
testpars$input_sd_case_delay = testpars$true_sd_case_delay
testpars$input_mean_death_delay = testpars$true_mean_death_delay
testpars$input_sd_death_delay = testpars$true_sd_death_delay
testpars$input_mean_inc = testpars$true_mean_inc
testpars$input_sd_inc = testpars$true_sd_inc
testpars$input_mean_gi = parlist$true_mean_GI
testpars$input_sd_gi = sqrt(parlist$true_var_GI)
dir_check(testpars$output_folder)
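## Note: the uniform simulation bounds below are chosen so that the simulated delays
## keep the true means (Unif(0, 2m) and Unif(5, 2m - 5) both have mean m), so the
## shape/variance of the delay distribution is misspecified but not its mean.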
run_test(parlist,
testpars,
max_time = testpars$last_obs_time,
r_case_dist = function(nn){runif(nn, 0, testpars$true_mean_case_delay*2)},
r_death_dist = function(nn){runif(nn, 5, (testpars$true_mean_death_delay*2)-5)},
d_case_dist = function(xx){dunif(xx, 0, testpars$true_mean_case_delay*2)},
d_death_dist = function(xx){dunif(xx, 5, testpars$true_mean_death_delay*2-5)})
## ---- source: /07-08-2020/02-test_misspec-delay-uniform.R (repo kgostic/epinow2_tests, no_license) ----
library(plsdepot)
### Name: plot.plsreg2
### Title: Plot PLS-R2 basic results
### Aliases: plot.plsreg2
### ** Examples
## Not run:
##D # load dataset vehicles
##D data(vehicles)
##D
##D # apply plsreg2
##D pls2 = plsreg2(vehicles[,1:12], vehicles[,13:16])
##D
##D # plot variables (circle of correlations)
##D plot(pls2, what="variables")
##D
##D # plot observations (as points)
##D plot(pls2, what="observations")
##D
##D # plot observations with labels
##D plot(pls2, what="observations", show.names=TRUE)
##D
## End(Not run)
## ---- source: /data/genthat_extracted_code/plsdepot/examples/plot.plsreg2.Rd.R (repo surayaaramli/typeRrh, no_license) ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nzv.R
\name{step_nzv}
\alias{step_nzv}
\alias{tidy.step_nzv}
\title{Near-Zero Variance Filter}
\usage{
step_nzv(
recipe,
...,
role = NA,
trained = FALSE,
freq_cut = 95/5,
unique_cut = 10,
options = list(freq_cut = 95/5, unique_cut = 10),
removals = NULL,
skip = FALSE,
id = rand_id("nzv")
)
\method{tidy}{step_nzv}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables that will be evaluated by the filtering. See
\code{\link[=selections]{selections()}} for more details. For the \code{tidy}
method, these are not currently used.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{freq_cut, unique_cut}{Numeric parameters for the filtering process. See
the Details section below.}
\item{options}{A list of options for the filter (see Details
below).}
\item{removals}{A character string that contains the names of
columns that should be removed. These values are not determined
until \code{\link[=prep.recipe]{prep.recipe()}} is called.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_nzv} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} which
is the columns that will be removed.
}
\description{
\code{step_nzv} creates a \emph{specification} of a recipe step
that will potentially remove variables that are highly sparse
and unbalanced.
}
\details{
This step diagnoses predictors that have one unique
value (i.e. are zero variance predictors) or predictors that have
both of the following characteristics:
\enumerate{
\item they have very few unique values relative to the number
of samples and
\item the ratio of the frequency of the most common value to
the frequency of the second most common value is large.
}
An example of a near-zero variance predictor is one that, for 1000
samples, has two distinct values and 999 of them are a single value.
To be flagged, first, the frequency of the most prevalent value
over the second most frequent value (called the "frequency
ratio") must be above \code{freq_cut}. Secondly, the "percent of
unique values," the number of unique values divided by the total
number of samples (times 100), must also be below
\code{unique_cut}.
In the above example, the frequency ratio is 999 and the unique
value percent is 0.2\%.
}
\examples{
library(modeldata)
data(biomass)
biomass$sparse <- c(1, rep(0, nrow(biomass) - 1))
biomass_tr <- biomass[biomass$dataset == "Training",]
biomass_te <- biomass[biomass$dataset == "Testing",]
rec <- recipe(HHV ~ carbon + hydrogen + oxygen +
nitrogen + sulfur + sparse,
data = biomass_tr)
nzv_filter <- rec \%>\%
step_nzv(all_predictors())
filter_obj <- prep(nzv_filter, training = biomass_tr)
filtered_te <- bake(filter_obj, biomass_te)
any(names(filtered_te) == "sparse")
tidy(nzv_filter, number = 1)
tidy(filter_obj, number = 1)
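# Hand-rolled sketch (not the recipes internals) of the two statistics described
# in Details, applied to the 'sparse' predictor; the default thresholds
# freq_cut = 95/5 and unique_cut = 10 are assumed.
counts <- sort(table(biomass_tr$sparse), decreasing = TRUE)
freq_ratio <- if (length(counts) > 1) counts[[1]] / counts[[2]] else Inf
unique_pct <- 100 * length(unique(biomass_tr$sparse)) / nrow(biomass_tr)
freq_ratio > 95/5 && unique_pct < 10  # TRUE: the predictor would be flagged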
}
\seealso{
\code{\link[=step_corr]{step_corr()}} \code{\link[=recipe]{recipe()}}
\code{\link[=prep.recipe]{prep.recipe()}} \code{\link[=bake.recipe]{bake.recipe()}}
}
\concept{preprocessing}
\concept{variable_filters}
\keyword{datagen}
## ---- source: /man/step_nzv.Rd (repo labouz/recipes, permissive license) ----
#' Full Maximum Likelihood inference of birth and death rates together with their changes along a phylogeny under a multi-type birth-death model.
#'
#' Infers a complete MSBD model from a phylogeny, including the most likely number of states, positions and times of state changes, and parameters associated with each state.
#' Uses a greedy approach to add states and Maximum Likelihood inference for the other parameters.
#'
#' @param tree Phylogenetic tree (in ape format) to calculate the likelihood on.
#' @param initial_values Initial values for the optimizer, to be provided as a vector in this order: gamma (optional), lambda, lambda decay rate (optional), mu (optional). See 'Details'.
#' @param uniform_weights Whether all states are weighted uniformly in shifts, default TRUE. If FALSE, the weights of states are calculated from the distributions \code{p_lambda} and \code{p_mu}. See 'Details'.
#' @param p_lambda Prior probability distribution on lambdas, used if \code{uniform_weights = FALSE}.
#' @param p_mu Prior probability distribution on mus, used if \code{uniform_weights = FALSE}.
#' @param rho Sampling proportion on extant tips, default 1.
#' @param sigma Sampling probability on extinct tips (tips are sampled upon extinction), default 0.
#' @param rho_sampling Whether the most recent tips should be considered extant tips, sampled with sampling proportion \code{rho}. If FALSE, all tips will be considered extinct tips, sampled with sampling probability \code{sigma}. Should be TRUE for most macroevolution datasets and FALSE for most epidemiology datasets.
#' @param lineage_counts For trees with clade collapsing. Number of lineages collapsed on each tip. Should be set to 1 for extinct tips.
#' @param tcut For trees with clade collapsing. Times of clade collapsing for each tip (i.e time of the MRCA of all collapsed lineages). Can be a single number or a vector of length the number of tips.
#' @param stepsize Size of the step to use for time discretization with exponential decay, default NULL. To use exponential decay, an initial value for \code{lambda_rates} should also be provided.
#' @param no_extinction Whether to use the Yule process (\code{mu=0}) for all states, default FALSE. If TRUE no initial value for \code{mu} is needed.
#' @param fixed_gamma Value to which \code{gamma} should be fixed, default NULL. If provided no initial value for \code{gamma} is needed.
#' @param unique_lambda Whether to use the same value of \code{lambda} for all states, default FALSE. If TRUE and exponential decay is active all states will also share the same value for \code{lambda_rate}.
#' @param unique_mu Whether to use the same value of \code{mu} for all states, default FALSE.
#'
#' @param optim_control Control list for the optimizer, corresponds to control input in optim function, see \code{?optim} for details.
#' @param attempt_remove Whether to attempt to remove shifts at the end of the inference, default TRUE. If FALSE, use a pure greedy algorithm.
#' @param max_nshifts Maximum number of shifts to test for, default \code{Inf}.
#' @param saved_state If provided, the inference will be restarted from this state.
#' @param save_path If provided, the progress of the inference will be saved to this path after each optimization step.
#' @param time_mode String controlling the time positions of inferred shifts. See 'Details'.
#' @param fast_optim Whether to use the faster mode of optimization, default FALSE. If TRUE only rates associated with the state currently being added to the tree and its ancestor will be optimized at each step, otherwise all rates are optimized.
#' @param parallel Whether the computation should be run in parallel, default FALSE. Will use a user-defined cluster if one is found, otherwise will define its own.
#' @param ncores Number of cores to use for a parallel computation.
#'
#' @return Returns a list describing the most likely model found, with the following components:
#' \item{\code{likelihood}}{the negative log likelihood of the model}
#' \item{\code{shifts.edge}}{the indexes of the edges where shifts happen, 0 indicates the root state}
#' \item{\code{shifts.time}}{the time positions of shifts}
#' \item{\code{gamma}}{the rate of state change}
#' \item{\code{lambdas}}{the birth rates of all states}
#' \item{\code{lambda_rates}}{if exponential decay was activated, the rates of decay of birth rate for all states}
#' \item{\code{mus}}{the death rates of all states}
#' \item{\code{best_models}}{a vector containing the negative log likelihood of the best model found for each number of states tested (\code{best_models[i]} corresponds to i states, i.e i-1 shifts)}
#' All vectors are indexed in the same way, so that the state with parameters \code{lambdas[i]}, \code{lambda_rates[i]} and \code{mus[i]} starts on edge \code{shifts.edge[i]} at time \code{shifts.time[i]}.
#'
#' @details It is to be noted that all times are counted backwards, with the most recent tip positioned at 0. \cr\cr
#'
#' Four time modes are possible for the input \code{time_mode}.
#' In \code{tip} mode, the shifts will be placed at 10\% of the length of the edge.
#' In \code{mid} mode, the shifts will be placed at 50\% of the length of the edge.
#' In \code{root} mode, the shifts will be placed at 90\% of the length of the edge.
#' In \code{3pos} mode, the three "tip", "mid" and "root" positions will be tested.\cr\cr
#'
#' The weights w are used for calculating the transition rates q from each state i to j: \eqn{q_{i,j}=\gamma*w_{i,j}}{q(i,j)=\gamma*w(i,j)}.
#' If \code{uniform_weights = TRUE}, \eqn{w_{i,j} = \frac{1}{N-1}}{w(i,j)=1/(N-1)} for all i,j, where N is the total number of states.
#' If \code{uniform_weights = FALSE}, \eqn{w_{i,j} = \frac{p_\lambda(\lambda_j)p_\mu(\mu_j)}{sum_{k \ne i}p_\lambda(\lambda_k)p_\mu(\mu_k)}}{w(i,j)=p\lambda(\lambdaj)p\mu(\muj)/sum(p\lambda(\lambdak)p\mu(\muk)) for all k!=i}
#' where the distributions \eqn{p_\lambda}{p\lambda} and \eqn{p_\mu}{p\mu} are provided by the inputs \code{p_lambda} and \code{p_mu}.\cr\cr
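#'
#' As an illustration only (not an exported helper), the weights from state 1 under
#' \code{uniform_weights = FALSE} could be computed along these lines, with made-up
#' rates and exponential priors standing in for \code{p_lambda} and \code{p_mu}:
#' \preformatted{
#' p_lambda <- function(x) stats::dexp(x, rate = 0.1)
#' p_mu     <- function(x) stats::dexp(x, rate = 1)
#' lambdas  <- c(5, 10, 20); mus <- c(0.5, 1, 2)
#' w <- p_lambda(lambdas) * p_mu(mus)
#' w[-1] / sum(w[-1])   # weights w(1,j) for all j != 1
#' }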
#'
#' Initial values for the optimization need to be provided as a vector and contain the following elements (in order):
#' an initial value for gamma, which is required unless \code{fixed_gamma} is provided,
#' an initial value for lambda which is always required,
#' an initial value for lambda decay rate, which is required if \code{stepsize} is provided,
#' and an initial value for mu, which is required unless \code{no_extinction = TRUE}.
#' An error will be raised if the number of initial values provided does not match the one expected from the rest of the settings,
#' and the function will fail if the likelihood cannot be calculated at the initial values.
#'
#' @examples
#' # Input a phylogeny
#' tree <- ape::read.tree(text = "(((t4:0.7293960718,(t1:0.450904974,t3:0.09259337652)
#' :0.04068535892):0.4769176776,t8:0.1541864066):0.7282000314,((t7:0.07264320855,
#' (((t5:0.8231869878,t6:0.3492440532):0.2380232813,t10:0.2367582193):0.5329497182,
#' t9:0.1016243151):0.5929288475):0.3003101915,t2:0.8320755605):0.2918686506);")
#'
#' # Infer the most likely multi-states birth-death model
#' # with full extant & extinct sampling
#' \dontrun{ML_MSBD(tree, initial_values = c(0.1, 10, 1), sigma = 1, time_mode = "mid") }
#' # Infer the most likely multi-states birth-death model with exponential decay
#' # and full extant & extinct sampling
#' \dontrun{ML_MSBD(tree, initial_values = c(0.1, 10, 0.5, 1), sigma = 1,
#' stepsize = 0.1, time_mode = "mid")}
#'
#' # Input a phylogeny with extant samples
#' tree2 <- ape::read.tree(text = "(t3:0.9703302342,((t4:0.1999577823,(t2:0.1287530271,
#' (t7:0.08853561159,(t8:0.07930237712,t9:0.07930237712):0.009233234474):0.04021741549):
#' 0.07120475526):0.4269919425,(((t10:0.0191876225,t5:0.0191876225):0.04849906822,
#' t6:0.06768669072):0.1672340445,t1:0.2349207353):0.3920289896):0.3433805094);")
#'
#' # Infer the most likely multi-states Yule model with partial extant sampling
#' \dontrun{ML_MSBD(tree2, initial_values = c(0.1, 10), no_extinction = TRUE,
#' rho = 0.5, time_mode = "mid")}
#' # Infer the most likely multi-states birth-death model with full extant sampling
#' # and unresolved extant tips
#' \dontrun{ML_MSBD(tree2, initial_values = c(0.1, 10, 1),
#' lineage_counts = c(2,5,1,3,1,1,1,1,2,6), tcut = 0.05, time_mode = "mid")}
#'
#' @export
ML_MSBD = function(tree,initial_values,
uniform_weights=TRUE,p_lambda=0,p_mu=0,
rho = 1, sigma=0, rho_sampling = TRUE,
lineage_counts = c(), tcut = 0,
stepsize=NULL, no_extinction=FALSE, fixed_gamma=NULL,
unique_lambda = FALSE, unique_mu = FALSE,
optim_control = list(), attempt_remove=TRUE, max_nshifts=Inf,
saved_state = NULL, save_path = NULL,
time_mode = c("3pos","tip","mid","root"),
fast_optim = FALSE,
parallel = FALSE, ncores = getOption('mc.cores', 2L)) {
if(time_mode %in% c("tip","mid","root")) time_positions = time_mode
else if (time_mode == "3pos") time_positions = c("tip","mid","root")
else stop("Invalid time positions mode, available are tip, mid, root and 3pos")
if(rho>1 || rho<0 || sigma>1 || sigma<0) stop("Invalid sampling proportions")
if(!rho_sampling && rho != 0) rho=0
  if(length(lineage_counts) > 0) {
print("Clade collapsing detected")
ntips = length(tree$tip.label)
if(length(lineage_counts) != ntips) stop("The vector of number of collapsed species doesn't match with the number of tips")
if(is.null(tcut)) stop("Time(s) of clade collapsing need to be provided")
if(length(tcut) ==1) tcut = rep(tcut, ntips)
if(length(tcut) != ntips) stop("The vector of times of clade collapsing doesn't match with the number of tips")
}
if(parallel) {
if (! requireNamespace("doParallel", quietly = TRUE)) stop("Parallel computation requires the doParallel package.")
if (! foreach::getDoParRegistered() ) {
doParallel::registerDoParallel(ncores)
message('Registered parallel computation with ', ncores, ' workers')
on.exit(doParallel::stopImplicitCluster())
} else {
      message('Using parallel computation with existing ', foreach::getDoParName(),
              ' with ', foreach::getDoParWorkers(), ' workers')
}
`%d%` <- foreach::`%dopar%`
} else {
message('Executing sequential computation')
`%d%` <- foreach::`%do%`
}
ptm = proc.time()[3]
if(is.null(saved_state)) {
initial_values = .get_initial_values_from_vector(initial_values,stepsize, no_extinction, fixed_gamma)
#first test without shifts
temp = .ML_optim(tree,c(),initial_values, c(),
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, "fixed",
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu, fast_optim)
if(is.na(temp$l)) {
stop("First optimization failure")
}
else {
saved_state = list(initial_screen = TRUE, bestl = temp$l, pars = temp$p, initial_values = initial_values)
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
#initial conditions screen
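  # perturb each starting value up and down by a factor of 10 (x10, then /10 of the
  # original) and keep any perturbed start that improves the fit; repeat until no
  # further improvement is found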
if(!is.null(saved_state$initial_screen)) {
while(TRUE) {
done = TRUE
for(i in 1:length(saved_state$initial_values)) {
#no screen on gamma, not useful
if(names(saved_state$initial_values)[i]=="gamma") next
iv = saved_state$initial_values
iv[[i]] = iv[[i]]*10
temp = .ML_optim(tree,c(),iv, c(),uniform_weights,p_lambda,p_mu,rho,sigma,rho_sampling,lineage_counts,tcut,
optim_control, "fixed",stepsize,no_extinction,fixed_gamma,unique_lambda,unique_mu, fast_optim)
if(!is.na(temp$l) && temp$l<saved_state$bestl) {
done = FALSE
saved_state$bestl = temp$l
saved_state$initial_values = iv
saved_state$pars = temp$p
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
iv[[i]] = iv[[i]]/100
temp = .ML_optim(tree,c(),iv, c(),uniform_weights,p_lambda,p_mu,rho,sigma,rho_sampling,lineage_counts,tcut,
optim_control, "fixed",stepsize,no_extinction,fixed_gamma,unique_lambda,unique_mu, fast_optim)
if(!is.na(temp$l) && temp$l<saved_state$bestl) {
done = FALSE
saved_state$bestl = temp$l
saved_state$initial_values = iv
saved_state$pars = temp$p
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
if(done) break
}
newsaved_state = list()
newsaved_state$params = saved_state$pars
newsaved_state$likelihood = saved_state$bestl
newsaved_state$best_models = c(saved_state$bestl)
newsaved_state$edges = c()
newsaved_state$times = c()
newsaved_state$initial_values = saved_state$pars
if(is.null(fixed_gamma)) newsaved_state$initial_values$gamma = saved_state$initial_values$gamma
saved_state = newsaved_state
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
while(TRUE) {
#if no more edges free, stop
if(length(saved_state$edges)==length(tree$edge) || length(saved_state$edges) == max_nshifts) break
if(!parallel && is.null(saved_state$partial)) {
saved_state$partial = list(edge_min = 0, time_min = NULL, p_min = NULL, min_lik = Inf, tested_edges = c())
}
    #try adding a single extra shift on each candidate edge and record the best ML fit
all_edges = foreach::foreach (i = 1:length(tree$edge[,1]), .packages = "ML.MSBD") %d% {
if(!parallel && i %in% saved_state$partial$tested_edges) return(list(edge = i, lik = Inf)) #already tested edge
if(is.element(i,saved_state$edges)) return(list(edge = i, lik = Inf)) #disallow multiple shifts on the same edge
if(tree$edge.length[i] == 0) { #zero-length edge
if(!parallel) saved_state$partial$tested_edges = c(saved_state$partial$tested_edges,i)
return(list(edge = i, lik = Inf))
}
if(fast_optim) anc_state = .find_ancestral_state(tree, i, saved_state$edges)
edge_results = list(edge = i, lik = Inf, pars = NULL, time = NULL)
for(time_pos in time_positions) {
temp = .ML_optim(tree,c(saved_state$edges,i),
saved_state$initial_values,saved_state$times,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, time_pos,
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu, fast_optim, saved_state$params, anc_state)
if(!is.na(temp$l) && temp$l < edge_results$lik) {
edge_results$lik = temp$l
edge_results$pars = temp$p
edge_results$time = temp$t
}
}
if(!parallel) {
if(edge_results$lik < saved_state$partial$min_lik) {
saved_state$partial$edge_min = i
        saved_state$partial$time_min = edge_results$time
        saved_state$partial$p_min = edge_results$pars
        saved_state$partial$min_lik = edge_results$lik
}
saved_state$partial$tested_edges = c(saved_state$partial$tested_edges,i)
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
return(edge_results)
}
if(parallel) {
liks = sapply(all_edges, function(x) x$lik)
best = which(liks == min(liks))
saved_state$partial = list(edge_min = all_edges[[best]]$edge, time_min = all_edges[[best]]$time,
p_min = all_edges[[best]]$pars, min_lik = all_edges[[best]]$lik)
}
saved_state$best_models = c(saved_state$best_models, saved_state$partial$min_lik)
if(saved_state$likelihood > saved_state$partial$min_lik) {
saved_state$edges = c(saved_state$edges,saved_state$partial$edge_min)
saved_state$times = c(saved_state$times,saved_state$partial$time_min)
saved_state$params = saved_state$partial$p_min
saved_state$likelihood = saved_state$partial$min_lik
saved_state$initial_values$gamma = saved_state$params$gamma #gamma needs to increase with the number of clusters
}
else {
break
}
saved_state$partial = NULL
if(!is.null(save_path)) save(saved_state, file=save_path)
}
  #optional: try removing each existing shift in turn and keep the removal if it improves the likelihood
if(attempt_remove && length(saved_state$edges)>0) {
removal = TRUE
while(removal==TRUE) {
removal = FALSE
for(i in length(saved_state$edges):1) {
edgetemp = saved_state$edges[-i]
time_tmp = saved_state$times[-i]
if(fast_optim) anc_state = .find_ancestral_state(tree, saved_state$edges[i], edgetemp)
values_temp = .remove_state(saved_state$params, i+1, stepsize, no_extinction, unique_lambda, unique_mu)
temp = .ML_optim(tree, edgetemp,
saved_state$initial_values, time_tmp,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, "fixed",
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu,
fast_optim,values_temp, anc_state)
if(!is.na(temp$l) && temp$l<saved_state$likelihood) {
print("shift removed")
saved_state$params = temp$p
saved_state$likelihood = temp$l
saved_state$best_models[length(edgetemp)+1] = temp$l
saved_state$edges = edgetemp
saved_state$times = time_tmp
removal = TRUE
}
else {
if(!is.na(temp$l) && saved_state$best_models[length(edgetemp)+1]> temp$l) saved_state$best_models[length(edgetemp)+1] = temp$l
}
}
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
  #assemble the fitted parameters and shift configuration into the output list
result = saved_state$params
result$likelihood = saved_state$likelihood
result$shifts.edge = c(0,saved_state$edges)
result$shifts.time = c(0,saved_state$times)
result$best_models = saved_state$best_models
return(result)
}
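# Internal helper: optimizes the free rate parameters for a fixed set of shift edges
# (and, unless time_mode == "fixed", a fixed position for the newest shift on its edge),
# returning the negative log likelihood, the fitted parameters and the time of the last shift.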
.ML_optim = function(tree,edges,initial_values, times,
uniform_weights=TRUE,p_lambda=0,p_mu=0,
rho=1, sigma=0, rho_sampling = TRUE,
lineage_counts = c(), tcut = 0,
optim_control=list(), time_mode = c("tip","mid","root","fixed"),
stepsize=NULL, no_extinction=FALSE, fixed_gamma=NULL,
unique_lambda = FALSE, unique_mu = FALSE,
fast_optim = FALSE, current_values = NULL, anc_state=0) {
n = length(edges)
if(time_mode != "fixed") { #fixed means all times are given as input
e = edges[n]
depths = ape::node.depth.edgelength(tree)
tor = max(depths)
tmin = tor-depths[tree$edge[e,2]] #times are backward
tmax = tor-depths[tree$edge[e,1]]
#if applicable, fix time of last shift
if(time_mode == "tip") times[n] = tmin*0.01+tmax*0.99
if(time_mode == "mid") times[n] = (tmin+tmax)/2
if(time_mode == "root") times[n] = tmin*0.99+tmax*0.01
}
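    # with no_extinction all mus are fixed at 0, so for the shift weights the prior
    # on mu is replaced by an indicator (point mass) at zero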
if(no_extinction && !uniform_weights) p_mu = function(x) {
cond = function(c) { if(c==0){1}else{0}}
sapply(x,cond)
}
#create function of parameters
auxfun = function(p) {
shifts = NULL
if(!fast_optim) values = .get_params_from_vector(p, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else values = .get_params_from_vector_fast(p, n+1, current_values, anc_state,
stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
#test validity of parameters
if(values$gamma<0) return(NA)
if(sum(values$lambdas<0)>0) return(NA)
if(!is.null(values$lambda_rates) && sum(values$lambda_rates<0)>0) return(NA)
if(sum(values$mus<0)>0) return(NA)
for(j in seq(along=edges)) {
e = edges[j]
t = times[j]
shifts = rbind(shifts,c(e,t,1+j))
}
res = likelihood_MSBD_unresolved(tree,shifts,values$gamma,values$lambdas,values$mus,
values$lambda_rates,stepsize,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut)
return(res)
}
    #initial parameter values
if(!fast_optim) initp = .get_vector_from_initial_values(initial_values, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else initp = .get_vector_from_initial_values_fast(initial_values, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
#optim on auxfun
out=try(stats::optim(initp, auxfun,control=optim_control))
if (class(out) == "try-error") {
print("Optimization failed")
result = list(l=NA,p=out)
}
else {
while(out$convergence==1) {
if(is.null(optim_control$maxit)) optim_control$maxit = 1000
else optim_control$maxit = 2*optim_control$maxit
out=stats::optim(out$par, auxfun,control=optim_control)
}
result = list(l=out$value, t=times[n])
if(!fast_optim) result$p=.get_params_from_vector(out$par, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else result$p=.get_params_from_vector_fast(out$par, n+1, current_values, anc_state,
stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
}
return(result)
} | /R/ML_MSBD.R | no_license | cran/ML.MSBD | R | false | false | 22,469 | r | #' Full Maximum Likelihood inference of birth and death rates together with their changes along a phylogeny under a multi-type birth-death model.
#'
#' Infers a complete MSBD model from a phylogeny, including the most likely number of states, positions and times of state changes, and parameters associated with each state.
#' Uses a greedy approach to add states and Maximum Likelihood inference for the other parameters.
#'
#' @param tree Phylogenetic tree (in ape format) to calculate the likelihood on.
#' @param initial_values Initial values for the optimizer, to be provided as a vector in this order: gamma (optional), lambda, lambda decay rate (optional), mu (optional). See 'Details'.
#' @param uniform_weights Whether all states are weighted uniformly in shifts, default TRUE. If FALSE, the weights of states are calculated from the distributions \code{p_lambda} and \code{p_mu}. See 'Details'.
#' @param p_lambda Prior probability distribution on lambdas, used if \code{uniform_weights = FALSE}.
#' @param p_mu Prior probability distribution on mus, used if \code{uniform_weights = FALSE}.
#' @param rho Sampling proportion on extant tips, default 1.
#' @param sigma Sampling probability on extinct tips (tips are sampled upon extinction), default 0.
#' @param rho_sampling Whether the most recent tips should be considered extant tips, sampled with sampling proportion \code{rho}. If FALSE, all tips will be considered extinct tips, sampled with sampling probability \code{sigma}. Should be TRUE for most macroevolution datasets and FALSE for most epidemiology datasets.
#' @param lineage_counts For trees with clade collapsing. Number of lineages collapsed on each tip. Should be set to 1 for extinct tips.
#' @param tcut For trees with clade collapsing. Times of clade collapsing for each tip (i.e time of the MRCA of all collapsed lineages). Can be a single number or a vector of length the number of tips.
#' @param stepsize Size of the step to use for time discretization with exponential decay, default NULL. To use exponential decay, an initial value for \code{lambda_rates} should also be provided.
#' @param no_extinction Whether to use the Yule process (\code{mu=0}) for all states, default FALSE. If TRUE no initial value for \code{mu} is needed.
#' @param fixed_gamma Value to which \code{gamma} should be fixed, default NULL. If provided no initial value for \code{gamma} is needed.
#' @param unique_lambda Whether to use the same value of \code{lambda} for all states, default FALSE. If TRUE and exponential decay is active all states will also share the same value for \code{lambda_rate}.
#' @param unique_mu Whether to use the same value of \code{mu} for all states, default FALSE.
#'
#' @param optim_control Control list for the optimizer, corresponds to control input in optim function, see \code{?optim} for details.
#' @param attempt_remove Whether to attempt to remove shifts at the end of the inference, default TRUE. If FALSE, use a pure greedy algorithm.
#' @param max_nshifts Maximum number of shifts to test for, default \code{Inf}.
#' @param saved_state If provided, the inference will be restarted from this state.
#' @param save_path If provided, the progress of the inference will be saved to this path after each optimization step.
#' @param time_mode String controlling the time positions of inferred shifts. See 'Details'.
#' @param fast_optim Whether to use the faster mode of optimization, default FALSE. If TRUE only rates associated with the state currently being added to the tree and its ancestor will be optimized at each step, otherwise all rates are optimized.
#' @param parallel Whether the computation should be run in parallel, default FALSE. Will use a user-defined cluster if one is found, otherwise will define its own.
#' @param ncores Number of cores to use for a parallel computation.
#'
#' @return Returns a list describing the most likely model found, with the following components:
#' \item{\code{likelihood}}{the negative log likelihood of the model}
#' \item{\code{shifts.edge}}{the indexes of the edges where shifts happen, 0 indicates the root state}
#' \item{\code{shifts.time}}{the time positions of shifts}
#' \item{\code{gamma}}{the rate of state change}
#' \item{\code{lambdas}}{the birth rates of all states}
#' \item{\code{lambda_rates}}{if exponential decay was activated, the rates of decay of birth rate for all states}
#' \item{\code{mus}}{the death rates of all states}
#' \item{\code{best_models}}{a vector containing the negative log likelihood of the best model found for each number of states tested (\code{best_models[i]} corresponds to i states, i.e i-1 shifts)}
#' All vectors are indexed in the same way, so that the state with parameters \code{lambdas[i]}, \code{lambda_rates[i]} and \code{mus[i]} starts on edge \code{shifts.edge[i]} at time \code{shifts.time[i]}.
#'
#' @details It is to be noted that all times are counted backwards, with the most recent tip positioned at 0. \cr\cr
#'
#' Five time modes are possible for the input \code{time_mode}.
#' In \code{tip} mode, the shifts will be placed at 10\% of the length of the edge.
#' In \code{mid} mode, the shifts will be placed at 50\% of the length of the edge.
#' In \code{root} mode, the shifts will be placed at 90\% of the length of the edge.
#' In \code{3pos} mode, the three "tip", "mid" and "root" positions will be tested.\cr\cr
#'
#' The weights w are used for calculating the transition rates q from each state i to j: \eqn{q_{i,j}=\gamma*w_{i,j}}{q(i,j)=\gamma*w(i,j)}.
#' If \code{uniform_weights = TRUE}, \eqn{w_{i,j} = \frac{1}{N-1}}{w(i,j)=1/(N-1)} for all i,j, where N is the total number of states.
#' If \code{uniform_weights = FALSE}, \eqn{w_{i,j} = \frac{p_\lambda(\lambda_j)p_\mu(\mu_j)}{sum_{k \ne i}p_\lambda(\lambda_k)p_\mu(\mu_k)}}{w(i,j)=p\lambda(\lambdaj)p\mu(\muj)/sum(p\lambda(\lambdak)p\mu(\muk)) for all k!=i}
#' where the distributions \eqn{p_\lambda}{p\lambda} and \eqn{p_\mu}{p\mu} are provided by the inputs \code{p_lambda} and \code{p_mu}.\cr\cr
#'
#' Initial values for the optimization need to be provided as a vector and contain the following elements (in order):
#' an initial value for gamma, which is required unless \code{fixed_gamma} is provided,
#' an initial value for lambda which is always required,
#' an initial value for lambda decay rate, which is required if \code{stepsize} is provided,
#' and an initial value for mu, which is required unless \code{no_extinction = TRUE}.
#' An error will be raised if the number of initial values provided does not match the one expected from the rest of the settings,
#' and the function will fail if the likelihood cannot be calculated at the initial values.
#'
#' @examples
#' # Input a phylogeny
#' tree <- ape::read.tree(text = "(((t4:0.7293960718,(t1:0.450904974,t3:0.09259337652)
#' :0.04068535892):0.4769176776,t8:0.1541864066):0.7282000314,((t7:0.07264320855,
#' (((t5:0.8231869878,t6:0.3492440532):0.2380232813,t10:0.2367582193):0.5329497182,
#' t9:0.1016243151):0.5929288475):0.3003101915,t2:0.8320755605):0.2918686506);")
#'
#' # Infer the most likely multi-states birth-death model
#' # with full extant & extinct sampling
#' \dontrun{ML_MSBD(tree, initial_values = c(0.1, 10, 1), sigma = 1, time_mode = "mid") }
#' # Infer the most likely multi-states birth-death model with exponential decay
#' # and full extant & extinct sampling
#' \dontrun{ML_MSBD(tree, initial_values = c(0.1, 10, 0.5, 1), sigma = 1,
#' stepsize = 0.1, time_mode = "mid")}
#'
#' # Input a phylogeny with extant samples
#' tree2 <- ape::read.tree(text = "(t3:0.9703302342,((t4:0.1999577823,(t2:0.1287530271,
#' (t7:0.08853561159,(t8:0.07930237712,t9:0.07930237712):0.009233234474):0.04021741549):
#' 0.07120475526):0.4269919425,(((t10:0.0191876225,t5:0.0191876225):0.04849906822,
#' t6:0.06768669072):0.1672340445,t1:0.2349207353):0.3920289896):0.3433805094);")
#'
#' # Infer the most likely multi-states Yule model with partial extant sampling
#' \dontrun{ML_MSBD(tree2, initial_values = c(0.1, 10), no_extinction = TRUE,
#' rho = 0.5, time_mode = "mid")}
#' # Infer the most likely multi-states birth-death model with full extant sampling
#' # and unresolved extant tips
#' \dontrun{ML_MSBD(tree2, initial_values = c(0.1, 10, 1),
#' lineage_counts = c(2,5,1,3,1,1,1,1,2,6), tcut = 0.05, time_mode = "mid")}
#'
#' @export
ML_MSBD = function(tree,initial_values,
uniform_weights=TRUE,p_lambda=0,p_mu=0,
rho = 1, sigma=0, rho_sampling = TRUE,
lineage_counts = c(), tcut = 0,
stepsize=NULL, no_extinction=FALSE, fixed_gamma=NULL,
unique_lambda = FALSE, unique_mu = FALSE,
optim_control = list(), attempt_remove=TRUE, max_nshifts=Inf,
saved_state = NULL, save_path = NULL,
time_mode = c("3pos","tip","mid","root"),
fast_optim = FALSE,
parallel = FALSE, ncores = getOption('mc.cores', 2L)) {
if(time_mode %in% c("tip","mid","root")) time_positions = time_mode
else if (time_mode == "3pos") time_positions = c("tip","mid","root")
else stop("Invalid time positions mode, available are tip, mid, root and 3pos")
if(rho>1 || rho<0 || sigma>1 || sigma<0) stop("Invalid sampling proportions")
if(!rho_sampling && rho != 0) rho=0
if(length(lineage_counts) > 0 && !is.null(tcut)) {
print("Clade collapsing detected")
ntips = length(tree$tip.label)
if(length(lineage_counts) != ntips) stop("The vector of number of collapsed species doesn't match with the number of tips")
if(is.null(tcut)) stop("Time(s) of clade collapsing need to be provided")
if(length(tcut) ==1) tcut = rep(tcut, ntips)
if(length(tcut) != ntips) stop("The vector of times of clade collapsing doesn't match with the number of tips")
}
if(parallel) {
if (! requireNamespace("doParallel", quietly = TRUE)) stop("Parallel computation requires the doParallel package.")
if (! foreach::getDoParRegistered() ) {
doParallel::registerDoParallel(ncores)
message('Registered parallel computation with ', ncores, ' workers')
on.exit(doParallel::stopImplicitCluster())
} else {
message('Using parallel computation with existing', foreach::getDoParName(),
' with ', foreach::getDoParWorkers(), ' workers')
}
`%d%` <- foreach::`%dopar%`
} else {
message('Executing sequential computation')
`%d%` <- foreach::`%do%`
}
ptm = proc.time()[3]
if(is.null(saved_state)) {
initial_values = .get_initial_values_from_vector(initial_values,stepsize, no_extinction, fixed_gamma)
#first test without shifts
temp = .ML_optim(tree,c(),initial_values, c(),
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, "fixed",
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu, fast_optim)
if(is.na(temp$l)) {
stop("First optimization failure")
}
else {
saved_state = list(initial_screen = TRUE, bestl = temp$l, pars = temp$p, initial_values = initial_values)
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
#initial conditions screen
if(!is.null(saved_state$initial_screen)) {
while(TRUE) {
done = TRUE
for(i in 1:length(saved_state$initial_values)) {
#no screen on gamma, not useful
if(names(saved_state$initial_values)[i]=="gamma") next
iv = saved_state$initial_values
iv[[i]] = iv[[i]]*10
temp = .ML_optim(tree,c(),iv, c(),uniform_weights,p_lambda,p_mu,rho,sigma,rho_sampling,lineage_counts,tcut,
optim_control, "fixed",stepsize,no_extinction,fixed_gamma,unique_lambda,unique_mu, fast_optim)
if(!is.na(temp$l) && temp$l<saved_state$bestl) {
done = FALSE
saved_state$bestl = temp$l
saved_state$initial_values = iv
saved_state$pars = temp$p
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
iv[[i]] = iv[[i]]/100
temp = .ML_optim(tree,c(),iv, c(),uniform_weights,p_lambda,p_mu,rho,sigma,rho_sampling,lineage_counts,tcut,
optim_control, "fixed",stepsize,no_extinction,fixed_gamma,unique_lambda,unique_mu, fast_optim)
if(!is.na(temp$l) && temp$l<saved_state$bestl) {
done = FALSE
saved_state$bestl = temp$l
saved_state$initial_values = iv
saved_state$pars = temp$p
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
if(done) break
}
newsaved_state = list()
newsaved_state$params = saved_state$pars
newsaved_state$likelihood = saved_state$bestl
newsaved_state$best_models = c(saved_state$bestl)
newsaved_state$edges = c()
newsaved_state$times = c()
newsaved_state$initial_values = saved_state$pars
if(is.null(fixed_gamma)) newsaved_state$initial_values$gamma = saved_state$initial_values$gamma
saved_state = newsaved_state
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
while(TRUE) {
#if no more edges free, stop
if(length(saved_state$edges)==length(tree$edge) || length(saved_state$edges) == max_nshifts) break
if(!parallel && is.null(saved_state$partial)) {
saved_state$partial = list(edge_min = 0, time_min = NULL, p_min = NULL, min_lik = Inf, tested_edges = c())
}
#test max estimates for all edges
all_edges = foreach::foreach (i = 1:length(tree$edge[,1]), .packages = "ML.MSBD") %d% {
if(!parallel && i %in% saved_state$partial$tested_edges) return(list(edge = i, lik = Inf)) #already tested edge
if(is.element(i,saved_state$edges)) return(list(edge = i, lik = Inf)) #disallow multiple shifts on the same edge
if(tree$edge.length[i] == 0) { #zero-length edge
if(!parallel) saved_state$partial$tested_edges = c(saved_state$partial$tested_edges,i)
return(list(edge = i, lik = Inf))
}
if(fast_optim) anc_state = .find_ancestral_state(tree, i, saved_state$edges)
edge_results = list(edge = i, lik = Inf, pars = NULL, time = NULL)
for(time_pos in time_positions) {
temp = .ML_optim(tree,c(saved_state$edges,i),
saved_state$initial_values,saved_state$times,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, time_pos,
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu, fast_optim, saved_state$params, anc_state)
if(!is.na(temp$l) && temp$l < edge_results$lik) {
edge_results$lik = temp$l
edge_results$pars = temp$p
edge_results$time = temp$t
}
}
if(!parallel) {
if(edge_results$lik < saved_state$partial$min_lik) {
saved_state$partial$edge_min = i
saved_state$partial$time_min = temp$t
saved_state$partial$p_min = temp$p
saved_state$partial$min_lik = temp$l
}
saved_state$partial$tested_edges = c(saved_state$partial$tested_edges,i)
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
return(edge_results)
}
if(parallel) {
liks = sapply(all_edges, function(x) x$lik)
best = which(liks == min(liks))
saved_state$partial = list(edge_min = all_edges[[best]]$edge, time_min = all_edges[[best]]$time,
p_min = all_edges[[best]]$pars, min_lik = all_edges[[best]]$lik)
}
saved_state$best_models = c(saved_state$best_models, saved_state$partial$min_lik)
if(saved_state$likelihood > saved_state$partial$min_lik) {
saved_state$edges = c(saved_state$edges,saved_state$partial$edge_min)
saved_state$times = c(saved_state$times,saved_state$partial$time_min)
saved_state$params = saved_state$partial$p_min
saved_state$likelihood = saved_state$partial$min_lik
saved_state$initial_values$gamma = saved_state$params$gamma #gamma needs to increase with the number of clusters
}
else {
break
}
saved_state$partial = NULL
if(!is.null(save_path)) save(saved_state, file=save_path)
}
#optional : try removing stuff from the shifts and test
if(attempt_remove && length(saved_state$edges)>0) {
removal = TRUE
while(removal==TRUE) {
removal = FALSE
for(i in length(saved_state$edges):1) {
edgetemp = saved_state$edges[-i]
time_tmp = saved_state$times[-i]
if(fast_optim) anc_state = .find_ancestral_state(tree, saved_state$edges[i], edgetemp)
values_temp = .remove_state(saved_state$params, i+1, stepsize, no_extinction, unique_lambda, unique_mu)
temp = .ML_optim(tree, edgetemp,
saved_state$initial_values, time_tmp,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut,
optim_control, "fixed",
stepsize,no_extinction,fixed_gamma,
unique_lambda,unique_mu,
fast_optim,values_temp, anc_state)
if(!is.na(temp$l) && temp$l<saved_state$likelihood) {
print("shift removed")
saved_state$params = temp$p
saved_state$likelihood = temp$l
saved_state$best_models[length(edgetemp)+1] = temp$l
saved_state$edges = edgetemp
saved_state$times = time_tmp
removal = TRUE
}
else {
if(!is.na(temp$l) && saved_state$best_models[length(edgetemp)+1]> temp$l) saved_state$best_models[length(edgetemp)+1] = temp$l
}
}
if(!is.null(save_path) && proc.time()[3] - ptm > 600) {
save(saved_state, file=save_path)
ptm = proc.time()[3]
}
}
}
  #unpack the parameter vector into a usable form
result = saved_state$params
result$likelihood = saved_state$likelihood
result$shifts.edge = c(0,saved_state$edges)
result$shifts.time = c(0,saved_state$times)
result$best_models = saved_state$best_models
return(result)
}
.ML_optim = function(tree,edges,initial_values, times,
uniform_weights=TRUE,p_lambda=0,p_mu=0,
rho=1, sigma=0, rho_sampling = TRUE,
lineage_counts = c(), tcut = 0,
optim_control=list(), time_mode = c("tip","mid","root","fixed"),
stepsize=NULL, no_extinction=FALSE, fixed_gamma=NULL,
unique_lambda = FALSE, unique_mu = FALSE,
fast_optim = FALSE, current_values = NULL, anc_state=0) {
n = length(edges)
if(time_mode != "fixed") { #fixed means all times are given as input
e = edges[n]
depths = ape::node.depth.edgelength(tree)
tor = max(depths)
tmin = tor-depths[tree$edge[e,2]] #times are backward
tmax = tor-depths[tree$edge[e,1]]
#if applicable, fix time of last shift
if(time_mode == "tip") times[n] = tmin*0.01+tmax*0.99
if(time_mode == "mid") times[n] = (tmin+tmax)/2
if(time_mode == "root") times[n] = tmin*0.99+tmax*0.01
}
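  # For "tip", "mid" and "root" the candidate shift time is pinned at a fixed position along the
  # edge (between tmin and tmax above, in backward time); only "fixed" uses the caller-supplied times.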
if(no_extinction && !uniform_weights) p_mu = function(x) {
cond = function(c) { if(c==0){1}else{0}}
sapply(x,cond)
}
#create function of parameters
auxfun = function(p) {
shifts = NULL
if(!fast_optim) values = .get_params_from_vector(p, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else values = .get_params_from_vector_fast(p, n+1, current_values, anc_state,
stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
#test validity of parameters
if(values$gamma<0) return(NA)
if(sum(values$lambdas<0)>0) return(NA)
if(!is.null(values$lambda_rates) && sum(values$lambda_rates<0)>0) return(NA)
if(sum(values$mus<0)>0) return(NA)
for(j in seq(along=edges)) {
e = edges[j]
t = times[j]
shifts = rbind(shifts,c(e,t,1+j))
}
res = likelihood_MSBD_unresolved(tree,shifts,values$gamma,values$lambdas,values$mus,
values$lambda_rates,stepsize,
uniform_weights,p_lambda,p_mu,
rho,sigma,rho_sampling,
lineage_counts,tcut)
return(res)
}
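  # auxfun returns the value computed by likelihood_MSBD_unresolved for a candidate parameter
  # vector (NA for invalid parameters); stats::optim below minimises it, so lower is better.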
  #initial parameter values
if(!fast_optim) initp = .get_vector_from_initial_values(initial_values, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else initp = .get_vector_from_initial_values_fast(initial_values, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
#optim on auxfun
out=try(stats::optim(initp, auxfun,control=optim_control))
if (class(out) == "try-error") {
print("Optimization failed")
result = list(l=NA,p=out)
}
else {
while(out$convergence==1) {
if(is.null(optim_control$maxit)) optim_control$maxit = 1000
else optim_control$maxit = 2*optim_control$maxit
out=stats::optim(out$par, auxfun,control=optim_control)
}
result = list(l=out$value, t=times[n])
if(!fast_optim) result$p=.get_params_from_vector(out$par, n+1, stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
else result$p=.get_params_from_vector_fast(out$par, n+1, current_values, anc_state,
stepsize, no_extinction, fixed_gamma, unique_lambda, unique_mu)
}
return(result)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyBinaryInfoStats.R
\name{calculateMultiClassMI}
\alias{calculateMultiClassMI}
\title{Calculate a single mutual information score from multiclass groups in a dplyr-friendly manner.}
\usage{
calculateMultiClassMI(df)
}
\arguments{
\item{df}{a dataframe containing one observation per row & minimally p_x1y1, p_x1, p_y1 columns (see probabilitiesFromCounts / probabilitiesFromCooccurrence)}
}
\value{
the datatable with additional columns for MI
}
\description{
The purpose of this is to make it possible to calculate MI from tidy data. This is useful where you have a data frame that
represents a multi-class confusion matrix with unique combinations of inputs and probabilities for the co-occurrence and marginal probabilities
already calculated. Typically this will be generated by the probabilitiesFromCooccurrence function.
}
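\examples{
\dontrun{
# Hypothetical sketch only: `cooc` stands for a data frame produced by
# probabilitiesFromCooccurrence(), i.e. one row per observation with at least
# the p_x1y1, p_x1 and p_y1 columns described above.
calculateMultiClassMI(cooc)
}
}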
| /man/calculateMultiClassMI.Rd | permissive | terminological/tidy-info-stats | R | false | true | 907 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyBinaryInfoStats.R
\name{calculateMultiClassMI}
\alias{calculateMultiClassMI}
\title{Calculate a single mutual information score from multiclass groups in a dplyr-friendly manner.}
\usage{
calculateMultiClassMI(df)
}
\arguments{
\item{df}{a dataframe containing one observation per row & minimally p_x1y1, p_x1, p_y1 columns (see probabilitiesFromCounts / probabilitiesFromCooccurrence)}
}
\value{
the datatable with additional columns for MI
}
\description{
The purpose of this is to make it possible to calculate MI from tidy data. This is useful where you have a data frame that
represents a multi-class confusion matrix with unique combinations of inputs and probabilities for the co-occurrence and marginal probabilities
already calculated. Typically this will be generated by the probabilitiesFromCooccurrence function.
}
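\examples{
\dontrun{
# Hypothetical sketch only: `cooc` stands for a data frame produced by
# probabilitiesFromCooccurrence(), i.e. one row per observation with at least
# the p_x1y1, p_x1 and p_y1 columns described above.
calculateMultiClassMI(cooc)
}
}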
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func.R
\name{aggregateSum}
\alias{aggregateSum}
\title{Peptide/protein aggregate function using sum}
\usage{
aggregateSum(wp)
}
\arguments{
\item{wp}{Matrix with columns corresponding to samples and rows corresponding
to peptide entries.}
}
\value{
A numeric vector with aggregated values
}
\description{
This function should be used from within \code{\link{makePeptideTable}} or
\code{\link{makeProteinTable}}. It aggregates values by calculating the sum
for each column of \code{wp}.
}
\examples{
\dontrun{
library(proteusLabelFree)
data(proteusLabelFree)
prodat <- makeProteinTable(pepdat.clean, aggregate.fun=aggregateSum)
}
}
| /man/aggregateSum.Rd | permissive | bartongroup/Proteus | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func.R
\name{aggregateSum}
\alias{aggregateSum}
\title{Peptide/protein aggregate function using sum}
\usage{
aggregateSum(wp)
}
\arguments{
\item{wp}{Matrix with columns corresponding to samples and rows corresponding
to peptide entries.}
}
\value{
A numeric vector with aggregated values
}
\description{
This function should be used from within \code{\link{makePeptideTable}} or
\code{\link{makeProteinTable}}. It aggregates values by calculating the sum
for each column of \code{wp}.
}
\examples{
\dontrun{
library(proteusLabelFree)
data(proteusLabelFree)
prodat <- makeProteinTable(pepdat.clean, aggregate.fun=aggregateSum)
}
}
|
# Script to process realtime SST observations
# Author: Steefan Contractor
library(ncdf4)
library(lubridate)
library(dplyr)
library(raster) #
library(leaflet)
library(htmltools)
library(htmlwidgets)
library(geosphere)
library(readr)
library(purrr)
# working directory is where the script is
basePath <- paste0(normalizePath("./"),"/")
##############################################################################
# Read and process entire month of ocean colour and sst data
##############################################################################
#sst
files <- list.files(paste0(basePath,"data/SST/"), pattern = glob2rx("*.nc"))
dates <- ymd(substr(files, 1,8))
df <- data.frame(date = dates, filename = files)
df <- arrange(df, desc(date))
# sst climatology
# 90th percentile
clim_90 <- brick("./data/SSTAARS_NSW_remapcon2.nc", varname = "TEMP_90th_perc")
clim_90 <- setZ(clim_90, z = ymd(strsplit(system("cdo showdate data/SSTAARS_NSW_remapcon2.nc", intern = T), split = " ")[[1]][-1]))
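# clim.index picks the climatology layer whose day-of-year is closest to the most recent SST
# date (df is sorted by descending date above, so df[1,'date'] is the latest observation)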
clim.index <- which.min(abs(yday(format(df[1,'date'], format = "%y-%m-%d")) - yday(getZ(clim_90))))
# 10th percentile
clim_10 <- brick("./data/SSTAARS_NSW_remapcon2.nc", varname = "TEMP_10th_perc")
clim_10 <- setZ(clim_10, z = ymd(strsplit(system("cdo showdate data/SSTAARS_NSW_remapcon2.nc", intern = T), split = " ")[[1]][-1]))
#oc
files <- list.files(paste0(basePath,"data/CHL_OC3/"), pattern = glob2rx("*.nc"))
dates <- ymd(substr(files, 7,7+8-1))
df_OC <- data.frame(date = dates, OC_filename = files)
df_OC <- arrange(df_OC, desc(date))
# merge with SST dataframe
df <- base::merge(df, df_OC, by = "date", all.x = T)
# Read and process every file in df
sst_month <- sapply(paste(df$date),function(x) NULL)
sst_10_month <- sapply(paste(df$date),function(x) NULL)
sst_90_month <- sapply(paste(df$date),function(x) NULL)
oc_month <- sapply(paste(df$date),function(x) NULL)
for (d in 1:nrow(df)) {
# sst
nc <- nc_open(paste0(basePath,"data/SST/",df$filename[d]))
lon <- ncvar_get(nc, "lon")
lat <- rev(ncvar_get(nc, "lat"))
sst <- ncvar_get(nc, "analysed_sst")
nc_close(nc)
# convert to raster
sst <- sst - 273.15
sst <- raster(t(sst[,length(lat):1]), xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat),
crs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
sst_90 <- sst
sst_90[which(sst_90[] < clim_90[[clim.index]][])] <- NA
sst_10 <- sst
sst_10[which(sst_10[] > clim_10[[clim.index]][])] <- NA
sst_month[[d]] <- sst
sst_10_month[[d]] <- sst_10
sst_90_month[[d]] <- sst_90
# chl_oc3
if (!is.na(df$OC_filename[d])) {
nc <- nc_open(paste0(basePath,"data/CHL_OC3/",df$OC_filename[d]))
dimnames <- names(nc$dim)
lon <- ncvar_get(nc, ifelse("longitude" %in% dimnames, "longitude", "lon"))
lat <- rev(ncvar_get(nc, ifelse("latitude" %in% dimnames, "latitude", "lat")))
chl_oc3 <- ncvar_get(nc, "chl_oc3")
nc_close(nc)
chl_oc3 <- raster(t(chl_oc3[,length(lat):1]), xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat),
crs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
# Mask values above 10
chl_oc3[which(values(chl_oc3) > 10)] <- NA
# Set values between >5 and 10 as 5
chl_oc3[which(values(chl_oc3) > 5)] <- 5
oc_month[[d]] <- chl_oc3
} else {
oc_month[[d]] <- NA
}
}
# save data
save(sst_month, sst_10_month, sst_90_month, oc_month, clim_90, clim_10, df, file = paste0(basePath,"data/SST/processedSSTandOC.Rdata"))
##########################
# HF Radar data
##########################
# wrapper function for min() that by default removes na (na.rm = T) and returns NA instead of Inf for all NA values
min_NA <- function(x) {
return(ifelse(all(is.na(x)), NA, min(x, na.rm = T)))
}
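# e.g. min_NA(c(NA, NA)) returns NA, whereas min(c(NA, NA), na.rm = TRUE) returns Inf with a warning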
# function to find the EAC core location, velocity, and distance from coast
find_EAC_char <- function(lon, lat, ucur, vcur) {
coffs_lat <- -30.29628
newc_lat <- -32.91667
# find location
vcur_min <- apply(vcur, 2, min_NA)
# replace any velocities greater than -0.3 with NA
vcur_min[which(vcur_min > -0.3)] <- NA
if (all(is.na(vcur_min))) {
return(NULL)
} else {
    # for the non-NA velocities above, find the index of the lon location
lon_ind_vcur_min <- sapply(1:length(lat), function(n) {ifelse(!is.na(vcur_min[n]),
which(vcur[,n] == vcur_min[n]),
NA)})
lat_ind_vcur_min <- which(!is.na(lon_ind_vcur_min))
lon_ind_vcur_min <- lon_ind_vcur_min[!is.na(lon_ind_vcur_min)]
# create a data frame with all necessary attributes
# EAC_char <- setNames(data.frame(matrix(ncol = 6, nrow = 0)), c("lon", "lat", "distance_coast", "ucur", "vcur"))
EAC_char <- data.frame(lon = lon[lon_ind_vcur_min], lat = lat[lat_ind_vcur_min],
ucur = mapply(function(x, y) {ucur[x, y]}, x = lon_ind_vcur_min, y = lat_ind_vcur_min),
vcur = mapply(function(x, y) {vcur[x, y]}, x = lon_ind_vcur_min, y = lat_ind_vcur_min))
# calculate EAC speed
EAC_char <- mutate(EAC_char, speed = sqrt(ucur^2+vcur^2))
# calculate EAC distance
# read coastline data
coast <- read_tsv(paste0(basePath, "data/eaccoast.dat"), col_names = c("lon", "lat"), skip = 1)
EAC_char <- mutate(EAC_char, coast_lon = as.numeric(sapply(lat, function(x) {coast$lon[which.min(abs(coast$lat - x))]})))
EAC_char <- mutate(EAC_char, coast_dist = distVincentySphere(cbind(EAC_char$lon, EAC_char$lat), cbind(EAC_char$coast_lon, EAC_char$lat))/1000)
# drop coast_lon column
EAC_char <- EAC_char %>% dplyr::select(-c(coast_lon, ucur, vcur))
if (max(lat) < coffs_lat) {
EAC_char <- slice(EAC_char, which.min(abs(lat - newc_lat)))
} else {
EAC_char <- slice(EAC_char, which.min(abs(lat - coffs_lat)))
}
return(EAC_char)
}
}
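# Hypothetical usage sketch (lon, lat, ucur_*, vcur_* as read from an hourly HF radar file below):
#   core <- find_EAC_char(lon, lat, ucur_newc, vcur_newc)
#   if (!is.null(core)) core$speed # EAC core speed (m/s) at the reference latitude
# find_EAC_char() returns NULL when no cell has a southward velocity stronger than -0.3 m/s.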
# create empty list to contain uv dataframes for each day
UVCart_month <- sapply(paste(df$date),function(x) NULL)
# create empty dataframe to contain EAC characteristics
EAC_char_month <- setNames(data.frame(matrix(ncol = 6, nrow = 0)), c("date","lon","lat","speed","coast_dist", "coffs"))
for (d in 1:nrow(df)) {
date <- format(df$date[d], format = "%Y%m%d")
fnames <- list.files(paste0(basePath, "data/HFRadar/NEWC"), pattern = glob2rx(paste0("*",date,"*.nc")))
if (length(fnames) > 2) {
# 9 am
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[1]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
lonlat <- expand.grid(lon, lat)
w = 0.2 # scaling factor for arrows
uv_cart_df_newc <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], lon1 = lonlat[,1]+c(ucur_newc)*w, lat1 = lonlat[,2]+c(vcur_newc)*w)
uv_cart_df_newc <- uv_cart_df_newc %>% filter(!is.na(lon1))
# 3 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[2]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
# find EAC characteristics
EAC_char <- find_EAC_char(lon, lat, ucur_newc, vcur_newc)
if (!is_null(EAC_char)) {EAC_char <- EAC_char %>% mutate(date = date, coffs = F) %>% dplyr::select(date, everything(), coffs)}
EAC_char_month <- rbind(EAC_char_month, EAC_char)
lonlat <- expand.grid(lon, lat)
# w = 1 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_newc)*w, vcur = c(vcur_newc)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_newc <- merge(uv_cart_df_newc, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_newc <- uv_cart_df_newc %>% mutate(lon2 = lon1 + ucur, lat2 = lat1 + vcur) %>% dplyr::select(-ucur, -vcur)
# 9 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[3]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
lonlat <- expand.grid(lon, lat)
# w = 1 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_newc)*w, vcur = c(vcur_newc)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_newc <- merge(uv_cart_df_newc, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_newc <- uv_cart_df_newc %>% mutate(lon3 = lon2 + ucur, lat3 = lat2 + vcur) %>% dplyr::select(-ucur, -vcur)
} else {
ucur_newc <- c()
vcur_newc <- c()
uv_cart_df_newc <- setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("lon0", "lat0", "lon1", "lat1", "lon2", "lat2","lon3", "lat3"))
}
# save(ucur_newc, vcur_newc, uv_cart_df_newc, file = paste0(basePath, "data/HFRadar/NEWC/NEWC_HFRadar.RData"))
# COFH
fnames <- list.files(paste0(basePath, "data/HFRadar/COFH"), pattern = glob2rx(paste0("*",date,"*.nc")))
if (length(fnames) > 2) {
# 9 am
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[1]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
lonlat <- expand.grid(lon, lat)
w = 0.2 # scaling factor for arrows
uv_cart_df_cofh <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], lon1 = lonlat[,1]+c(ucur_cofh)*w, lat1 = lonlat[,2]+c(vcur_cofh)*w)
uv_cart_df_cofh <- uv_cart_df_cofh %>% filter(!is.na(lon1))
# 3 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[2]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
# find EAC characteristics
EAC_char <- find_EAC_char(lon, lat, ucur_cofh, vcur_cofh)
if (!is_null(EAC_char)) {EAC_char <- EAC_char %>% mutate(date = date, coffs = T) %>% dplyr::select(date, everything(), coffs)}
EAC_char_month <- rbind(EAC_char_month, EAC_char)
lonlat <- expand.grid(lon, lat)
# w = 0.11 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_cofh)*w, vcur = c(vcur_cofh)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_cofh <- merge(uv_cart_df_cofh, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_cofh <- uv_cart_df_cofh %>% mutate(lon2 = lon1 + ucur, lat2 = lat1 + vcur) %>% dplyr::select(-ucur, -vcur)
# 9 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[3]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
lonlat <- expand.grid(lon, lat)
# w = 0.11 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_cofh)*w, vcur = c(vcur_cofh)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_cofh <- merge(uv_cart_df_cofh, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_cofh <- uv_cart_df_cofh %>% mutate(lon3 = lon2 + ucur, lat3 = lat2 + vcur) %>% dplyr::select(-ucur, -vcur)
} else {
ucur_cofh <- c()
vcur_cofh <- c()
uv_cart_df_cofh <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("lon0", "lat0", "lon1", "lat1"))
}
ucur <- c(ucur_newc, ucur_cofh)
vcur <- c(vcur_newc, vcur_cofh)
uv_cart_df <- rbind(uv_cart_df_newc, uv_cart_df_cofh)
UVCart_month[[d]] <- uv_cart_df
}
save(UVCart_month, EAC_char_month, file = paste0(basePath, "data/HFRadar/HFRadar.RData"))
##################################
# 200m isobath from gebco (https://download.gebco.net/)
##################################
topo <- raster(file.path(basePath, "data", "gebco_2019_n-28.0_s-37.5_w149.5_e155.5.nc"))
topo[which(values(topo) > -200)] <- 0
topo[which(values(topo) < -200)] <- 1
isobath_200 <- rasterToContour(topo)
save(isobath_200, file = file.path(basePath, "data/isobath_200.RData"))
##################################
# save leaflet map on home page as html widget
##################################
# create leaflet map
# n <- 27
# date <- df$date[n]
# sst <- sst_month[[n]]
# sst_10 <- sst_10_month[[n]]
# sst_90 <- sst_90_month[[n]]
# oc <- oc_month[[n]]
#
# # determine colourmapping for sst raster image
# pal <- colorNumeric(
# palette = "magma",
# domain = values(sst),
# na.color = "#00000000",
# reverse = F)
#
# # determine colourmapping for oc raster image
# palOC <- colorNumeric(
# palette = "viridis",
# domain = log(values(oc)),
# na.color = "transparent",
# reverse = F)
#
# # load javascript plugin
# curveplugin <- htmlDependency("leaflet.curve", "0.5.2",
# src = file.path(normalizePath(basePath),"www"),
# script = "leaflet.curve.js")
#
# # A function that takes a plugin htmlDependency object and adds
# # it to the map. This ensures that however or whenever the map
# # gets rendered, the plugin will be loaded into the browser.
# registerPlugin <- function(map, plugin) {
# map$dependencies <- c(map$dependencies, list(plugin))
# map
# }
#
# m <- leaflet() %>% addTiles() #%>% setView(lng = 153.4, lat = -30.5, zoom = 9)
#
# m %>% addRasterImage(x = sst, colors = pal, group = "SST",opacity = 0.8) %>%
# addLegend(pal = pal, values = values(sst), opacity = 0.7, #labFormat = labelFormat(transform = function(x) {sort(x, decreasing = T)}),
# title = "Surface temp", group = c("SST"), position = "topleft") %>% #, labFormat = labelFormat(transform = function(x) sort(x, decreasing = TRUE))
# addRasterImage(x = sst_10, colors = pal, group = "Cold SSTs", opacity = 0.8) %>%
# addRasterImage(x = sst_90, colors = pal, group = "Warm SSTs", opacity = 0.8) %>%
# addLabelOnlyMarkers(lng = 151.4, lat = -27.9, label = HTML(paste("Date:<br>",date)),
# labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px")) %>%
# addRasterImage(x = log(oc), colors = palOC, group = "Ocean Colour", opacity = 0.8) %>%
# addLegend(pal = palOC, values = rev(log(values(oc))), labFormat = labelFormat(transform = exp), opacity = 0.7,
# title = "Ocean colour \n(Chl-a)", group = "Ocean Colour", position = "topleft",) %>%
# # addMarkers(data = stationLocs %>% filter(site_code == "CH100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% dplyr::filter(site_code == "CH100") %>% dplyr::select(site_code), paste(round(rTemps[1],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[1])),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "SYD100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% filter(site_code == "SYD100") %>% dplyr::select(site_code), paste(round(rTemps[2],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "right", textsize = "15px",
# # style = list("background-color" = rBG[2])),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "PH100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>",
# # a(paste(stationLocs %>% dplyr::filter(site_code == "PH100") %>% dplyr::select(site_code)), onclick = "openTab('PH100_Clim')", href="#"),
# # paste(round(rTemps[3],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[3],
# # "pointer-events" = "auto")),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "BMP120"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% dplyr::filter(site_code == "BMP120") %>% dplyr::select(site_code), paste(round(rTemps[4],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[4])),
# # group = "Moorings") %>%
# #
# # Layers control
# addLayersControl(
# baseGroups = c("SST", "Cold SSTs", "Warm SSTs", "Ocean Colour"),
# # overlayGroups = c("SST", "Ocean Colour"),
# options = layersControlOptions(collapsed = FALSE, autoZIndex = T),
# position = "topleft"
# )# %>% addFlows(uv_cart_df$lon0, uv_cart_df$lat0, uv_cart_df$lon1, uv_cart_df$lat1, maxThickness = 0.5)
#
# m <- m %>% # Register ESRI plugin on this map instance
# registerPlugin(curveplugin) %>%
# # Add your custom JS logic here. The `this` keyword
# # refers to the Leaflet (JS) map object.
# onRender(paste("function(el, x) {",
# paste0("L.curve(['M', [", uv_cart_df$lat0, ",", uv_cart_df$lon0,
# "], 'C', [", uv_cart_df$lat1, ",", uv_cart_df$lon1, "], [",
# uv_cart_df$lat2, ",", uv_cart_df$lon2[], "], [",
# uv_cart_df$lat3, ",", uv_cart_df$lon3[], "]], ",
# "{weight: 0.5, color: 'white', animate: {duration: 1500, iterations: Infinity}}).addTo(this);", sep = " ", collapse = "\n"),
# "}",sep = "\n"))
#
# # save leaflet map as html widget
# system("if [ ! -d www/figures ]; then mkdir www/figures; fi")
# # system("if [ ! -d www/figures/libdir ]; then mkdir www/figures/libdir; fi")
# f <- "www/figures/home_leaflet_map.html"
# saveWidget(m, file=file.path(normalizePath(dirname(f)),basename(f)), libdir = "libdir",
# selfcontained = T)
 | /Simple_shiny_Climatology_dashboard_app/processRealtimeSSTdata.R | no_license | SteefanContractor/NSW-IMOS_data_viz_app | R | false | false | 20,415 | r | # Script to process realtime SST observations
# Author: Steefan Contractor
library(ncdf4)
library(lubridate)
library(dplyr)
library(raster) #
library(leaflet)
library(htmltools)
library(htmlwidgets)
library(geosphere)
library(readr)
library(purrr)
# working directory is where the script is
basePath <- paste0(normalizePath("./"),"/")
##############################################################################
# Read and process entire month of ocean colour and sst data
##############################################################################
#sst
files <- list.files(paste0(basePath,"data/SST/"), pattern = glob2rx("*.nc"))
dates <- ymd(substr(files, 1,8))
df <- data.frame(date = dates, filename = files)
df <- arrange(df, desc(date))
# sst climatology
# 90th percentile
clim_90 <- brick("./data/SSTAARS_NSW_remapcon2.nc", varname = "TEMP_90th_perc")
clim_90 <- setZ(clim_90, z = ymd(strsplit(system("cdo showdate data/SSTAARS_NSW_remapcon2.nc", intern = T), split = " ")[[1]][-1]))
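# clim.index picks the climatology layer whose day-of-year is closest to the most recent SST
# date (df is sorted by descending date above, so df[1,'date'] is the latest observation)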
clim.index <- which.min(abs(yday(format(df[1,'date'], format = "%y-%m-%d")) - yday(getZ(clim_90))))
# 10th percentile
clim_10 <- brick("./data/SSTAARS_NSW_remapcon2.nc", varname = "TEMP_10th_perc")
clim_10 <- setZ(clim_10, z = ymd(strsplit(system("cdo showdate data/SSTAARS_NSW_remapcon2.nc", intern = T), split = " ")[[1]][-1]))
#oc
files <- list.files(paste0(basePath,"data/CHL_OC3/"), pattern = glob2rx("*.nc"))
dates <- ymd(substr(files, 7,7+8-1))
df_OC <- data.frame(date = dates, OC_filename = files)
df_OC <- arrange(df_OC, desc(date))
# merge with SST dataframe
df <- base::merge(df, df_OC, by = "date", all.x = T)
# Read and process every file in df
sst_month <- sapply(paste(df$date),function(x) NULL)
sst_10_month <- sapply(paste(df$date),function(x) NULL)
sst_90_month <- sapply(paste(df$date),function(x) NULL)
oc_month <- sapply(paste(df$date),function(x) NULL)
for (d in 1:nrow(df)) {
# sst
nc <- nc_open(paste0(basePath,"data/SST/",df$filename[d]))
lon <- ncvar_get(nc, "lon")
lat <- rev(ncvar_get(nc, "lat"))
sst <- ncvar_get(nc, "analysed_sst")
nc_close(nc)
# convert to raster
sst <- sst - 273.15
sst <- raster(t(sst[,length(lat):1]), xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat),
crs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
sst_90 <- sst
sst_90[which(sst_90[] < clim_90[[clim.index]][])] <- NA
sst_10 <- sst
sst_10[which(sst_10[] > clim_10[[clim.index]][])] <- NA
sst_month[[d]] <- sst
sst_10_month[[d]] <- sst_10
sst_90_month[[d]] <- sst_90
# chl_oc3
if (!is.na(df$OC_filename[d])) {
nc <- nc_open(paste0(basePath,"data/CHL_OC3/",df$OC_filename[d]))
dimnames <- names(nc$dim)
lon <- ncvar_get(nc, ifelse("longitude" %in% dimnames, "longitude", "lon"))
lat <- rev(ncvar_get(nc, ifelse("latitude" %in% dimnames, "latitude", "lat")))
chl_oc3 <- ncvar_get(nc, "chl_oc3")
nc_close(nc)
chl_oc3 <- raster(t(chl_oc3[,length(lat):1]), xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat),
crs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
# Mask values above 10
chl_oc3[which(values(chl_oc3) > 10)] <- NA
# Set values between >5 and 10 as 5
chl_oc3[which(values(chl_oc3) > 5)] <- 5
oc_month[[d]] <- chl_oc3
} else {
oc_month[[d]] <- NA
}
}
# save data
save(sst_month, sst_10_month, sst_90_month, oc_month, clim_90, clim_10, df, file = paste0(basePath,"data/SST/processedSSTandOC.Rdata"))
##########################
# HF Radar data
##########################
# wrapper function for min() that by default removes na (na.rm = T) and returns NA instead of Inf for all NA values
min_NA <- function(x) {
return(ifelse(all(is.na(x)), NA, min(x, na.rm = T)))
}
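# e.g. min_NA(c(NA, NA)) returns NA, whereas min(c(NA, NA), na.rm = TRUE) returns Inf with a warning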
# function to find the EAC core location, velocity, and distance from coast
find_EAC_char <- function(lon, lat, ucur, vcur) {
coffs_lat <- -30.29628
newc_lat <- -32.91667
# find location
vcur_min <- apply(vcur, 2, min_NA)
# replace any velocities greater than -0.3 with NA
vcur_min[which(vcur_min > -0.3)] <- NA
if (all(is.na(vcur_min))) {
return(NULL)
} else {
    # for the non-NA velocities above, find the index of the lon location
lon_ind_vcur_min <- sapply(1:length(lat), function(n) {ifelse(!is.na(vcur_min[n]),
which(vcur[,n] == vcur_min[n]),
NA)})
lat_ind_vcur_min <- which(!is.na(lon_ind_vcur_min))
lon_ind_vcur_min <- lon_ind_vcur_min[!is.na(lon_ind_vcur_min)]
# create a data frame with all necessary attributes
# EAC_char <- setNames(data.frame(matrix(ncol = 6, nrow = 0)), c("lon", "lat", "distance_coast", "ucur", "vcur"))
EAC_char <- data.frame(lon = lon[lon_ind_vcur_min], lat = lat[lat_ind_vcur_min],
ucur = mapply(function(x, y) {ucur[x, y]}, x = lon_ind_vcur_min, y = lat_ind_vcur_min),
vcur = mapply(function(x, y) {vcur[x, y]}, x = lon_ind_vcur_min, y = lat_ind_vcur_min))
# calculate EAC speed
EAC_char <- mutate(EAC_char, speed = sqrt(ucur^2+vcur^2))
# calculate EAC distance
# read coastline data
coast <- read_tsv(paste0(basePath, "data/eaccoast.dat"), col_names = c("lon", "lat"), skip = 1)
EAC_char <- mutate(EAC_char, coast_lon = as.numeric(sapply(lat, function(x) {coast$lon[which.min(abs(coast$lat - x))]})))
EAC_char <- mutate(EAC_char, coast_dist = distVincentySphere(cbind(EAC_char$lon, EAC_char$lat), cbind(EAC_char$coast_lon, EAC_char$lat))/1000)
# drop coast_lon column
EAC_char <- EAC_char %>% dplyr::select(-c(coast_lon, ucur, vcur))
if (max(lat) < coffs_lat) {
EAC_char <- slice(EAC_char, which.min(abs(lat - newc_lat)))
} else {
EAC_char <- slice(EAC_char, which.min(abs(lat - coffs_lat)))
}
return(EAC_char)
}
}
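# Hypothetical usage sketch (lon, lat, ucur_*, vcur_* as read from an hourly HF radar file below):
#   core <- find_EAC_char(lon, lat, ucur_newc, vcur_newc)
#   if (!is.null(core)) core$speed # EAC core speed (m/s) at the reference latitude
# find_EAC_char() returns NULL when no cell has a southward velocity stronger than -0.3 m/s.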
# create empty list to contain uv dataframes for each day
UVCart_month <- sapply(paste(df$date),function(x) NULL)
# create empty dataframe to contain EAC characteristics
EAC_char_month <- setNames(data.frame(matrix(ncol = 6, nrow = 0)), c("date","lon","lat","speed","coast_dist", "coffs"))
for (d in 1:nrow(df)) {
date <- format(df$date[d], format = "%Y%m%d")
fnames <- list.files(paste0(basePath, "data/HFRadar/NEWC"), pattern = glob2rx(paste0("*",date,"*.nc")))
if (length(fnames) > 2) {
# 9 am
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[1]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
lonlat <- expand.grid(lon, lat)
w = 0.2 # scaling factor for arrows
uv_cart_df_newc <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], lon1 = lonlat[,1]+c(ucur_newc)*w, lat1 = lonlat[,2]+c(vcur_newc)*w)
uv_cart_df_newc <- uv_cart_df_newc %>% filter(!is.na(lon1))
# 3 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[2]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
# find EAC characteristics
EAC_char <- find_EAC_char(lon, lat, ucur_newc, vcur_newc)
if (!is_null(EAC_char)) {EAC_char <- EAC_char %>% mutate(date = date, coffs = F) %>% dplyr::select(date, everything(), coffs)}
EAC_char_month <- rbind(EAC_char_month, EAC_char)
lonlat <- expand.grid(lon, lat)
# w = 1 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_newc)*w, vcur = c(vcur_newc)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_newc <- merge(uv_cart_df_newc, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_newc <- uv_cart_df_newc %>% mutate(lon2 = lon1 + ucur, lat2 = lat1 + vcur) %>% dplyr::select(-ucur, -vcur)
# 9 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/NEWC/", fnames[3]))
ucur_newc <- ncvar_get(nc, "UCUR")
vcur_newc <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
lat <- apply(lat, 2, mean)
lon <- apply(lon, 1, mean)
ucur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(vcur_newc)))] <- NA
vcur_newc[which(!(ucur_qc %in% c(1,2) & vcur_qc %in% c(1,2) & !is.na(ucur_newc)))] <- NA
lonlat <- expand.grid(lon, lat)
# w = 1 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_newc)*w, vcur = c(vcur_newc)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_newc <- merge(uv_cart_df_newc, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_newc <- uv_cart_df_newc %>% mutate(lon3 = lon2 + ucur, lat3 = lat2 + vcur) %>% dplyr::select(-ucur, -vcur)
} else {
ucur_newc <- c()
vcur_newc <- c()
uv_cart_df_newc <- setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("lon0", "lat0", "lon1", "lat1", "lon2", "lat2","lon3", "lat3"))
}
# save(ucur_newc, vcur_newc, uv_cart_df_newc, file = paste0(basePath, "data/HFRadar/NEWC/NEWC_HFRadar.RData"))
# COFH
fnames <- list.files(paste0(basePath, "data/HFRadar/COFH"), pattern = glob2rx(paste0("*",date,"*.nc")))
if (length(fnames) > 2) {
# 9 am
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[1]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
lonlat <- expand.grid(lon, lat)
w = 0.2 # scaling factor for arrows
uv_cart_df_cofh <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], lon1 = lonlat[,1]+c(ucur_cofh)*w, lat1 = lonlat[,2]+c(vcur_cofh)*w)
uv_cart_df_cofh <- uv_cart_df_cofh %>% filter(!is.na(lon1))
# 3 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[2]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
# find EAC characteristics
EAC_char <- find_EAC_char(lon, lat, ucur_cofh, vcur_cofh)
if (!is_null(EAC_char)) {EAC_char <- EAC_char %>% mutate(date = date, coffs = T) %>% dplyr::select(date, everything(), coffs)}
EAC_char_month <- rbind(EAC_char_month, EAC_char)
lonlat <- expand.grid(lon, lat)
# w = 0.11 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_cofh)*w, vcur = c(vcur_cofh)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_cofh <- merge(uv_cart_df_cofh, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_cofh <- uv_cart_df_cofh %>% mutate(lon2 = lon1 + ucur, lat2 = lat1 + vcur) %>% dplyr::select(-ucur, -vcur)
# 9 pm
nc <- nc_open(paste0(basePath, "data/HFRadar/COFH/", fnames[3]))
ucur_cofh <- ncvar_get(nc, "UCUR")
vcur_cofh <- ncvar_get(nc, "VCUR")
lat <- ncvar_get(nc, "LATITUDE")
lon <- ncvar_get(nc, "LONGITUDE")
ucur_qc <- ncvar_get(nc, "UCUR_quality_control")
vcur_qc <- ncvar_get(nc, "VCUR_quality_control")
nc_close(nc)
# lat <- apply(lat, 2, mean)
# lon <- apply(lon, 1, mean)
ucur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(vcur_cofh)))] <- NA
vcur_cofh[which(!(ucur_qc %in% c(1) & vcur_qc %in% c(1) & !is.na(ucur_cofh)))] <- NA
lonlat <- expand.grid(lon, lat)
# w = 0.11 # scaling factor for arrows
uv_cart_df <- data.frame(lon0 = lonlat[,1], lat0 = lonlat[,2], ucur = c(ucur_cofh)*w, vcur = c(vcur_cofh)*w)
uv_cart_df <- uv_cart_df %>% filter(!is.na(ucur))
uv_cart_df_cofh <- merge(uv_cart_df_cofh, uv_cart_df, by=c("lon0", "lat0"))
uv_cart_df_cofh <- uv_cart_df_cofh %>% mutate(lon3 = lon2 + ucur, lat3 = lat2 + vcur) %>% dplyr::select(-ucur, -vcur)
} else {
ucur_cofh <- c()
vcur_cofh <- c()
uv_cart_df_cofh <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("lon0", "lat0", "lon1", "lat1"))
}
ucur <- c(ucur_newc, ucur_cofh)
vcur <- c(vcur_newc, vcur_cofh)
uv_cart_df <- rbind(uv_cart_df_newc, uv_cart_df_cofh)
UVCart_month[[d]] <- uv_cart_df
}
save(UVCart_month, EAC_char_month, file = paste0(basePath, "data/HFRadar/HFRadar.RData"))
##################################
# 200m isobath from gebco (https://download.gebco.net/)
##################################
topo <- raster(file.path(basePath, "data", "gebco_2019_n-28.0_s-37.5_w149.5_e155.5.nc"))
topo[which(values(topo) > -200)] <- 0
topo[which(values(topo) < -200)] <- 1
isobath_200 <- rasterToContour(topo)
save(isobath_200, file = file.path(basePath, "data/isobath_200.RData"))
##################################
# save leaflet map on home page as html widget
##################################
# create leaflet map
# n <- 27
# date <- df$date[n]
# sst <- sst_month[[n]]
# sst_10 <- sst_10_month[[n]]
# sst_90 <- sst_90_month[[n]]
# oc <- oc_month[[n]]
#
# # determine colourmapping for sst raster image
# pal <- colorNumeric(
# palette = "magma",
# domain = values(sst),
# na.color = "#00000000",
# reverse = F)
#
# # determine colourmapping for oc raster image
# palOC <- colorNumeric(
# palette = "viridis",
# domain = log(values(oc)),
# na.color = "transparent",
# reverse = F)
#
# # load javascript plugin
# curveplugin <- htmlDependency("leaflet.curve", "0.5.2",
# src = file.path(normalizePath(basePath),"www"),
# script = "leaflet.curve.js")
#
# # A function that takes a plugin htmlDependency object and adds
# # it to the map. This ensures that however or whenever the map
# # gets rendered, the plugin will be loaded into the browser.
# registerPlugin <- function(map, plugin) {
# map$dependencies <- c(map$dependencies, list(plugin))
# map
# }
#
# m <- leaflet() %>% addTiles() #%>% setView(lng = 153.4, lat = -30.5, zoom = 9)
#
# m %>% addRasterImage(x = sst, colors = pal, group = "SST",opacity = 0.8) %>%
# addLegend(pal = pal, values = values(sst), opacity = 0.7, #labFormat = labelFormat(transform = function(x) {sort(x, decreasing = T)}),
# title = "Surface temp", group = c("SST"), position = "topleft") %>% #, labFormat = labelFormat(transform = function(x) sort(x, decreasing = TRUE))
# addRasterImage(x = sst_10, colors = pal, group = "Cold SSTs", opacity = 0.8) %>%
# addRasterImage(x = sst_90, colors = pal, group = "Warm SSTs", opacity = 0.8) %>%
# addLabelOnlyMarkers(lng = 151.4, lat = -27.9, label = HTML(paste("Date:<br>",date)),
# labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px")) %>%
# addRasterImage(x = log(oc), colors = palOC, group = "Ocean Colour", opacity = 0.8) %>%
# addLegend(pal = palOC, values = rev(log(values(oc))), labFormat = labelFormat(transform = exp), opacity = 0.7,
# title = "Ocean colour \n(Chl-a)", group = "Ocean Colour", position = "topleft",) %>%
# # addMarkers(data = stationLocs %>% filter(site_code == "CH100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% dplyr::filter(site_code == "CH100") %>% dplyr::select(site_code), paste(round(rTemps[1],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[1])),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "SYD100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% filter(site_code == "SYD100") %>% dplyr::select(site_code), paste(round(rTemps[2],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "right", textsize = "15px",
# # style = list("background-color" = rBG[2])),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "PH100"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>",
# # a(paste(stationLocs %>% dplyr::filter(site_code == "PH100") %>% dplyr::select(site_code)), onclick = "openTab('PH100_Clim')", href="#"),
# # paste(round(rTemps[3],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[3],
# # "pointer-events" = "auto")),
# # group = "Moorings") %>%
# # addMarkers(data = stationLocs %>% dplyr::filter(site_code == "BMP120"), lat = ~avg_lat, lng = ~avg_lon,
# # label = HTML(paste(sep = "<br/>", stationLocs %>% dplyr::filter(site_code == "BMP120") %>% dplyr::select(site_code), paste(round(rTemps[4],1), "degrees"))),
# # labelOptions = labelOptions(noHide = T, direction = "bottom", textsize = "15px",
# # style = list("background-color" = rBG[4])),
# # group = "Moorings") %>%
# #
# # Layers control
# addLayersControl(
# baseGroups = c("SST", "Cold SSTs", "Warm SSTs", "Ocean Colour"),
# # overlayGroups = c("SST", "Ocean Colour"),
# options = layersControlOptions(collapsed = FALSE, autoZIndex = T),
# position = "topleft"
# )# %>% addFlows(uv_cart_df$lon0, uv_cart_df$lat0, uv_cart_df$lon1, uv_cart_df$lat1, maxThickness = 0.5)
#
# m <- m %>% # Register ESRI plugin on this map instance
# registerPlugin(curveplugin) %>%
# # Add your custom JS logic here. The `this` keyword
# # refers to the Leaflet (JS) map object.
# onRender(paste("function(el, x) {",
# paste0("L.curve(['M', [", uv_cart_df$lat0, ",", uv_cart_df$lon0,
# "], 'C', [", uv_cart_df$lat1, ",", uv_cart_df$lon1, "], [",
# uv_cart_df$lat2, ",", uv_cart_df$lon2[], "], [",
# uv_cart_df$lat3, ",", uv_cart_df$lon3[], "]], ",
# "{weight: 0.5, color: 'white', animate: {duration: 1500, iterations: Infinity}}).addTo(this);", sep = " ", collapse = "\n"),
# "}",sep = "\n"))
#
# # save leaflet map as html widget
# system("if [ ! -d www/figures ]; then mkdir www/figures; fi")
# # system("if [ ! -d www/figures/libdir ]; then mkdir www/figures/libdir; fi")
# f <- "www/figures/home_leaflet_map.html"
# saveWidget(m, file=file.path(normalizePath(dirname(f)),basename(f)), libdir = "libdir",
# selfcontained = T)
|
T1=Sys.time()
###############################################################################################
# #
# SYNTHETIC CASE STUDY: CONDITIONAL MODEL GENERATION (FORMATION 4) #
# #
###############################################################################################
## WORKING DIRECTORY SETTING
setwd("/media/fouedjio/095e0241-4724-4a1f-84f8-9ddda0df2da9/fouedjio/3d_stochastic_implicit_modeling/")
## Packages Loading
library(factoextra)
library(data.table)
library(foreach)
library(doParallel)
library(plot3Drgl)
library(plot3D)
library(pracma)
library(FNN)
library(ggfortify)
library(polycor)
library(psych)
library(philentropy)
library(MASS)
library(ggplot2)
library(misc3d)
library(CCA)
library(RGeostats)
library(rockchalk)
library(Rglpk)
library(mvtnorm)
library(LowRankQP)
library(Rfast)
library(feather)
##CREATING SOME FOLDERS
if(!file.exists("./outputs/data/conditional_model_generation")){dir.create("./outputs/data/conditional_model_generation")}
if(!file.exists("./outputs/figures/9_conditional_model_generation")){dir.create("./outputs/figures/9_conditional_model_generation")}
if(!file.exists("./outputs/figures/9_conditional_model_generation/4")){dir.create("./outputs/figures/9_conditional_model_generation/4")}
if(!file.exists("./outputs/figures/9_conditional_model_generation/4/scores_rqp")){dir.create("./outputs/figures/9_conditional_model_generation/4/scores_rqp")}
## DATA LOADING
data=data_points=as.data.frame(fread("./inputs/data/drillhole_data.csv"))
contact_points=as.data.frame(fread("./inputs/data/contact_points.csv"))
grid=grid_points=as.data.frame(fread("./inputs/data/grid_points.csv"))
contact_points_cat=list()
contact_points_cat[[1]]=readRDS('./inputs/data/contact_points_cat.RDS')[[4]]
data_points_binary=as.data.frame(fread("./outputs/data/drillhole_data_binary.csv",header=TRUE))
psd_data=as.data.frame(fread("./outputs/data/psd_data.csv")[,4])
lithology_names=1:4
nsdf=length(lithology_names)
full_grid_points=rbindlist(list(grid_points[,1:3],data_points[,2:4]))
full_grid=rbindlist(list(grid_points[,1:3],data_points[,2:4]))
## TARGET GRID DEFINITION
extent_x=range(data_points$X)
extent_y=range(data_points$Y)
extent_z=range(data_points$Z)
cell_size_x=7
cell_size_y=7
cell_size_z=5
xx=seq(min(extent_x),max(extent_x),by=cell_size_x)
yy=seq(min(extent_y),max(extent_y),by=cell_size_y)
zz=seq(min(extent_z),max(extent_z),by=cell_size_z)
D=sqrt((max(extent_x)-min(extent_x))**2+(max(extent_y)-min(extent_y))**2+(max(extent_z)-min(extent_z))**2)
nx=length(xx)
ny=length(yy)
nz=length(zz)
nsimu=1000
## Prior distribution of PC SCORES (MEAN AND COVARIANCE MATRIX)
pca_prior_sdf=readRDS(paste("./outputs/data/pca_unconditional_realizations/pca_unconditional_sdf_",4,".rds",sep=""))
model_score=pca_prior_sdf$x
eigc=100*pca_prior_sdf$sdev^2/sum(pca_prior_sdf$sdev^2)
eigc1=round(eigc[1],digits=3)
eigc2=round(eigc[2],digits=3)
eigc3=round(eigc[3],digits=3)
alpha_PRIOR=as.matrix(pca_prior_sdf$x)
m_PRIOR=rep(0,nsimu)
c_PRIOR=diag(pca_prior_sdf$sdev^2)
## INEQUALITY CONSTRAINTS FORMULATION
ncat=1
cat_pca_eig_prior_sdf=list()
m_scale=list()
s_scale=list()
for ( j in 1:ncat)
{
cat_pca_eig_prior_sdf[[j]]=pca_prior_sdf$rotation[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid)),]
m_scale[[j]]=pca_prior_sdf$center[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid))]
s_scale[[j]]=pca_prior_sdf$scale[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid))]
}
cat_pca_eig_prior_sdf_data=list()
m_scale_data=list()
s_scale_data=list()
for ( j in 1:ncat)
{
cat_pca_eig_prior_sdf_data[[j]]=cat_pca_eig_prior_sdf[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data)),]
m_scale_data[[j]]=m_scale[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data))]
s_scale_data[[j]]=s_scale[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data))]
}
rhs_l=list()
for ( j in 1:ncat)
{
rhs_l[[j]]=matrix(rep(m_scale_data[[j]],nsimu),nrow=nsimu,byrow=T)/(matrix(rep(s_scale_data[[j]],nsimu),nrow=nsimu,byrow=T))
}
matcoef_l=list()
for ( j in 1:ncat)
{
matcoef_l[[j]]=cat_pca_eig_prior_sdf_data[[j]]
}
mat_coef=matcoef_l[[1]]
ge=matrix(NA,ncol=ncat,nrow=nrow(data))
for(j in 1:ncat)
{
ge[,j]=ifelse(as.matrix(psd_data[,j])<0,"<",">")
}
ge=c(ge[,1])
rm(cat_pca_eig_prior_sdf)
gc()
rm(matcoef_l)
gc()
## INITIAL SOLUTION BY LP
ind_select=which(ge==">")
upper_bounds_m=upper_bounds=rhs=-c(rhs_l[[1]][1,])
upper_bounds_m[ind_select]=-upper_bounds[ind_select]
ge_m=ge
ge_m[ind_select]="<"
mat_coef_m=mat_coef
mat_coef_m[ind_select,]=-mat_coef[ind_select,]
bounds=list(lower = list(ind = seq(1,nsimu), val = rep(-Inf,nsimu)), upper = list(ind = seq(1,nsimu), val = rep(Inf,nsimu)))
par_init_model_score=Rglpk_solve_LP(obj = numeric(nsimu), mat = mat_coef_m, dir = ge_m, rhs =upper_bounds_m-1e-6,bounds=bounds,max=TRUE)$solution
ff=mat_coef_m%*%par_init_model_score
table(ff<=(upper_bounds_m))
m_PRIOR=rep(0,nsimu)#colMeans(alpha_PRIOR)
c_PRIOR=diag(pca_prior_sdf$sdev^2)#cov(alpha_PRIOR)
## RANDOMIZED QUADRATIC PROGRAMMING
closeAllConnections()
NbCores=detectCores()
Cl=makeCluster(NbCores)
registerDoParallel(Cl)
m_PRIOR=rep(0,nsimu)
ii=which(pca_prior_sdf$sdev==0)
pca_prior_sdf$sdev[ii]=.Machine$double.eps
c_PRIOR=diag(pca_prior_sdf$sdev^2)
c_PRIOR_=cov(as.matrix(pca_prior_sdf$x))
c_PRIOR_inv=diag(1/pca_prior_sdf$sdev^2)
rm(pca_prior_sdf)
gc()
n_pos_simu=1000
alpha_PRIOR=t(rmvnorm(n_pos_simu,m_PRIOR,c_PRIOR))
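# Each draw below is a quadratic programme: minimise (x - alpha)' C_PRIOR^-1 (x - alpha) subject to
# mat_coef_m %*% x <= upper_bounds_m - 1e-6, i.e. the unconditional Gaussian sample alpha_PRIOR[,l]
# is projected, in the Mahalanobis metric, onto the polytope defined by the drillhole inequality
# constraints (quadprog::solve.QP minimises 1/2 x'Dx - d'x subject to A'x >= b).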
LUR_Res=foreach(l=1:n_pos_simu,.combine = rbind)%dopar%
{
library(quadprog)
solve.QP(c_PRIOR_inv, alpha_PRIOR[,l]%*%c_PRIOR_inv, -t(mat_coef_m), -upper_bounds_m+1e-6)$solution
}
closeAllConnections()
utest=rep(NA,n_pos_simu)
for (i in 1:n_pos_simu)
{
ff=mat_coef_m%*%LUR_Res[i,]
utest[i]=table(ff<=(upper_bounds_m))[1]
}
if(sum(utest)==nrow(mat_coef_m)*n_pos_simu){cat("RQP Verified!")}
for ( l in seq(1,nsimu-1,by=1))
{
png(paste("./outputs/figures/9_conditional_model_generation/4/scores_rqp/Con_Scores_",l,"_",l+1,".png",sep=""),width=5,height=5,units="in",res=300)
plot(model_score[,c(l,l+1)],col=1,pch=19,xlab=paste("PC",l," (",round(eigc[l],digits=3),"% )"),ylab=paste("PC",l+1," (",round(eigc[l+1],digits=3),"% )"),xlim=c(min(LUR_Res[,l],model_score[,l]),max(LUR_Res[,l],model_score[,l])),ylim=c(min(LUR_Res[,l+1],model_score[,l+1]),max(LUR_Res[,l+1],model_score[,l+1])))
points(LUR_Res[,c(l)],LUR_Res[,c(l+1)],col=2,pch=19)
legend("bottomleft",legend=c("Unconditional","Conditional"),pch=c(19,19),col=c(1,2),cex=c(0.7,0.7))
dev.off()
}
saveRDS(LUR_Res,"./outputs/data/conditional_model_generation/RQP_4.RDS")
## RECONSTRUCTION LUR POSTERIOR LEVEL SET
n_pos_simu=nrow(LUR_Res)
pca_prior_sdf=readRDS(paste("./outputs/data/pca_unconditional_realizations/pca_unconditional_sdf_",4,".rds",sep=""))
recon_=as.matrix(LUR_Res)%*% t(as.matrix(pca_prior_sdf$rotation))
a=1/pca_prior_sdf$scale
b=-pca_prior_sdf$center
rm(pca_prior_sdf)
gc()
recon_=scale(recon_, center=FALSE, scale=a)
recon_=scale(recon_, center=b, scale=FALSE)
signed_distance_fields_posterior_=t(recon_)
rm(recon_)
gc()
write_feather(as.data.frame(signed_distance_fields_posterior_),paste("./outputs/data/conditional_model_generation/conditional_sdf_model_",4,".feather",sep=""))
T2=Sys.time()
difftime(T2,T1)
| /code/scripts/9_conditional_model_generation_4_script.R | no_license | emanuelhuber/3D-Stochastic-Implicit-Modeling | R | false | false | 7,382 | r | T1=Sys.time()
###############################################################################################
# #
# SYNTHETIC CASE STUDY: CONDITIONAL MODEL GENERATION (FORMATION 4) #
# #
###############################################################################################
## WORKING DIRECTORY SETTING
setwd("/media/fouedjio/095e0241-4724-4a1f-84f8-9ddda0df2da9/fouedjio/3d_stochastic_implicit_modeling/")
## Packages Loading
library(factoextra)
library(data.table)
library(foreach)
library(doParallel)
library(plot3Drgl)
library(plot3D)
library(pracma)
library(FNN)
library(ggfortify)
library(polycor)
library(psych)
library(philentropy)
library(MASS)
library(ggplot2)
library(misc3d)
library(CCA)
library(RGeostats)
library(rockchalk)
library(Rglpk)
library(mvtnorm)
library(LowRankQP)
library(Rfast)
library(feather)
##CREATING SOME FOLDERS
if(!file.exists("./outputs/data/conditional_model_generation")){dir.create("./outputs/data/conditional_model_generation")}
if(!file.exists("./outputs/figures/9_conditional_model_generation")){dir.create("./outputs/figures/9_conditional_model_generation")}
if(!file.exists("./outputs/figures/9_conditional_model_generation/4")){dir.create("./outputs/figures/9_conditional_model_generation/4")}
if(!file.exists("./outputs/figures/9_conditional_model_generation/4/scores_rqp")){dir.create("./outputs/figures/9_conditional_model_generation/4/scores_rqp")}
## DATA LOADING
data=data_points=as.data.frame(fread("./inputs/data/drillhole_data.csv"))
contact_points=as.data.frame(fread("./inputs/data/contact_points.csv"))
grid=grid_points=as.data.frame(fread("./inputs/data/grid_points.csv"))
contact_points_cat=list()
contact_points_cat[[1]]=readRDS('./inputs/data/contact_points_cat.RDS')[[4]]
data_points_binary=as.data.frame(fread("./outputs/data/drillhole_data_binary.csv",header=TRUE))
psd_data=as.data.frame(fread("./outputs/data/psd_data.csv")[,4])
lithology_names=1:4
nsdf=length(lithology_names)
full_grid_points=rbindlist(list(grid_points[,1:3],data_points[,2:4]))
full_grid=rbindlist(list(grid_points[,1:3],data_points[,2:4]))
## TARGET GRID DEFINITION
extent_x=range(data_points$X)
extent_y=range(data_points$Y)
extent_z=range(data_points$Z)
cell_size_x=7
cell_size_y=7
cell_size_z=5
xx=seq(min(extent_x),max(extent_x),by=cell_size_x)
yy=seq(min(extent_y),max(extent_y),by=cell_size_y)
zz=seq(min(extent_z),max(extent_z),by=cell_size_z)
D=sqrt((max(extent_x)-min(extent_x))**2+(max(extent_y)-min(extent_y))**2+(max(extent_z)-min(extent_z))**2)
nx=length(xx)
ny=length(yy)
nz=length(zz)
nsimu=1000
## Prior distribution of PC SCORES (MEAN AND COVARIANCE MATRIX)
pca_prior_sdf=readRDS(paste("./outputs/data/pca_unconditional_realizations/pca_unconditional_sdf_",4,".rds",sep=""))
model_score=pca_prior_sdf$x
eigc=100*pca_prior_sdf$sdev^2/sum(pca_prior_sdf$sdev^2)
eigc1=round(eigc[1],digits=3)
eigc2=round(eigc[2],digits=3)
eigc3=round(eigc[3],digits=3)
alpha_PRIOR=as.matrix(pca_prior_sdf$x)
m_PRIOR=rep(0,nsimu)
c_PRIOR=diag(pca_prior_sdf$sdev^2)
## INEQUALITY CONSTRAINTS FORMULATION
ncat=1
cat_pca_eig_prior_sdf=list()
m_scale=list()
s_scale=list()
for ( j in 1:ncat)
{
cat_pca_eig_prior_sdf[[j]]=pca_prior_sdf$rotation[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid)),]
m_scale[[j]]=pca_prior_sdf$center[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid))]
s_scale[[j]]=pca_prior_sdf$scale[((j-1)*nrow(full_grid)+1):(j*nrow(full_grid))]
}
cat_pca_eig_prior_sdf_data=list()
m_scale_data=list()
s_scale_data=list()
for ( j in 1:ncat)
{
cat_pca_eig_prior_sdf_data[[j]]=cat_pca_eig_prior_sdf[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data)),]
m_scale_data[[j]]=m_scale[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data))]
s_scale_data[[j]]=s_scale[[j]][(nrow(grid) +1):(nrow(grid)+nrow(data))]
}
rhs_l=list()
for ( j in 1:ncat)
{
rhs_l[[j]]=matrix(rep(m_scale_data[[j]],nsimu),nrow=nsimu,byrow=T)/(matrix(rep(s_scale_data[[j]],nsimu),nrow=nsimu,byrow=T))
}
matcoef_l=list()
for ( j in 1:ncat)
{
matcoef_l[[j]]=cat_pca_eig_prior_sdf_data[[j]]
}
mat_coef=matcoef_l[[1]]
ge=matrix(NA,ncol=ncat,nrow=nrow(data))
for(j in 1:ncat)
{
ge[,j]=ifelse(as.matrix(psd_data[,j])<0,"<",">")
}
ge=c(ge[,1])
rm(cat_pca_eig_prior_sdf)
gc()
rm(matcoef_l)
gc()
## INITIAL SOLUTION BY LP
ind_select=which(ge==">")
upper_bounds_m=upper_bounds=rhs=-c(rhs_l[[1]][1,])
upper_bounds_m[ind_select]=-upper_bounds[ind_select]
ge_m=ge
ge_m[ind_select]="<"
mat_coef_m=mat_coef
mat_coef_m[ind_select,]=-mat_coef[ind_select,]
bounds=list(lower = list(ind = seq(1,nsimu), val = rep(-Inf,nsimu)), upper = list(ind = seq(1,nsimu), val = rep(Inf,nsimu)))
par_init_model_score=Rglpk_solve_LP(obj = numeric(nsimu), mat = mat_coef_m, dir = ge_m, rhs =upper_bounds_m-1e-6,bounds=bounds,max=TRUE)$solution
ff=mat_coef_m%*%par_init_model_score
table(ff<=(upper_bounds_m))
m_PRIOR=rep(0,nsimu)#colMeans(alpha_PRIOR)
c_PRIOR=diag(pca_prior_sdf$sdev^2)#cov(alpha_PRIOR)
## RANDOMIZED QUADRATIC PROGRAMMING
closeAllConnections()
NbCores=detectCores()
Cl=makeCluster(NbCores)
registerDoParallel(Cl)
m_PRIOR=rep(0,nsimu)
ii=which(pca_prior_sdf$sdev==0)
pca_prior_sdf$sdev[ii]=.Machine$double.eps
c_PRIOR=diag(pca_prior_sdf$sdev^2)
c_PRIOR_=cov(as.matrix(pca_prior_sdf$x))
c_PRIOR_inv=diag(1/pca_prior_sdf$sdev^2)
rm(pca_prior_sdf)
gc()
n_pos_simu=1000
alpha_PRIOR=t(rmvnorm(n_pos_simu,m_PRIOR,c_PRIOR))
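# Each draw below is a quadratic programme: minimise (x - alpha)' C_PRIOR^-1 (x - alpha) subject to
# mat_coef_m %*% x <= upper_bounds_m - 1e-6, i.e. the unconditional Gaussian sample alpha_PRIOR[,l]
# is projected, in the Mahalanobis metric, onto the polytope defined by the drillhole inequality
# constraints (quadprog::solve.QP minimises 1/2 x'Dx - d'x subject to A'x >= b).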
LUR_Res=foreach(l=1:n_pos_simu,.combine = rbind)%dopar%
{
library(quadprog)
solve.QP(c_PRIOR_inv, alpha_PRIOR[,l]%*%c_PRIOR_inv, -t(mat_coef_m), -upper_bounds_m+1e-6)$solution
}
closeAllConnections()
utest=rep(NA,n_pos_simu)
for (i in 1:n_pos_simu)
{
ff=mat_coef_m%*%LUR_Res[i,]
utest[i]=table(ff<=(upper_bounds_m))[1]
}
if(sum(utest)==nrow(mat_coef_m)*n_pos_simu){cat("RQP Verified!")}
for ( l in seq(1,nsimu-1,by=1))
{
png(paste("./outputs/figures/9_conditional_model_generation/4/scores_rqp/Con_Scores_",l,"_",l+1,".png",sep=""),width=5,height=5,units="in",res=300)
plot(model_score[,c(l,l+1)],col=1,pch=19,xlab=paste("PC",l," (",round(eigc[l],digits=3),"% )"),ylab=paste("PC",l+1," (",round(eigc[l+1],digits=3),"% )"),xlim=c(min(LUR_Res[,l],model_score[,l]),max(LUR_Res[,l],model_score[,l])),ylim=c(min(LUR_Res[,l+1],model_score[,l+1]),max(LUR_Res[,l+1],model_score[,l+1])))
points(LUR_Res[,c(l)],LUR_Res[,c(l+1)],col=2,pch=19)
legend("bottomleft",legend=c("Unconditional","Conditional"),pch=c(19,19),col=c(1,2),cex=c(0.7,0.7))
dev.off()
}
saveRDS(LUR_Res,"./outputs/data/conditional_model_generation/RQP_4.RDS")
## RECONSTRUCTION LUR POSTERIOR LEVEL SET
n_pos_simu=nrow(LUR_Res)
pca_prior_sdf=readRDS(paste("./outputs/data/pca_unconditional_realizations/pca_unconditional_sdf_",4,".rds",sep=""))
recon_=as.matrix(LUR_Res)%*% t(as.matrix(pca_prior_sdf$rotation))
a=1/pca_prior_sdf$scale
b=-pca_prior_sdf$center
rm(pca_prior_sdf)
gc()
recon_=scale(recon_, center=FALSE, scale=a)
recon_=scale(recon_, center=b, scale=FALSE)
signed_distance_fields_posterior_=t(recon_)
rm(recon_)
gc()
write_feather(as.data.frame(signed_distance_fields_posterior_),paste("./outputs/data/conditional_model_generation/conditional_sdf_model_",4,".feather",sep=""))
T2=Sys.time()
difftime(T2,T1)
|
## extract the public tag records from a DTA file
publicDTA = function(
DTAfile = NULL, ## path to input file
tagDB = NULL, ## path to public tag database file
myproj = NULL, ## project code
site = NULL ## site code
) {
DTAfile = chooseDTAFile(DTAfile)
tagDB = chooseDBFile(tagDB)
depYear = substr(tagDB, 1, 4)
myproj = chooseProject(myproj)
if (is.null(site)) {
cat("Please enter the site code for file ", basename(DTAfile), ":\n")
site = readLines(n=1)
if (length(site) == 0)
stop("Cancelled")
}
dtalines = readLines(DTAfile)
dtaout = readDTA(lines=dtalines)
pubtags = read.csv(tagDB, as.is=TRUE)
outfile = sub("\\.dta$", "_raw_public_tags.rds", DTAfile, ignore.case=TRUE) ## output file
rv = list(recv=dtaout$recv, proj=myproj, site=site, tags=subset(dtaout$tags, id %in% pubtags$id))
saveRDS(rv, outfile)
cat(sprintf("Wrote public tag records to file %s\n", outfile))
return(invisible(rv))
}
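## Usage sketch (paths, project and site codes below are hypothetical examples;
## when arguments are left NULL the choose*() helpers prompt interactively):
## pub <- publicDTA(DTAfile = "site1.DTA", tagDB = "2014_public_tags.csv",
## myproj = "MYPROJ", site = "SITE1")
## str(pub$tags) ## public-tag detections kept from the DTA file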
| /R/publicDTA.R | no_license | jbrzusto/sensorgnome-R-package | R | false | false | 980 | r | ## extract the public tag records from a DTA file
publicDTA = function(
DTAfile = NULL, ## path to input file
tagDB = NULL, ## path to public tag database file
myproj = NULL, ## project code
site = NULL ## site code
) {
DTAfile = chooseDTAFile(DTAfile)
tagDB = chooseDBFile(tagDB)
depYear = substr(tagDB, 1, 4)
myproj = chooseProject(myproj)
if (is.null(site)) {
cat("Please enter the site code for file ", basename(DTAfile), ":\n")
site = readLines(n=1)
if (length(site) == 0)
stop("Cancelled")
}
dtalines = readLines(DTAfile)
dtaout = readDTA(lines=dtalines)
pubtags = read.csv(tagDB, as.is=TRUE)
outfile = sub("\\.dta$", "_raw_public_tags.rds", DTAfile, ignore.case=TRUE) ## output file
rv = list(recv=dtaout$recv, proj=myproj, site=site, tags=subset(dtaout$tags, id %in% pubtags$id))
saveRDS(rv, outfile)
cat(sprintf("Wrote public tag records to file %s\n", outfile))
return(invisible(rv))
}
|
#------------------------------------------------------------------------#
# TITLE: netcdf_access_PDSI_days.R
#
# AUTHOR: Erich Seamon
#
# INSTITUITON: College of Natural Resources
# University of Idaho
#
# DATE: Feb 1, 2019
#
# STAGE: netcdf access
#
# COMMENTS: This script opens and displays netcdf data.
#
#--Setting the working directory and clearing the workspace-----------#
#netcdf_access(climatevar_short, climatevar, year )
#netcdf_access
#number of days under PDSI across the study years (2007-2012)
#netcdf_access_PDSI <- function(year) {
for (h in 2007:2012) {
#library("ncdf")
library("zoo")
library("raster")
library("sp")
library("rgeos")
library("rgdal")
library("proj4")
library("RNetCDF")
library("ncdf4")
library("RColorBrewer")
library("raster")
#library("rasterVis")
library("latticeExtra")
library("maptools")
library("parallel")
library("Evapotranspiration")
library("plyr")
library("data.table")
library("sirad")
library("rgdal")
library("stringr")
library("leaflet")
setwd("/nethome/erichs/counties/")
counties <- readShapePoly('UScounties.shp',
proj4string=CRS
("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
projection = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
#counties <- counties[grep("Idaho|Washington|Oregon|Montana", counties@data$STATE_NAME),]
#counties <- counties[grep("Washington", counties@data$STATE_NAME),]
#subsets to CONUS
counties <- subset(counties, STATE_NAME != "Alaska")
counties <- subset(counties, STATE_NAME != "Hawaii")
#counties <- subset(counties, STATE_NAME == "Idaho")
#counties <- subset(counties, NAME == "Latah")
#--loop list for county by fip
countyfiploop <- counties@data$FIPS
#--data frame of county fip list
countyfiplist <- data.frame(counties@data$FIPS)
#--data frame of county names
countynames <- data.frame(counties@data$NAME)
statenames <- data.frame(counties@data$STATE_NAME)
#combo of county names and fip for this list
countylist <- cbind(statenames, countynames, countyfiplist)
colnames(countylist) <- c("STATE_NAME", "NAME", "FIPS")
#--number of rows in county list
countylistrows <- nrow(countylist)
climatevar_short <- "pdsi"
climatevar <- "palmer_drought_severity_index"
nc <- nc_open(paste("http://thredds.northwestknowledge.net:8080/thredds/dodsC/MET/pet/monthly/", "pet_gridMET", ".nc?lon[0:1:1385],lat[0:1:584],", "eto", "[0:1:0][0:1:0][0:1:0],time[0:1:0]", sep=""))
nc2 <- nc_open(paste("http://thredds.northwestknowledge.net:8080/thredds/dodsC/MET/pr/monthly/", "pr_gridMET", ".nc?lon[0:1:1385],lat[0:1:584],", "pr", "[0:1:0][0:1:0][0:1:0],time[0:1:0]", sep=""))
# nc$var$eto$dim[[1]]$name
# nc$var$eto$dim[[2]]$name
# nc$var$eto$dim[[3]]$name
##--
# extract variable name, size and dimension
v <- nc$var[[1]]
size <- v$varsize
dims <- v$ndims
time <- 1
nt <- size[time] # length of time dimension
lat <- nc$dim$lat$vals # latitude position
lon <- nc$dim$lon$vals # longitude position
# read sst variable
r<-list()
for (i in 1:nt) {
start <- rep(1,dims) # begin with start=(1,1,...,1)
start[1] <- i # change to start=(1,1,...,i) to read timestep i
count <- size # begin with count=(nx,ny,...,nt), reads entire var
count[1] <- 1 # change to count=(nx,ny,...,1) to read 1 tstep
dt<-ncvar_get(nc, varid = 'eto', start = start, count = count)
# convert to raster
r[i]<-raster(dt)
r[i] <- rotate(r[[i]])
extent(r[[i]]) <- c(-124.7667, -67.0583, 25.0667, 49.4000)
crs(r[[i]]) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
}
# extract variable name, size and dimension
v <- nc2$var[[1]]
size <- v$varsize
dims <- v$ndims
time <- 1
nt <- size[time] # length of time dimension
lat <- nc2$dim$lat$vals # latitude position
lon <- nc2$dim$lon$vals # longitude position
# read sst variable
r2<-list()
for (i in 1:nt) {
start <- rep(1,dims) # begin with start=(1,1,...,1)
start[1] <- i # change to start=(1,1,...,i) to read timestep i
count <- size # begin with count=(nx,ny,...,nt), reads entire var
count[1] <- 1 # change to count=(nx,ny,...,1) to read 1 tstep
dt<-ncvar_get(nc2, varid = 'pr', start = start, count = count)
# convert to raster
r2[i]<-raster(dt)
r2[i] <- rotate(r2[[i]])
extent(r2[[i]]) <- c(-124.7667, -67.0583, 25.0667, 49.4000)
crs(r2[[i]]) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
}
r_pet <- brick(r)
r_pr <- brick(r2)
r_bal <- r_pr - r_pet
funSPEI <- function(x, scale=1, na.rm=TRUE,...) as.numeric((SPEI::spei(x, scale=scale, na.rm=na.rm, ...))$fitted)
rstSPEI <- raster::calc(r_bal, fun = funSPEI)
sel <- subset(rstSPEI, 325:396) #2007-2012 by month
sel2007 <- subset(rstSPEI, 325:336)
sel2008 <- subset(rstSPEI, 336:347)
sel2009 <- subset(rstSPEI, 348:359)
sel2010 <- subset(rstSPEI, 360:371)
sel2011 <- subset(rstSPEI, 372:383)
sel2012 <- subset(rstSPEI, 384:396)
#spei moderate drought - -1.00 and below
#get mean by month, grouping every three layers of pdsi
groupn=function(n,m){rep(1:m,rep(n/m,m))}
group3 = groupn(36,12)
f = function(v){tapply(v,group3,mean)}
# out = calc(r3, f)   # disabled: 'r3' is never defined in this script, so this line would error
#spei2007
newmatrix <- matrix(NA, nrow=countylistrows, ncol=13)
layer <- c(1:12)
for(ii in layer) {
jj = 0
for (l in countyfiploop) {
jj = jj + 1
subset_county <- counties[counties@data$FIPS == l,]
i2 <- paste("layer.", ii + 324, sep="")
i3 <- ii + 1
e <- extract(sel2007[[i2]], subset_county)
newmatrix[jj,i3] <- mean(e[[1]])
newmatrix[jj,1] <- l
}
}
nm <- as.data.frame(matrix(NA, nrow = nrow(newmatrix), ncol = 13))
nm$countyFIPS <- as.numeric(as.character(newmatrix[,1]))
nm$jan <- as.numeric(as.character(newmatrix[,2]))
nm$feb <- as.numeric(as.character(newmatrix[,3]))
nm$mar <- as.numeric(as.character(newmatrix[,4]))
nm$apr <- as.numeric(as.character(newmatrix[,5]))
nm$may <- as.numeric(as.character(newmatrix[,6]))
nm$jun <- as.numeric(as.character(newmatrix[,7]))
nm$jul <- as.numeric(as.character(newmatrix[,8]))
nm$aug <- as.numeric(as.character(newmatrix[,9]))
nm$sep <- as.numeric(as.character(newmatrix[,10]))
nm$oct <- as.numeric(as.character(newmatrix[,11]))
nm$nov <- as.numeric(as.character(newmatrix[,12]))
nm$dec <- as.numeric(as.character(newmatrix[,13]))
nm2 <- data.frame(nm$countyFIPS, nm$jan, nm$feb, nm$mar, nm$apr, nm$may, nm$jun, nm$jul, nm$aug, nm$sep, nm$oct, nm$nov, nm$dec)
colnames(nm2) <- c("FIPS","spei_jan", "spei_feb", "spei_mar", "spei_apr", "spei_may", "spei_jun", "spei_jul", "spei_aug", "spei_sep", "spei_oct", "spei_nov", "spei_dec" )
nm2$FIPS <- str_pad(nm2$FIPS, 5, pad = "0")
newmatrix2 <- matrix(NA, nrow=countylistrows, ncol=2)
for (k in 1:nrow(nm2)) {
newmatrix2[k,1] <- sum(nm2[k,2:13] < -1)
newmatrix2[k,2] <- nm2[k,1]
}
newmatrix2[,1] <- as.numeric(newmatrix2[,1])
newmatrix2[,2] <- as.numeric(newmatrix2[,2])
colnames(newmatrix2) <- c(paste(h, "_days", sep=""), "FIPS")
assign(paste("spei_moderate_drought_", h, sep=""), data.frame(newmatrix2))
}
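# Follow-up sketch: combine the per-year county tables produced by the loop above
# into one data frame keyed by FIPS (object names assume the loop completed for
# every h in 2007:2012; the merged object name is illustrative).
spei_moderate_drought_all <- Reduce(function(x, y) merge(x, y, by = "FIPS"),
  list(spei_moderate_drought_2007, spei_moderate_drought_2008,
       spei_moderate_drought_2009, spei_moderate_drought_2010,
       spei_moderate_drought_2011, spei_moderate_drought_2012))
head(spei_moderate_drought_all)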
| /SHEAF_climate_access_spei.R | no_license | soilhealthfeedback/SHEAF_EDA | R | false | false | 7,541 | r | #------------------------------------------------------------------------#
# TITLE: netcdf_access_PDSI_days.R
#
# AUTHOR: Erich Seamon
#
# INSTITUITON: College of Natural Resources
# University of Idaho
#
# DATE: Feb 1, 2019
#
# STAGE: netcdf access
#
# COMMENTS: This script opens and displays netcdf data.
#
#--Setting the working directory and clearing the workspace-----------#
#netcdf_access(climatevar_short, climatevar, year )
#netcdf_access
#number of days under PDSI across the study years (2007-2012)
#netcdf_access_PDSI <- function(year) {
for (h in 2007:2012) {
#library("ncdf")
library("zoo")
library("raster")
library("sp")
library("rgeos")
library("rgdal")
library("proj4")
library("RNetCDF")
library("ncdf4")
library("RColorBrewer")
library("raster")
#library("rasterVis")
library("latticeExtra")
library("maptools")
library("parallel")
library("Evapotranspiration")
library("plyr")
library("data.table")
library("sirad")
library("rgdal")
library("stringr")
library("leaflet")
setwd("/nethome/erichs/counties/")
counties <- readShapePoly('UScounties.shp',
proj4string=CRS
("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
projection = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
#counties <- counties[grep("Idaho|Washington|Oregon|Montana", counties@data$STATE_NAME),]
#counties <- counties[grep("Washington", counties@data$STATE_NAME),]
#subsets to CONUS
counties <- subset(counties, STATE_NAME != "Alaska")
counties <- subset(counties, STATE_NAME != "Hawaii")
#counties <- subset(counties, STATE_NAME == "Idaho")
#counties <- subset(counties, NAME == "Latah")
#--loop list for county by fip
countyfiploop <- counties@data$FIPS
#--data frame of county fip list
countyfiplist <- data.frame(counties@data$FIPS)
#--data frame of county names
countynames <- data.frame(counties@data$NAME)
statenames <- data.frame(counties@data$STATE_NAME)
#combo of county names and fip for this list
countylist <- cbind(statenames, countynames, countyfiplist)
colnames(countylist) <- c("STATE_NAME", "NAME", "FIPS")
#--number of rows in county list
countylistrows <- nrow(countylist)
climatevar_short <- "pdsi"
climatevar <- "palmer_drought_severity_index"
nc <- nc_open(paste("http://thredds.northwestknowledge.net:8080/thredds/dodsC/MET/pet/monthly/", "pet_gridMET", ".nc?lon[0:1:1385],lat[0:1:584],", "eto", "[0:1:0][0:1:0][0:1:0],time[0:1:0]", sep=""))
nc2 <- nc_open(paste("http://thredds.northwestknowledge.net:8080/thredds/dodsC/MET/pr/monthly/", "pr_gridMET", ".nc?lon[0:1:1385],lat[0:1:584],", "pr", "[0:1:0][0:1:0][0:1:0],time[0:1:0]", sep=""))
# nc$var$eto$dim[[1]]$name
# nc$var$eto$dim[[2]]$name
# nc$var$eto$dim[[3]]$name
##--
# extract variable name, size and dimension
v <- nc$var[[1]]
size <- v$varsize
dims <- v$ndims
time <- 1
nt <- size[time] # length of time dimension
lat <- nc$dim$lat$vals # latitude position
lon <- nc$dim$lon$vals # longitude position
# read sst variable
r<-list()
for (i in 1:nt) {
start <- rep(1,dims) # begin with start=(1,1,...,1)
start[1] <- i # change to start=(1,1,...,i) to read timestep i
count <- size # begin with count=(nx,ny,...,nt), reads entire var
count[1] <- 1 # change to count=(nx,ny,...,1) to read 1 tstep
dt<-ncvar_get(nc, varid = 'eto', start = start, count = count)
# convert to raster
r[i]<-raster(dt)
r[i] <- rotate(r[[i]])
extent(r[[i]]) <- c(-124.7667, -67.0583, 25.0667, 49.4000)
crs(r[[i]]) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
}
# extract variable name, size and dimension
v <- nc2$var[[1]]
size <- v$varsize
dims <- v$ndims
time <- 1
nt <- size[time] # length of time dimension
lat <- nc2$dim$lat$vals # latitude position
lon <- nc2$dim$lon$vals # longitude position
# read sst variable
r2<-list()
for (i in 1:nt) {
start <- rep(1,dims) # begin with start=(1,1,...,1)
start[1] <- i # change to start=(1,1,...,i) to read timestep i
count <- size # begin with count=(nx,ny,...,nt), reads entire var
count[1] <- 1 # change to count=(nx,ny,...,1) to read 1 tstep
dt<-ncvar_get(nc2, varid = 'pr', start = start, count = count)
# convert to raster
r2[i]<-raster(dt)
r2[i] <- rotate(r2[[i]])
extent(r2[[i]]) <- c(-124.7667, -67.0583, 25.0667, 49.4000)
crs(r2[[i]]) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
}
r_pet <- brick(r)
r_pr <- brick(r2)
r_bal <- r_pr - r_pet
funSPEI <- function(x, scale=1, na.rm=TRUE,...) as.numeric((SPEI::spei(x, scale=scale, na.rm=na.rm, ...))$fitted)
rstSPEI <- raster::calc(r_bal, fun = funSPEI)
sel <- subset(rstSPEI, 325:396) #2007-2012 by month
sel2007 <- subset(rstSPEI, 325:336)
sel2008 <- subset(rstSPEI, 336:347)
sel2009 <- subset(rstSPEI, 348:359)
sel2010 <- subset(rstSPEI, 360:371)
sel2011 <- subset(rstSPEI, 372:383)
sel2012 <- subset(rstSPEI, 384:396)
#spei moderate drought - -1.00 and below
#get mean by month, grouping every three layers of pdsi
groupn=function(n,m){rep(1:m,rep(n/m,m))}
group3 = groupn(36,12)
f = function(v){tapply(v,group3,mean)}
# out = calc(r3, f)   # disabled: 'r3' is never defined in this script, so this line would error
#spei2007
newmatrix <- matrix(NA, nrow=countylistrows, ncol=13)
layer <- c(1:12)
for(ii in layer) {
jj = 0
for (l in countyfiploop) {
jj = jj + 1
subset_county <- counties[counties@data$FIPS == l,]
i2 <- paste("layer.", ii + 324, sep="")
i3 <- ii + 1
e <- extract(sel2007[[i2]], subset_county)
newmatrix[jj,i3] <- mean(e[[1]])
newmatrix[jj,1] <- l
}
}
nm <- as.data.frame(matrix(NA, nrow = nrow(newmatrix), ncol = 13))
nm$countyFIPS <- as.numeric(as.character(newmatrix[,1]))
nm$jan <- as.numeric(as.character(newmatrix[,2]))
nm$feb <- as.numeric(as.character(newmatrix[,3]))
nm$mar <- as.numeric(as.character(newmatrix[,4]))
nm$apr <- as.numeric(as.character(newmatrix[,5]))
nm$may <- as.numeric(as.character(newmatrix[,6]))
nm$jun <- as.numeric(as.character(newmatrix[,7]))
nm$jul <- as.numeric(as.character(newmatrix[,8]))
nm$aug <- as.numeric(as.character(newmatrix[,9]))
nm$sep <- as.numeric(as.character(newmatrix[,10]))
nm$oct <- as.numeric(as.character(newmatrix[,11]))
nm$nov <- as.numeric(as.character(newmatrix[,12]))
nm$dec <- as.numeric(as.character(newmatrix[,13]))
nm2 <- data.frame(nm$countyFIPS, nm$jan, nm$feb, nm$mar, nm$apr, nm$may, nm$jun, nm$jul, nm$aug, nm$sep, nm$oct, nm$nov, nm$dec)
colnames(nm2) <- c("FIPS","spei_jan", "spei_feb", "spei_mar", "spei_apr", "spei_may", "spei_jun", "spei_jul", "spei_aug", "spei_sep", "spei_oct", "spei_nov", "spei_dec" )
nm2$FIPS <- str_pad(nm2$FIPS, 5, pad = "0")
newmatrix2 <- matrix(NA, nrow=countylistrows, ncol=2)
for (k in 1:nrow(nm2)) {
newmatrix2[k,1] <- sum(nm2[k,2:13] < -1)
newmatrix2[k,2] <- nm2[k,1]
}
newmatrix2[,1] <- as.numeric(newmatrix2[,1])
newmatrix2[,2] <- as.numeric(newmatrix2[,2])
colnames(newmatrix2) <- c(paste(h, "_days", sep=""), "FIPS")
assign(paste("spei_moderate_drought_", h, sep=""), data.frame(newmatrix2))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raw_mps.R
\name{fetch_mps_addresses_raw}
\alias{fetch_mps_addresses_raw}
\title{Fetch addresses: MPs}
\usage{
fetch_mps_addresses_raw()
}
\description{
Fetch addresses: MPs
}
\keyword{internal}
| /man/fetch_mps_addresses_raw.Rd | permissive | houseofcommonslibrary/clmnis | R | false | true | 272 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raw_mps.R
\name{fetch_mps_addresses_raw}
\alias{fetch_mps_addresses_raw}
\title{Fetch addresses: MPs}
\usage{
fetch_mps_addresses_raw()
}
\description{
Fetch addresses: MPs
}
\keyword{internal}
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Tests around writing named regions in an Excel workbook
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
test.workbook.writeNamedRegion <- function() {
# Create workbooks
wb.xls <- loadWorkbook(rsrc("resources/testWorkbookWriteNamedRegion.xls"), create = TRUE)
wb.xlsx <- loadWorkbook(rsrc("resources/testWorkbookWriteNamedRegion.xlsx"), create = TRUE)
# Check that trying to write an object which cannot be converted to a data.frame
# causes an exception (*.xls)
createName(wb.xls, "test1", "Test1!$C$8")
checkException(writeNamedRegion(wb.xls, search, "test1"))
# Check that trying to write an object which cannot be converted to a data.frame
# causes an exception (*.xlsx)
createName(wb.xlsx, "test1", "Test1!$C$8")
checkException(writeNamedRegion(wb.xlsx, search, "test1"))
# Check that attempting to write to a non-existing name causes an exception (*.xls)
checkException(writeNamedRegion(wb.xls, mtcars, "nameDoesNotExist"))
# Check that attempting to write to a non-existing name causes an exception (*.xlsx)
checkException(writeNamedRegion(wb.xlsx, mtcars, "nameDoesNotExist"))
# Check that attempting to write to a name which refers to a non-existing sheet
# causes an exception (*.xls)
createName(wb.xls, "nope", "NonExistingSheet!A1")
checkException(writeNamedRegion(wb.xls, mtcars, "nope"))
# Check that attempting to write to a name which refers to a non-existing sheet
# causes an exception (*.xlsx)
createName(wb.xlsx, "nope", "NonExistingSheet!A1")
checkException(writeNamedRegion(wb.xlsx, mtcars, "nope"))
# Check that writing an empty data.frame does not cause an error (*.xls)
createSheet(wb.xls, "empty")
createName(wb.xls, "empty1", "empty!A1")
createName(wb.xls, "empty2", "empty!D10")
checkNoException(writeNamedRegion(wb.xls, data.frame(), "empty1"))
checkNoException(writeNamedRegion(wb.xls, data.frame(a = character(0), b = numeric(0)), "empty2"))
# Check that writing an empty data.frame does not cause an error (*.xlsx)
createSheet(wb.xlsx, "empty")
createName(wb.xlsx, "empty1", "empty!A1")
createName(wb.xlsx, "empty2", "empty!D10")
checkNoException(writeNamedRegion(wb.xlsx, data.frame(), "empty1"))
checkNoException(writeNamedRegion(wb.xlsx, data.frame(a = character(0), b = numeric(0)), "empty2"))
}
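# Running this test interactively (sketch; assumes the RUnit package and the
# package-local rsrc() helper used above are available):
# library(RUnit)
# runTestFile("runit.workbook.writeNamedRegion.R",
#             testFuncRegexp = "^test\\.workbook\\.writeNamedRegion$")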
| /inst/unitTests/runit.workbook.writeNamedRegion.R | no_license | harisxue/xlconnect | R | false | false | 3,388 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Tests around writing named regions in an Excel workbook
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
test.workbook.writeNamedRegion <- function() {
# Create workbooks
wb.xls <- loadWorkbook(rsrc("resources/testWorkbookWriteNamedRegion.xls"), create = TRUE)
wb.xlsx <- loadWorkbook(rsrc("resources/testWorkbookWriteNamedRegion.xlsx"), create = TRUE)
# Check that trying to write an object which cannot be converted to a data.frame
# causes an exception (*.xls)
createName(wb.xls, "test1", "Test1!$C$8")
checkException(writeNamedRegion(wb.xls, search, "test1"))
# Check that trying to write an object which cannot be converted to a data.frame
# causes an exception (*.xlsx)
createName(wb.xlsx, "test1", "Test1!$C$8")
checkException(writeNamedRegion(wb.xlsx, search, "test1"))
# Check that attempting to write to a non-existing name causes an exception (*.xls)
checkException(writeNamedRegion(wb.xls, mtcars, "nameDoesNotExist"))
# Check that attempting to write to a non-existing name causes an exception (*.xlsx)
checkException(writeNamedRegion(wb.xlsx, mtcars, "nameDoesNotExist"))
# Check that attempting to write to a name which refers to a non-existing sheet
# causes an exception (*.xls)
createName(wb.xls, "nope", "NonExistingSheet!A1")
checkException(writeNamedRegion(wb.xls, mtcars, "nope"))
# Check that attempting to write to a name which refers to a non-existing sheet
# causes an exception (*.xlsx)
createName(wb.xlsx, "nope", "NonExistingSheet!A1")
checkException(writeNamedRegion(wb.xlsx, mtcars, "nope"))
# Check that writing an empty data.frame does not cause an error (*.xls)
createSheet(wb.xls, "empty")
createName(wb.xls, "empty1", "empty!A1")
createName(wb.xls, "empty2", "empty!D10")
checkNoException(writeNamedRegion(wb.xls, data.frame(), "empty1"))
checkNoException(writeNamedRegion(wb.xls, data.frame(a = character(0), b = numeric(0)), "empty2"))
# Check that writing an empty data.frame does not cause an error (*.xlsx)
createSheet(wb.xlsx, "empty")
createName(wb.xlsx, "empty1", "empty!A1")
createName(wb.xlsx, "empty2", "empty!D10")
checkNoException(writeNamedRegion(wb.xlsx, data.frame(), "empty1"))
checkNoException(writeNamedRegion(wb.xlsx, data.frame(a = character(0), b = numeric(0)), "empty2"))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/check_sol.r
\name{add.failure}
\alias{add.failure}
\title{Used inside tests: adds a failure to an exercise}
\usage{
add.failure(message, ..., ps = get.ps())
}
\arguments{
\item{message}{a longer description shown to the user}
\item{...}{variables that will be rendered into messages that have whiskers}
}
\description{
Used inside tests: adds a failure to an exercise
}
| /man/add.failure.Rd | no_license | harshinamdar/RTutor | R | false | false | 458 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/check_sol.r
\name{add.failure}
\alias{add.failure}
\title{Used inside tests: adds a failure to an exercise}
\usage{
add.failure(message, ..., ps = get.ps())
}
\arguments{
\item{message}{a longer description shown to the user}
\item{...}{variables that will be rendered into messages that have whiskers}
}
\description{
Used inside tests: adds a failure to an exercise
}
|
library(lme4)
require(optimx)
source(system.file("utils", "allFit.R", package = "lme4"))
# by subject random slope
# subject by type and condition random slopes did not converge
model_mono_id4 <- glmer(Score ~ Critical + Type + Agegroup + (1 + Type | ID), family = "binomial", optimizer = "bobyqa", control=glmerControl(optCtrl=list(maxfun=100000)), data = Monoling)
model_mono_id4_all <- allFit(model_mono_id4)
summary(model_mono_id4_all)
model_mono_id4_all_maineffects <- capture.output(summary(model_mono_id4_all$bobyqa))
write(model_mono_id4_all_maineffects, "model_mono_id4.txt") | /Bysubject_analysis.R | no_license | elspethwilson/implicature-development | R | false | false | 601 | r | library(lme4)
require(optimx)
source(system.file("utils", "allFit.R", package = "lme4"))
# by subject random slope
# subject by type and condition random slopes did not converge
model_mono_id4 <- glmer(Score ~ Critical + Type + Agegroup + (1 + Type | ID), family = "binomial", optimizer = "bobyqa", control=glmerControl(optCtrl=list(maxfun=100000)), data = Monoling)
model_mono_id4_all <- allFit(model_mono_id4)
summary(model_mono_id4_all)
model_mono_id4_all_maineffects <- capture.output(summary(model_mono_id4_all$bobyqa))
write(model_mono_id4_all_maineffects, "model_mono_id4.txt") |
/BAandDM/HW01/Wine.R | no_license | hy2yang/undergraduate | R | false | false | 553 | r | ||
data("m.deciles08")
#This tells you that the data series is in a time series format
is.ts(m.deciles08)
## [1] FALSE
#We change the data to time series format
m.deciles08 <- ts(m.deciles08)  # conversion step implied by the comment above; start/frequency left at defaults
#STEP 2:
# Now that we know that the data is time series we should do some data exploration. Functions print() and summary() are used to get the overview of the data. The start() and end() functions return the time index of the first and last observations, respectively. The time() function calculates a vector of time indices, with one element for each time index on which the series was observed. Finally, the frequency() function returns the number of observations per unit time.
#This will give us the structure of our data
print(m.deciles08)
summary(m.deciles08)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 104.0 180.0 265.5 280.3 360.5 622.0
#Starting index, end index
start(m.deciles08)
## [1] 1949 1
end(m.deciles08)
time(m.deciles08)
frequency(m.deciles08)
#Step 3:
# It is essential to analyze the trends prior to building any kind of time series model. The details we are interested in pertains to any kind of trend, seasonality or random behaviour in the series. what better way to do so than visualize the Time Series.
#This will plot the time series
ts.plot(m.deciles08, xlab="Time", ylab="m.deciles08", main="Monthly m.deciles08 series")
# This will fit in a line
abline(reg=lm(m.deciles08~time(m.deciles08)))
#Auto correlation matrixx
acf(m.deciles08)
#Fit the AR model to the dataset
AR <- arima(m.deciles08, order = c(1,0,0))  # AR(1)
print(AR)
#Plotting the AR model
ts.plot(m.deciles08)
#Fitting the model
AR_fit <- m.deciles08 - residuals(AR)
points(AR_fit, type = "l", col = 2, lty = 2)
#Using predict() to make a 1-step forecast
predict_AR <- predict(AR)
#Obtaining the 1-step forecast using $pred[1]
predict_AR$pred[1]
#ALternatively Using predict to make 1-step through 10-step forecasts
predict(AR, n.ahead = 10)
#plotting the data series plus the forecast and 95% prediction intervals
ts.plot(m.deciles08, xlim = c(1949, 1961))
AR_forecast <- predict(AR, n.ahead = 10)$pred
AR_forecast_se <- predict(AR, n.ahead = 10)$se
points(AR_forecast, type = "l", col = 2)
points(AR_forecast - 2*AR_forecast_se, type = "l", col = 2, lty = 2)
points(AR_forecast + 2*AR_forecast_se, type = "l", col = 2, lty = 2)
#Fitting the MA model to the dataset
MA <- arima(m.deciles08, order = c(0,0,1))
print(MA)
#plotting the series along with the MA fitted values
ts.plot(m.deciles08)
MA_fit <- m.deciles08 - resid(MA)
points(MA_fit, type = "l", col = 2, lty = 2)
#Making a 1-step forecast based on MA
predict_MA <- predict(MA)
#Obtaining the 1-step forecast using $pred[1]
predict_MA$pred[1]
#Alternately Making a 1-step through 10-step forecast based on MA
predict(MA,n.ahead=10)
#Plotting the m.deciles08 series plus the forecast and 95% prediction intervals
ts.plot(m.deciles08, xlim = c(1949, 1961))
MA_forecasts <- predict(MA, n.ahead = 10)$pred
MA_forecast_se <- predict(MA, n.ahead = 10)$se
points(MA_forecasts, type = "l", col = 2)
points(MA_forecasts - 2*MA_forecast_se, type = "l", col = 2, lty = 2)
points(MA_forecasts + 2*MA_forecast_se, type = "l", col = 2, lty = 2)
#Choosing AR or MA: Exploiting ACF plots
# Find correlation between AR_fit and MA_fit
cor(AR_fit, MA_fit)
# Find AIC of AR
AIC(AR)
# Find AIC of MA
AIC(MA)
# Find BIC of AR
BIC(AR)
# Find BIC of MA
BIC(MA) | /Model/ARMACode.R | no_license | 97joseph/Financial_Economics | R | false | false | 3,423 | r | data("m.deciles08")
#This tells you that the data series is in a time series format
is.ts(m.deciles08)
## [1] FALSE
#We change the data to time series format
m.deciles08 <- ts(m.deciles08)  # conversion step implied by the comment above; start/frequency left at defaults
#STEP 2:
# Now that we know that the data is time series we should do some data exploration. Functions print() and summary() are used to get the overview of the data. The start() and end() functions return the time index of the first and last observations, respectively. The time() function calculates a vector of time indices, with one element for each time index on which the series was observed. Finally, the frequency() function returns the number of observations per unit time.
#This will give us the structure of our data
print(m.deciles08)
summary(m.deciles08)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 104.0 180.0 265.5 280.3 360.5 622.0
#Starting index, end index
start(m.deciles08)
## [1] 1949 1
end(m.deciles08)
time(m.deciles08)
frequency(m.deciles08)
#Step 3:
# It is essential to analyze the trends prior to building any kind of time series model. The details we are interested in pertains to any kind of trend, seasonality or random behaviour in the series. what better way to do so than visualize the Time Series.
#This will plot the time series
ts.plot(m.deciles08, xlab="Time", ylab="m.deciles08", main="Monthly m.deciles08 series")
# This will fit in a line
abline(reg=lm(m.deciles08~time(m.deciles08)))
#Auto correlation matrixx
acf(m.deciles08)
#Fit the AR model to the dataset
AR <- arima(m.deciles08, order = c(1,0,0))  # AR(1)
print(AR)
#Plotting the AR model
ts.plot(m.deciles08)
#Fitting the model
AR_fit <- m.deciles08 - residuals(AR)
points(AR_fit, type = "l", col = 2, lty = 2)
#Using predict() to make a 1-step forecast
predict_AR <- predict(AR)
#Obtaining the 1-step forecast using $pred[1]
predict_AR$pred[1]
#ALternatively Using predict to make 1-step through 10-step forecasts
predict(AR, n.ahead = 10)
#plotting the data series plus the forecast and 95% prediction intervals
ts.plot(m.deciles08, xlim = c(1949, 1961))
AR_forecast <- predict(AR, n.ahead = 10)$pred
AR_forecast_se <- predict(AR, n.ahead = 10)$se
points(AR_forecast, type = "l", col = 2)
points(AR_forecast - 2*AR_forecast_se, type = "l", col = 2, lty = 2)
points(AR_forecast + 2*AR_forecast_se, type = "l", col = 2, lty = 2)
#Fitting the MA model to the dataset
MA <- arima(m.deciles08, order = c(0,0,1))
print(MA)
#plotting the series along with the MA fitted values
ts.plot(m.deciles08)
MA_fit <- m.deciles08 - resid(MA)
points(MA_fit, type = "l", col = 2, lty = 2)
#Making a 1-step forecast based on MA
predict_MA <- predict(MA)
#Obtaining the 1-step forecast using $pred[1]
predict_MA$pred[1]
#Alternately Making a 1-step through 10-step forecast based on MA
predict(MA,n.ahead=10)
#Plotting the m.deciles08 series plus the forecast and 95% prediction intervals
ts.plot(m.deciles08, xlim = c(1949, 1961))
MA_forecasts <- predict(MA, n.ahead = 10)$pred
MA_forecast_se <- predict(MA, n.ahead = 10)$se
points(MA_forecasts, type = "l", col = 2)
points(MA_forecasts - 2*MA_forecast_se, type = "l", col = 2, lty = 2)
points(MA_forecasts + 2*MA_forecast_se, type = "l", col = 2, lty = 2)
#Choosing AR or MA: Exploiting ACF plots
# Find correlation between AR_fit and MA_fit
cor(AR_fit, MA_fit)
# Find AIC of AR
AIC(AR)
# Find AIC of MA
AIC(MA)
# Find BIC of AR
BIC(AR)
# Find BIC of MA
BIC(MA) |
# R
library(rCUR)
library(scatterplot3d)
p.vox=5765
p.sub=233
p.Af="../233subjectsXvox.4dfp.img"
Av=double(p.vox*p.sub)
Av=readBin(p.Af,double(),p.vox*p.sub,4)
A=matrix(Av,nrow=p.vox,ncol=p.sub,dimnames=list(NULL,c(rep('HRP',36),rep('HRN',131),rep('LRN',66))))
PCA01=prcomp(A)
PCA01.rot=PCA01$rotation[,c(1,2,3)]
group=rownames(PCA01.rot)
pchs=ifelse(group=='HRP',16,0)
pchs=ifelse(group=='HRN',17,pchs)
pchs=ifelse(group=='LRN',18,pchs)
cols=ifelse(group=='HRP','blue',0)
cols=ifelse(group=='HRN','green',cols)
cols=ifelse(group=='LRN','red',cols)
dev.new()
scatterplot3d(PCA01.rot,pch=pchs,color=cols)
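# Optional: label the subject groups on the 3D PCA plot just drawn
# (colours and symbols as assigned above).
legend("topright", legend = c("HRP", "HRN", "LRN"),
       pch = c(16, 17, 18), col = c("blue", "green", "red"), bty = "n")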
| /R/170615cur.r | no_license | kapspi/fidl | R | false | false | 607 | r | # R
library(rCUR)
library(scatterplot3d)
p.vox=5765
p.sub=233
p.Af="../233subjectsXvox.4dfp.img"
Av=double(p.vox*p.sub)
Av=readBin(p.Af,double(),p.vox*p.sub,4)
A=matrix(Av,nrow=p.vox,ncol=p.sub,dimnames=list(NULL,c(rep('HRP',36),rep('HRN',131),rep('LRN',66))))
PCA01=prcomp(A)
PCA01.rot=PCA01$rotation[,c(1,2,3)]
group=rownames(PCA01.rot)
pchs=ifelse(group=='HRP',16,0)
pchs=ifelse(group=='HRN',17,pchs)
pchs=ifelse(group=='LRN',18,pchs)
cols=ifelse(group=='HRP','blue',0)
cols=ifelse(group=='HRN','green',cols)
cols=ifelse(group=='LRN','red',cols)
dev.new()
scatterplot3d(PCA01.rot,pch=pchs,color=cols)
|
bulma_page <- function(..., title = NULL) {
tagList(
tags$head(
tags$meta(charset = "utf-8"),
tags$meta(
name = "viewport",
content = "width=device-width, initial-scale=1"
),
tags$title(title)
),
add_bulma_deps(tags$body(...))
)
}
bulma_button <- function(inputId, label, color = NULL, icon = NULL) {
button_cl <- "button action-button"
if (!is.null(color)) button_cl <- paste0(button_cl, " is-", color)
tags$button(
id = inputId,
class = button_cl,
icon,
label
)
}
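# Example UI built from these helpers (sketch; assumes shiny/htmltools are
# attached and that add_bulma_deps() is defined elsewhere in the package):
# ui <- bulma_page(
#   title = "Demo",
#   bulma_button("go", "Run", color = "primary")
# )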
| /R/tags.R | permissive | bright-spark/Unleash-Shiny-Exercise-2 | R | false | false | 550 | r | bulma_page <- function(..., title = NULL) {
tagList(
tags$head(
tags$meta(charset = "utf-8"),
tags$meta(
name = "viewport",
content = "width=device-width, initial-scale=1"
),
tags$title(title)
),
add_bulma_deps(tags$body(...))
)
}
bulma_button <- function(inputId, label, color = NULL, icon = NULL) {
button_cl <- "button action-button"
if (!is.null(color)) button_cl <- paste0(button_cl, " is-", color)
tags$button(
id = inputId,
class = button_cl,
icon,
label
)
}
|
get_dirichlet <- function(data){
prior_para <- c(1,1,1) # set the prior to dirichlet (1,1,1) in the order of -1,0,1
parameter_matrix <- matrix(ncol=3)
node_param_list <- list()
colnames(parameter_matrix)<- c("alpha_inhibit","alpha_dormant","alpha_active")
node_param<-matrix(ncol=3)
parent_matrix <-NULL
Name_Vector <- NULL
Nodes <-ncol(data)
for (iter in 1: Nodes){
if (iter >=1 & iter <=5) {# Nodes A,B,C,D,E
parent_matrix <-NULL
}
else if(iter ==6){ # Node F
parent_matrix<-cbind(data[,c("A","B","C","D","E")])
}
else if (iter == 7 | iter ==8){ # Nodes G and H
parent_matrix<-cbind(data[,c("D","E","F")])
}
else if (iter ==9 | iter ==10){ # Nodes I and J
parent_matrix <- cbind(data[,c("G","H")])
}
else if (iter == 11 | iter == 12) { # Nodes K and L
parent_matrix <- cbind(data[,c("I","J")])
}
else if (iter == 13 ) { # Node M
parent_matrix <- cbind(data[,c("K","L")])
}
else if (iter == 14){ # Node N
parent_matrix <- cbind(data[,"M"])
}
node_param<-rbind( get_param(prior=prior_para, node=data[,iter],parent=parent_matrix))
parameter_matrix<-rbind(parameter_matrix,node_param)
Name_Vector <- c(Name_Vector,rep(colnames(data)[iter],dim(node_param)[1]))
}
parameter_matrix<-as.data.frame(parameter_matrix)
parameter_matrix<-parameter_matrix[-1,]
parameter_matrix$Names <- Name_Vector
return(parameter_matrix)
} | /get_dirichlet.R | no_license | adilahiri/Lysine_Regulators | R | false | false | 1,467 | r | get_dirichlet <- function(data){
prior_para <- c(1,1,1) # set the prior to dirichlet (1,1,1) in the order of -1,0,1
parameter_matrix <- matrix(ncol=3)
node_param_list <- list()
colnames(parameter_matrix)<- c("alpha_inhibit","alpha_dormant","alpha_active")
node_param<-matrix(ncol=3)
parent_matrix <-NULL
Name_Vector <- NULL
Nodes <-ncol(data)
for (iter in 1: Nodes){
if (iter >=1 & iter <=5) {# Nodes A,B,C,D,E
parent_matrix <-NULL
}
else if(iter ==6){ # Node F
parent_matrix<-cbind(data[,c("A","B","C","D","E")])
}
else if (iter == 7 | iter ==8){ # Nodes G and H
parent_matrix<-cbind(data[,c("D","E","F")])
}
else if (iter ==9 | iter ==10){ # Nodes I and J
parent_matrix <- cbind(data[,c("G","H")])
}
else if (iter == 11 | iter == 12) { # Nodes K and L
parent_matrix <- cbind(data[,c("I","J")])
}
else if (iter == 13 ) { # Node M
parent_matrix <- cbind(data[,c("K","L")])
}
else if (iter == 14){ # Node N
parent_matrix <- cbind(data[,"M"])
}
node_param<-rbind( get_param(prior=prior_para, node=data[,iter],parent=parent_matrix))
parameter_matrix<-rbind(parameter_matrix,node_param)
Name_Vector <- c(Name_Vector,rep(colnames(data)[iter],dim(node_param)[1]))
}
parameter_matrix<-as.data.frame(parameter_matrix)
parameter_matrix<-parameter_matrix[-1,]
parameter_matrix$Names <- Name_Vector
return(parameter_matrix)
} |
stepdown.mams <- function(nMat = matrix(c(10, 20), nrow=2, ncol=4), alpha.star = c(0.01, 0.025), lb = 0, selection = "all.promising"){
# checking input parameters
if (!all(diff(nMat) >= 0)) {stop("total sample size per arm cannot decrease between stages.")}
J <- dim(nMat)[1]
K <- dim(nMat)[2] - 1
if ((J != 2) && (J != 3)) {stop("number of stages must be 2 or 3")}
if (K < 2) {stop("must have at least two experimental treatments")}
if (length(alpha.star) != J) {stop("length of error spending vector must be same as number of stages")}
if (!all(diff(alpha.star) >= 0)) {stop("cumulative familywise error must increase.")}
if (length(lb) != J - 1) {stop("lower boundary must be specified at all analysis points except the last")}
match.arg(selection,c("all.promising","select.best"))
get.hyp <- function(n){ # find the nth intersection hypothesis (positions of 1s in binary n)
indlength = ceiling(log(n)/log(2)+.0000001)
ind = rep(0,indlength)
newn=n
for (h in seq(1,indlength)){
ind[h] = (newn/(2^(h-1))) %% 2
newn = newn - ind[h]*2^(h-1)
}
seq(1,indlength)[ind==1]
}
create.block <- function(control.ratios = 1:2, active.ratios = matrix(1:2, 2, 3)){ # for argument c(i,j) this gives covariance between statistics in stage i with statistics in stage j
K <- dim(active.ratios)[2]
block <- matrix(NA, K, K)
for(i in 1:K){
block[i, i] <- sqrt(active.ratios[1, i] * control.ratios[1] * (active.ratios[2, i] + control.ratios[2]) / (active.ratios[1, i] + control.ratios[1]) / active.ratios[2, i] / control.ratios[2])
}
for (i in 2:K){
for (j in 1:(i - 1)){
block[i, j] <- sqrt(active.ratios[1, i] * control.ratios[1] * active.ratios[2, j] / (active.ratios[1, i] + control.ratios[1]) / (active.ratios[2, j] + control.ratios[2]) / control.ratios[2])
block[j, i] <- sqrt(active.ratios[1, j] * control.ratios[1] * active.ratios[2, i] / (active.ratios[1, j] + control.ratios[1]) / (active.ratios[2, i] + control.ratios[2]) / control.ratios[2])
}
}
block
}
create.cov.matrix <- function(control.ratios = 1:2, active.ratios = matrix(1:2, 2, 3)){ # create variance-covariance matrix of the test statistics
J <- dim(active.ratios)[1]
K <- dim(active.ratios)[2]
cov.matrix <- matrix(NA, J * K, J * K)
for (i in 1:J){
for (j in i:J){
cov.matrix[((i - 1) * K + 1):(i * K), ((j - 1) * K + 1):(j * K)] <- create.block(control.ratios[c(i, j)], active.ratios[c(i, j), ])
cov.matrix[((j - 1) * K + 1):(j * K), ((i - 1) * K + 1):(i * K)] <- t(cov.matrix[((i - 1) * K + 1):(i * K), ((j - 1) * K + 1):(j * K)])
}
}
cov.matrix
}
get.path.prob <- function(surviving.subset1, surviving.subset2 = NULL, cut.off, treatments, cov.matrix, lb, upper.boundary, K, stage){ # find the probability that no test statistic crosses the upper boundary + only treatments in surviving_subsetj reach the jth stage
treatments2 <- treatments[surviving.subset1]
if (stage == 2){
lower <- c(rep(-Inf, length(treatments)), rep(-Inf, length(treatments2)))
lower[surviving.subset1] <- lb[1]
upper <- c(rep(lb[1], length(treatments)), rep(cut.off, length(treatments2)))
upper[surviving.subset1] <- upper.boundary[1]
return(pmvnorm(lower = lower, upper = upper, sigma = cov.matrix[c(treatments, K + treatments2), c(treatments, K + treatments2)])[1])
}
treatments3 <- treatments2[surviving.subset2]
lower <- c(rep(-Inf, length(treatments)), rep(-Inf, length(treatments2)), rep(-Inf, length(treatments3)))
lower[surviving.subset1] <- lb[1]
lower[length(treatments) + surviving.subset2] <- lb[2]
upper <- c(rep(lb[1], length(treatments)), rep(lb[2], length(treatments2)), rep(cut.off, length(treatments3)))
upper[surviving.subset1] <- upper.boundary[1]
upper[length(treatments) + surviving.subset2] <- upper.boundary[2]
pmvnorm(lower = lower, upper = upper, sigma = cov.matrix[c(treatments, K + treatments2, 2 * K + treatments3), c(treatments, K + treatments2, 2 * K + treatments3)])[1]
}
rejection.paths <- function(selected.treatment, cut.off, treatments, cov.matrix, lb, upper.boundary, K, stage){ # for the "select.best" method, find the probability that "select.treatment" is selected and subsequently crosses the upper boundary
contrast <- diag(-1, K + stage - 1)
contrast[1:K, selected.treatment] <- 1
for (i in 1:(stage - 1)) contrast[K + i, K + i] <- 1
bar.cov.matrix <- contrast %*% cov.matrix[c(1:K, 1:(stage - 1) * K + selected.treatment), c(1:K, 1:(stage - 1) * K + selected.treatment)] %*% t(contrast)
lower <- c(rep(0, length(treatments)), cut.off)
if (stage > 2) lower <- c(rep(0, length(treatments)), lb[2:(stage - 1)], cut.off)
lower[which(treatments == selected.treatment)] <- lb[1]
upper <- c(rep(Inf, length(treatments)), Inf)
if (stage > 2) upper <- c(rep(Inf, length(treatments)), upper.boundary[2:(stage - 1)], Inf)
upper[which(treatments == selected.treatment)] <- upper.boundary[1]
pmvnorm(lower = lower, upper = upper, sigma = bar.cov.matrix[c(treatments, K + 1:(stage - 1)), c(treatments, K + 1:(stage - 1))])[1]
}
excess.alpha <- function(cut.off, alpha.star, treatments, cov.matrix, lb, upper.boundary, selection, K, stage){ # for "all.promising" rule, this gives the cumulative typeI error for 'stage' stages
# for "select.best" rule, this gives the Type I error spent at the 'stage'th stage
if (stage == 1) return(1 - alpha.star[1] - pmvnorm(lower = rep(-Inf, length(treatments)), upper = rep(cut.off, length(treatments)), sigma = cov.matrix[treatments, treatments])[1])
if (selection == "select.best") return(alpha.star[stage] - alpha.star[stage - 1] - sum(unlist(lapply(treatments, rejection.paths, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage)))) # any of 'treatments' could be selected, so we add all these probabilities
if (stage == 2){
surviving.subsets <- c(list(numeric(0)), lapply(as.list(1:(2 ^ length(treatments) - 1)), get.hyp)) # list all possible subsets of surviving treatments after the first stage
return(1 - alpha.star[2] - sum(unlist(lapply(surviving.subsets, get.path.prob, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage))))
}
surviving.subsets1 <- c(list(numeric(0)), lapply(as.list(1:(2 ^ length(treatments) - 1)), get.hyp)) # all possible subsets of surviving treatments after the first stage
    surviving.subsets2 <- c(list(list(numeric(0))), lapply(surviving.subsets1[-1], function(x) c(list(numeric(0)), lapply(as.list(1:(2 ^ length(x) - 1)), get.hyp)))) # for each possible subset of surviving subsets after stage 1, list the possible subsets still surviving after stage 2
1 - alpha.star[3] - sum(unlist(Map(function(x, y) sum(unlist(lapply(y, get.path.prob, surviving.subset1 = x, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage))), surviving.subsets1, surviving.subsets2)))
}
# get sample size ratios
R <- nMat[, -1] / nMat[1, 1]
r0 <- nMat[, 1] / nMat[1, 1]
cov.matrix <- create.cov.matrix(r0, R)
l <- u <- as.list(1:(2 ^ K - 1))
alpha.star <- rep(list(alpha.star), 2 ^ K - 1)
for (i in 1:(2 ^ K - 1)){
names(u)[i] <- paste("U_{",paste(get.hyp(i), collapse = " "),"}",sep="")
names(l)[i] <- paste("L_{",paste(get.hyp(i), collapse = " "),"}",sep="")
names(alpha.star)[i] <- paste("alpha.star.{",paste(get.hyp(i), collapse = " "),"}",sep="")
for (j in 1:J){
try(new.u <- uniroot(excess.alpha, c(0, 10), alpha.star = alpha.star[[i]], treatments = get.hyp(i), cov.matrix = cov.matrix, lb = lb, upper.boundary = u[[i]], selection = selection, K = K, stage = j)$root, silent = TRUE)
if (is.null(new.u)) {stop("upper boundary not between 0 and 10")}
u[[i]][j] <- round(new.u, 2)
}
l[[i]] <- c(lb, u[[i]][J])
}
res <- NULL
res$l <- l
res$u <- u
res$sample.sizes <- nMat
res$K <- K
res$J <- J
res$alpha.star <- alpha.star
res$selection <- selection
res$zscores <- NULL
res$selected.trts <- list(1:K)
class(res) <- "MAMS.stepdown"
return(res)
}
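# Example call (sketch; mirrors the argument defaults above and relies on
# mvtnorm::pmvnorm() being available):
# sd.bounds <- stepdown.mams(nMat = matrix(c(10, 20), nrow = 2, ncol = 4),
#                            alpha.star = c(0.01, 0.025), lb = 0,
#                            selection = "select.best")
# sd.bounds$u   # stage-wise upper boundaries for each intersection hypothesis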
| /R/stepdown.mams.R | no_license | cran/MAMS | R | false | false | 8,614 | r | stepdown.mams <- function(nMat = matrix(c(10, 20), nrow=2, ncol=4), alpha.star = c(0.01, 0.025), lb = 0, selection = "all.promising"){
# checking input parameters
if (!all(diff(nMat) >= 0)) {stop("total sample size per arm cannot decrease between stages.")}
J <- dim(nMat)[1]
K <- dim(nMat)[2] - 1
if ((J != 2) && (J != 3)) {stop("number of stages must be 2 or 3")}
if (K < 2) {stop("must have at least two experimental treatments")}
if (length(alpha.star) != J) {stop("length of error spending vector must be same as number of stages")}
if (!all(diff(alpha.star) >= 0)) {stop("cumulative familywise error must increase.")}
if (length(lb) != J - 1) {stop("lower boundary must be specified at all analysis points except the last")}
match.arg(selection,c("all.promising","select.best"))
get.hyp <- function(n){ # find the nth intersection hypothesis (positions of 1s in binary n)
indlength = ceiling(log(n)/log(2)+.0000001)
ind = rep(0,indlength)
newn=n
for (h in seq(1,indlength)){
ind[h] = (newn/(2^(h-1))) %% 2
newn = newn - ind[h]*2^(h-1)
}
seq(1,indlength)[ind==1]
}
create.block <- function(control.ratios = 1:2, active.ratios = matrix(1:2, 2, 3)){ # for argument c(i,j) this gives covariance between statistics in stage i with statistics in stage j
K <- dim(active.ratios)[2]
block <- matrix(NA, K, K)
for(i in 1:K){
block[i, i] <- sqrt(active.ratios[1, i] * control.ratios[1] * (active.ratios[2, i] + control.ratios[2]) / (active.ratios[1, i] + control.ratios[1]) / active.ratios[2, i] / control.ratios[2])
}
for (i in 2:K){
for (j in 1:(i - 1)){
block[i, j] <- sqrt(active.ratios[1, i] * control.ratios[1] * active.ratios[2, j] / (active.ratios[1, i] + control.ratios[1]) / (active.ratios[2, j] + control.ratios[2]) / control.ratios[2])
block[j, i] <- sqrt(active.ratios[1, j] * control.ratios[1] * active.ratios[2, i] / (active.ratios[1, j] + control.ratios[1]) / (active.ratios[2, i] + control.ratios[2]) / control.ratios[2])
}
}
block
}
create.cov.matrix <- function(control.ratios = 1:2, active.ratios = matrix(1:2, 2, 3)){ # create variance-covariance matrix of the test statistics
J <- dim(active.ratios)[1]
K <- dim(active.ratios)[2]
cov.matrix <- matrix(NA, J * K, J * K)
for (i in 1:J){
for (j in i:J){
cov.matrix[((i - 1) * K + 1):(i * K), ((j - 1) * K + 1):(j * K)] <- create.block(control.ratios[c(i, j)], active.ratios[c(i, j), ])
cov.matrix[((j - 1) * K + 1):(j * K), ((i - 1) * K + 1):(i * K)] <- t(cov.matrix[((i - 1) * K + 1):(i * K), ((j - 1) * K + 1):(j * K)])
}
}
cov.matrix
}
get.path.prob <- function(surviving.subset1, surviving.subset2 = NULL, cut.off, treatments, cov.matrix, lb, upper.boundary, K, stage){ # find the probability that no test statistic crosses the upper boundary + only treatments in surviving_subsetj reach the jth stage
treatments2 <- treatments[surviving.subset1]
if (stage == 2){
lower <- c(rep(-Inf, length(treatments)), rep(-Inf, length(treatments2)))
lower[surviving.subset1] <- lb[1]
upper <- c(rep(lb[1], length(treatments)), rep(cut.off, length(treatments2)))
upper[surviving.subset1] <- upper.boundary[1]
return(pmvnorm(lower = lower, upper = upper, sigma = cov.matrix[c(treatments, K + treatments2), c(treatments, K + treatments2)])[1])
}
treatments3 <- treatments2[surviving.subset2]
lower <- c(rep(-Inf, length(treatments)), rep(-Inf, length(treatments2)), rep(-Inf, length(treatments3)))
lower[surviving.subset1] <- lb[1]
lower[length(treatments) + surviving.subset2] <- lb[2]
upper <- c(rep(lb[1], length(treatments)), rep(lb[2], length(treatments2)), rep(cut.off, length(treatments3)))
upper[surviving.subset1] <- upper.boundary[1]
upper[length(treatments) + surviving.subset2] <- upper.boundary[2]
pmvnorm(lower = lower, upper = upper, sigma = cov.matrix[c(treatments, K + treatments2, 2 * K + treatments3), c(treatments, K + treatments2, 2 * K + treatments3)])[1]
}
rejection.paths <- function(selected.treatment, cut.off, treatments, cov.matrix, lb, upper.boundary, K, stage){ # for the "select.best" method, find the probability that "select.treatment" is selected and subsequently crosses the upper boundary
contrast <- diag(-1, K + stage - 1)
contrast[1:K, selected.treatment] <- 1
for (i in 1:(stage - 1)) contrast[K + i, K + i] <- 1
bar.cov.matrix <- contrast %*% cov.matrix[c(1:K, 1:(stage - 1) * K + selected.treatment), c(1:K, 1:(stage - 1) * K + selected.treatment)] %*% t(contrast)
lower <- c(rep(0, length(treatments)), cut.off)
if (stage > 2) lower <- c(rep(0, length(treatments)), lb[2:(stage - 1)], cut.off)
lower[which(treatments == selected.treatment)] <- lb[1]
upper <- c(rep(Inf, length(treatments)), Inf)
if (stage > 2) upper <- c(rep(Inf, length(treatments)), upper.boundary[2:(stage - 1)], Inf)
upper[which(treatments == selected.treatment)] <- upper.boundary[1]
pmvnorm(lower = lower, upper = upper, sigma = bar.cov.matrix[c(treatments, K + 1:(stage - 1)), c(treatments, K + 1:(stage - 1))])[1]
}
excess.alpha <- function(cut.off, alpha.star, treatments, cov.matrix, lb, upper.boundary, selection, K, stage){ # for "all.promising" rule, this gives the cumulative typeI error for 'stage' stages
# for "select.best" rule, this gives the Type I error spent at the 'stage'th stage
if (stage == 1) return(1 - alpha.star[1] - pmvnorm(lower = rep(-Inf, length(treatments)), upper = rep(cut.off, length(treatments)), sigma = cov.matrix[treatments, treatments])[1])
if (selection == "select.best") return(alpha.star[stage] - alpha.star[stage - 1] - sum(unlist(lapply(treatments, rejection.paths, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage)))) # any of 'treatments' could be selected, so we add all these probabilities
if (stage == 2){
surviving.subsets <- c(list(numeric(0)), lapply(as.list(1:(2 ^ length(treatments) - 1)), get.hyp)) # list all possible subsets of surviving treatments after the first stage
return(1 - alpha.star[2] - sum(unlist(lapply(surviving.subsets, get.path.prob, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage))))
}
surviving.subsets1 <- c(list(numeric(0)), lapply(as.list(1:(2 ^ length(treatments) - 1)), get.hyp)) # all possible subsets of surviving treatments after the first stage
    surviving.subsets2 <- c(list(list(numeric(0))), lapply(surviving.subsets1[-1], function(x) c(list(numeric(0)), lapply(as.list(1:(2 ^ length(x) - 1)), get.hyp)))) # for each possible subset of surviving subsets after stage 1, list the possible subsets still surviving after stage 2
1 - alpha.star[3] - sum(unlist(Map(function(x, y) sum(unlist(lapply(y, get.path.prob, surviving.subset1 = x, cut.off = cut.off, treatments = treatments, cov.matrix = cov.matrix, lb = lb, upper.boundary = upper.boundary, K = K, stage = stage))), surviving.subsets1, surviving.subsets2)))
}
# get sample size ratios
R <- nMat[, -1] / nMat[1, 1]
r0 <- nMat[, 1] / nMat[1, 1]
cov.matrix <- create.cov.matrix(r0, R)
l <- u <- as.list(1:(2 ^ K - 1))
alpha.star <- rep(list(alpha.star), 2 ^ K - 1)
for (i in 1:(2 ^ K - 1)){
names(u)[i] <- paste("U_{",paste(get.hyp(i), collapse = " "),"}",sep="")
names(l)[i] <- paste("L_{",paste(get.hyp(i), collapse = " "),"}",sep="")
names(alpha.star)[i] <- paste("alpha.star.{",paste(get.hyp(i), collapse = " "),"}",sep="")
for (j in 1:J){
try(new.u <- uniroot(excess.alpha, c(0, 10), alpha.star = alpha.star[[i]], treatments = get.hyp(i), cov.matrix = cov.matrix, lb = lb, upper.boundary = u[[i]], selection = selection, K = K, stage = j)$root, silent = TRUE)
if (is.null(new.u)) {stop("upper boundary not between 0 and 10")}
u[[i]][j] <- round(new.u, 2)
}
l[[i]] <- c(lb, u[[i]][J])
}
res <- NULL
res$l <- l
res$u <- u
res$sample.sizes <- nMat
res$K <- K
res$J <- J
res$alpha.star <- alpha.star
res$selection <- selection
res$zscores <- NULL
res$selected.trts <- list(1:K)
class(res) <- "MAMS.stepdown"
return(res)
}
|
# Load Data
training <- read.csv("pml-training.csv")
testing <- read.csv("pml-testing.csv")
# Process Training Data for Analysis
training$X <- NULL
training$user_name <- as.factor(training$user_name)
# The summary-statistic columns below are read in as text/factor; coerce them all
# to numeric in one pass (the same column set is reused for the test data below)
numeric_cols <- c(
  "kurtosis_roll_belt", "kurtosis_picth_belt", "skewness_roll_belt", "skewness_roll_belt.1",
  "max_yaw_belt", "min_yaw_belt", "amplitude_yaw_belt",
  "kurtosis_roll_arm", "kurtosis_picth_arm", "kurtosis_yaw_arm",
  "skewness_roll_arm", "skewness_pitch_arm", "skewness_yaw_arm",
  "kurtosis_roll_dumbbell", "kurtosis_picth_dumbbell", "skewness_roll_dumbbell",
  "skewness_pitch_dumbbell", "kurtosis_yaw_belt", "skewness_yaw_belt",
  "kurtosis_yaw_dumbbell", "skewness_yaw_dumbbell", "max_yaw_dumbbell",
  "min_yaw_dumbbell", "amplitude_yaw_dumbbell",
  "kurtosis_roll_forearm", "kurtosis_picth_forearm", "kurtosis_yaw_forearm",
  "skewness_roll_forearm", "skewness_pitch_forearm", "skewness_yaw_forearm",
  "max_yaw_forearm", "min_yaw_forearm", "amplitude_yaw_forearm"
)
training[numeric_cols] <- lapply(training[numeric_cols], as.numeric)
# Apply the Same Processing to Test Data
testing$X <- NULL
testing$user_name <- as.factor(testing$user_name)
testing$kurtosis_roll_belt <- as.numeric(testing$kurtosis_roll_belt)
testing$kurtosis_picth_belt <- as.numeric(testing$kurtosis_picth_belt)
testing$skewness_roll_belt <- as.numeric(testing$skewness_roll_belt)
testing$skewness_roll_belt.1 <- as.numeric(testing$skewness_roll_belt.1)
testing$max_yaw_belt <- as.numeric(testing$max_yaw_belt )
testing$min_yaw_belt <- as.numeric(testing$min_yaw_belt)
testing$amplitude_yaw_belt <- as.numeric(testing$amplitude_yaw_belt)
testing$kurtosis_roll_arm <- as.numeric(testing$kurtosis_roll_arm)
testing$kurtosis_picth_arm <- as.numeric(testing$kurtosis_picth_arm)
testing$kurtosis_yaw_arm <- as.numeric(testing$kurtosis_yaw_arm)
testing$skewness_roll_arm <- as.numeric(testing$skewness_roll_arm)
testing$skewness_pitch_arm <- as.numeric(testing$skewness_pitch_arm)
testing$skewness_yaw_arm <- as.numeric(testing$skewness_yaw_arm)
testing$kurtosis_yaw_arm <- as.numeric(testing$kurtosis_yaw_arm)
testing$kurtosis_roll_dumbbell <- as.numeric(testing$kurtosis_roll_dumbbell)
testing$kurtosis_picth_dumbbell <- as.numeric(testing$kurtosis_picth_dumbbell)
testing$skewness_roll_dumbbell <- as.numeric(testing$skewness_roll_dumbbell)
testing$skewness_pitch_dumbbell <- as.numeric(testing$skewness_pitch_dumbbell)
testing$kurtosis_yaw_belt <- as.numeric(testing$kurtosis_yaw_belt)
testing$skewness_yaw_belt <- as.numeric(testing$skewness_yaw_belt)
testing$kurtosis_yaw_dumbbell <- as.numeric(testing$kurtosis_yaw_dumbbell)
testing$skewness_yaw_dumbbell <- as.numeric(testing$skewness_yaw_dumbbell)
testing$max_yaw_dumbbell <- as.numeric(testing$max_yaw_dumbbell)
testing$min_yaw_dumbbell <- as.numeric(testing$min_yaw_dumbbell)
testing$amplitude_yaw_dumbbell <- as.numeric(testing$amplitude_yaw_dumbbell)
testing$kurtosis_roll_forearm <- as.numeric(testing$kurtosis_roll_forearm)
testing$kurtosis_picth_forearm <- as.numeric(testing$kurtosis_picth_forearm)
testing$kurtosis_yaw_forearm <- as.numeric(testing$kurtosis_yaw_forearm)
testing$skewness_roll_forearm <- as.numeric(testing$skewness_roll_forearm)
testing$skewness_pitch_forearm <- as.numeric(testing$skewness_pitch_forearm)
testing$skewness_yaw_forearm <- as.numeric(testing$skewness_yaw_forearm)
testing$max_yaw_forearm <- as.numeric(testing$max_yaw_forearm)
testing$min_yaw_forearm <- as.numeric(testing$min_yaw_forearm)
testing$amplitude_yaw_forearm <- as.numeric(testing$amplitude_yaw_forearm)
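# Note: the repeated conversions above (and the matching block for the test set)
# can be written more compactly; a sketch, assuming the same summary-statistic
# column names exist in both data frames:
# statCols <- grep("^(kurtosis_|skewness_|max_yaw_|min_yaw_|amplitude_yaw_)",
#                  names(training), value = TRUE)
# training[statCols] <- lapply(training[statCols], as.numeric)
# testing[statCols] <- lapply(testing[statCols], as.numeric)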
# Data Exploration
library(corrplot)
corrSubset <- subset(training[,sapply(training,is.numeric)],
select = -c(kurtosis_yaw_belt,
skewness_yaw_belt,
kurtosis_yaw_dumbbell,
skewness_yaw_dumbbell,
kurtosis_yaw_forearm,
skewness_yaw_forearm))
correlation <- cor(corrSubset, use = "na.or.complete")
corrplot(correlation, order = "hclust", tl.pos = "n")
classeCor <- sapply(names(corrSubset), function(col) {
cor(corrSubset[,col], as.numeric(training$classe))
})
classeCor <- classeCor[!is.na(classeCor)]
classeCor <- classeCor[abs(classeCor) >= 0.05]
classeCor <- classeCor[order(abs(classeCor), decreasing = TRUE)]
correlatedVars <- names(classeCor)
# Data Modeling
library(caret)
library(doMC)
registerDoMC()
set.seed(1031)
controlObject <- trainControl(method = "cv", number = 10, verboseIter=TRUE)
## CART Model Using Highly Correlated Variables
cartModelCorVar <- train(x = subset(training, select = correlatedVars),
y = training$classe,
method = "rpart",
trControl = controlObject)
print(cartModelCorVar)
## CART Model Using All Variables
cartModel <- train(x = subset(training, select = -classe),
y = training$classe,
method = "rpart",
trControl = controlObject)
print(cartModel)
## Naive Bayes Model Using Highly Correlated Variables
naiveBayesModel <- train(x = subset(training, select = correlatedVars),
y = training$classe,
method = "nb",
trControl = controlObject)
print(naiveBayesModel)
## Bagged CART Model Using Highly Correlated Variables
baggedCartModel <- train(x = subset(training, select = correlatedVars),
y = training$classe,
method = "treebag",
trControl = controlObject)
print(baggedCartModel)
# Data Prediction
predictions <- predict(baggedCartModel, newdata = testing)
| /analysis.R | no_license | nabusman/PracMLClassAssn1 | R | false | false | 7,331 | r | # Load Data
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rosenbrock4.R
\name{rosenbrock4}
\alias{rosenbrock4}
\title{4D test function}
\usage{
rosenbrock4(x)
}
\arguments{
\item{x}{a 4-dimensional vector specifying the location where the function
is to be evaluated.}
}
\value{
A real number equal to the rosenbrock4 function values at \code{x}
}
\description{
Rosenbrock 4-dimensional test function.
}
\details{
The rosenbrock4 (standardized version) function is defined over the domain
\code{[0,1]^4}. It has 1 global minimizer : x* = c(0.4,0.4,0.4,0.4), with
minimum f(x*) = -1.019701, and an additional local minimizer, x*,2 =
c(0.26667,0.4,0.4,0.4), with minimum f(x*,2) = -1.019691.
}
\examples{
design <- matrix(runif(400), 100, 4)
response <- apply(design, 1, rosenbrock4)
}
\author{
Tobias Wagner
Victor Picheny
David Ginsbourger
}
\keyword{internal}
\keyword{optimize}
| /man/rosenbrock4.Rd | no_license | cran/DiceOptim | R | false | true | 908 | rd | % Generated by roxygen2: do not edit by hand
|
library(tidyverse)
library(tidytext)
## Tidying DocumentTermMatrix Objects
library(tm)
data("AssociatedPress", package = "topicmodels")
AssociatedPress
terms <- Terms(AssociatedPress)
head(terms)
ap_td <- tidy(AssociatedPress)
ap_td
ap_sentiments <- ap_td %>%
inner_join(get_sentiments("bing"), by = c(term = "word"))
ap_sentiments
ap_sentiments %>%
count(sentiment, term, wt = count) %>%
ungroup() %>%
filter(n >= 200) %>%
mutate(n = ifelse(sentiment == "negative", -n, n)) %>%
mutate(term = reorder(term, n)) %>%
ggplot(aes(term, n, fill = sentiment)) +
geom_col() +
ylab("Contribution to sentiment") +
coord_flip()
## Tidying dfm Objects
library(methods)
data("data_corpus_inaugural", package = "quanteda")
inaug_dfm <- quanteda::dfm(data_corpus_inaugural, verbose = F)
inaug_dfm
inaug_td <- tidy(inaug_dfm)
inaug_td
inaug_tf_idf <- inaug_td %>%
bind_tf_idf(term, document, count) %>%
arrange(desc(tf_idf))
inaug_tf_idf
year_term_counts <- inaug_td %>%
extract(document, "year", "(\\d+)", convert = T) %>%
complete(year, term, fill = list(count = 0)) %>%
group_by(year) %>%
mutate(year_total = sum(count))
year_term_counts
year_term_counts %>%
filter(term %in% c("god", "america", "foreign",
"union", "constitution", "freedom")) %>%
ggplot(aes(year, count / year_total)) +
geom_point() +
geom_smooth() +
facet_wrap(~ term, scales = "free_y") +
scale_y_continuous(labels = scales::percent_format()) +
ylab("% frequency of word in inaugural address")
# Casting Tidy Text Data into a Matrix
ap_td %>%
cast_dtm(document, term, count)
ap_td %>%
cast_dfm(term, document, count)
library(Matrix)
m <- ap_td %>%
cast_sparse(document, term, count)
class(m)
dim(m)
library(janeaustenr)
austen_dtm <- austen_books() %>%
unnest_tokens(word, text) %>%
count(book, word) %>%
cast_dtm(book, word, n)
austen_dtm
# Tidying Corpus Objects with Metadata
data("acq")
acq
acq[[1]]
acq_td <- tidy(acq)
acq_td
acq_tokens <- acq_td %>%
select(-places) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words, by = "word")
acq_tokens %>%
count(word, sort = T)
acq_tokens %>%
count(id, word) %>%
bind_tf_idf(word, id, n) %>%
arrange(desc(tf_idf))
## Example:: Mining Financial Articles
library(tm.plugin.webmining)
library(purrr)
company <- c("Microsoft", "Apple", "Google", "Amazon", "Facebook",
"Twitter", "IBM", "Yahoo", "Netflix")
symbol <- c("MSFT", "AAPL", "GOOG", "AMZN", "FB", "TWTR", "IBM", "YHOO", "NFLX")
download_articles <- function(symbol) {
WebCorpus(GoogleFinanceSource(paste0("NASDAQ:", symbol)))
}
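# Note: the Google Finance feed behind GoogleFinanceSource() may no longer be
# available; if the download fails, any corpus of recent articles per company
# can be substituted before the tokenization steps below.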
stock_articles <- tibble(company = company,
symbol = symbol) %>%
mutate(corpus = map(symbol, download_articles))
stock_tokens <- stock_articles %>%
unnest(map(corpus, tidy)) %>%
unnest_tokens(word, text) %>%
select(company, datetimestamp, word, id, heading)
library(stringr)
stock_tf_idf <- stock_tokens %>%
count(company, word) %>%
filter(!str_detect(word, "\\d+")) %>%
bind_tf_idf(word, company, n) %>%
arrange(-tf_idf)
stock_tokens %>%
anti_join(stop_words, by = "word") %>%
count(word, id, sort = T) %>%
  inner_join(get_sentiments("afinn"), by = "word") %>%
group_by(word) %>%
summarise(contribution = sum(n * score)) %>%
top_n(12, abs(contribution)) %>%
mutate(word = reorder(word, contribution)) %>%
ggplot(aes(word, contribution)) +
geom_col() +
coord_flip() +
labs(y = "Frequency of word * AFINN score")
stock_tokens %>%
count(word) %>%
inner_join(get_sentiments("loughran"), by = "word") %>%
group_by(sentiment) %>%
top_n(5, n) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n)) +
  geom_col() +
coord_flip() +
facet_wrap(~sentiment, scales = "free") +
ylab("Frequency of this word in the recent financial articles")
stock_sentiment_counts <- stock_tokens %>%
inner_join(get_sentiments("loughran"), by = "word") %>%
count(sentiment, company) %>%
spread(sentiment, n, fill = 0)
stock_sentiment_counts %>%
mutate(score = (positive - negative) / (positive + negative)) %>%
mutate(company = reorder(company, score)) %>%
ggplot(aes(company, score, fill = score > 0)) +
geom_col(show.legend = F) +
coord_flip() +
labs(x = "Company",
y = "Positivity score among 20 recent news articles")
| /codes/chapter5.R | no_license | harryyang1982/tmwithR | R | false | false | 4,427 | r | library(tidyverse)
|
# SIMULATION STUDY #
# CAROLINE BIRDROW #
# Set working directory
path <- "/home/birdroci/rsch/project/CPM"
setwd(path)
# Set seed for reproducibility
set.seed(ITER)
##G-computation with a CPM vs. a Parametric Model
library("rms")
## Define useful functions
expit <- function(x) {exp(x)/(1 + exp(x))}
pull.one <- function(x, y, fhat)
{
return(y[max(which(fhat < x))])
}
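# pull.one() draws one value from an estimated distribution by inverse-CDF
# sampling: given a uniform draw x and estimated CDF values fhat over the
# sorted unique outcomes y, it returns the largest y whose CDF value is still below x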
## Simulation parameters
alpha0 <- -0.3
alpha1 <- 0.7
alpha2 <- 0.5
alpha3 <- 0.7
sigma0 <- -0.6
sigma1 <- 0.25
sigma2 <- 0.2
beta0 <- -2.5
beta1 <- 0.2
beta2 <- 0.1
beta3 <- 0.4
beta4 <- 0.3
beta5 <- 0.6
beta6 <- 0.5
sigmaY <- 0.3
s <- 1
s.d <- 0.3
c <- 4
d <- 1
if(c == 1){
A1 = 0
A2 = 0
A3 = 0
sa1 = 0
sa2 = 0
sa3 = 0
}
if(c == 2){
A1 = 0
A2 = 1
A3 = 1
sa1 = 0
sa2 = 1
sa3 = 1
}
if(c == 3){
A1 = 0
A2 = 0
A3 = 1
sa1 = 0
sa2 = 0
sa3 = 1
}
if(c == 4){
A1 = 1
A2 = 1
A3 = 1
sa1 = 1
sa2 = 1
sa3 = 1
}
## Simulation parameters
N <- 1000
M <- 5000 #Monte-Carlo iterations within standardization
nsim <- 5
## Question 1: Does mean(res) reflect true outcome, my0?
res <- matrix(0, nrow = nsim, ncol = 1)
for (j in 1:nsim){
## Generate data
## First Time Point
l1 <- rnorm(N, mean = 0, sd = s)
a1 <- rbinom(N, 1, expit(alpha0 + alpha1 * l1))
## Second Time Point
eps.l2 <- rnorm(N, mean = 0, sd = s.d)
l2 <- sigma0 + sigma1 * l1 + sigma2 * a1 + eps.l2
l2[l2 < -1] <- 0
if(d == 1){
l2[l2 > -1] <- exp(l2[l2 > -1])
}
if(d == 2){
l2[l2 > -1] <- (l2[l2 > -1])^2
}
if(d == 3){
l2[l2 > -1] <- abs(l2[l2 > -1])
}
a2 <- rbinom(N, 1, expit(alpha0 + alpha1 * l1 + alpha2 * a1 + alpha3 * l2))
## Third Time Point
eps.l3 <- rnorm(N, mean = 0, sd = s.d)
l3 <- sigma0 + sigma1 * l2 + sigma2 * a2 + eps.l3
l3[l3 < -1] <- 0
if(d == 1){
l3[l3 > -1] <- exp(l3[l3 > -1])
}
if(d == 2){
l3[l3 > -1] <- (l3[l3 > -1])^ 2
}
if(d == 3){
l3[l3 > -1] <- abs(l3[l3 > -1])
}
a3 <- rbinom(N, 1, expit(alpha0 + alpha1 * l2 + alpha2 * a2 + alpha3 * l3))
## Final Outcome
eps.Y <- rnorm(N, mean = 0, sd = s)
Ystar <- beta0 + beta1*l1 + beta2*a1 + beta3*l2 + beta4*a2 + beta5*l3 + beta6*a3 + eps.Y
Y <- Ystar
Y[Ystar < -3] <- 0
if(d == 1){
Y[Ystar > -3] <- exp(Ystar[Ystar > -3])
}
if(d == 2){
Y[Ystar > -3] <- (Ystar[Ystar > -3])^2
}
if(d == 3){
Y[Ystar > -3] <- abs(Ystar[Ystar > -3])
}
## Simulated data set:
dat <- data.frame(cbind(l1, a1, l2, a2, l3, a3, Y))
names(dat) <- c("l1", "a1", "l2", "a2", "l3", "a3", "Y")
## Estimate sigmas using a CPM
zz <- orm(l2 ~ l1 + a1, data = dat, family = "probit")
alpha.hat1 <- as.numeric(zz$coef[-c((length(zz$coef) - 1):length(zz$coef))])
sigma1.hat1 <- as.numeric(zz$coef[length(zz$coef) - 1])
sigma2.hat1 <- as.numeric(zz$coef[length(zz$coef)])
## Estimate sigmas using a CPM
xx <- orm(l3 ~ l2 + a2, data = dat, family = "probit")
alpha.hat2 <- as.numeric(xx$coef[-c((length(xx$coef) - 1):length(xx$coef))])
sigma1.hat2 <- as.numeric(xx$coef[length(xx$coef) - 1])
sigma2.hat2 <- as.numeric(xx$coef[length(xx$coef)])
## Estimate betas using a CPM
reg.y <- orm(Y ~ l1 + a1 + l2 + a2 + l3 + a3, data = dat, family = 'probit')
alpha.hat3 <- as.numeric(reg.y$coef[-c((length(reg.y$coef) - 5):length(reg.y$coef))])
beta1.hat <- as.numeric(reg.y$coef[length(reg.y$coef) - 5])
beta2.hat <- as.numeric(reg.y$coef[length(reg.y$coef) - 4])
beta3.hat <- as.numeric(reg.y$coef[length(reg.y$coef) - 3])
beta4.hat <- as.numeric(reg.y$coef[length(reg.y$coef) - 2])
beta5.hat <- as.numeric(reg.y$coef[length(reg.y$coef) - 1])
beta6.hat <- as.numeric(reg.y$coef[length(reg.y$coef)])
## Goal: Approximate integral of E[Y|A = a, L = l] with respect to f_L(l)
## First for a = 0, a1 = 0, and a2 = 0
tmp1 <- matrix(0, nrow = length(alpha.hat1), ncol = M)
tmp2 <- matrix(0, nrow = length(alpha.hat2), ncol = M)
tmp3 <- matrix(0, nrow = length(alpha.hat3), ncol = M)
sl1 <- sample(dat$l1, size = M, replace = TRUE)
for (i in 1:length(alpha.hat1))
{
tmp1[i,] <- as.numeric(1 - pnorm(alpha.hat1[i] + sigma1.hat1*sl1 + sigma2.hat1*sa1, mean = 0, sd = 1))
}
tmp1 <- rbind(0, tmp1)
sl2 <- rep(0, M)
for (i in 1:M)
{
F.hat1 <- tmp1[,i]
sl2[i] <- pull.one(x = runif(1, 0, 1), y = sort(unique(dat$l2)), fhat = F.hat1)
}
sl2
for (n in 1:length(alpha.hat2))
{
tmp2[n,] <- as.numeric(1 - pnorm(alpha.hat2[n] + sigma1.hat2*sl2 + sigma2.hat2*sa2, mean = 0, sd = 1))
}
tmp2 <- rbind(0, tmp2)
sl3 <- rep(0, M)
for (i in 1:M)
{
F.hat2 <- tmp2[,i]
sl3[i] <- pull.one(x = runif(1, 0, 1), y = sort(unique(dat$l3)), fhat = F.hat2)
}
sl3
for (k in 1:length(alpha.hat3))
{
tmp3[k,] <- as.numeric(1 - pnorm(alpha.hat3[k] + beta1.hat*sl1 + beta2.hat*sa1 + beta3.hat*sl2 + beta4.hat*sa2 + beta5.hat*sl3 + beta6.hat*sa3, mean = 0, sd = 1))
}
tmp3 <- rbind(0, tmp3)
sy0 <- rep(0, M)
for (k in 1:M)
{
F.hat3 <- tmp3[,k]
sy0[k] <- pull.one(x = runif(1, 0, 1), y = sort(unique(dat$Y)), fhat = F.hat3)
}
m0 <- mean(sy0)
res[j, 1] <- m0
}
#Save data
outcome.estimates <- as.data.frame(res)
filename1 <- paste("outcome.estimates-", ITER, ".csv")
write.csv(outcome.estimates, filename1) | /CPM_Simulation_EstimationProcedure.R | no_license | BirdrowC/Semiparametric-G-Computation | R | false | false | 5,050 | r | # SIMULATION STUDY #
|
#getting the library
library(ggplot2);head(diamonds);names(diamonds)
#starting a basic ggplot plot object
gg<-ggplot(diamonds,aes(price,carat))+geom_point(color="brown4")
gg
##gg is a starting a basic ggplot plot object
#adding a title or label to the graph
gg<-gg+ggtitle("Diamond Carat & Price")
gg
#adding labels
gg<-gg+labs(title = "Diamond Carat & Price")
#adding theme to the plot
gg<-gg+theme(plot.title= element_text(size = 20, face = "bold"))
#adding labels to the graph
gg<-gg+labs(x="Price in Dollar", y="Carat", title="Price by Carat",
color="red")
gg
#removing text and ticks from an axis
gg<-gg+theme(axis.ticks.y=element_blank(),axis.text.y=element_blank())
#rotating the text in any axis
gg<-gg + theme(axis.text.x=element_text(angle=50, size=10, vjust=0.5))
gg
#adding color to the axis names
gg<-gg + theme(axis.text.x=element_text(color = "seagreen1", vjust=0.45),
axis.text.y=element_text(color = "violet", vjust=0.45))
gg
#setting limits to both axes
gg<-gg + ylim(0,0.8)+xlim(250,1500)
gg
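#note: xlim()/ylim() drop points outside the limits before statistics are computed;
#coord_cartesian() zooms without discarding data:
gg + coord_cartesian(xlim = c(250, 1500), ylim = c(0, 0.8))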
#how to set legends in a graph
gg<-ggplot(diamonds,aes(price,carat,color=factor(cut)))+geom_point()
gg
gg<-ggplot(diamonds,aes(price,carat,color=factor(color)))+geom_point()
gg
gg<-ggplot(diamonds,aes(price,carat,color=factor(clarity)))+geom_point()
gg
gg<-gg+theme(legend.title=element_blank())
gg
#changing the title name of the legend
gg<-gg+theme(legend.title = element_text(colour="darkblue",
size=16,
face="bold"))+
scale_color_discrete(name="By Clarity")
gg
#changing the background boxes in legend
gg<-gg+theme(legend.key=element_rect(fill='dodgerblue1'))
gg
#changing the size of the symbols used in legend
gg<-gg+guides(colour = guide_legend(override.aes = list(size=4)))
gg
#adding line to the data points
gg<-gg+geom_line(color="darkcyan")
#changing the background of an image
gg<-gg+theme(panel.background = element_rect(fill = 'orange'))
#changing plot background
gg<-gg+theme(plot.background = element_rect(fill = 'brown4'))
#adding a multi-variable cut to the graph
gg<-gg+facet_wrap(~cut, nrow=4)
#adding two variables as cut to display the relationship
gg<-gg+facet_wrap(~cut+clarity, nrow=4)
#scale free graphs in multi-panels
gg<-gg+facet_wrap(~color, ncol=2, scales="free")
#bi-variate plotting using ggplot2
gg<-gg+facet_grid(color~cut)
gg
library(ggplot2);
library(scales)
#changing discrete category colors
ggplot(diamonds, aes(price, carat, color=factor(cut)))+
geom_point() +
scale_color_brewer(palette="Set1")
#Using tableau colors
library(ggthemes)
ggplot(diamonds, aes(price, carat, color=factor(cut)))+
geom_point() +
scale_color_tableau()
#using color gradient
ggplot(diamonds, aes(price, carat))+
geom_point() +
scale_color_gradient(low = "blue", high = "red")
#plotting a distribution on a graph
mid<-mean(diamonds$price)
ggplot(diamonds, aes(price, carat, color=table))+geom_point()+
scale_color_gradient2(midpoint=mid,
low="blue", mid="white", high="red" )
########################################################
##creating different charts
#######################################################
library(MASS);names(Cars93)
#creating bar chart
barplot <- ggplot(Cars93,aes(Type))+
geom_bar(width = 0.5,fill="royalblue4",color="red")+
ggtitle("Vehicle Count by Category")
barplot
#creating boxplot
boxplot <- ggplot(Cars93,aes(Type,Price))+
geom_boxplot(width = 0.5,fill="firebrick",color="cadetblue2",
outlier.colour = "purple",outlier.shape = 2)+
ggtitle("Boxplot of Price by Car Type")
boxplot
#creating Bubble chart
bubble<-ggplot(Cars93, aes(x=EngineSize, y=MPG.city)) +
geom_point(aes(size=Price,color="red")) +
scale_size_continuous(range=c(2,15)) +
theme(legend.position = "bottom")
bubble
#creating Donut charts
ggplot(Cars93) + geom_rect(aes(fill=Cylinders, ymax=Max.Price,
ymin=Min.Price, xmax=4, xmin=3)) +
coord_polar(theta="y") + xlim(c(0, 4))
#creating geomap
library(googleVis)
head(state.x77)
states <- data.frame(state.name, state.x77)
gmap <- gvisGeoMap(states, "state.name", "Area",
options=list(region="US", dataMode="regions",
width=900, height=600))
plot(gmap)
#creating histograms
histog <- ggplot(Cars93,aes(RPM))+
geom_bar(width = 10.5,fill="firebrick",color="cadetblue2")+
ggtitle("Histogram")
histog
#creating line charts
linechart <- ggplot(Cars93,aes(RPM,Price))+
geom_line(color="cadetblue4")+
ggtitle("Line Charts")
linechart
#creating pie charts
pp <- ggplot(Cars93, aes(x = factor(1), fill = factor(Type))) +
geom_bar(width = 1)
pp + coord_polar(theta = "y")
# 3D Pie Chart from data frame
library(plotrix)
t <- table(Cars93$Type);par(mfrow=c(1,2))
pct <- paste(names(t), "\n", t, sep="")
pie(t, labels = pct, main="Pie Chart of Type of cars")
pie3D(t,labels=pct,main="Pie Chart of Type of cars")
#Creating scatterplot
library(gridExtra)
sp <- ggplot(Cars93,aes(Horsepower,MPG.highway))+
geom_point(color="dodgerblue",size=5)+ggtitle("Basic Scatterplot")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp
#adding a continuous variable Length to scale the scatterplot points
sp2<-sp+geom_point(aes(color=Length), size=5)+
ggtitle("Scatterplot: Adding Length Variable")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp2
grid.arrange(sp,sp2,nrow=1)
#adding a factor variable Origin to scale the scatterplot points
sp3<-sp+geom_point(aes(color=factor(Origin)),size=5)+
ggtitle("Scatterplot: Adding Origin Variable")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp3
#adding custom color to the scatterplot
sp4<-sp+geom_point(aes(color=factor(Origin)),size=5)+
scale_color_manual(values = c("red","blue"))+
ggtitle("Scatterplot: Adding Custom Color")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp4
grid.arrange(sp3,sp4,nrow=1)
#adding lines to the scatterplot
sp5<-sp+geom_point(color="blue",size=5)+geom_line()+
ggtitle("Scatterplot: Adding Lines")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp5
#adding regression lines to the scatterplot
sp6<-sp+geom_point(color="firebrick",size=5)+
geom_smooth(method = "lm",se =T)+
geom_smooth(method = "rlm",se =T)+
ggtitle("Adding Regression Lines")+
theme(plot.title= element_text(size = 12, face = "bold"))
sp6
grid.arrange(sp5,sp6,nrow=1)
#datasets with n < 1000 default is loess. For datasets with 1000 or
#more observations defaults to gam
#adding more regression lines
sp7<-sp+geom_point(color="firebrick",size=5)+
geom_smooth(method = "auto",se =T)+
geom_smooth(method = "glm",se =T)+
ggtitle("Adding Regression Lines")+
theme(plot.title= element_text(size = 20, face = "bold"))
sp7
#adding regression lines to the scatterplot
sp8<-sp+geom_point(color="firebrick",size=5)+
geom_smooth(method = "gam",se =T)+
ggtitle("Adding Regression Lines")+
geom_smooth(method = "loess",se =T)+
theme(plot.title= element_text(size = 20, face = "bold"))
sp8
grid.arrange(sp7,sp8,nrow=1)
#creating 3D scatterplot
library(scatterplot3d);library(Rcmdr)
scatter3d(MPG.highway~Length+Width|Origin, data=Cars93, fit="linear",residuals=TRUE, parallel=FALSE, bg="black", axis.scales=TRUE, grid=TRUE, ellipsoid=FALSE)
#stacked bar chart
qplot(factor(Type), data=Cars93, geom="bar", fill=factor(Origin))
#or
ggplot(Cars93, aes(Type, fill=Origin)) + geom_bar()
#stem and leaf plot
stem(Cars93$MPG.city)
#Word cloud representation
library(wordcloud)
words<-c("data","data mining","big data analytics","statistics","graphs",
"visualization","predictive analytics","modeling","data science",
"R","Python","Shiny","ggplot2","data analytics")
freq<-c(123,234,213,423,142,145,156,176,214,218,213,234,256,324)
d<-data.frame(words,freq)
set.seed(1234)
wordcloud(words = d$words, freq = d$freq, min.freq = 1,c(8,.3),
max.words=200, random.order=F, rot.per=0.35,
colors=brewer.pal(7, "Dark2"))
#coxcomb chart = bar chart + pie chart
cox<- ggplot(Cars93, aes(x = factor(Type))) +
geom_bar(width = 1, colour = "goldenrod1",fill="darkred")
cox + coord_polar()
#a second variant of coxcomb plot
cox + coord_polar(theta = "y")
| /ggplot.R | no_license | Mayankagupta/R-programs | R | false | false | 8,566 | r |
|
n=1024
p=2048
par(mfrow=c(2,2))
q = seq(0, 1, length = p)
rho=0.7*dbeta(q,1500,3000)+0.5*dbeta(q,1200,900)+0.5*dbeta(q,600,160)
rho=10*rho/sqrt(sum(rho^2))
plot(rho, type = "l")
xdata=array(,dim=c(n,p))
for (i in 1:n){
xdata[i,]=rnorm(1,0,1)*rho+1*rnorm(p,0,1)
}
plot(xdata[1,],type="l")
s = 1/n*t(xdata)%*%xdata
princ1=10*eigen(s)$vectors[,1]
if (max(princ1)<.5) princ1=-princ1
plot(princ1, type = "l")
eigenvectors = eigen(s)$vectors
eigenvalues = eigen(s)$values
scores = xdata%*%eigenvectors # new data after pca
total.var = sum(eigenvalues) # total variance
prop.var = eigenvalues/total.var # proportion of variance
cum.var = cumsum(prop.var) # cumulative proportion
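# optional cross-check (sketch): prcomp() on the uncentered data should give
# the same proportions of variance as the eigen-decomposition above
pca = prcomp(xdata, center = FALSE)
head(pca$sdev^2 / sum(pca$sdev^2)) # compare with head(prop.var)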
| /test13.R | no_license | zeke75/PCA-bootstrap | R | false | false | 703 | r | n=1024
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/specificity-plots.R
\name{calcOpenSignal}
\alias{calcOpenSignal}
\title{The function calcOpenSignal takes the input BED file(s)
in form of GRanges or GRangesList object, overlaps
it with all defined open chromatin regions across
cell types and returns a matrix, where each row is
the input genomic region (if overlap was found),
each column is a cell type, and the value
is a normalized ATAC-seq signal.}
\usage{
calcOpenSignal(query, cellMatrix)
}
\arguments{
\item{query}{Genomic regions to be analyzed. Can be GRanges or GRangesList
object.}
\item{cellMatrix}{Matrix with open chromatin signal values, rows are genomic
regions, columns are cell types. First column contains
information about the genomic region in following form:
chr_start_end. Can be either data.frame or data.table object.}
}
\value{
A list with named components:
signalMatrix - data.table with cell specific open chromatin signal
values for query regions
matrixStats - data.frame containing boxplot stats for individual
cell type
}
\description{
The function calcOpenSignal takes the input BED file(s)
in form of GRanges or GRangesList object, overlaps
it with all defined open chromatin regions across
cell types and returns a matrix, where each row is
the input genomic region (if overlap was found),
each column is a cell type, and the value
is a normalized ATAC-seq signal.
}
\examples{
openRegionSummary = calcOpenSignal(vistaEnhancers, exampleOpenSignalMatrix_hg19)
}
| /man/calcOpenSignal.Rd | no_license | GenomicsNX/GenomicDistributions | R | false | true | 1,616 | rd | % Generated by roxygen2: do not edit by hand
|
#!/usr/bin/env Rscript
fileConn<-file("/var/www/html/Hackday/Ennuste/values.txt")
pred <- readLines(fileConn)
pred <- list(ennuste = as.numeric(pred[1]), muutos = as.numeric(pred[2]))
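# random-walk update: perturb the previous change, then keep the forecast inside [0.05, 0.95]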
muutos <- rnorm(1,pred$muutos/2,0.03)
ennuste <- max(min(pred$ennuste+muutos,0.95),0.05)
writeLines(c(as.character(ennuste),as.character(muutos)), fileConn)
cat(round(ennuste,2)*100)
close(fileConn)
| /Ennuste/Ennuste.R | no_license | KristianRoth/Hackday | R | false | false | 383 | r | #!/usr/bin/env Rscript
|
library(ROAuth)
### Name: OAuth-class
### Title: Class "OAuth": A class to manage OAuth authentication
### Aliases: OAuth-class ROAuth OAuth OAuthFactory
### Keywords: classes
### ** Examples
## This example uses a test case from liboauth and the
## keys are already pre-signed. This is an example of
## one of the few times \code{needsVerifier} would be \code{FALSE}.
## Not run:
##D reqURL <- "http://term.ie/oauth/example/request_token.php"
##D accessURL <- "http://term.ie/oauth/example/access_token.php"
##D authURL <- "NORMALLY YOU NEED THIS"
##D cKey <- "key"
##D cSecret <- "secret"
##D testURL <- "http://term.ie/oauth/example/echo_api.php?method=foo bar"
##D
##D credentials <- OAuthFactory$new(consumerKey=cKey,
##D consumerSecret=cSecret,
##D requestURL=reqURL,
##D accessURL=accessURL,
##D authURL=authURL,
##D needsVerifier=FALSE)
##D credentials$handshake()
##D ## the GET isn't strictly necessary as that's the default
##D credentials$OAuthRequest(testURL, "GET")
##D
## End(Not run)
| /data/genthat_extracted_code/ROAuth/examples/OAuth-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,173 | r | library(ROAuth)
|
# hello world in R
# suppressing quotes
print("Hello, World!", quote = FALSE)
| /hello.r | no_license | bendjibenz/HacktoberfestHelloWorld | R | false | false | 78 | r | # hello world in R
|
\name{get.edge.start}
\alias{get.edge.start}
\alias{get.edge.end}
\alias{set.edge.start}
\alias{set.edge.end}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ read or modify the times assigned to an existing edge}
\description{
convinience methods for reading or modifying edge times without directly accessing the list, makes it possible to change implementation in the future.
}
\usage{
get.edge.start(network, edgeID)
get.edge.end(network, edgeID)
set.edge.start(network,edgeID,valid.time)
set.edge.end(network,edgeID,valid.time)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{network}{ \code{network} the network the edge belongs to }
\item{edgeID}{ \code{edgeID} the ID of the edge }
\item{valid.time}{ \code{edgeID} (for setters) the start (or end) value for the edge }
}
\details{
These methods make it possible to access and modify the valid intervals
associated with edges. It is much preferable to use these methods instead of
directly modifying the objects.
For dynamic network objects the edge timing information is stored on a list
named 'etl' (edge time list) that parallels the master edge list ('mel'). The timing
information for an edge will be at the same index on 'etl' as the edge index on 'mel'.
}
\value{
get methods will return a single numeric value indicating either the starting
time or ending time of the edge. 'NA' can also be used to indicate that it is
unknown or undefined.
set methods will return the network object containing the newly modified edge
}
\author{ Skye Bender-deMoll \email{skyebend@skyeome.net} }
\note{
the 'etl' list is not backed by C code, so it is important to be
careful with the network objects returned
}
\examples{
#make a network
myNet <- network.initialize(5);
#add some edges
myNet[1,2] <-1;
myNet[2,3] <-1;
myNet[2,4] <-1;
myNet[3,5] <-1;
#convert it to dynamic, will have the bounds [0,1]
myDyn <- as.dynamic(myNet);
#check the end of the first edge
get.edge.end(myDyn,1);
#check the start of the 2nd edge
get.edge.start(myDyn,2);
#change the first edge
myDyn <- set.edge.start(myDyn, 1, 2); #whoops, we made an invalid interval [2,1] !
myDyn <- set.edge.end(myDyn,1,5);
#print the etl to see if it worked
print(myDyn$etl);
#plot the valid intervals
plot.intervals(myDyn);
}
\keyword{ graphs }% at least one, from doc/KEYWORDS
| /Lectures/Tutorial5/TERGM/R/update/old/dynamicnetwork/man/.svn/text-base/get.edge.start.Rd.svn-base | no_license | SSDALab/EPIC_Networks | R | false | false | 2,529 | \name{get.edge.start}
\alias{get.edge.start}
\alias{get.edge.end}
\alias{set.edge.start}
\alias{set.edge.end}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ read or modify the times assigned to an existing edge}
\description{
convenience methods for reading or modifying edge times without directly accessing the list; this makes it possible to change the implementation in the future.
}
\usage{
get.edge.start(network, edgeID)
get.edge.end(network, edgeID)
set.edge.start(network,edgeID,valid.time)
set.edge.end(network,edgeID,valid.time)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{network}{ \code{network} the network the edge belongs to }
\item{edgeID}{ \code{edgeID} the ID of the edge }
  \item{valid.time}{ \code{valid.time} (for setters) the start (or end) value for the edge }
}
\details{
These methods make it possible to access and modify the valid intervals
associated with edges. It is much preferable to use these methods instead of
directly modifying the objects.
For dynamic network objects the edge timing information is stored on a list
named 'etl' (edge time list) that parallels the master edge list. The timing
information for an edge will be at the same index on etl as the edge index on mel.
}
\value{
get methods will return a single numeric value indicating either the starting
time or ending time of the edge. 'NA' can also be used to indicate that it is
unknown or undefined.
set methods will return the network object containing the newly modified edge
}
\author{ Skye Bender-deMoll \email{skyebend@skyeome.net} }
\note{
the etl list is not backed by C code, so it is important to be
careful with the network objects returned
}
\examples{
#make a network
myNet <- network.initialize(5);
#add some edges
myNet[1,2] <-1;
myNet[2,3] <-1;
myNet[2,4] <-1;
myNet[3,5] <-1;
#convert it to dynamic, will have the bounds [0,1]
myDyn <- as.dynamic(myNet);
#check the end of the first edge
get.edge.end(myDyn,1);
#check the start of the 2nd edge
get.edge.start(myDyn,2);
#change the first edge
myDyn <- set.edge.start(myDyn, 1, 2); #whoops, we made an invalid interval [2,1] !
myDyn <- set.edge.end(myDyn,1,5);
#print the etl to see if it worked
print(myDyn$etl);
#plot the valid intervals
plot.intervals(myDyn);
}
\keyword{ graphs }% at least one, from doc/KEYWORDS
| |
apstylenumber <- function(x){
if (x == 1) {
num <- "one"
} else if (x == 2) {
num <- "two"
} else if (x == 3) {
num <- "three"
} else if (x == 4) {
num <- "four"
} else if (x == 5) {
num <- "five"
} else if (x == 6) {
num <- "six"
} else if (x == 7) {
num <- "seven"
} else if (x == 8) {
num <- "eight"
} else if (x == 9) {
num <- "nine"
} else {
num <- as.character(format(x, nsmall=0, big.mark=","))
}
return(num)
} | /apstylenumber.R | permissive | RoperDataLab/PPPloans | R | false | false | 481 | r | apstylenumber <- function(x){
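# Quick usage sketch (added for illustration; these calls assume only the
# function defined above and are kept as comments):
# apstylenumber(3)      # "three"  -- one through nine are spelled out, AP style
# apstylenumber(9)      # "nine"
# apstylenumber(12000)  # "12,000" -- 10 and above stay numeric, with a comma big mark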
if (x == 1) {
num <- "one"
} else if (x == 2) {
num <- "two"
} else if (x == 3) {
num <- "three"
} else if (x == 4) {
num <- "four"
} else if (x == 5) {
num <- "five"
} else if (x == 6) {
num <- "six"
} else if (x == 7) {
num <- "seven"
} else if (x == 8) {
num <- "eight"
} else if (x == 9) {
num <- "nine"
} else {
num <- as.character(format(x, nsmall=0, big.mark=","))
}
return(num)
} |
#' Simulation envelope
#'
#' @description Simulated envelopes in normal probability plots
#' @param star Initial values for the parameters to be optimized over.
#' @param formula The structure matrix of covariates of dimension n x p (in models that include an intercept x
#' should contain a column of ones).
#' @param dataSet data
#' @param n.r Indicator of which residual type to plot: 1 - weighted, 2 - standardized weighted, 3 - Pearson, 4 - standardized Pearson, 5 - standardized deviance component residuals and 6 - randomized quantile residuals.
#' @param nsim Number of Monte Carlo replicates.
#' @param plot TRUE or FALSE. Indicates if a graph should be plotted.
#' @details Atkinson (1985), suggests the use of simulated envelopes in normal probability
#' plots to facilitate the goodness of fit.
#' @return A list containing the simulated envelope bands (\code{mE}, with columns e1, med and e2) and the residuals (\code{residual}) shown in the normal probability plots
#' @author Jalmar M F Carrasco <carrascojalmar@gmail.com>,
#' Cristian M Villegas Lobo <master.villegas@gmail.com> and Lizandra C Fabio <lizandrafabio@gmail.com>
#' @references
#' \itemize{
#' \item Atkinson A.C. (1985). Plots, Transformations and Regression: An Introduction to Graphical Methods of Diagnostic
#' Regression Analysis. Oxford University Press, New York.
#' \item Fabio, L. C, Villegas, C. L., Carrasco, J. M. F. and de Castro, M. (2020). Diagnostic tools for a multivariate
#' negative binomial model for fitting correlated data with overdispersion. Submitted.
#'
#'
#' }
#' @examples
#'
#' # Not run:
#'
#' data(seizures)
#' head(seizures)
#'
#' star <-list(phi=0.05, beta1=1, beta2=0.1, beta3=1)
#'
#' envelope.MNB(formula=Y ~ trt + period +offset(weeks),star=star,nsim=21,n.r=2,
#' dataSet=seizures,plot=TRUE)
#'
#' dev.off()
#' #End(Not run)
#' @export
envelope.MNB <- function(star, formula, dataSet, n.r, nsim, plot=TRUE) {
Y <- stats::model.response(data = stats::model.frame(formula,
dataSet))
X <- stats::model.matrix(formula, dataSet)
off <- stats::model.extract(stats::model.frame(formula,dataSet),"offset")
dataSet.ind <- split(dataSet, f = dataSet$ind)
n <- length(dataSet.ind)
p <- dim(X)[2]
mi <- dim(dataSet.ind[[1]])[1]
N <- n * mi
op <- fit.MNB(star = star, formula = formula,
dataSet = dataSet,tab=FALSE)
r <- re.MNB(star = star, formula = formula,
dataSet = dataSet)
r.quantil <- qMNB(par=op$par,formula=formula,dataSet=dataSet)
if(n.r==6) {tp <- r.quantil}else{tp <- r[[n.r]]}
e <- matrix(NA, length(tp), nsim)
Y <- numeric(N)
sigma <- (op$par[1])^(-0.5)
for (k in 1:nsim) {
ui <- log(flexsurv::rgengamma(n = n, mu = 0, sigma = sigma,
Q = sigma))
uij <- rep(ui, each = mi)
eta <- X %*% (op$par[2:(p + 1)])
    if(is.null(off)){
zij <- exp(eta + uij)}else{zij <- exp(eta + uij+off)}
Y <- stats::rpois(N, zij)
newDataSet <- data.frame(Y = Y, dataSet[,
2:ncol(dataSet)])
opEnv <- fit.MNB(star = star, formula = formula,
dataSet = newDataSet,tab=FALSE)
r.boot <- re.MNB(star = star, formula = formula,
dataSet = newDataSet)
r.qBoot <- qMNB(par=opEnv$par,formula=formula,dataSet=dataSet)
if(n.r==6){ tp.boot <- r.qBoot}else{tp.boot <- r.boot[[n.r]]}
e[, k] <- sort(tp.boot)
}
e1 <- apply(e, 1, min)
e2 <- apply(e, 1, max)
med <- apply(e, 1, mean)
faixa <- range(tp, e1, e2,na.rm=TRUE,finite=TRUE)
result <- list()
result$mE <- cbind(e1, med, e2)
result$residual <- tp
if (plot==TRUE) {
graphics::par(mfrow = c(1, 2), pty = "s", col = "royalblue")
# Plot - envelope
stats::qqnorm(sort(tp), xlab = "Normal quantiles", ylab = "residual",
pch = 15, ylim = faixa, main = "", cex.axis = 1.2,
cex.lab = 1.2, cex = 0.6, bg = 5)
graphics::par(new = T)
stats::qqnorm(e1, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 1, main = "")
graphics::par(new = T)
stats::qqnorm(e2, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 1, main = "")
graphics::par(new = T)
stats::qqnorm(med, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 2, main = "")
# Plot - residual
graphics::plot(tp, ylab = "residual", xlab = "Index", ylim = faixa, cex.axis = 1.2, cex.lab = 1.2, pch = 15,
cex = 0.6, bg = 5)
graphics::abline(h = c(-3, 0, 3), lwd = 2, lty = 2)}
return(result)
}
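# Illustrative usage sketch (not part of the original package source). It assumes the
# objects created in the @examples block above, with plot = FALSE to skip the graphics:
# env <- envelope.MNB(formula = Y ~ trt + period + offset(weeks), star = star,
#                     nsim = 21, n.r = 2, dataSet = seizures, plot = FALSE)
# head(env$mE)          # columns e1 (lower band), med (mean band), e2 (upper band)
# summary(env$residual) # the residuals compared against those bands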
# ................................................................................................
| /R/enveSim.R | no_license | clobos/MNB | R | false | false | 4,726 | r | #' Simulation envelope
#'
#' @description Simulated envelopes in normal probability plots
#' @param star Initial values for the parameters to be optimized over.
#' @param formula The structure matrix of covariates of dimension n x p (in models that include an intercept x
#' should contain a column of ones).
#' @param dataSet data
#' @param n.r Indicator of which residual type to plot: 1 - weighted, 2 - standardized weighted, 3 - Pearson, 4 - standardized Pearson, 5 - standardized deviance component residuals and 6 - randomized quantile residuals.
#' @param nsim Number of Monte Carlo replicates.
#' @param plot TRUE or FALSE. Indicates if a graph should be plotted.
#' @details Atkinson (1985), suggests the use of simulated envelopes in normal probability
#' plots to facilitate the goodness of fit.
#' @return A list containing the simulated envelope bands (\code{mE}, with columns e1, med and e2) and the residuals (\code{residual}) shown in the normal probability plots
#' @author Jalmar M F Carrasco <carrascojalmar@gmail.com>,
#' Cristian M Villegas Lobo <master.villegas@gmail.com> and Lizandra C Fabio <lizandrafabio@gmail.com>
#' @references
#' \itemize{
#' \item Atkinson A.C. (1985). Plots, Transformations and Regression: An Introduction to Graphical Methods of Diagnostic
#' Regression Analysis. Oxford University Press, New York.
#' \item Fabio, L. C, Villegas, C. L., Carrasco, J. M. F. and de Castro, M. (2020). Diagnostic tools for a multivariate
#' negative binomial model for fitting correlated data with overdispersion. Submitted.
#'
#'
#' }
#' @examples
#'
#' # Not run:
#'
#' data(seizures)
#' head(seizures)
#'
#' star <-list(phi=0.05, beta1=1, beta2=0.1, beta3=1)
#'
#' envelope.MNB(formula=Y ~ trt + period +offset(weeks),star=star,nsim=21,n.r=2,
#' dataSet=seizures,plot=TRUE)
#'
#' dev.off()
#' #End(Not run)
#' @export
envelope.MNB <- function(star, formula, dataSet, n.r, nsim, plot=TRUE) {
Y <- stats::model.response(data = stats::model.frame(formula,
dataSet))
X <- stats::model.matrix(formula, dataSet)
off <- stats::model.extract(stats::model.frame(formula,dataSet),"offset")
dataSet.ind <- split(dataSet, f = dataSet$ind)
n <- length(dataSet.ind)
p <- dim(X)[2]
mi <- dim(dataSet.ind[[1]])[1]
N <- n * mi
op <- fit.MNB(star = star, formula = formula,
dataSet = dataSet,tab=FALSE)
r <- re.MNB(star = star, formula = formula,
dataSet = dataSet)
r.quantil <- qMNB(par=op$par,formula=formula,dataSet=dataSet)
if(n.r==6) {tp <- r.quantil}else{tp <- r[[n.r]]}
e <- matrix(NA, length(tp), nsim)
Y <- numeric(N)
sigma <- (op$par[1])^(-0.5)
for (k in 1:nsim) {
ui <- log(flexsurv::rgengamma(n = n, mu = 0, sigma = sigma,
Q = sigma))
uij <- rep(ui, each = mi)
eta <- X %*% (op$par[2:(p + 1)])
    if(is.null(off)){
zij <- exp(eta + uij)}else{zij <- exp(eta + uij+off)}
Y <- stats::rpois(N, zij)
newDataSet <- data.frame(Y = Y, dataSet[,
2:ncol(dataSet)])
opEnv <- fit.MNB(star = star, formula = formula,
dataSet = newDataSet,tab=FALSE)
r.boot <- re.MNB(star = star, formula = formula,
dataSet = newDataSet)
r.qBoot <- qMNB(par=opEnv$par,formula=formula,dataSet=dataSet)
if(n.r==6){ tp.boot <- r.qBoot}else{tp.boot <- r.boot[[n.r]]}
e[, k] <- sort(tp.boot)
}
e1 <- apply(e, 1, min)
e2 <- apply(e, 1, max)
med <- apply(e, 1, mean)
faixa <- range(tp, e1, e2,na.rm=TRUE,finite=TRUE)
result <- list()
result$mE <- cbind(e1, med, e2)
result$residual <- tp
if (plot==TRUE) {
graphics::par(mfrow = c(1, 2), pty = "s", col = "royalblue")
# Plot - envelope
stats::qqnorm(sort(tp), xlab = "Normal quantiles", ylab = "residual",
pch = 15, ylim = faixa, main = "", cex.axis = 1.2,
cex.lab = 1.2, cex = 0.6, bg = 5)
graphics::par(new = T)
stats::qqnorm(e1, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 1, main = "")
graphics::par(new = T)
stats::qqnorm(e2, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 1, main = "")
graphics::par(new = T)
stats::qqnorm(med, axes = F, xlab = "", ylab = "", type = "l",
ylim = faixa, lty = 2, main = "")
# Plot - residual
graphics::plot(tp, ylab = "residual", xlab = "Index", ylim = faixa, cex.axis = 1.2, cex.lab = 1.2, pch = 15,
cex = 0.6, bg = 5)
graphics::abline(h = c(-3, 0, 3), lwd = 2, lty = 2)}
return(result)
}
# ................................................................................................
|
#Austin Dickerson In-class Assignment 2
#https://github.com/abdickerson/CMDA
setwd("C:\\Users\\Austin\\Desktop\\Virginia Tech\\CS 3654\\R")
load('exampleData.rData')
names(custdata)
#Individual summary statistics for each variable in dataset
summary(custdata$is.employed)
#is.employed has missing values, which most likely means that these people are not employed
summary(custdata$income)
#income has an outlier in that it has a negative value, which could mean that person
#is in debt, but it could mean multiple things, we are not really sure what it means
summary(custdata$age)
#age has an outlier in that the highest age is 146.7, and nobody lives to be that old
summary(custdata$state.of.res)
#state.of.res has an issue with units, you are left to assume that the units are
#the number of people living in each state
summary(custdata$custid)
#custid also has an issue with units, I have no idea what these numbers represent
summary(custdata$sex)
#sex has no issues with it, everything is straight forward, either they are M or F
summary(custdata$marital.stat)
#marital.stat also has nothing wrong with it, everything is categorized, and there
#are not any NA's or outliers
summary(custdata$health.ins)
#there also appears to be nothing wrong with heatlh.ins, there are no NA's or outliers
summary(custdata$housing.type)
#housing.type has NA's, which could mean they didn't fall into any of the categories
#but we are not sure
summary(custdata$recent.move)
#recent.move also has NA's, which might mean they don't have a home, but again we
#are not sure
summary(custdata$num.vehicles)
#num.vehicles also has NA's, which probably means they do not own a car
summary(custdata$is.employed.fix1)
#is.employed.fix1 has issues, but it is confusing, I am not sure what these
#categories mean
summary(custdata$age.normalized)
#age.normalized has outliers, in that it has negative values
summary(custdata$Median.Income)
#Median.Income doesn't seem to have any issues, everything seems normal
summary(custdata$income.norm)
#income.norm has outliers in that there are negative values as its lowest value
summary(custdata$gp)
#gp has issues with units, it doesn't say what these numbers represent
summary(custdata$income.lt.30k)
#income.lt.30k has issues, I am not too sure what these categories represent
summary(custdata$age.range)
#age.range doesn't seem to have any issues, everything seems to be normal
summary(custdata$Income)
#income has NA's, which could mean they did not report an income
#2
uciCar <- read.table( 'http://www.win-vector.com/dfiles/car.data.csv', sep=',', header=T)
names(uciCar)
#Observations about individual category summaries
summary(uciCar$buying)
#buying has the issue in that all the values are the same, meaning there might be
#only one value
summary(uciCar$maint)
summary(uciCar$doors)
summary(uciCar$persons)
summary(uciCar$lug_boot)
summary(uciCar$safety)
#All the above categories have the same issue where all the summary numbers are the
#same, which might mean there is only one value to summarize
summary(uciCar$rating)
#there appears to be nothing wrong with this category, everything seems to make
#sense
#3
d <- read.table(paste('http://archive.ics.uci.edu/ml/', 'machine-learning-databases/statlog/german/german.data', sep=''), header=T)
names(d) <- c('Status.of.existing.checking.account',
'Duration.in.month', 'Credit.history', 'Purpose',
'Credit.amount', 'Savings account/bonds',
'Present.employment.since','Installment.rate.in.percentage.of.disposable.income',
'Personal.status.and.sex', 'Other.debtors/guarantors',
'Present.residence.since', 'Property', 'Age.in.years',
'Other.installment.plans', 'Housing',
'Number.of.existing.credits.at.this.bank', 'Job',
'Number.of.people.being.liable.to.provide.maintenance.for',
'Telephone', 'foreign.worker', 'Good.Loan')
mapping <- list('A11'='... < 0 DM',
'A12'='0 <= ... < 200 DM',
'A13'='... >= 200 DM / salary assignments for at least 1 year',
'A14'='no checking account',
'A30'='no credits taken/all credits paid back duly',
'A31'='all credits at this bank paid back duly',
'A32'='existing credits paid back duly till now',
'A33'='delay in paying off in the past',
'A34'='critical account/other credits existing (not at this bank)',
'A40'='car (new)',
'A41'='car (used)',
'A42'='furniture/equipment',
'A43'='radio/television',
'A44'='domestic appliances',
'A45'='repairs',
'A46'='education',
'A47'='(vacation - does not exist?)',
'A48'='retraining',
'A49'='business',
'A410'='others',
'A61'='... < 100 DM',
'A62'='100 <= ... < 500 DM',
'A63'='500 <= ... < 1000 DM',
'A64'='.. >= 1000 DM',
'A65'='unknown/ no savings account',
'A71'='unemployed',
'A72'='... < 1 year',
'A73'='1 <= ... < 4 years',
'A74'='4 <= ... < 7 years',
'A75'='.. >= 7 years',
'A91'='male : divorced/separated',
'A92'='female : divorced/separated/married',
'A93'='male : single',
'A94'='male : married/widowed',
'A95'='female : single',
'A101'='none',
'A102'='co-applicant',
'A103'='guarantor',
'A121'='real estate',
'A122'='if not A121 : building society savings agreement/life insurance',
'A123'='if not A121/A122 : car or other, not in attribute 6',
'A124'='unknown / no property',
'A141'='bank',
'A142'='stores',
'A143'='none',
'A151'='rent',
'A152'='own',
'A153'='for free',
'A171'='unemployed/ unskilled - non-resident',
'A172'='unskilled - resident',
'A173'='skilled employee / official',
'A174'='management/ self-employed/highly qualified employee/ officer',
'A191'='none',
'A192'='yes, registered under the customers name',
'A201'='yes',
'A202'='no')
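#The mapping list above is defined but never applied; one hedged way to recode the
#coded columns with it (illustrative only; assumes every coded column was read in
#as character or factor, and is left commented out here):
#for(i in 1:ncol(d)) {
#  if(class(d[,i]) %in% c('character', 'factor')) {
#    d[,i] <- as.factor(as.character(mapping[as.character(d[,i])]))
#  }
#}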
summary(d$Personal.status.and.sex)
#There are more males who are either single, married or widowed, than there are females
#who fall under the same categories
summary(d$Other.debtors)
#According to these numbers, more people do not take out a loan for cars than those who
#are a guarantor or co-applicant
install.packages("hexbin")
library(hexbin)
help(package=hexbin)
names(custdata)
custdata2 <- subset(custdata,
(custdata$age > 0 & custdata$age < 100
& custdata$income > 0))
ggplot(custdata2, aes(x=age, y=income)) + geom_hex(binwidth=c(5, 100000))
#This, I think, better visualizes the data than the scatterplot. I think it makes it
#easier to visualize the number of people in each category over a bunch of dots on
#a graph
ggplot(custdata2) + geom_bar(aes(x=income, fill=num.vehicles)) + theme_bw()+ ggtitle("Income vs Number of Vehicles")
#I used a bar graph. I noticed that the higher the income, the more vehicles that
#person owns
ggplot(custdata) + geom_bar(aes(x=income.lt.30K, fill=recent.move), position="dodge") + theme_bw()+ ggtitle("Recent moves vs Incomes less than 30K")
#I used a bar graph for this, I noticed that the recent moves were fairly balanced
#between those who do and don't have an income less than 30K | /AustinDickerson_InclassAssignment2.R | no_license | abdickerson/CMDA | R | false | false | 7,775 | r | #Austin Dickerson In-class Assignment 2
#https://github.com/abdickerson/CMDA
setwd("C:\\Users\\Austin\\Desktop\\Virginia Tech\\CS 3654\\R")
load('exampleData.rData')
names(custdata)
#Individual summary statistics for each variable in dataset
summary(custdata$is.employed)
#is.employed has missing values, which most likely means that these people are not employed
summary(custdata$income)
#income has an outlier in that it has a negative value, which could mean that person
#is in debt, but it could mean multiple things, we are not really sure what it means
summary(custdata$age)
#age has an outlier in that the highest age is 146.7, and nobody lives to be that old
summary(custdata$state.of.res)
#state.of.res has an issue with units, you are left to assume that the units are
#the number of people living in each state
summary(custdata$custid)
#custid also has an issue with units, I have no idea what these numbers represent
summary(custdata$sex)
#sex has no issues with it, everything is straight forward, either they are M or F
summary(custdata$marital.stat)
#marital.stat also has nothing wrong with it, everything is categorized, and there
#are not any NA's or outliers
summary(custdata$health.ins)
#there also appears to be nothing wrong with heatlh.ins, there are no NA's or outliers
summary(custdata$housing.type)
#housing.type has NA's, which could mean they didn't fall into any of the categories
#but we are not sure
summary(custdata$recent.move)
#recent.move also has NA's, which might mean they don't have a home, but again we
#are not sure
summary(custdata$num.vehicles)
#num.vehicles also has NA's, which probably means they do not own a car
summary(custdata$is.employed.fix1)
#is.employed.fix1 has issues, but it is confusing, I am not sure what these
#categories mean
summary(custdata$age.normalized)
#age.normalized has outliers, in that it has negative values
summary(custdata$Median.Income)
#Median.Income doesn't seem to have any issues, everything seems normal
summary(custdata$income.norm)
#income.norm has outliers in that there are negative values as its lowest value
summary(custdata$gp)
#gp has issues with units, it doesn't say what these numbers represent
summary(custdata$income.lt.30k)
#income.lt.30k has issues, I am not too sure what these categories represent
summary(custdata$age.range)
#age.range doesn't seem to have any issues, everything seems to be normal
summary(custdata$Income)
#income has NA's, which could mean they did not report an income
#2
uciCar <- read.table( 'http://www.win-vector.com/dfiles/car.data.csv', sep=',', header=T)
names(uciCar)
#Observations about individual category summaries
summary(uciCar$buying)
#buying has the issue in that all the values are the same, meaning there might be
#only one value
summary(uciCar$maint)
summary(uciCar$doors)
summary(uciCar$persons)
summary(uciCar$lug_boot)
summary(uciCar$safety)
#All the above categories have the same issue where all the summary numbers are the
#same, which might mean there is only one value to summarize
summary(uciCar$rating)
#there appears to be nothing wrong with this category, everything seems to make
#sense
#3
d <- read.table(paste('http://archive.ics.uci.edu/ml/', 'machine-learning-databases/statlog/german/german.data', sep=''), header=T)
names(d) <- c('Status.of.existing.checking.account',
'Duration.in.month', 'Credit.history', 'Purpose',
'Credit.amount', 'Savings account/bonds',
'Present.employment.since','Installment.rate.in.percentage.of.disposable.income',
'Personal.status.and.sex', 'Other.debtors/guarantors',
'Present.residence.since', 'Property', 'Age.in.years',
'Other.installment.plans', 'Housing',
'Number.of.existing.credits.at.this.bank', 'Job',
'Number.of.people.being.liable.to.provide.maintenance.for',
'Telephone', 'foreign.worker', 'Good.Loan')
mapping <- list('A11'='... < 0 DM',
'A12'='0 <= ... < 200 DM',
'A13'='... >= 200 DM / salary assignments for at least 1 year',
'A14'='no checking account',
'A30'='no credits taken/all credits paid back duly',
'A31'='all credits at this bank paid back duly',
'A32'='existing credits paid back duly till now',
'A33'='delay in paying off in the past',
'A34'='critical account/other credits existing (not at this bank)',
'A40'='car (new)',
'A41'='car (used)',
'A42'='furniture/equipment',
'A43'='radio/television',
'A44'='domestic appliances',
'A45'='repairs',
'A46'='education',
'A47'='(vacation - does not exist?)',
'A48'='retraining',
'A49'='business',
'A410'='others',
'A61'='... < 100 DM',
'A62'='100 <= ... < 500 DM',
'A63'='500 <= ... < 1000 DM',
'A64'='.. >= 1000 DM',
'A65'='unknown/ no savings account',
'A71'='unemployed',
'A72'='... < 1 year',
'A73'='1 <= ... < 4 years',
'A74'='4 <= ... < 7 years',
'A75'='.. >= 7 years',
'A91'='male : divorced/separated',
'A92'='female : divorced/separated/married',
'A93'='male : single',
'A94'='male : married/widowed',
'A95'='female : single',
'A101'='none',
'A102'='co-applicant',
'A103'='guarantor',
'A121'='real estate',
'A122'='if not A121 : building society savings agreement/life insurance',
'A123'='if not A121/A122 : car or other, not in attribute 6',
'A124'='unknown / no property',
'A141'='bank',
'A142'='stores',
'A143'='none',
'A151'='rent',
'A152'='own',
'A153'='for free',
'A171'='unemployed/ unskilled - non-resident',
'A172'='unskilled - resident',
'A173'='skilled employee / official',
'A174'='management/ self-employed/highly qualified employee/ officer',
'A191'='none',
'A192'='yes, registered under the customers name',
'A201'='yes',
'A202'='no')
summary(d$Personal.status.and.sex)
#There are more males who are either single, married or widowed, than there are females
#who fall under the same categories
summary(d$Other.debtors)
#According to these numbers, more people do not take out a loan for cars than those who
#are a guarantor or co-applicant
install.packages("hexbin")
library(hexbin)
help(package=hexbin)
names(custdata)
custdata2 <- subset(custdata,
(custdata$age > 0 & custdata$age < 100
& custdata$income > 0))
ggplot(custdata2, aes(x=age, y=income)) + geom_hex(binwidth=c(5, 100000))
#This, I think, better visualizes the data than the scatterplot. I think it makes it
#easier to visualize the number of people in each category over a bunch of dots on
#a graph
ggplot(custdata2) + geom_bar(aes(x=income, fill=num.vehicles)) + theme_bw()+ ggtitle("Income vs Number of Vehicles")
#I used a bar graph. I noticed that the higher the income, the more vehicles that
#person owns
ggplot(custdata) + geom_bar(aes(x=income.lt.30K, fill=recent.move), position="dodge") + theme_bw()+ ggtitle("Recent moves vs Incomes less than 30K")
#I used a bar graph for this, I noticed that the recent moves were fairly balanced
#between those who do and don't have an income less than 30K |
library(ape)
testtree <- read.tree("7739_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7739_0_unrooted.txt") | /codeml_files/newick_trees_processed/7739_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
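# Optional sanity check (illustrative addition, not in the original test script):
# stopifnot(!is.rooted(unrooted_tr))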
testtree <- read.tree("7739_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7739_0_unrooted.txt") |
library(plsRglm)
### Name: kfolds2Mclassedind
### Title: Number of missclassified individuals per group for k-fold cross
### validated partial least squares regression models.
### Aliases: kfolds2Mclassedind
### Keywords: models regression
### ** Examples
## No test:
data(aze_compl)
Xaze_compl<-aze_compl[,2:34]
yaze_compl<-aze_compl$y
bbb <- cv.plsR(dataY=yaze_compl,dataX=Xaze_compl,nt=10,K=8,NK=1)
bbb2 <- cv.plsR(dataY=yaze_compl,dataX=Xaze_compl,nt=10,K=8,NK=2)
kfolds2Mclassedind(bbb)
kfolds2Mclassedind(bbb2)
rm(list=c("Xaze_compl","yaze_compl","bbb","bbb2"))
## End(No test)
| /data/genthat_extracted_code/plsRglm/examples/kfolds2Mclassedind.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 594 | r | library(plsRglm)
### Name: kfolds2Mclassedind
### Title: Number of missclassified individuals per group for k-fold cross
### validated partial least squares regression models.
### Aliases: kfolds2Mclassedind
### Keywords: models regression
### ** Examples
## No test:
data(aze_compl)
Xaze_compl<-aze_compl[,2:34]
yaze_compl<-aze_compl$y
bbb <- cv.plsR(dataY=yaze_compl,dataX=Xaze_compl,nt=10,K=8,NK=1)
bbb2 <- cv.plsR(dataY=yaze_compl,dataX=Xaze_compl,nt=10,K=8,NK=2)
kfolds2Mclassedind(bbb)
kfolds2Mclassedind(bbb2)
rm(list=c("Xaze_compl","yaze_compl","bbb","bbb2"))
## End(No test)
|
# This R script combines all the GeoJSON files in a folder into one file, then writes it back to the folder.
# Modified from its original source: https://gist.github.com/wildintellect/582bb1096092170070996d21037b82d8
# Version 2 rewritten to us sf which is way faster, comparison at the link above
library(sf)
library(dplyr)
library(rgdal)
library(lwgeom)
# probably want to change the pattern to exclude or filter after to drop the all.geojson file
avas <- list.files(path="./avas", pattern = "json$", full.names = TRUE)
tbd <- list.files(path="./tbd", pattern = "json$", full.names = TRUE)
gj <- c(avas, tbd)
# exclude the all.geojson file... probably a more elegant way to do this, but this works:
gj <- gj[gj != "./avas.geojson"]
gj <- gj[gj != "./tbd/avas.geojson"]
c <- Sys.time()
vectsf <- lapply(gj, read_sf)
#Bug, if date field has NA it's a char but valid dates are doubles, can't bind those
#Option convert after reading to char, or read as char to begin with
vectsf2 <- lapply(vectsf, function(d){
d$created <- as.character(d$created)
return(d)
})
allsf <- do.call(rbind, vectsf2)
allsf <- mutate_if(allsf, is.character, gsub, pattern="N/A", replacement=NA)
allsf$valid_end[allsf$valid_end=='']<-NA
allsf$area <- st_area(allsf)
allsf <- arrange(allsf,desc(area))
#write_sf(allsf, dsn="avas.geojson", driver="GeoJSON", delete_dsn=TRUE)
#geojson_write(allsf, file="avas-sf.geojson", overwrite=TRUE, convert_wgs84 = TRUE)
# Separate the current & historic AVAs ---------------------------------
current.avas<-allsf[which(is.na(allsf$valid_end)),]
write_sf(current.avas, dsn="avas.geojson", driver="GeoJSON", delete_dsn=TRUE)
historic.avas<-allsf[which(nchar(allsf$valid_end)>0),]
write_sf(historic.avas, dsn="avas_historic.geojson", driver="GeoJSON", delete_dsn=TRUE)
write_sf(allsf, dsn="avas_allboundaries.geojson", driver="GeoJSON", delete_dsn=TRUE)
d <- Sys.time()
d-c
| /rcode/mergeGeoJson.R | no_license | ninvotee/ava | R | false | false | 1,918 | r | # This R script combines all the GeoJSON files in a folder into one file, then writes it back to the folder.
# Modified from its original source: https://gist.github.com/wildintellect/582bb1096092170070996d21037b82d8
# Version 2 rewritten to us sf which is way faster, comparison at the link above
library(sf)
library(dplyr)
library(rgdal)
library(lwgeom)
# probably want to change the pattern to exclude or filter after to drop the all.geojson file
avas <- list.files(path="./avas", pattern = "json$", full.names = TRUE)
tbd <- list.files(path="./tbd", pattern = "json$", full.names = TRUE)
gj <- c(avas, tbd)
# exclude the all.geojson file... probably a more elegant way to do this, but this works:
gj <- gj[gj != "./avas.geojson"]
gj <- gj[gj != "./tbd/avas.geojson"]
c <- Sys.time()
vectsf <- lapply(gj, read_sf)
#Bug, if date field has NA it's a char but valid dates are doubles, can't bind those
#Option convert after reading to char, or read as char to begin with
vectsf2 <- lapply(vectsf, function(d){
d$created <- as.character(d$created)
return(d)
})
allsf <- do.call(rbind, vectsf2)
allsf <- mutate_if(allsf, is.character, gsub, pattern="N/A", replacement=NA)
allsf$valid_end[allsf$valid_end=='']<-NA
allsf$area <- st_area(allsf)
allsf <- arrange(allsf,desc(area))
#write_sf(allsf, dsn="avas.geojson", driver="GeoJSON", delete_dsn=TRUE)
#geojson_write(allsf, file="avas-sf.geojson", overwrite=TRUE, convert_wgs84 = TRUE)
# Separate the current & historic AVAs ---------------------------------
current.avas<-allsf[which(is.na(allsf$valid_end)),]
write_sf(current.avas, dsn="avas.geojson", driver="GeoJSON", delete_dsn=TRUE)
historic.avas<-allsf[which(nchar(allsf$valid_end)>0),]
write_sf(historic.avas, dsn="avas_historic.geojson", driver="GeoJSON", delete_dsn=TRUE)
write_sf(allsf, dsn="avas_allboundaries.geojson", driver="GeoJSON", delete_dsn=TRUE)
d <- Sys.time()
d-c
|
pacman::p_load(tidyverse, stringr, stringi, rio, ggridges)
dat <- import("http://scriptures.nephi.org/downloads/lds-scriptures.csv.zip")
pull(dat, scripture_text) %>% .[c(1,2)]
pull(dat, scripture_text) %>% .[c(1,2)] %>% stri_stats_latex()
# What is the average verse length (number of words) in the New Testament compared to the Book of Mormon?
dat_bm_nt <- dat %>%
filter(volume_short_title %in% c("NT", "BoM")) %>%
group_by(volume_short_title, verse_short_title) %>%
mutate(count = stri_stats_latex(scripture_text)["Words"]) %>%
ungroup() %>%
select(volume_short_title, verse_short_title, count, scripture_text, book_title)
dat_bm_nt %>%
group_by(volume_short_title) %>%
summarise(average = mean(count)) %>%
  knitr::kable()
dat_bm_nt %>%
ggplot(aes(x = count)) +
geom_histogram(bins = 45, color = "white") +
facet_wrap(~volume_short_title) +
theme_bw() +
labs(x = "Number of Words in Verse", y = "Number of Verses w/ Corresponding \nWord Count")
# How often is the word Jesus in the New Testament compared to the Book of Mormon?
dat_bm_nt %>%
mutate(jesus_count = str_count(scripture_text, "Jesus")) %>%
group_by(volume_short_title) %>%
summarize(count = sum(jesus_count))
dat_bm_nt %>%
mutate(jesus_count = str_count(scripture_text, "Jesus"))%>%
ggplot(aes(x = jesus_count)) +
geom_histogram(bins = 45, color = "white") +
facet_wrap(~volume_short_title) +
theme_bw() +
coord_cartesian(ylim = c(0, 25)) +
labs(x = "Number of Mentions of \"Jesus\"", y = "Number of Verses w/\nCorresponding Count")
# How does the word count distribution by verse look for each book in the Book of Mormon?
dat_bm_nt %>%
filter(volume_short_title == "BoM") %>%
ggplot(aes(y = count, x = fct_inorder(book_title) %>% fct_rev())) +
geom_jitter(height = 0, width = .25, color = "darkgrey") +
geom_boxplot(outlier.color = NA, fill = NA) +
theme_bw() +
labs(x = "Book in BoM", y = "Word Count") +
theme(axis.text.x = element_text(angle = 60, hjust = 1))
dat_bm_nt %>%
filter(volume_short_title == "BoM") %>%
ggplot(aes(x = count, y = fct_inorder(book_title) %>% fct_rev())) +
geom_density_ridges() +
theme_bw() +
labs(x = "Book in BoM", y = "Word Count") +
theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
scale_x_continuous(breaks = seq(0, 150, by = 20)) +
coord_cartesian(xlim = c(0, 100))
| /Case_Study_07/Class_Task_14/t14.R | no_license | critterwilson/MATH335-Coursework | R | false | false | 2,389 | r | pacman::p_load(tidyverse, stringr, stringi, rio, ggridges)
dat <- import("http://scriptures.nephi.org/downloads/lds-scriptures.csv.zip")
pull(dat, scripture_text) %>% .[c(1,2)]
pull(dat, scripture_text) %>% .[c(1,2)] %>% stri_stats_latex()
# What is the average verse length (number of words) in the New Testament compared to the Book of Mormon?
dat_bm_nt <- dat %>%
filter(volume_short_title %in% c("NT", "BoM")) %>%
group_by(volume_short_title, verse_short_title) %>%
mutate(count = stri_stats_latex(scripture_text)["Words"]) %>%
ungroup() %>%
select(volume_short_title, verse_short_title, count, scripture_text, book_title)
dat_bm_nt %>%
group_by(volume_short_title) %>%
summarise(average = mean(count)) %>%
  knitr::kable()
dat_bm_nt %>%
ggplot(aes(x = count)) +
geom_histogram(bins = 45, color = "white") +
facet_wrap(~volume_short_title) +
theme_bw() +
labs(x = "Number of Words in Verse", y = "Number of Verses w/ Corresponding \nWord Count")
# How often is the word Jesus in the New Testament compared to the Book of Mormon?
dat_bm_nt %>%
mutate(jesus_count = str_count(scripture_text, "Jesus")) %>%
group_by(volume_short_title) %>%
summarize(count = sum(jesus_count))
dat_bm_nt %>%
mutate(jesus_count = str_count(scripture_text, "Jesus"))%>%
ggplot(aes(x = jesus_count)) +
geom_histogram(bins = 45, color = "white") +
facet_wrap(~volume_short_title) +
theme_bw() +
coord_cartesian(ylim = c(0, 25)) +
labs(x = "Number of Mentions of \"Jesus\"", y = "Number of Verses w/\nCorresponding Count")
# How does the word count distribution by verse look for each book in the Book of Mormon?
dat_bm_nt %>%
filter(volume_short_title == "BoM") %>%
ggplot(aes(y = count, x = fct_inorder(book_title) %>% fct_rev())) +
geom_jitter(height = 0, width = .25, color = "darkgrey") +
geom_boxplot(outlier.color = NA, fill = NA) +
theme_bw() +
labs(x = "Book in BoM", y = "Word Count") +
theme(axis.text.x = element_text(angle = 60, hjust = 1))
dat_bm_nt %>%
filter(volume_short_title == "BoM") %>%
ggplot(aes(x = count, y = fct_inorder(book_title) %>% fct_rev())) +
geom_density_ridges() +
theme_bw() +
labs(x = "Book in BoM", y = "Word Count") +
theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
scale_x_continuous(breaks = seq(0, 150, by = 20)) +
coord_cartesian(xlim = c(0, 100))
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.7234814599948e+218, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615856683-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 171 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.7234814599948e+218, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g.R
\name{kg}
\alias{kg}
\title{kg}
\usage{
kg(x)
}
\arguments{
\item{x}{numeric}
}
\description{
kg
}
| /man/kg.Rd | no_license | t-arae/prtclmisc | R | false | true | 181 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g.R
\name{kg}
\alias{kg}
\title{kg}
\usage{
kg(x)
}
\arguments{
\item{x}{numeric}
}
\description{
kg
}
|
# This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/tests.html
# * https://testthat.r-lib.org/reference/test_package.html#special-files
library(testthat)
library(multiweb)
test_check("multiweb")
| /tests/testthat.R | permissive | lsaravia/multiweb | R | false | false | 376 | r | # This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/tests.html
# * https://testthat.r-lib.org/reference/test_package.html#special-files
library(testthat)
library(multiweb)
test_check("multiweb")
|
library(magrittr)
head(transformedData,5)
preprocessData = function(transformedData) {
transformedData = transformedData %>% na.omit()
uniqueServers = unique(transformedData$Serv)
uniqueLics = unique(transformedData$Lics)
uniqueCountries = unique(transformedData$Count)
uniqueContinents = unique(transformedData$Conti)
uniqueOS = unique(transformedData$OS)
minedDataFrame = data.frame(servers = c(transformedData$Serv),
lics = c(transformedData$Lics),
countries = c(transformedData$Count),
continents = c(transformedData$Conti),
OS = c(transformedData$OS),
stringsAsFactors = FALSE)
minedDataFrame$servers.num = as.numeric(factor(minedDataFrame$servers, levels = c(uniqueServers)))
minedDataFrame$lics.num = as.numeric(factor(minedDataFrame$lics, levels = c(uniqueLics)))
minedDataFrame$countries.num = as.numeric(factor(minedDataFrame$countries, levels = c(uniqueCountries)))
minedDataFrame$continents.num = as.numeric(factor(minedDataFrame$continents, levels = c(uniqueContinents)))
minedDataFrame$OS.num = as.numeric(factor(minedDataFrame$OS, levels = c(uniqueOS)))
return(minedDataFrame[, 6:10])
} | /AttributeSelectionWrappers/AttributeSelectionWrappers/DataNormalization/PreprocessData.R | no_license | marskr/MOWProject | R | false | false | 1,299 | r | library(magrittr)
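# Example call (illustrative; assumes a `transformedData` frame with the columns used above):
# numericData <- preprocessData(transformedData)
# head(numericData, 5) # the five *.num columns, ready for the attribute-selection wrappers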
head(transformedData,5)
preprocessData = function(transformedData) {
transformedData = transformedData %>% na.omit()
uniqueServers = unique(transformedData$Serv)
uniqueLics = unique(transformedData$Lics)
uniqueCountries = unique(transformedData$Count)
uniqueContinents = unique(transformedData$Conti)
uniqueOS = unique(transformedData$OS)
minedDataFrame = data.frame(servers = c(transformedData$Serv),
lics = c(transformedData$Lics),
countries = c(transformedData$Count),
continents = c(transformedData$Conti),
OS = c(transformedData$OS),
stringsAsFactors = FALSE)
minedDataFrame$servers.num = as.numeric(factor(minedDataFrame$servers, levels = c(uniqueServers)))
minedDataFrame$lics.num = as.numeric(factor(minedDataFrame$lics, levels = c(uniqueLics)))
minedDataFrame$countries.num = as.numeric(factor(minedDataFrame$countries, levels = c(uniqueCountries)))
minedDataFrame$continents.num = as.numeric(factor(minedDataFrame$continents, levels = c(uniqueContinents)))
minedDataFrame$OS.num = as.numeric(factor(minedDataFrame$OS, levels = c(uniqueOS)))
return(minedDataFrame[, 6:10])
} |
#rm(list=ls())
# Library ====
library(bigrquery)
library(DBI)
library(data.table)
library(lubridate)
library(ggplot2)
library(dplyr)
library(stargazer)
# Functions ====
lagpad <- function(x, k = 1) {
c(rep(NA, k), x)[1 : length(x)]
}
theta_thres <- function(net_flow, w_max){
apply(net_flow, 1, function(x) max(x,(1/w_max)))
}
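# Brief illustration of the two helpers above (added for clarity; values are easy to verify):
# lagpad() shifts a series forward by k positions and pads the front with NA:
# lagpad(1:5) # NA 1 2 3 4
# theta_thres() floors each row-wise value of net_flow at 1/w_max:
# theta_thres(matrix(c(0.2, 0.05), ncol = 1), w_max = 10) # 0.20 0.10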
# Connection to BigQuery ====
Sys.setenv(BIGQUERY_TEST_PROJECT="bgse-dsc")
billing <- bq_test_project()
con <- dbConnect(
bigrquery::bigquery(),
project = "bgse-dsc",
dataset = "MIMIC3_V1_4",
billing = billing
)
# Specify shift and maximum waiting time length ====
total_hours <- 24
splits <- 4
shift_selection <- "CASE \n" # shift for charttime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection <- paste0(shift_selection,
"WHEN EXTRACT(HOUR FROM CHARTTIME_COLLAPSED) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection <- paste0(shift_selection, "END")
shift_selection_2 <- "CASE \n" # shift for intime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection_2 <- paste0(shift_selection_2,
"WHEN EXTRACT(HOUR FROM ANY_VALUE(INTIME_TRANS_COLLAPSED)) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection_2 <- paste0(shift_selection_2, "END")
shift_selection_3 <- "CASE \n" # shift for outtime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection_3 <- paste0(shift_selection_3,
"WHEN EXTRACT(HOUR FROM ANY_VALUE(OUTTIME_TRANS_COLLAPSED)) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection_3 <- paste0(shift_selection_3, "END")
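# For reference (illustrative): with total_hours = 24 and splits = 4, each of the three
# CASE strings built above expands to six branches of the form
#   WHEN EXTRACT(HOUR FROM <time column>) < 4 THEN 1
#   WHEN EXTRACT(HOUR FROM <time column>) < 8 THEN 2
#   ...
#   WHEN EXTRACT(HOUR FROM <time column>) < 24 THEN 6
# END
# i.e. every calendar day is split into six 4-hour shifts.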
# Build date hour framework ====
dates_sql <- paste("SELECT
MIN(EXTRACT(DATE FROM CHARTTIME_COLLAPSED)) AS min_date,
MAX(EXTRACT(DATE FROM CHARTTIME_COLLAPSED)) AS max_date
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`")
dates <- as.data.frame(dbGetQuery(con, dates_sql))
units_sql <- paste("SELECT
DISTINCT CURR_CAREUNIT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`")
units <- as.data.frame(dbGetQuery(con, units_sql))$CURR_CAREUNIT
frame <- as.data.frame(rep(seq(as.Date(dates[1,1]),as.Date(dates[1,2]), by="day"),
each=(6 * total_hours / splits )))
frame$CHART_SHIFT <- rep(1:(total_hours / splits),each=6)
frame$CURR_UNIT <- rep(units,(total_hours / splits))
colnames(frame)[1] <- "CHART_DATE"
# Download staff data ====
staff_sql <- paste("
SELECT * FROM((
SELECT
EXTRACT(DATE FROM CHARTTIME_COLLAPSED) AS CHART_DATE, ",
shift_selection, "AS CHART_SHIFT,
CURR_CAREUNIT AS CURR_UNIT,
CGID
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY CHART_DATE, CHART_SHIFT, CURR_UNIT, CGID) AS L
LEFT JOIN (
SELECT CGID, DESCRIPTION FROM `MIMIC3_V1_4.CAREGIVERS` ) AS R
USING(CGID))
ORDER BY CHART_DATE, CHART_SHIFT
")
staff <- dbGetQuery(con, staff_sql)
staff <- as.data.table(staff)
staff[is.na(DESCRIPTION), DESCRIPTION := 'Unknown']
staff <- staff[,.("Nurse"=sum(DESCRIPTION=="RN"),
"Respiratory"=sum(DESCRIPTION=="Respiratory"),
"IMD"=sum(DESCRIPTION=="IMD"),
"PCT_NA"=sum(DESCRIPTION=="PCT/NA"),
"Resident"=sum(DESCRIPTION=="Resident/Fellow/PA/NP"),
"Unknown"=sum(DESCRIPTION=="Unknown"),
"Others"=sum(DESCRIPTION %in% c("Pharmacist", "UCO", "Case Manager",
"Pastoral Care", "Social Worker",
"Attending", "Rehabilitation",
"Read Only","Research Assistant",
"Dietitian"))
),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
colnames(staff) <- c(colnames(staff)[1:3], paste0("STAFF_", colnames(staff)
[4:length(colnames(staff))]))
# Skeleton
skeleton_sql <- paste("
SELECT
EXTRACT(DATE FROM CHARTTIME_COLLAPSED) AS CHART_DATE, ",
shift_selection, "AS CHART_SHIFT,
CURR_CAREUNIT AS CURR_UNIT,
COUNT(DISTINCT CGID) AS STAFF,
AVG(LOS_TRANS) AS AVG_LOS
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY CHART_DATE, CHART_SHIFT, CURR_UNIT
ORDER BY CHART_DATE, CHART_SHIFT
")
skeleton <- dbGetQuery(con, skeleton_sql)
skeleton <- as.data.table(skeleton)
# Inflow data ====
inflow_sql <- paste("
SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS CURR_UNIT,
IFNULL(ANY_VALUE(PREV_CAREUNIT), ", "'OUT'" , ") AS PREV_CAREUNIT,
EXTRACT(DATE FROM ANY_VALUE(INTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_2, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, CURR_UNIT
ORDER BY CHART_DATE, CHART_SHIFT
")
inflow <- dbGetQuery(con, inflow_sql)
inflow <- as.data.table(inflow)
detailed_inflow <- inflow[,.("from_OUT"=sum(PREV_CAREUNIT=="OUT"),
"from_NWARD"=sum(PREV_CAREUNIT=="NWARD"),
"from_NICU"=sum(PREV_CAREUNIT=="NICU"),
"from_MICU"=sum(PREV_CAREUNIT=="MICU"),
"from_TSICU"=sum(PREV_CAREUNIT=="TSICU"),
"from_CSRU"=sum(PREV_CAREUNIT=="CSRU"),
"from_SICU"=sum(PREV_CAREUNIT=="SICU"),
"from_CCU"=sum(PREV_CAREUNIT=="CCU")),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
# Outflow data ====
outflow_sql <- paste("
SELECT ICUSTAY_ID, CHART_DATE, CHART_SHIFT, CURR_UNIT, IFNULL(NEXT_CAREUNIT,'OUT') AS NEXT_CAREUNIT
FROM
( SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS CURR_UNIT,
ANY_VALUE(EXTRACT(DATE FROM OUTTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_3, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, CURR_UNIT) AS L
LEFT JOIN
( SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS NEXT_CAREUNIT,
ANY_VALUE(EXTRACT(DATE FROM INTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_2, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, NEXT_CAREUNIT) AS R
USING(ICUSTAY_ID, CHART_DATE, CHART_SHIFT)
ORDER BY CHART_DATE, CHART_SHIFT
")
outflow <- dbGetQuery(con, outflow_sql)
outflow <- as.data.table(outflow)
detailed_outflow <- outflow[,.("to_OUT"=sum(NEXT_CAREUNIT=="OUT"),
"to_NWARD"=sum(NEXT_CAREUNIT=="NWARD"),
"to_NICU"=sum(NEXT_CAREUNIT=="NICU"),
"to_MICU"=sum(NEXT_CAREUNIT=="MICU"),
"to_TSICU"=sum(NEXT_CAREUNIT=="TSICU"),
"to_CSRU"=sum(NEXT_CAREUNIT=="CSRU"),
"to_SICU"=sum(NEXT_CAREUNIT=="SICU"),
"to_CCU"=sum(NEXT_CAREUNIT=="CCU")),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
# Patient data ====
patients_sql <- paste("
SELECT
CHART_DATE, CHART_SHIFT, CURR_UNIT, PATIENTS
FROM `bgse-dsc.MIMIC3_V1_4.PATIENTS_DATA`
ORDER BY CHART_DATE, CHART_SHIFT
")
patients <- dbGetQuery(con, patients_sql)
patients <- as.data.table(patients)
# Merge data ====
flow_data <- frame %>%
left_join(y = staff, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = detailed_inflow, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = detailed_outflow, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = patients, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
as.data.table()
# Cleaning dataset ====
in_cols <- c("from_OUT", "from_NWARD", "from_NICU", "from_MICU",
"from_TSICU", "from_CSRU", "from_SICU", "from_CCU")
out_cols <- c("to_OUT", "to_NWARD", "to_NICU", "to_MICU", "to_TSICU",
"to_CSRU", "to_SICU", "to_CCU")
staff_cols <- c("STAFF_Nurse", "STAFF_Respiratory", "STAFF_IMD", "STAFF_PCT_NA",
"STAFF_Resident", "STAFF_Others")
relabel_cols <- c(in_cols, out_cols, staff_cols)
flow_data <- as.data.frame(flow_data)
flow_data[relabel_cols][is.na(flow_data[relabel_cols])] <- 0
flow_data["INFLOW"] <- rowSums(flow_data[in_cols])
flow_data["OUTFLOW"] <- rowSums(flow_data[out_cols])
flow_data["STAFF_TOTAL"] <- rowSums(flow_data[staff_cols])
flow_data <- as.data.table(flow_data)
# Deleting NICU and NWARD ====
see <- flow_data
flow_data <- flow_data[flow_data$CURR_UNIT != "NICU",]
flow_data %>% select(-contains('NICU')) %>% select(-contains('NWARD')) -> flow_data
# Save clean dataset ====
save(flow_data, file="model-building/data/clean_dataset.RData")
| /model-building/data/final_data_retrieval.R | no_license | aleixrvr/bayesian-hospital | R | false | false | 9,272 | r | #rm(list=ls())
# Library ====
library(bigrquery)
library(DBI)
library(data.table)
library(lubridate)
library(ggplot2)
library(dplyr)
library(stargazer)
# Functions ====
lagpad <- function(x, k = 1) {
c(rep(NA, k), x)[1 : length(x)]
}
theta_thres <- function(net_flow, w_max){
apply(net_flow, 1, function(x) max(x,(1/w_max)))
}
# Connection to BigQuery ====
Sys.setenv(BIGQUERY_TEST_PROJECT="bgse-dsc")
billing <- bq_test_project()
con <- dbConnect(
bigrquery::bigquery(),
project = "bgse-dsc",
dataset = "MIMIC3_V1_4",
billing = billing
)
# Specify shift and maximum waiting time length ====
total_hours <- 24
splits <- 4
shift_selection <- "CASE \n" # shift for charttime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection <- paste0(shift_selection,
"WHEN EXTRACT(HOUR FROM CHARTTIME_COLLAPSED) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection <- paste0(shift_selection, "END")
shift_selection_2 <- "CASE \n" # shift for intime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection_2 <- paste0(shift_selection_2,
"WHEN EXTRACT(HOUR FROM ANY_VALUE(INTIME_TRANS_COLLAPSED)) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection_2 <- paste0(shift_selection_2, "END")
shift_selection_3 <- "CASE \n" # shift for outtime
pos <- 1
for( time in seq(splits, total_hours, splits)){
shift_selection_3 <- paste0(shift_selection_3,
"WHEN EXTRACT(HOUR FROM ANY_VALUE(OUTTIME_TRANS_COLLAPSED)) < ",
time, " THEN ", pos, "\n")
pos <- pos + 1
}
shift_selection_3 <- paste0(shift_selection_3, "END")
# Build date hour framework ====
dates_sql <- paste("SELECT
MIN(EXTRACT(DATE FROM CHARTTIME_COLLAPSED)) AS min_date,
MAX(EXTRACT(DATE FROM CHARTTIME_COLLAPSED)) AS max_date
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`")
dates <- as.data.frame(dbGetQuery(con, dates_sql))
units_sql <- paste("SELECT
DISTINCT CURR_CAREUNIT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`")
units <- as.data.frame(dbGetQuery(con, units_sql))$CURR_CAREUNIT
frame <- as.data.frame(rep(seq(as.Date(dates[1,1]),as.Date(dates[1,2]), by="day"),
each=(6 * total_hours / splits )))
frame$CHART_SHIFT <- rep(1:(total_hours / splits),each=6)
frame$CURR_UNIT <- rep(units,(total_hours / splits))
colnames(frame)[1] <- "CHART_DATE"
# Download staff data ====
staff_sql <- paste("
SELECT * FROM((
SELECT
EXTRACT(DATE FROM CHARTTIME_COLLAPSED) AS CHART_DATE, ",
shift_selection, "AS CHART_SHIFT,
CURR_CAREUNIT AS CURR_UNIT,
CGID
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY CHART_DATE, CHART_SHIFT, CURR_UNIT, CGID) AS L
LEFT JOIN (
SELECT CGID, DESCRIPTION FROM `MIMIC3_V1_4.CAREGIVERS` ) AS R
USING(CGID))
ORDER BY CHART_DATE, CHART_SHIFT
")
staff <- dbGetQuery(con, staff_sql)
staff <- as.data.table(staff)
staff[is.na(DESCRIPTION), DESCRIPTION := 'Unknown']
staff <- staff[,.("Nurse"=sum(DESCRIPTION=="RN"),
"Respiratory"=sum(DESCRIPTION=="Respiratory"),
"IMD"=sum(DESCRIPTION=="IMD"),
"PCT_NA"=sum(DESCRIPTION=="PCT/NA"),
"Resident"=sum(DESCRIPTION=="Resident/Fellow/PA/NP"),
"Unknown"=sum(DESCRIPTION=="Unknown"),
"Others"=sum(DESCRIPTION %in% c("Pharmacist", "UCO", "Case Manager",
"Pastoral Care", "Social Worker",
"Attending", "Rehabilitation",
"Read Only","Research Assistant",
"Dietitian"))
),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
colnames(staff) <- c(colnames(staff)[1:3], paste0("STAFF_", colnames(staff)
[4:length(colnames(staff))]))
# Skeleton
skeleton_sql <- paste("
SELECT
EXTRACT(DATE FROM CHARTTIME_COLLAPSED) AS CHART_DATE, ",
shift_selection, "AS CHART_SHIFT,
CURR_CAREUNIT AS CURR_UNIT,
COUNT(DISTINCT CGID) AS STAFF,
AVG(LOS_TRANS) AS AVG_LOS
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY CHART_DATE, CHART_SHIFT, CURR_UNIT
ORDER BY CHART_DATE, CHART_SHIFT
")
skeleton <- dbGetQuery(con, skeleton_sql)
skeleton <- as.data.table(skeleton)
# Inflow data ====
inflow_sql <- paste("
SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS CURR_UNIT,
IFNULL(ANY_VALUE(PREV_CAREUNIT), ", "'OUT'" , ") AS PREV_CAREUNIT,
EXTRACT(DATE FROM ANY_VALUE(INTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_2, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, CURR_UNIT
ORDER BY CHART_DATE, CHART_SHIFT
")
inflow <- dbGetQuery(con, inflow_sql)
inflow <- as.data.table(inflow)
detailed_inflow <- inflow[,.("from_OUT"=sum(PREV_CAREUNIT=="OUT"),
"from_NWARD"=sum(PREV_CAREUNIT=="NWARD"),
"from_NICU"=sum(PREV_CAREUNIT=="NICU"),
"from_MICU"=sum(PREV_CAREUNIT=="MICU"),
"from_TSICU"=sum(PREV_CAREUNIT=="TSICU"),
"from_CSRU"=sum(PREV_CAREUNIT=="CSRU"),
"from_SICU"=sum(PREV_CAREUNIT=="SICU"),
"from_CCU"=sum(PREV_CAREUNIT=="CCU")),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
# Outflow data ====
outflow_sql <- paste("
SELECT ICUSTAY_ID, CHART_DATE, CHART_SHIFT, CURR_UNIT, IFNULL(NEXT_CAREUNIT,'OUT') AS NEXT_CAREUNIT
FROM
( SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS CURR_UNIT,
ANY_VALUE(EXTRACT(DATE FROM OUTTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_3, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, CURR_UNIT) AS L
LEFT JOIN
( SELECT
ICUSTAY_ID,
CURR_CAREUNIT AS NEXT_CAREUNIT,
ANY_VALUE(EXTRACT(DATE FROM INTIME_TRANS_COLLAPSED)) AS CHART_DATE,",
shift_selection_2, " AS CHART_SHIFT
FROM `MIMIC3_V1_4.CHARTEVENTS_DEPTS_CATS_TS_COLLAPSED_FINAL`
GROUP BY ICUSTAY_ID, NEXT_CAREUNIT) AS R
USING(ICUSTAY_ID, CHART_DATE, CHART_SHIFT)
ORDER BY CHART_DATE, CHART_SHIFT
")
outflow <- dbGetQuery(con, outflow_sql)
outflow <- as.data.table(outflow)
detailed_outflow <- outflow[,.("to_OUT"=sum(NEXT_CAREUNIT=="OUT"),
"to_NWARD"=sum(NEXT_CAREUNIT=="NWARD"),
"to_NICU"=sum(NEXT_CAREUNIT=="NICU"),
"to_MICU"=sum(NEXT_CAREUNIT=="MICU"),
"to_TSICU"=sum(NEXT_CAREUNIT=="TSICU"),
"to_CSRU"=sum(NEXT_CAREUNIT=="CSRU"),
"to_SICU"=sum(NEXT_CAREUNIT=="SICU"),
"to_CCU"=sum(NEXT_CAREUNIT=="CCU")),
by=list(CHART_DATE, CHART_SHIFT,CURR_UNIT)]
# Patient data ====
patients_sql <- paste("
SELECT
CHART_DATE, CHART_SHIFT, CURR_UNIT, PATIENTS
FROM `bgse-dsc.MIMIC3_V1_4.PATIENTS_DATA`
ORDER BY CHART_DATE, CHART_SHIFT
")
patients <- dbGetQuery(con, patients_sql)
patients <- as.data.table(patients)
# Merge data ====
flow_data <- frame %>%
left_join(y = staff, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = detailed_inflow, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = detailed_outflow, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
left_join(y = patients, by=c("CHART_DATE", "CHART_SHIFT", "CURR_UNIT")) %>%
as.data.table()
# Cleaning dataset ====
in_cols <- c("from_OUT", "from_NWARD", "from_NICU", "from_MICU",
"from_TSICU", "from_CSRU", "from_SICU", "from_CCU")
out_cols <- c("to_OUT", "to_NWARD", "to_NICU", "to_MICU", "to_TSICU",
"to_CSRU", "to_SICU", "to_CCU")
staff_cols <- c("STAFF_Nurse", "STAFF_Respiratory", "STAFF_IMD", "STAFF_PCT_NA",
"STAFF_Resident", "STAFF_Others")
relabel_cols <- c(in_cols, out_cols, staff_cols)
flow_data <- as.data.frame(flow_data)
flow_data[relabel_cols][is.na(flow_data[relabel_cols])] <- 0
flow_data["INFLOW"] <- rowSums(flow_data[in_cols])
flow_data["OUTFLOW"] <- rowSums(flow_data[out_cols])
flow_data["STAFF_TOTAL"] <- rowSums(flow_data[staff_cols])
flow_data <- as.data.table(flow_data)
# Deleting NICU and NWARD ====
see <- flow_data
flow_data <- flow_data[flow_data$CURR_UNIT != "NICU",]
flow_data %>% select(-contains('NICU')) %>% select(-contains('NWARD')) -> flow_data
# Save clean dataset ====
save(flow_data, file="model-building/data/clean_dataset.RData")
|
################################################################################
#' Analyze power band by episodes
#' @description
#' Analyzes the ULF, VLF, LF and HF bands from a given indexFreqAnalysis allowing
#' to evaluate the application of a desired function inside and outside each episode.
#' @param HRVData Data structure that stores the beats register and information related to it.
#' @param indexFreqAnalysis Integer value denoting which frequency analysis is going to be analyzed using func. Default: 1
#' @param Tag Type of episode
#' @param verbose Deprecated argument maintained for compatibility, use SetVerbose() instead
#' @param func Function to be applied to each power band inside and outside episodes
#' @param ... Optional arguments for func.
#' @return Returns a list with two objects, that is, the values of the application of the selected function
#' inside ("resultIn") and outside ("resultOut") episodes in the given indexFreqAnalysis. Each of these
#' lists has another set of lists: the "ULF", "VLF", "LF" and "HF" lists.
#' @examples
#' \dontrun{
#' hrv.data = CreateHRVData()
#' hrv.data = SetVerbose(hrv.data, TRUE)
#' hrv.data = LoadBeat(hrv.data, fileType = "WFDB", "a03", RecordPath ="beatsFolder/",
#' annotator = "qrs")
#' hrv.data = LoadApneaWFDB(hrv.data, RecordName="a03",Tag="Apnea",
#' RecordPath="beatsFolder/")
#' hrv.data = BuildNIHR(hrv.data)
#' hrv.data = InterpolateNIHR (hrv.data, freqhr = 4)
#' hrv.data = CreateFreqAnalysis(hrv.data)
#' hrv.data = CalculatePowerBand( hrv.data , indexFreqAnalysis= 1,
#' type = "wavelet", wavelet = "la8",
#' bandtolerance = 0.01, relative = FALSE)
#' results = AnalyzePowerBandsByEpisodes(hrv.data,indexFreqAnalysis=1,
#' Tag="Apnea",func=mean)}
AnalyzePowerBandsByEpisodes = function(HRVData, indexFreqAnalysis = length(HRVData$FreqAnalysis), Tag="", verbose=NULL,func, ...) {
# ----------------------------------------------
# Analyzes PowerBands using Episodes information
# ----------------------------------------------
# indexFreqAnalysis -> which frequency analysis is going to be analyzed using func
# Tag -> specifies tag of episodes
# func -> function to apply
# ... -> additional arguments for func
# Returns a list with two objects result
funcToApply = match.fun(func)
nameFunc = deparse(substitute(func))
#check if indexFreqAnalysis exists
if ((length(HRVData$FreqAnalysis) < indexFreqAnalysis) || (indexFreqAnalysis<1) ) {
stop(" --- Frequency analysis no. ",indexFreqAnalysis," not present!! ---\n --- Quitting now!! ---\n")
}
if (!is.null(verbose)) {
cat(" --- Warning: deprecated argument, using SetVerbose() instead ---\n --- See help for more information!! ---\n")
SetVerbose(HRVData,verbose)
}
if (HRVData$Verbose) {
cat("** Applying function to power bands in frequency analysis" ,indexFreqAnalysis," using episodic information **\n");
cat(" Function: ",nameFunc,"()\n",sep="")
}
if (is.null(HRVData$Episodes)) {
stop(" --- Episodes not present\n --- Quitting now!! ---\n")
}
if (HRVData$Verbose) {
if (Tag=="") {
cat(" No tag was specified\n")
} else {
cat(" Using episodes with tag:",Tag,"\n")
}
}
episodicInformation = SplitPowerBandByEpisodes(HRVData,
indexFreqAnalysis = indexFreqAnalysis,
Tag = Tag)
bandNames = names(episodicInformation$InEpisodes)
resultIn = list()
resultOut = list()
for (band in bandNames){
resultIn[[band]] = funcToApply(episodicInformation$InEpisodes[[band]], ...)
resultOut[[band]] = funcToApply(episodicInformation$OutEpisodes[[band]], ...)
}
result=list(resultIn=resultIn,resultOut=resultOut)
return(result)
}
 | /RHRV/R/AnalyzePowerBandsByEpisodes.R | no_license | ingted/R-Examples | R | false | false | 4,125 | r |
|
library(Rcpp);
sourceCpp("impute.cpp");
x <- as.integer(c(1, 2, 5, 7, 9));
y <- c(1, 1, 2, 2, 3);
xx <- 2:10;
yy <- c(1, 1.5, 1.5, 2, 2, 2, 2.5, 3, 3);
yy_hat <- impute_nei(x, y, xx);
stopifnot(yy == yy_hat);
xx <- 0:5;
yy <- c(1, 1, 1, 1.5, 1.5, 2);
yy_hat <- impute_nei(x, y, xx);
stopifnot(yy == yy_hat);
 | /impute.R | no_license | djhshih/impute | R | false | false | 315 | r |
|
install.packages("plot3D")
library(plot3D)
# Exercise 1
n=10000
par(mfrow=c(2,2))
ftilde=function(x){
ifelse(x[1]<=4 & x[1]>=0 & x[2]>=0 & x[2]<=2, (cos(x[1])**2+2*(sin(x[2])**2)*cos(x[1])**4)/(1+4*(x[2]-1)**2)*exp(-0.5*(x[1]-2)**2),0)
}
# First rejection sampler: bound the target with a Normal(2,1) in x and a Uniform(0,2) in y
# normalising constant
M=6*sqrt(2*pi)
# instrumental (proposal) density
g=function(x){
return(dnorm(x[1],2,1)*dunif(x[2],0,2))
}
# rejection algorithm
rejet1=function(n){
  solution=matrix(numeric(2*n), ncol=2) # n-by-2 matrix of zeros; each row will hold an accepted (x,y) pair at the end of the rejection step
for (i in 1:n){
u=runif(1,0,1)
m=c(rnorm(1,2,1),runif(1,0,2))
beta=ftilde(m)/(M*g(m))
while (u>beta){
u=runif(1,0,1)
m=c(rnorm(1,2,1),runif(1,0,2))
beta=ftilde(m)/(M*g(m))
}
solution[i,]=m
}
return(solution)
}
# plot the vectors in 2D because the 3D package could not be installed
testrejet1=rejet1(n)
plot(testrejet1, col="red")
# proportion of x between 0 and 4 (both coordinates looked fairly well spread); hist or barplot??
hist(testrejet1[,1], col='red', main='1er rejet', xlab="réalisation des X", ylab='')
# Second rejection sampler: Normal(2,1) in x and a Cauchy(1,0.5) in y
# normalising constant
M2=1.5*pi*sqrt(2*pi)
# instrumental (proposal) density
gbis=function(x){
return(dnorm(x[1],2,1)*dcauchy(x[2],1,0.5))
}
# rejection algorithm
rejet_bis=function(n){
solution_bis=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
u=runif(1,0,1)
m=c(rnorm(1,2,1),rcauchy(1,1,0.5))
beta=ftilde(m)/(M2*gbis(m))
while (u>beta){
u=runif(1,0,1)
m=c(rnorm(1,2,1),rcauchy(1,1,0.5))
beta=ftilde(m)/(M2*gbis(m))
}
solution_bis[i,]=m
}
return(solution_bis)
}
# plot the vectors in 2D because the 3D package could not be installed
testrejet_bis=rejet_bis(n)
# Third rejection sampler: Uniform(0,4) for x and Uniform(0,2) for y
# normalising constant
M3=24
# instrumental (proposal) density
gterce=function(x){
return(dunif(x[1],0,4)*dunif(x[2],0,2))
}
# rejection function
rejet_terce=function(n){
solution_terce=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
u=runif(1,0,1)
m=c(runif(1,0,4),runif(1,0,2))
    beta=ftilde(m)/(M3*gterce(m))
while (u>beta){
u=runif(1,0,1)
m=c(runif(1,0,4),runif(1,0,2))
      beta=ftilde(m)/(M3*gterce(m))
}
solution_terce[i,]=m
}
return(solution_terce)
}
# plot the vectors in 2D because the 3D package could not be installed
testrejet_terce=rejet_terce(n)
points(testrejet_terce, col="blue")
## proportion of x between 0 and 4 (both coordinates looked fairly well spread)
hist(testrejet_terce[,1], col='blue', main='rejet 3', xlab="réalisation des X", ylab='')
# Fourth rejection sampler
# normalising constant
M4=6*pi
# instrumental (proposal) density
gquatre=function(x){
return(dunif(x[1],0,4)*dcauchy(x[2],1,0.5))
}
# rejection algorithm
rejetquatre=function(n){
solution_quatre=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
u=runif(1,0,1)
m=c(runif(1,0,4),rcauchy(1,1,0.5))
beta=ftilde(m)/(M4*gquatre(m))
while (u>beta){
u=runif(1,0,1)
m=c(runif(1,0,4),rcauchy(1,1,0.5))
beta=ftilde(m)/(M4*gquatre(m))
}
solution_quatre[i,]=m
}
return(solution_quatre)
}
# plot the vectors in 2D because the 3D package could not be installed
testrejet_quatre=rejetquatre(n)
points(testrejet_quatre, col="grey")
## proportion of x between 0 and 4 (both coordinates looked fairly well spread)
hist(testrejet_quatre[,1], col='grey', main='rejet 4', xlab="réalisation des X", ylab='')
# Metropolis-Hastings algorithm, question 4
# explain that the denominator never vanishes and that the support of f is contained in that of g.
alpha=function(xt,eps){
bob=ftilde(eps)*g(xt)/(ftilde(xt)*g(eps))
return(min(bob,1))
}
# which values should x0 take, and why?
# should epsilon vary over time?
MH1=function(n,x0=c(0,0)){
x0=as.vector(x0)
X=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
eps=c(rnorm(1,2,1),runif(1,0,2))
p=alpha(x0,eps)
u=rbinom(1,1,p)
X[i,]=eps*u+(1-u)*x0
x0=X[i,]
}
return(X)
}
##############################
alphabis=function(xt,eps){
  bob=ftilde(eps)*gbis(xt)/(ftilde(xt)*gbis(eps)) # by direct calculation, using f or ftilde is equivalent since f(xt)/f(eps) = ftilde(xt)/ftilde(eps)
return(min(bob,1))
}
# which values should x0 take, and why?
# should epsilon vary over time?
MHbis=function(n,x0=c(0,0)){
x0=as.vector(x0)
X=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
eps=c(rnorm(1,2,1),rcauchy(1,1,0.5))
p=alphabis(x0,eps)
u=rbinom(1,1,p)
X[i,]=eps*u+(1-u)*x0
x0=X[i,]
}
return(X)
}
##############################
alphaterce=function(xt,eps){
  bob=ftilde(eps)*gterce(xt)/(ftilde(xt)*gterce(eps)) # by direct calculation, using f or ftilde is equivalent since f(xt)/f(eps) = ftilde(xt)/ftilde(eps)
return(min(bob,1))
}
# which values should x0 take, and why?
# should epsilon vary over time?
MHterce=function(n,x0=c(0,0)){
x0=as.vector(x0)
X=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
eps=c(runif(1,0,4),runif(1,0,2))
p=alphaterce(x0,eps)
u=rbinom(1,1,p)
X[i,]=eps*u+(1-u)*x0
x0=X[i,]
}
return(X)
}
###########################
alphaquatre=function(xt,eps){
  bob=ftilde(eps)*gquatre(xt)/(ftilde(xt)*gquatre(eps)) # by direct calculation, using f or ftilde is equivalent since f(xt)/f(eps) = ftilde(xt)/ftilde(eps)
return(min(bob,1))
}
# which values should x0 take, and why?
# should epsilon vary over time?
MHquatre=function(n,x0=c(0,0)){
x0=as.vector(x0)
X=matrix(numeric(2*n), ncol=2)
for (i in 1:n){
eps=c(runif(1,0,4),rcauchy(1,1,0.5))
p=alphaquatre(x0,eps)
u=rbinom(1,1,p)
X[i,]=eps*u+(1-u)*x0
x0=X[i,]
}
return(X)
}
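# Illustrative usage (added; not part of the original script): run one of the
# Metropolis-Hastings samplers and inspect the resulting chain. The starting
# point c(0,0) and the chain length n are arbitrary choices.
testMH1=MH1(n)
plot(testMH1, col="purple", xlab="x", ylab="y", main="MH sampler, proposal 1")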
##################################################################################
# reset the plotting page.
# Exercise 2, part 1, question 1 a)
# estimation of delta by plain Monte Carlo
n=10000
# helper function for the MC estimator
indicatrice=function(x,t=2){
ifelse(x>=t,1,0)
}
# estimator
W=indicatrice(rweibull(n,2))
deltaMC=mean(W)
# convergence plot for the estimator
plot(cumsum(W)/(1:length(W)),type='l', col='blue', xlab="n", ylab="y", ylim=c(0,0.022), main="convergence de l'estimateur")
abline(h=1-pweibull(2,2), col='red')
legend("bottomright", legend=c("estimateur", "valeur exacte"), col=c('blue','red'), lty=c(1), lwd=c(3), title="courbes", bg='aliceblue')
# confidence interval
ICdeltaMC=c(deltaMC-qnorm(0.975)*sqrt(var(W)/n),deltaMC+qnorm(0.975)*sqrt(var(W)/n))
# stratification
# build a function returning a matrix: row 1 holds the sums of h(xl), row 2 the variance of each stratum sample
Strat=function(n,L){
if (n%%L!=0){
return("Impossible, L doit diviser n")
}
a=seq(0,1,length.out=L)
b=qweibull(a,2)
premieresomme=c()
variance=c()
mat=matrix(numeric(2*L), nrow=2)
u=runif(n/L,0,1)
for (l in 1:(L-1)){
xl=qweibull((l-1)/L+u/L,2)
hxl=indicatrice(xl)
mat[,l]=c(sum(hxl), var(hxl))
}
return(mat)
}
# application with n=10000 and L=1000
echantillonstrat=Strat(10000,1000)
# take the sums and build the samples
ff=echantillonstrat[1,]
estistrat=(1/10000)*sum(ff)
# take the variances and build the confidence interval
variance=mean(echantillonstrat[2,])
ICstrat=c(estistrat-qnorm(0.975)*sqrt(variance/10000),estistrat+qnorm(0.975)*sqrt(variance/10000))
# plot the convergence
# keeping the same interval as in the tutorial still gives powers that are too small.
# Part 1, question 2 a)
# simulate X3 by the inverse-CDF method
F.inv=function(x){
ifelse(0<x & x<0.25, 4*x, ifelse(x<=1 & x>=0.75, 4*x-2, 1))
}
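# Illustrative check (added, not in the original): the inverse-CDF method turns
# uniform draws into draws of X3, e.g.
x3_check=F.inv(runif(10))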
# function used to estimate the integral (rename it to match the report)
indicatrice1=function(x,t=1){
ifelse(x>=t, 1, 0)
}
# X1+X2 follows a Gamma(2,1) distribution, so rgamma is called directly
# build a function that returns the sample
MC.esti=function(n){
u=runif(n)
x3=F.inv(u)
t=rgamma(n,2,1)
  m=colSums(matrix(c(t,x3), nrow=2, byrow=TRUE)) # 2-row matrix: row 1 holds the n gamma draws, row 2 the n x3 draws; summing the columns gives n realisations of X1+X2+X3
return(indicatrice1(m))
}
# estimator and convergence
h=MC.esti(n)
f=mean(h)
plot(cumsum(h)/(1:length(h)),type='l', col='chartreuse', xlab="n", ylab="y", main="convergences des estimateurs", ylim=c(0.90,1))
legend("bottomright", legend=c("estimateur monte carlo"), col=c('chartreuse'), lty=c(1), lwd=c(3), bg='aliceblue')
# confidence interval
IC1=c(f-qnorm(0.975)*sqrt(var(h)/n),f+qnorm(0.975)*sqrt(var(h)/n))
# Exercise 2, part 1, question c)
# takes vectors as input and returns the transformed vector
F.=function(x){
ifelse( x>=0 & x<1, x/4, ifelse( x<2 & x>=1, x/4+0.5, ifelse(x>=2, 1, 0)))
}
# function that builds h1 for the MC estimator
estiF=function(n,t=1){
m=rgamma(n,2,1)
  a=F.(t-m) # vector of F(t-X1-X2)
return(1-a)
}
# estimator
h1=estiF(n)
f1=mean(h1)
# confidence interval
IC1=c(f1-qnorm(0.975)*sqrt(var(h1)/n), f1+qnorm(0.975)*sqrt(var(h1)/n))
points(cumsum(h1)/(1:length(h1)),type='l', col='blue')
legend("bottomright", legend=c("estimateur MC classique", "estimateur MC avec h1"), col=c('chartreuse', 'blue'), lty=c(1), lwd=c(3), bg='aliceblue')
# convergence of the estimator
# same approach with G
# function that builds h2
estiG=function(n, t=1){
x=runif(n)
  x3=F.inv(x) # vector of simulated X3 values
echantillon=1-pgamma(t-x3,2,1)
return(echantillon)
}
# estimator
h2=estiG(n)
f2=mean(h2)
points(cumsum(h2)/(1:length(h2)),type='l' , col="red")
legend("bottomright", legend=c("estimateur MC classique", "estimateur MC avec h1", "estimateur MC avec h2"), col=c('chartreuse', 'blue', 'red'), lty=c(1), lwd=c(3), bg='aliceblue')
# confidence interval
IC2=c(f2-qnorm(0.975)*sqrt(var(h2)/n), f2+qnorm(0.975)*sqrt(var(h2)/n))
# performance: compute it, or just interpret the results obtained? could we start from n=4000?
plot(cumsum(h)/(1:length(h)),type='l', col='chartreuse', xlab="n", ylab="y", main="zoom convergence des estimateurs", ylim=c(0.970,.975))
points(cumsum(h1)/(1:length(h1)),type='l', col='blue')
points(cumsum(h2)/(1:length(h2)),type='l' , col="red")
################################################################################
#x = rnorm(1e4)
#plot(cumsum(x)/(1:1e4),type='l')
#plot(Vectorize(MC.esti)(1:1000)[1,],type='l')
##################################################################################
# PART 2, question 1
n=10000
# to get what we want, take t=1.5; 5 turns out to be too large
t=1.5
# define the path-length function
d<-function(x){
return(min(x[1]+x[4], x[1]+x[3]+x[5], x[2]+x[5], x[2]+x[3]+x[4]))
}
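# Quick sanity check (added, not in the original): with unit lengths on all
# five edges the shortest of the four listed routes is x1+x4 = 2.
d(rep(1,5))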
# define the function used by the Monte Carlo estimator
indicatrice_2=function(x,t=1.5){
ifelse(d(x)>=t,1,0)
}
# function that builds a Monte Carlo sample of indicator values
MC.sample=function(n,t=1.5){
  x1=rexp(n,6) # check whether R's rexp expects the rate or 1/lambda as its argument
x2=rexp(n,7)
x3=rexp(n,3)
x4=rexp(n,2)
x5=rexp(n,1)
P=matrix(c(x1,x2,x3,x4,x5), ncol=5, byrow=FALSE)
sample=c()
for (i in 1:n){
    sample=c(sample,indicatrice_2(P[i,],t)) # consider using colSums instead (which would mean redefining d(x))
}
return(sample)
}
# estimator and convergence
b=MC.sample(n)
MC.esti2=mean(b)
plot(cumsum(b)/(1:length(b)), type='l', xlab='n', ylab='estimateur', main='convergence estimateur de monte carlo' )
# Importance sampling, question 2b)
# simulate draws from the density g by rejection
# build the density f(x, lambda)
fexp<-function(x){
return(dexp(x[1],6)*dexp(x[2],7)*dexp(x[3],3)*dexp(x[4],2)*dexp(x[5],1))
}
g2<-function(x,t){
return(indicatrice_2(x,t)*fexp(x))
}
########### not fully optimal; is there a better beta? apply the lecturer's method in 5 dimensions? #######################
rejetg=function(n,t=1.5){
solution=matrix(numeric(5*n), ncol=5)
for (i in 1:n){
u=runif(1)
exp=c(rexp(1,6), rexp(1,7), rexp(1,3), rexp(1,2), rexp(1,1))
    alpha=indicatrice_2(exp,t) # explain why this acceptance probability is the right one
while (u>alpha){
u=runif(1)
exp=c(rexp(1,6), rexp(1,7), rexp(1,3), rexp(1,2), rexp(1,1))
alpha=indicatrice_2(exp,t)
}
solution[i,]=exp
}
return(solution)
}
# start by computing the a_i
a0=c(1,2,3,4,5)
simuler_h=function(n){
solution=matrix(numeric(5*n), ncol=5)
for (i in 1:n){
    exp=c(rexp(1,a0[1]), rexp(1,a0[2]), rexp(1,a0[3]), rexp(1,a0[4]), rexp(1,a0[5]))
solution[i,]=exp
}
return(solution)
}
solution=simuler_h(n)
a_opti=c()
for (j in 1:5){
sum1=0
sum2=0
for (i in 1:n){
sum1=sum1+(indicatrice_2(solution[i,],t)*(6/a0[1])*exp(-(6-a0[1])*solution[i,1])*(7/a0[2])*exp(-(7-a0[2])*solution[i,2])*(3/a0[3])*exp(-(3-a0[3])*solution[i,3])*(2/a0[4])*exp(-(2-a0[4])*solution[i,4])*(1/a0[5])*exp(-(1-a0[5])*solution[i,5]))
sum2=sum2+(indicatrice_2(solution[i,],t)*solution[i,j]*(6/a0[1])*exp(-(6-a0[1])*solution[i,1])*(7/a0[2])*exp(-(7-a0[2])*solution[i,2])*(3/a0[3])*exp(-(3-a0[3])*solution[i,3])*(2/a0[4])*exp(-(2-a0[4])*solution[i,4])*(1/a0[5])*exp(-(1-a0[5])*solution[i,5]))
}
a_opti=c(a_opti,sum1/sum2)
}
h_opti<-function(x){
return(dexp(x[1],a_opti[1])*dexp(x[2],a_opti[2])*dexp(x[3],a_opti[3])*dexp(x[4],a_opti[4])*dexp(x[5],a_opti[5]))
}
# optimised importance sampling
# rejection step to simulate from h_opti
simuler_h_opti=function(n){
solution=matrix(numeric(5*n), ncol=5)
for (i in 1:n){
exp=c(rexp(1,a_opti[1]), rexp(1,a_opti[2]), rexp(1,a_opti[3]), rexp(1,a_opti[4]), rexp(1,a_opti[5]))
solution[i,]=exp
}
return(solution)
}
Y=simuler_h_opti(n)
le_truc=c()
for (i in 1:n){
le_truc=c(le_truc,(fexp(Y[i,])/h_opti(Y[i,])*indicatrice_2(Y[i,],t)))
}
plot(cumsum(le_truc)/(1:n), type='l')
pn_opti=mean(le_truc)
IC_borne_sup=pn_opti+qnorm(0.975)*sqrt(var(le_truc)/n)
IC_borne_inf=pn_opti-qnorm(0.975)*sqrt(var(le_truc)/n)
IC=c(IC_borne_inf,IC_borne_sup)
############### Exercise 8
### WRONG (needs fixing) ####
rho<-function(x){
return(min((-1/a_opti[1])*log(x[1])-(1/a_opti[4])*log(x[4]), (-1/a_opti[1])*log(x[1])-(1/a_opti[3])*log(x[3])-(1/a_opti[5])*log(x[5]), (-1/a_opti[2])*log(x[2])-(1/a_opti[5])*log(x[5]), (-1/a_opti[2])*log(x[2])-(1/a_opti[3])*log(x[3])-(1/a_opti[4])*log(x[4])))
}
indicatrice_3<-function(x,t=1.5){
ifelse(rho(x)>=t, 1, 0)
}
fct_8<-function(u){
return((6*7*3*2*1)/(a_opti[1]*a_opti[2]*a_opti[3]*a_opti[4]*a_opti[5])*u[1]^((6/a_opti[1])-1)*u[2]^((7/a_opti[2])-1)*u[3]^((3/a_opti[3])-1)*u[4]^((2/a_opti[4])-1)*u[5]^((1/a_opti[5])-1))
}
simuler_uniforme=function(n){
solution=matrix(numeric(5*n), ncol=5)
for (i in 1:n){
unif=c(runif(5,0,1))
solution[i,]=unif
}
return(solution)
}
question8=simuler_uniforme(n)
question8_1=1-question8
v8=c()
v8_1=c()
for (i in 1:n){
v8=c(v8, fct_8(question8[i,])*indicatrice_3(question8[i,]))
v8_1=c(v8_1, fct_8(question8_1[i,])*indicatrice_3(question8_1[i,]))
}
pn_unif=mean(v8+v8_1)/2
 | /Projet MC.R | no_license | guipet/appolon | R | false | false | 15,028 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.codecommit_operations.R
\name{get_comment}
\alias{get_comment}
\title{Returns the content of a comment made on a change, file, or commit in a repository}
\usage{
get_comment(commentId)
}
\arguments{
\item{commentId}{[required] The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit or GetCommentsForPullRequest.}
}
\description{
Returns the content of a comment made on a change, file, or commit in a repository.
}
\section{Accepted Parameters}{
\preformatted{get_comment(
commentId = "string"
)
}
}
 | /service/paws.codecommit/man/get_comment.Rd | permissive | CR-Mercado/paws | R | false | true | 628 | rd |
|
# Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
hometown <- "Daegu"
# Assign your name to the variable `my.name`
my.name <- "HYEONG SUK KIM"
# Assign your height to a variable `my.height`
my.height <- "183" #in cm
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 20
# Create a variable `puppy.price`, which is how expensive you think a puppy is
puppy.price <- 300
# Create a variable `total.cost` that has the total cost of all of your puppies
total.cost <- puppies * puppy.price
# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
too.expensive <- total.cost > 1000
# Create a variable `max_puppies`, which is the number of puppies you can afford for $1K. Compute this
# value in R, not just assign!
max.puppies <- 1000/puppy.price | /exercise-1/exercise.R | permissive | hsk26-1365358/ch05-r-intro | R | false | false | 891 | r |
#' GET response to a code. This function will determine the vocabulary system automatically.
#' @param code the code to look up; its vocabulary (RADLEX, LOINC, DICOM or SNOMEDCT) is guessed automatically
#' @param elements TRUE if elements are desired.
#' @param values TRUE if the values attached to the code are desired instead.
#' @param page page of results to request when paging applies.
#' @import httr
#' @import jsonlite
#' @import purrr
#' @import rubix
#' @import centipede
#' @import dplyr
#' @export
query_radelement_code <-
function(code, values = FALSE, elements = TRUE, page = 1) {
system <- guess_vocabulary(code)
if (elements == TRUE) {
url <- paste0("https://phpapi.rsna.org/radelement/public/v1/codes/", system, "/", code)
resp <- httr::GET(url)
} else if (values == TRUE) {
url <- paste0("https://phpapi.rsna.org/radelement/public/v1/codes/", system, "/", code, "/values")
resp <- httr::GET(url,
query = list(page = page))
} else {
url <- paste0("https://phpapi.rsna.org/radelement/public/v1/codes/", system, "/", code)
resp <- httr::GET(url,
query = list(page = page))
}
parsed <<- jsonlite::fromJSON(content(resp, "text"), simplifyVector = FALSE)
if (http_error(resp)) {
stop(
sprintf(
"RSNA API request failed [%s]\n%s\n<%s>",
status_code(resp),
parsed$message,
parsed$documentation_url
),
call. = FALSE
)
}
payload <-
parsed$data %>%
purrr::map(function(x) t(as.data.frame(x))) %>%
dplyr::bind_rows() %>%
rubix::cleanup_colnames()
structure(
list(
content = payload,
meta = parsed$meta,
links = parsed$links,
response = resp
),
class = "rsna_api"
)
}
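# Example (added for illustration; not from the original source). The code value
# below is hypothetical -- any code that guess_vocabulary() can resolve against
# the RadElement API could be substituted:
# res <- query_radelement_code("RID39", elements = TRUE)
# res$content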
 | /R/query_radelement_code.R | no_license | meerapatelmd/radiogram | R | false | false | 2,400 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nfunctions.R
\name{nsentence}
\alias{nsentence}
\title{count the number of sentences}
\usage{
nsentence(x, ...)
}
\arguments{
\item{x}{a character or \link{corpus} whose sentences will be counted}
\item{...}{additional arguments passed to \code{\link{tokens}}}
}
\value{
count(s) of the total sentences per text
}
\description{
Return the count of sentences in a corpus or character object.
}
\note{
\code{nsentence()} relies on the boundaries definitions in the
\pkg{stringi} package (see \link[stringi]{stri_opts_brkiter}). It does not
count sentences correctly if the text has been transformed to lower case,
and for this reason \code{nsentence()} will issue a warning if it detects
all lower-cased text.
}
\examples{
# simple example
txt <- c(text1 = "This is a sentence: second part of first sentence.",
text2 = "A word. Repeated repeated.",
text3 = "Mr. Jones has a PhD from the LSE. Second sentence.")
nsentence(txt)
}
 | /man/nsentence.Rd | no_license | chmue/quanteda | R | false | true | 1,033 | rd |
|
if (getRversion() >= "2.15.1") utils::globalVariables(c("tkrp","facevalue","couprate","discrate","maturity","ratefreq","durtype"))
bonddur <-
function(){
my.draw <- function(panel)
{
faceval<-as.numeric(panel$facevalue)
discrate = as.numeric(panel$discrate)/100
maturity <- panel$maturity
if (panel$frequency == "quarterly"){
freq<-4 # change thru radio button
times<-seq(from=0.25,by=0.25,length.out=maturity*freq)
}
else if (panel$frequency == "semi-annual"){
freq<-2 # change thru radio button
times<-seq(from=0.5,by=0.5,length.out=maturity*freq)
}
else {
freq<-1 # change thru radio button
times<-seq(from=1,by=1,length.out=maturity*freq)
}
if (panel$ratefreq=="continuous comp"){
pvfactors=exp(-discrate*times)
}
else if(panel$ratefreq=="annual comp"){
pvfactors=1/(1+discrate)^times
}
else{
pvfactors=1/(1+discrate/freq)^(freq*times)
}
# effrate = discrate/(100*freq)
# effperiods = freq*maturity
# pv_coupons<-(coupon/effrate)*(1-(1+effrate)^(-effperiods)) # PV of coupons
# pv_face<-faceval*(1+effrate)^(-effperiods) # PV of face value
# price<-pv_coupons+pv_face # bond price is the sum of both
# price <- round(price,2)
coupon<-panel$couprate*faceval/(100*freq)
cashflows <- rep(coupon,maturity*freq)
cashflows[length(cashflows)] = cashflows[length(cashflows)]+faceval
price<-sum(cashflows*pvfactors)
dur = sum(cashflows*pvfactors*times)/price
if (panel$durtype=="Modified"){
dur<- dur/(1+discrate/freq)
}
dur<-round(dur,2)
plot(1:10, 1:10, type="n", xlab="", ylab="",
axes=FALSE, frame = TRUE)
text(5, 5, paste("Duration: ", dur),cex=1.4)
#cat(pv)
panel
}
my.redraw <- function(panel)
{
rp.tkrreplot(panel, tkrp)
panel
}
my.panel <- rp.control("Bond Duration", frequency="quarterly",couprate= 8,discrate=10, maturity = 10)
rp.textentry(panel = my.panel, variable= facevalue,
labels = "Face Value: ", action = my.redraw, initval="1000")
rp.doublebutton(my.panel,variable=couprate,step=0.25,title="Coupon (% p.a.)",initval=10,range=c(1,15),showvalue=TRUE,action=my.redraw)
rp.doublebutton(my.panel,variable=discrate,step=0.25,title="Discount Rate (% p.a.)",initval=10,range=c(1,15),showvalue=TRUE,action=my.redraw)
rp.doublebutton(my.panel,variable=maturity,step=0.25,title="Maturity (Yrs)",initval=10,range=c(1,25),showvalue=TRUE,action=my.redraw)
rp.radiogroup(panel = my.panel, variable= frequency,
vals = c("quarterly", "semi-annual", "annual"),
action = my.redraw, title = "Coupon payments")
rp.radiogroup(panel = my.panel, variable= ratefreq,
vals = c("continuous comp", "same as coupon freq","annual comp"),
action = my.redraw, title = "Frequency of discount rate")
rp.radiogroup(panel = my.panel, variable= durtype,
vals = c("Macaulay", "Modified"),
action = my.redraw, title = "Duration formula")
rp.tkrplot(panel=my.panel , name=tkrp, plotfun=my.draw)
#rp.do(my.panel, my.draw)
}
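# Example usage (added; not part of the original file): launching the panel
# requires an interactive session with the rpanel and tkrplot packages loaded.
# bonddur()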
 | /GUIDE/R/bonddur.R | no_license | ingted/R-Examples | R | false | false | 3,459 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outlook.R
\name{connect_outlook}
\alias{connect_outlook}
\title{Create a COM Outlook.Application instance.}
\usage{
connect_outlook(wait_seconds = 3)
}
\value{
An object of class `COMIDispatch` that represents an Outlook application instance.
}
\description{
Create a COM Outlook.Application instance.
}
\examples{
\dontrun{
com <- connect_outlook()
}
}
 | /man/connect_outlook.Rd | permissive | lgaborini/outlookMailer | R | false | true | 433 | rd |
|
package_check <- require("d3heatmap")
if (package_check == FALSE) {
install.packages('d3heatmap')
}
package_check <- require("parcoords")
if (package_check == FALSE) {
install.packages('parcoords')
}
package_check <- require("pairsD3")
if (package_check == FALSE) {
install.packages('pairsD3')
}
package_check <- require("plotly")
if (package_check == FALSE) {
install.packages('plotly')
}
library(dplyr)
library(d3heatmap)
library(parcoords)
require(pairsD3)
library(plotly)
library(tidyr)
library(leaflet)
df <- read.csv("dataset_Facebook.csv",sep=";") %>%
dplyr::select(Category,Lifetime.Post.Total.Reach,
                Lifetime.Post.Consumptions,Total.Interactions) | /global.R | no_license | usfviz/jpoberhauser-hw3 | R | false | false | 725 | r |
normal_income=rnorm(100,mean=250000,sd=75000)
normal_income
mean<-mean(normal_income)
sd<-sd(normal_income)
gender=c(rep("F",100))
i=sample(1:100,100,replace=FALSE)
for(q in 1:100){
if(gender[i[q]] == "F" && q <= 40)
{}
else
gender[i[q]] = c("M")
}
gender
Custs <-data.frame(ID=1:100,INCOME= normal_income, GENDER = gender)
View(Custs)
| /Hands on 3.R | no_license | bhavyabishnoi/AMMA--ALL-Codes | R | false | false | 366 | r |
normal_income=rnorm(100,mean=250000,sd=75000)
normal_income
mean<-mean(normal_income)
sd<-sd(normal_income)
gender=c(rep("F",100))
i=sample(1:100,100,replace=FALSE)
for(q in 1:100){
if(gender[i[q]] == "F" && q <= 40)
{}
else
gender[i[q]] = c("M")
}
gender
Custs <-data.frame(ID=1:100,INCOME= normal_income, GENDER = gender)
View(Custs)
|
/man/EnvSysSample.Rd | no_license | DIARSproject/iSDM | R | false | true | 4,022 | rd | ||
#' Create R code for a dm object
#'
#' `dm_paste()` takes an existing `dm` and emits the code necessary for its creation.
#'
#' @inheritParams dm_add_pk
#' @param select
#' Deprecated, see `"select"` in the `options` argument.
#' @param ... Must be empty.
#' @param tab_width Indentation width for code from the second line onwards
#' @param options Formatting options. A character vector containing some of:
#' - `"tables"`: [tibble()] calls for empty table definitions
#' derived from [dm_ptype()], overrides `"select"`.
#' - `"select"`: [dm_select()] statements for columns that are part
#' of the dm.
#' - `"keys"`: [dm_add_pk()] and [dm_add_fk()] statements for adding keys.
#' - `"color"`: [dm_set_colors()] statements to set color.
#' - `"all"`: All options above except `"select"`
#'
#' Default `NULL` is equivalent to `c("keys", "color")`
#' @param path Output file, if `NULL` the code is printed to the console.
#'
#' @details
#' The code emitted by the function reproduces the structure of the `dm` object.
#' The `options` argument controls the level of detail: keys, colors,
#' table definitions.
#' Data in the tables is never included, see [dm_ptype()] for the underlying logic.
#'
#' @return Code for producing the prototype of the given `dm`.
#'
#' @export
#' @examples
#' dm() %>%
#' dm_paste()
#' @examplesIf rlang::is_installed("nycflights13")
#'
#' dm_nycflights13() %>%
#' dm_paste()
#'
#' dm_nycflights13() %>%
#' dm_paste(options = "select")
dm_paste <- function(dm, select = NULL, ..., tab_width = 2,
options = NULL, path = NULL) {
check_dots_empty(action = warn)
options <- check_paste_options(options, select, caller_env())
if (!is.null(path)) {
stopifnot(rlang::is_installed("brio"))
}
code <- dm_paste_impl(dm = dm, options, tab_width = tab_width)
if (is.null(path)) {
cli::cli_code(code)
} else {
brio::write_lines(code, path)
}
invisible(dm)
}
check_paste_options <- function(options, select, env) {
allowed_options <- c("all", "tables", "keys", "select", "color")
if (is.null(options)) {
options <- c("keys", "color")
} else {
if (!all(options %in% allowed_options)) {
abort_unknown_option(options, allowed_options)
}
}
if (!is.null(select)) {
deprecate_soft("0.1.2", "dm::dm_paste(select = )", "dm::dm_paste(options = 'select')", env = env)
if (isTRUE(select)) {
options <- c(options, "select")
}
}
if ("all" %in% options) {
options <- allowed_options
}
if ("tables" %in% options) {
options <- setdiff(options, "select")
}
options
}
dm_paste_impl <- function(dm, options, tab_width) {
check_not_zoomed(dm)
check_no_filter(dm)
tab <- paste0(rep(" ", tab_width), collapse = "")
# code for including table definitions
code_tables <- if ("tables" %in% options) dm_paste_tables(dm, tab)
# code for including the tables
code_construct <- dm_paste_construct(dm)
# adding code for selection of columns
code_select <- if ("select" %in% options) dm_paste_select(dm)
# adding code for establishing PKs
code_pks <- if ("keys" %in% options) dm_paste_pks(dm)
# adding code for establishing FKs
code_fks <- if ("keys" %in% options) dm_paste_fks(dm)
# adding code for color
code_color <- if ("color" %in% options) dm_paste_color(dm)
# combine dm and paste code
code_dm <- glue_collapse(
c(
code_construct,
code_select,
code_pks,
code_fks,
code_color
),
sep = glue(" %>%\n{tab}", .trim = FALSE)
)
paste0(code_tables, code_dm)
}
dm_paste_tables <- function(dm, tab) {
ptype <- dm_ptype(dm)
tables <-
ptype %>%
dm_get_tables() %>%
map_chr(df_paste, tab)
glue_collapse1(
glue("{tick_if_needed(names(tables))} <- {tables}\n\n", .trim = FALSE)
)
}
dm_paste_construct <- function(dm) {
glue("dm::dm({glue_collapse1(tick_if_needed(src_tbls(dm)), ', ')})")
}
dm_paste_select <- function(dm) {
tbl_select <- dm %>%
dm_get_def() %>%
mutate(cols = map(data, colnames)) %>%
mutate(cols = map_chr(cols, ~ glue_collapse1(glue(", {tick_if_needed(.x)}")))) %>%
mutate(code = glue("dm::dm_select({tick_if_needed(table)}{cols})")) %>%
pull()
}
dm_paste_pks <- function(dm) {
# FIXME: this will fail with compound keys
dm_get_all_pks_impl(dm) %>%
mutate(code = glue("dm::dm_add_pk({tick_if_needed(table)}, {tick_if_needed(pk_col)})")) %>%
pull()
}
dm_paste_fks <- function(dm) {
# FIXME: this will fail with compound keys
dm_get_all_fks_impl(dm) %>%
mutate(code = glue("dm::dm_add_fk({tick_if_needed(child_table)}, {tick_if_needed(child_fk_cols)}, {tick_if_needed(parent_table)})")) %>%
pull()
}
dm_paste_color <- function(dm) {
colors <- dm_get_colors(dm)
colors <- colors[names(colors) != "default"]
glue("dm::dm_set_colors({tick_if_needed(names(colors))} = {tick_if_needed(colors)})")
}
df_paste <- function(x, tab) {
cols <- map_chr(x, deparse_line)
if (is_empty(x)) {
cols <- ""
} else {
cols <- paste0(
paste0("\n", tab, tick_if_needed(names(cols)), " = ", cols, collapse = ","),
"\n"
)
}
paste0("tibble::tibble(", cols, ")")
}
deparse_line <- function(x) {
x <- deparse(x, width.cutoff = 500, backtick = TRUE)
gsub(" *\n *", " ", x)
}
glue_collapse1 <- function(x, ...) {
if (is_empty(x)) {
""
} else {
glue_collapse(x, ...)
}
}
dquote <- function(x) {
if (is_empty(x)) {
return(character())
}
paste0('"', x, '"')
}
# Errors ------------------------------------------------------------------
abort_unknown_option <- function(options, all_options) {
abort(error_txt_unknown_option(options, all_options), .subclass = dm_error_full("unknown_option"))
}
error_txt_unknown_option <- function(options, all_options) {
bad_options <- setdiff(options, all_options)
glue("Option unknown: {commas(dquote(bad_options))}. Must be one of {commas(dquote(all_options))}.")
}
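# Illustrative sketch (not part of the package): roughly the kind of code
# `dm_paste()` prints for a small dm with the default options ("keys", "color").
# The table names, keys, and colour below are made-up examples; the exact
# output depends on the dm object passed in.
#
# dm::dm(flights, planes) %>%
#   dm::dm_add_pk(planes, tailnum) %>%
#   dm::dm_add_fk(flights, tailnum, planes) %>%
#   dm::dm_set_colors(planes = "#5B9BD5FF")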
| /R/paste.R | permissive | jawond/dm | R | false | false | 5,970 | r | #' Create R code for a dm object
#'
#' `dm_paste()` takes an existing `dm` and emits the code necessary for its creation.
#'
#' @inheritParams dm_add_pk
#' @param select
#' Deprecated, see `"select"` in the `options` argument.
#' @param ... Must be empty.
#' @param tab_width Indentation width for code from the second line onwards
#' @param options Formatting options. A character vector containing some of:
#' - `"tables"`: [tibble()] calls for empty table definitions
#' derived from [dm_ptype()], overrides `"select"`.
#' - `"select"`: [dm_select()] statements for columns that are part
#' of the dm.
#' - `"keys"`: [dm_add_pk()] and [dm_add_fk()] statements for adding keys.
#' - `"color"`: [dm_set_colors()] statements to set color.
#' - `"all"`: All options above except `"select"`
#'
#' Default `NULL` is equivalent to `c("keys", "color")`
#' @param path Output file, if `NULL` the code is printed to the console.
#'
#' @details
#' The code emitted by the function reproduces the structure of the `dm` object.
#' The `options` argument controls the level of detail: keys, colors,
#' table definitions.
#' Data in the tables is never included, see [dm_ptype()] for the underlying logic.
#'
#' @return Code for producing the prototype of the given `dm`.
#'
#' @export
#' @examples
#' dm() %>%
#' dm_paste()
#' @examplesIf rlang::is_installed("nycflights13")
#'
#' dm_nycflights13() %>%
#' dm_paste()
#'
#' dm_nycflights13() %>%
#' dm_paste(options = "select")
dm_paste <- function(dm, select = NULL, ..., tab_width = 2,
options = NULL, path = NULL) {
check_dots_empty(action = warn)
options <- check_paste_options(options, select, caller_env())
if (!is.null(path)) {
stopifnot(rlang::is_installed("brio"))
}
code <- dm_paste_impl(dm = dm, options, tab_width = tab_width)
if (is.null(path)) {
cli::cli_code(code)
} else {
brio::write_lines(code, path)
}
invisible(dm)
}
check_paste_options <- function(options, select, env) {
allowed_options <- c("all", "tables", "keys", "select", "color")
if (is.null(options)) {
options <- c("keys", "color")
} else {
if (!all(options %in% allowed_options)) {
abort_unknown_option(options, allowed_options)
}
}
if (!is.null(select)) {
deprecate_soft("0.1.2", "dm::dm_paste(select = )", "dm::dm_paste(options = 'select')", env = env)
if (isTRUE(select)) {
options <- c(options, "select")
}
}
if ("all" %in% options) {
options <- allowed_options
}
if ("tables" %in% options) {
options <- setdiff(options, "select")
}
options
}
dm_paste_impl <- function(dm, options, tab_width) {
check_not_zoomed(dm)
check_no_filter(dm)
tab <- paste0(rep(" ", tab_width), collapse = "")
# code for including table definitions
code_tables <- if ("tables" %in% options) dm_paste_tables(dm, tab)
# code for including the tables
code_construct <- dm_paste_construct(dm)
# adding code for selection of columns
code_select <- if ("select" %in% options) dm_paste_select(dm)
# adding code for establishing PKs
code_pks <- if ("keys" %in% options) dm_paste_pks(dm)
# adding code for establishing FKs
code_fks <- if ("keys" %in% options) dm_paste_fks(dm)
# adding code for color
code_color <- if ("color" %in% options) dm_paste_color(dm)
# combine dm and paste code
code_dm <- glue_collapse(
c(
code_construct,
code_select,
code_pks,
code_fks,
code_color
),
sep = glue(" %>%\n{tab}", .trim = FALSE)
)
paste0(code_tables, code_dm)
}
dm_paste_tables <- function(dm, tab) {
ptype <- dm_ptype(dm)
tables <-
ptype %>%
dm_get_tables() %>%
map_chr(df_paste, tab)
glue_collapse1(
glue("{tick_if_needed(names(tables))} <- {tables}\n\n", .trim = FALSE)
)
}
dm_paste_construct <- function(dm) {
glue("dm::dm({glue_collapse1(tick_if_needed(src_tbls(dm)), ', ')})")
}
dm_paste_select <- function(dm) {
tbl_select <- dm %>%
dm_get_def() %>%
mutate(cols = map(data, colnames)) %>%
mutate(cols = map_chr(cols, ~ glue_collapse1(glue(", {tick_if_needed(.x)}")))) %>%
mutate(code = glue("dm::dm_select({tick_if_needed(table)}{cols})")) %>%
pull()
}
dm_paste_pks <- function(dm) {
# FIXME: this will fail with compound keys
dm_get_all_pks_impl(dm) %>%
mutate(code = glue("dm::dm_add_pk({tick_if_needed(table)}, {tick_if_needed(pk_col)})")) %>%
pull()
}
dm_paste_fks <- function(dm) {
# FIXME: this will fail with compound keys
dm_get_all_fks_impl(dm) %>%
mutate(code = glue("dm::dm_add_fk({tick_if_needed(child_table)}, {tick_if_needed(child_fk_cols)}, {tick_if_needed(parent_table)})")) %>%
pull()
}
dm_paste_color <- function(dm) {
colors <- dm_get_colors(dm)
colors <- colors[names(colors) != "default"]
glue("dm::dm_set_colors({tick_if_needed(names(colors))} = {tick_if_needed(colors)})")
}
df_paste <- function(x, tab) {
cols <- map_chr(x, deparse_line)
if (is_empty(x)) {
cols <- ""
} else {
cols <- paste0(
paste0("\n", tab, tick_if_needed(names(cols)), " = ", cols, collapse = ","),
"\n"
)
}
paste0("tibble::tibble(", cols, ")")
}
deparse_line <- function(x) {
x <- deparse(x, width.cutoff = 500, backtick = TRUE)
gsub(" *\n *", " ", x)
}
glue_collapse1 <- function(x, ...) {
if (is_empty(x)) {
""
} else {
glue_collapse(x, ...)
}
}
dquote <- function(x) {
if (is_empty(x)) {
return(character())
}
paste0('"', x, '"')
}
# Errors ------------------------------------------------------------------
abort_unknown_option <- function(options, all_options) {
abort(error_txt_unknown_option(options, all_options), .subclass = dm_error_full("unknown_option"))
}
error_txt_unknown_option <- function(options, all_options) {
bad_options <- setdiff(options, all_options)
glue("Option unknown: {commas(dquote(bad_options))}. Must be one of {commas(dquote(all_options))}.")
}
|
# First, install the keras R package from GitHub as follows:
devtools::install_github("rstudio/keras")
# The Keras R interface uses the TensorFlow backend engine by default.
# To install both the core Keras library as well as the TensorFlow backend use the install_keras() function:
library(keras)
install_keras()
# Load in a pretrained model: Using Inception V3 with ImageNet weights
model <- application_inception_v3(weights = "imagenet")
# Load in image from `imgs` directory. Images include that of an elephant, hamster, apples and oranges
# Feel free to add your own images to the directory to test the model
img_path <- "./imgs/elephant.jpg"
img <- image_load(img_path, target_size = c(299, 299))
# Convert img to an array for tensor calculations
img_array <- image_to_array(img)
# Ensure we have a 4d tensor with single element in the batch dimension
img_tensor <- array_reshape(img_array, c(1, dim(img_array)))
# Process the input so that it can be used for prediction
processed_tensor <- inception_v3_preprocess_input(img_tensor)
# Make a prediction based on the model
predictions <- model %>% predict(processed_tensor)
imagenet_decode_predictions(predictions, top = 10)[[1]]
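# Optional extension (a sketch, not part of the original demo): wrap the steps
# above in a helper so every image in `imgs` can be classified in one call.
# `classify_image()` is a hypothetical name; it reuses the same Inception V3
# preprocessing shown above.
classify_image <- function(path, model, top = 5) {
  img <- image_load(path, target_size = c(299, 299))
  x <- image_to_array(img)
  x <- array_reshape(x, c(1, dim(x)))
  x <- inception_v3_preprocess_input(x)
  preds <- model %>% predict(x)
  imagenet_decode_predictions(preds, top = top)[[1]]
}
# Example (assumes the files in ./imgs exist):
# lapply(list.files("./imgs", full.names = TRUE), classify_image, model = model)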
| /image-classifier/demo.R | no_license | OscarLewis/mini-demos | R | false | false | 1,193 | r | # First, install the keras R package from GitHub as follows:
devtools::install_github("rstudio/keras")
# The Keras R interface uses the TensorFlow backend engine by default.
# To install both the core Keras library as well as the TensorFlow backend use the install_keras() function:
library(keras)
install_keras()
# Load in a pretrained model: Using Inception V3 with ImageNet weights
model <- application_inception_v3(weights = "imagenet")
# Load in image from `imgs` directory. Images include that of an elephant, hamster, apples and oranges
# Feel free to add your own images to the directory to test the model
img_path <- "./imgs/elephant.jpg"
img <- image_load(img_path, target_size = c(299, 299))
# Convert img to an array for tensor calculations
img_array <- image_to_array(img)
# Ensure we have a 4d tensor with single element in the batch dimension
img_tensor <- array_reshape(img_array, c(1, dim(img_array)))
# Process the input so that it can be used for prediction
processed_tensor <- inception_v3_preprocess_input(img_tensor)
# Make a prediction based on the model
predictions <- model %>% predict(processed_tensor)
imagenet_decode_predictions(predictions, top = 10)[[1]]
|
#' Plot MCMC selectivities for iSCAM models
#'
#' @description
#' Plot the selectivity for any combination of gears in an iscam model
#'
#' @family Selectivity plotting functions
#'
#' @param model An iscam model object (class [mdl_cls])
#' @param probs A 3-element vector of probabilities that appear in the output data frames
#' This is provided in case the data frames have more than three different quantile levels
#' @param show_maturity If `TRUE`, overlay the maturity ogive on the selectivity plots
#' @param ages A character vector from 1 to the maximum age to show on
#'   the plot. Defaults to the max age in the model. This may be needed when
#'   the selectivity goes past the maximum age, so that we can still see the
#'   full selectivity curve.
#' @param breaks A vector representing the tick locations on the x-axis
#' @param ci_type One of "line", "ribbon", "both" to show the credible interval
#' @param ci_linetype See `linetype` in [ggplot2]. Only used if `ci_type` is "line" or "both"
#' @param ci_alpha Opacity between 0 and 1 for the credible intervals ribbons. Only used if
#' `ci_type` is "ribbon" or "both"
#' @param leg_loc A two-element vector describing the X-Y values between 0 and
#' 1 to anchor the legend to. eg. c(1, 1) is the top right corner and c(0, 0)
#' is the bottom left corner. It can also be the string "facet" in which case
#' the legend will appear in the empty facet if it exists.
#' @return A [ggplot2::ggplot()] object
#' @importFrom ggplot2 geom_function
#' @export
plot_selex_mcmc <- function(model,
gear = NULL,
probs = c(0.025, 0.5, 0.975),
show_maturity = FALSE,
ages = as.character(model$dat$start.age:model$dat$end.age),
breaks = seq(0, model$dat$end.age, 5),
ci_type = c("both", "line", "ribbon"),
ci_linetype = c("dotted", "solid",
"dashed", "dotdash",
"longdash", "twodash"),
ci_alpha = 0.3,
leg_loc = c(1, 1),
angle_x_labels = FALSE){
ci_type <- match.arg(ci_type)
ci_linetype <- match.arg(ci_linetype)
if(!is.character(ages)){
ages <- as.character(ages)
}
if(is_iscam_model_list(model) && length(model) == 1){
model <- model[[1]]
}
if(class(model) != mdl_cls){
if(class(model) != mdl_lst_cls){
stop("`model` is not a gfiscamutils::mdl_cls class (",mdl_cls, ")")
}
stop("`model` is a `gfiscamutils::mdl_lst_cls` class (",mdl_lst_cls, ")\n",
" It should be a `gfiscamutils::mdl_cls` class (",mdl_cls, ")")
}
if(length(probs) != 3){
stop("`probs` has length ", length(probs), " but must be a vector of three values\n",
" representing lower CI, median, and upper CI")
}
# Extract selectivity parameters
vals <- model$mcmc$selest
if(is.null(vals)){
stop("MCMC selectivity estimates not found for this model, see\n",
" `model$mcmc$selest` which is created in `read_mcmc()` and `load_special()`")
}
# Remove male "estimates" for models with number of sexes == 1. iSCAM outputs the
# parameter values even if they were not estimated so they are gibberish
if(model$dat$num.sex == 1){
vals <- vals %>%
filter(sex != 1)
}
vals <- vals %>%
mutate(sex = ifelse(sex %in% c(0, 2), "Female", "Male"))
gear_names <- model$dat$gear_names
if(length(unique(vals$gear)) != length(gear_names)){
stop("`model$dat$gear_names` is not the same length as the number of gears present\n",
" in the MCMC selectivity parameter outputs. Check your declaration of the names\n",
" in the iSCAM data file and try again.")
}
if(!is.null(gear)){
valid_gear_nums <- seq_along(gear_names)
if(!all(gear %in% valid_gear_nums)){
stop("One or more of the gear numbers you requested is outside the range of possible gears.\n\n",
"Available gears numbers are: ", paste(valid_gear_nums, collapse = ", "), "\n\n",
"Names for these are:\n", paste(gear_names, collapse = "\n"))
}
vals <- vals %>%
filter(gear %in% gear_names[!!gear])
gear_names <- gear_names[gear]
}
# Rename the parameter columns because the ages columns would
# have these same names
vals <- vals %>%
rename(p1 = "a_hat", p2 = "g_hat")
# Remove gears with TV selectivity and give a warning
vals <- vals %>%
split(~gear) %>%
imap(~{
yrs <- unique(.x$start_year)
if(length(yrs) > 1){
warning("`gear` ", unique(.x$gear), " has selectivity blocks (is time-varying)\n",
" and must be plotted seperately using `plot_tv_selex_mcmc()`\n")
gear_names <<- gear_names[-which(gear_names == unique(.x$gear))]
return(NULL)
}
return(.x)
})
# Remove NULL list elements (fixed parameters)
vals <- vals[!sapply(vals, is.null)] %>%
bind_rows()
# Add age columns with logistic selectivity calculations
for(i in ages){
vals <- vals %>%
mutate(!!sym(i) := 1 / (1 + exp(-(as.numeric(i) - p1) / p2)))
}
get_val <- function(d, q){
d %>%
filter(quants == q) %>%
select(-quants) %>%
pivot_longer(-c(gear, start_year, end_year, Sex, p1, p2),
names_to = "age",
values_to = "value") %>%
mutate(age = as.numeric(age))
}
vals <- vals %>%
rename(Sex = sex)
# Re-order the posteriors by group in order of a_hat smallest to largest
gear_lst <- vals |>
split(~ gear)
vals <- gear_lst |>
map_dfr(function(gear_df){
sex_lst <- gear_df |>
split(~ Sex)
sex_lst <- map_dfr(sex_lst, function(sex_df){
sex_df[order(sex_df$p1), ]
})
}) |>
select(-c(posterior, block, start_year, end_year, p1, p2)) |>
select(gear, Sex, everything())
num_posts <- nrow(model$mcmc$params)
probs <- as.integer(probs * num_posts)
make_longer <- function(d){
d |>
pivot_longer(-c(gear, Sex),
names_to = "age",
values_to = "value") |>
mutate(age = as.numeric(age))
}
vals <- vals |>
mutate(gear = factor(gear)) |>
mutate(gear = fct_relevel(gear, gear_names))
lo_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[1]) |>
make_longer() |>
rename(lo_value = value)
med_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[2]) |>
make_longer()
hi_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[3]) |>
make_longer() |>
rename(hi_value = value)
rib_vals <- lo_vals %>%
left_join(hi_vals,
by = c("gear", "Sex", "age")) |>
mutate(value = lo_value)
g <- ggplot(med_vals, aes(x = factor(age),
y = value,
group = Sex,
color = Sex,
fill = Sex)) +
geom_line() +
geom_point() +
xlab("Age") +
ylab("Selectivity") +
scale_x_discrete(breaks = breaks) +
scale_color_manual(values = c("red", "blue"))
if(ci_type %in% c("ribbon", "both")){
g <- g +
geom_ribbon(data = rib_vals,
aes(ymin = lo_value,
ymax = hi_value,
group = Sex),
alpha = ci_alpha,
color = NA)
}
if(ci_type %in% c("line", "both")){
g <- g +
geom_line(data = lo_vals, aes(y = lo_value,
group = Sex,
color = Sex),
linetype = ci_linetype) +
geom_line(data = hi_vals, aes(y = hi_value,
group = Sex,
color = Sex),
linetype = ci_linetype)
}
g <- g +
facet_wrap(~ gear) +
xlab("Age") +
ylab("Proportion")
if(show_maturity){
model$mpd$ma
if(model$dat$num.sex == 2){
a50_female <- model$dat$age.at.50.mat[1]
sigma_a50_female <- model$dat$sd.at.50.mat[1]
a50_male <- model$dat$age.at.50.mat[2]
sigma_a50_male <- model$dat$sd.at.50.mat[2]
g <- g +
geom_function(fun = function(x){1 / (1 + exp(-(x - a50_male) / sigma_a50_male))},
color = "blue",
linetype = "dashed")
}else{
a50_female <- model$dat$age.at.50.mat[1]
sigma_a50_female <- model$dat$sd.at.50.mat[1]
}
g <- g +
geom_function(fun = function(x){1 / (1 + exp(-(x - a50_female) / sigma_a50_female))},
color = "red",
linetype = "dashed")
}
if(is.null(leg_loc)){
g <- g +
theme(legend.position = "none")
}else if(leg_loc[1] == "facet"){
g <- g %>% move_legend_to_empty_facet()
}else{
g <- g +
theme(legend.justification = leg_loc,
legend.position = leg_loc,
legend.background = element_rect(fill = "white", color = "white"))
}
if(angle_x_labels){
g <- g +
theme(axis.text.x = element_text(angle = 45, hjust = 0.55, vjust = 0.5))
}
g
#suppressWarnings(print(g))
}
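# Usage sketch (illustrative only): assumes an MCMC-enabled iscam model object
# has already been loaded elsewhere with the package's model-loading functions,
# and that gears 1 and 2 do not have time-varying selectivity.
# plot_selex_mcmc(model,
#                 gear = c(1, 2),
#                 ci_type = "ribbon",
#                 show_maturity = TRUE,
#                 leg_loc = "facet")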
| /R/plot-selex-mcmc.R | no_license | pbs-assess/gfiscamutils | R | false | false | 9,246 | r | #' Plot MCMC selectivities for iSCAM models
#'
#' @description
#' Plot the selectivity for any combination of gears in an iscam model
#'
#' @family Selectivity plotting functions
#'
#' @param model An iscam model object (class [mdl_cls])
#' @param probs A 3-element vector of probabilities that appear in the output data frames
#' This is provided in case the data frames have more than three different quantile levels
#' @param show_maturity If `TRUE`, overlay the maturity ogive on the selectivity plots
#' @param ages A character vector from 1 to the maximum age to show on
#'   the plot. Defaults to the max age in the model. This may be needed when
#'   the selectivity goes past the maximum age, so that we can still see the
#'   full selectivity curve.
#' @param breaks A vector representing the tick locations on the x-axis
#' @param ci_type One of "line", "ribbon", "both" to show the credible interval
#' @param ci_linetype See `linetype` in [ggplot2]. Only used if `ci_type` is "line" or "both"
#' @param ci_alpha Opacity between 0 and 1 for the credible intervals ribbons. Only used if
#' `ci_type` is "ribbon" or "both"
#' @param leg_loc A two-element vector describing the X-Y values between 0 and
#' 1 to anchor the legend to. eg. c(1, 1) is the top right corner and c(0, 0)
#' is the bottom left corner. It can also be the string "facet" in which case
#' the legend will appear in the empty facet if it exists.
#' @return A [ggplot2::ggplot()] object
#' @importFrom ggplot2 geom_function
#' @export
plot_selex_mcmc <- function(model,
gear = NULL,
probs = c(0.025, 0.5, 0.975),
show_maturity = FALSE,
ages = as.character(model$dat$start.age:model$dat$end.age),
breaks = seq(0, model$dat$end.age, 5),
ci_type = c("both", "line", "ribbon"),
ci_linetype = c("dotted", "solid",
"dashed", "dotdash",
"longdash", "twodash"),
ci_alpha = 0.3,
leg_loc = c(1, 1),
angle_x_labels = FALSE){
ci_type <- match.arg(ci_type)
ci_linetype <- match.arg(ci_linetype)
if(!is.character(ages)){
ages <- as.character(ages)
}
if(is_iscam_model_list(model) && length(model) == 1){
model <- model[[1]]
}
if(class(model) != mdl_cls){
if(class(model) != mdl_lst_cls){
stop("`model` is not a gfiscamutils::mdl_cls class (",mdl_cls, ")")
}
stop("`model` is a `gfiscamutils::mdl_lst_cls` class (",mdl_lst_cls, ")\n",
" It should be a `gfiscamutils::mdl_cls` class (",mdl_cls, ")")
}
if(length(probs) != 3){
stop("`probs` has length ", length(probs), " but must be a vector of three values\n",
" representing lower CI, median, and upper CI")
}
# Extract selectivity parameters
vals <- model$mcmc$selest
if(is.null(vals)){
stop("MCMC selectivity estimates not found for this model, see\n",
" `model$mcmc$selest` which is created in `read_mcmc()` and `load_special()`")
}
# Remove male "estimates" for models with number of sexes == 1. iSCAM outputs the
# parameter values even if they were not estimated so they are gibberish
if(model$dat$num.sex == 1){
vals <- vals %>%
filter(sex != 1)
}
vals <- vals %>%
mutate(sex = ifelse(sex %in% c(0, 2), "Female", "Male"))
gear_names <- model$dat$gear_names
if(length(unique(vals$gear)) != length(gear_names)){
stop("`model$dat$gear_names` is not the same length as the number of gears present\n",
" in the MCMC selectivity parameter outputs. Check your declaration of the names\n",
" in the iSCAM data file and try again.")
}
if(!is.null(gear)){
valid_gear_nums <- seq_along(gear_names)
if(!all(gear %in% valid_gear_nums)){
stop("One or more of the gear numbers you requested is outside the range of possible gears.\n\n",
"Available gears numbers are: ", paste(valid_gear_nums, collapse = ", "), "\n\n",
"Names for these are:\n", paste(gear_names, collapse = "\n"))
}
vals <- vals %>%
filter(gear %in% gear_names[!!gear])
gear_names <- gear_names[gear]
}
# Rename the parameter columns because the ages columns would
# have these same names
vals <- vals %>%
rename(p1 = "a_hat", p2 = "g_hat")
# Remove gears with TV selectivity and give a warning
vals <- vals %>%
split(~gear) %>%
imap(~{
yrs <- unique(.x$start_year)
if(length(yrs) > 1){
warning("`gear` ", unique(.x$gear), " has selectivity blocks (is time-varying)\n",
" and must be plotted seperately using `plot_tv_selex_mcmc()`\n")
gear_names <<- gear_names[-which(gear_names == unique(.x$gear))]
return(NULL)
}
return(.x)
})
# Remove NULL list elements (fixed parameters)
vals <- vals[!sapply(vals, is.null)] %>%
bind_rows()
# Add age columns with logistic selectivity calculations
for(i in ages){
vals <- vals %>%
mutate(!!sym(i) := 1 / (1 + exp(-(as.numeric(i) - p1) / p2)))
}
get_val <- function(d, q){
d %>%
filter(quants == q) %>%
select(-quants) %>%
pivot_longer(-c(gear, start_year, end_year, Sex, p1, p2),
names_to = "age",
values_to = "value") %>%
mutate(age = as.numeric(age))
}
vals <- vals %>%
rename(Sex = sex)
# Re-order the posteriors by group in order of a_hat smallest to largest
gear_lst <- vals |>
split(~ gear)
vals <- gear_lst |>
map_dfr(function(gear_df){
sex_lst <- gear_df |>
split(~ Sex)
sex_lst <- map_dfr(sex_lst, function(sex_df){
sex_df[order(sex_df$p1), ]
})
}) |>
select(-c(posterior, block, start_year, end_year, p1, p2)) |>
select(gear, Sex, everything())
num_posts <- nrow(model$mcmc$params)
probs <- as.integer(probs * num_posts)
make_longer <- function(d){
d |>
pivot_longer(-c(gear, Sex),
names_to = "age",
values_to = "value") |>
mutate(age = as.numeric(age))
}
vals <- vals |>
mutate(gear = factor(gear)) |>
mutate(gear = fct_relevel(gear, gear_names))
lo_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[1]) |>
make_longer() |>
rename(lo_value = value)
med_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[2]) |>
make_longer()
hi_vals <- vals |>
group_by(gear, Sex) |>
slice(probs[3]) |>
make_longer() |>
rename(hi_value = value)
rib_vals <- lo_vals %>%
left_join(hi_vals,
by = c("gear", "Sex", "age")) |>
mutate(value = lo_value)
g <- ggplot(med_vals, aes(x = factor(age),
y = value,
group = Sex,
color = Sex,
fill = Sex)) +
geom_line() +
geom_point() +
xlab("Age") +
ylab("Selectivity") +
scale_x_discrete(breaks = breaks) +
scale_color_manual(values = c("red", "blue"))
if(ci_type %in% c("ribbon", "both")){
g <- g +
geom_ribbon(data = rib_vals,
aes(ymin = lo_value,
ymax = hi_value,
group = Sex),
alpha = ci_alpha,
color = NA)
}
if(ci_type %in% c("line", "both")){
g <- g +
geom_line(data = lo_vals, aes(y = lo_value,
group = Sex,
color = Sex),
linetype = ci_linetype) +
geom_line(data = hi_vals, aes(y = hi_value,
group = Sex,
color = Sex),
linetype = ci_linetype)
}
g <- g +
facet_wrap(~ gear) +
xlab("Age") +
ylab("Proportion")
if(show_maturity){
model$mpd$ma
if(model$dat$num.sex == 2){
a50_female <- model$dat$age.at.50.mat[1]
sigma_a50_female <- model$dat$sd.at.50.mat[1]
a50_male <- model$dat$age.at.50.mat[2]
sigma_a50_male <- model$dat$sd.at.50.mat[2]
g <- g +
geom_function(fun = function(x){1 / (1 + exp(-(x - a50_male) / sigma_a50_male))},
color = "blue",
linetype = "dashed")
}else{
a50_female <- model$dat$age.at.50.mat[1]
sigma_a50_female <- model$dat$sd.at.50.mat[1]
}
g <- g +
geom_function(fun = function(x){1 / (1 + exp(-(x - a50_female) / sigma_a50_female))},
color = "red",
linetype = "dashed")
}
if(is.null(leg_loc)){
g <- g +
theme(legend.position = "none")
}else if(leg_loc[1] == "facet"){
g <- g %>% move_legend_to_empty_facet()
}else{
g <- g +
theme(legend.justification = leg_loc,
legend.position = leg_loc,
legend.background = element_rect(fill = "white", color = "white"))
}
if(angle_x_labels){
g <- g +
theme(axis.text.x = element_text(angle = 45, hjust = 0.55, vjust = 0.5))
}
g
#suppressWarnings(print(g))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog_dat.R
\docType{data}
\name{sa42016}
\alias{sa42016}
\title{Statistical Area 4, 2016}
\format{An \code{sf} object with 12 variables:
\describe{
\item{\code{sa4_code_2016}}{The full 3 digit SA4 code numeric}
\item{\code{sa4_name_2016}}{The SA4 name character}
\item{\code{gcc_code_2016}}{The alphanumeric Greater Capital City (GCC) code numeric}
\item{\code{gcc_name_2016}}{The GCC name}
\item{\code{state_name_2016}}{The full state name}
\item{\code{albers_sqkm_2016}}{The area in square kilometres}
\item{\code{cent_lat}}{The latitude of the area's centroid}
\item{\code{cent_long}}{The longitude of the area's centroid}
\item{\code{geometry}}{A nested list containing the area's geometry (polygons)}
}}
\usage{
sa42016
}
\description{
Geospatial data provided by the ABS for Statistical Area 4 in 2016.
}
\keyword{datasets}
| /man/sa42016.Rd | no_license | srepho/absmapsdata | R | false | true | 910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog_dat.R
\docType{data}
\name{sa42016}
\alias{sa42016}
\title{Statistical Area 4, 2016}
\format{An \code{sf} object with 12 variables:
\describe{
\item{\code{sa4_code_2016}}{The full 3 digit SA4 code numeric}
\item{\code{sa4_name_2016}}{The SA4 name character}
\item{\code{gcc_code_2016}}{The alphanumeric Greater Capital City (GCC) code numeric}
\item{\code{gcc_name_2016}}{The GCC name}
\item{\code{state_name_2016}}{The full state name}
\item{\code{albers_sqkm_2016}}{The area in square kilometres}
\item{\code{cent_lat}}{The latitude of the area's centroid}
\item{\code{cent_long}}{The longitude of the area's centroid}
\item{\code{geometry}}{A nested list containing the area's geometry (polygons)}
}}
\usage{
sa42016
}
\description{
Geospatial data provided by the ABS for Statistical Area 4 in 2016.
}
\keyword{datasets}
|
source("Documents/ModellingTrends/Bayesian/Background/plot_dist.R")
#normal with uniform hyperprior
plot_dist(dists$normal, labels = c(mean = expression(mu[0]), right_sd = expression(sigma[U("1e-10", "100")])))
#Species intercept
plot_dist(dists$normal, labels = c(mean = expression(mu[genus]), right_sd = expression(sigma[U("1e-10", "100")])))
#Country intercept
plot_dist(dists$normal, labels = c(mean = expression(mu[region]), right_sd = expression(sigma[U("1e-10", "100")])))
#Model error
plot_dist(dists$normal, labels = c(mean = expression(mu[prediction]), right_sd = expression(sigma[weighted-error])))
#half t
plot_dist(dists$half_t, labels = c(scale = expression(sigma[0.0001]), df = expression(nu[1])))
#Mod1
plot_dist(dists$t, labels = c(mean = expression(mu[pred1]), right_df = expression(df[gamma(0.1, 0.001)]), right_scale = expression(sigma[gamma(0.001, 0.001)])))
#Mod2
plot_dist(dists$t, labels = c(mean = expression(mu[pred2]), right_df = expression(df[gamma(0.1, 0.001)]), right_scale = expression(sigma[U("1e-10", 1)])))
#Mod1
plot_dist(dists$beta, labels = expression(B["2,8"]))
| /code/Background/dag.R | permissive | GitTFJ/carnivore_trends | R | false | false | 1,152 | r | source("Documents/ModellingTrends/Bayesian/Background/plot_dist.R")
#normal with uniform hyperprior
plot_dist(dists$normal, labels = c(mean = expression(mu[0]), right_sd = expression(sigma[U("1e-10", "100")])))
#Species intercept
plot_dist(dists$normal, labels = c(mean = expression(mu[genus]), right_sd = expression(sigma[U("1e-10", "100")])))
#Country intercept
plot_dist(dists$normal, labels = c(mean = expression(mu[region]), right_sd = expression(sigma[U("1e-10", "100")])))
#Model error
plot_dist(dists$normal, labels = c(mean = expression(mu[prediction]), right_sd = expression(sigma[weighted-error])))
#half t
plot_dist(dists$half_t, labels = c(scale = expression(sigma[0.0001]), df = expression(nu[1])))
#Mod1
plot_dist(dists$t, labels = c(mean = expression(mu[pred1]), right_df = expression(df[gamma(0.1, 0.001)]), right_scale = expression(sigma[gamma(0.001, 0.001)])))
#Mod2
plot_dist(dists$t, labels = c(mean = expression(mu[pred2]), right_df = expression(df[gamma(0.1, 0.001)]), right_scale = expression(sigma[U("1e-10", 1)])))
#Mod1
plot_dist(dists$beta, labels = expression(B["2,8"]))
|
dataR <- read.csv(
"household_power_consumption.txt",
sep=";",
dec=".",
na.strings = "?",
colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
)
data <- dataR[(dataR$Date == "1/2/2007" | dataR$Date == "2/2/2007"), ]
data$uTime = strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
plot(c(data$uTime,data$uTime,data$uTime), c(data$Sub_metering_1, data$Sub_metering_2, data$Sub_metering_3), type = "n", ylab = "Energy sub metering", xlab="")
lines(data$uTime, data$Sub_metering_1, col="black")
lines(data$uTime, data$Sub_metering_2, col="red")
lines(data$uTime, data$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1), col=c("black", "red", "blue"))
dev.copy(png,"plot3.png", height=480, width=480)
dev.off()
| /plot3.R | no_license | itsdeepesh5/ExData_Plotting1 | R | false | false | 855 | r | dataR <- read.csv(
"household_power_consumption.txt",
sep=";",
dec=".",
na.strings = "?",
colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
)
data <- dataR[(dataR$Date == "1/2/2007" | dataR$Date == "2/2/2007"), ]
data$uTime = strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
plot(c(data$uTime,data$uTime,data$uTime), c(data$Sub_metering_1, data$Sub_metering_2, data$Sub_metering_3), type = "n", ylab = "Energy sub metering", xlab="")
lines(data$uTime, data$Sub_metering_1, col="black")
lines(data$uTime, data$Sub_metering_2, col="red")
lines(data$uTime, data$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1), col=c("black", "red", "blue"))
dev.copy(png,"plot3.png", height=480, width=480)
dev.off()
|
library(rvest)
library(readr)
library(dplyr)
library(janitor)
#web scraping
webpage <- read_html("https://www.pwrc.usgs.gov/bbl/manual/speclist.cfm")
tbls <- html_nodes(webpage, "table") %>%
html_table(fill = TRUE)
species <- tbls[[1]] %>%
clean_names() %>%
select(alpha_code, common_name) %>%
  mutate(alpha_code = tolower(alpha_code)) # keep the alpha_code name; the joins below expect it
# load data
nest_data <- read.csv ("https://arcticdata.io/metacat/d1/mn/v2/object/urn%3Auuid%3A982bd2fc-4edf-4da7-96ef-0d11b853102d")
head(nest_data)
predator_data <- read.csv("https://arcticdata.io/metacat/d1/mn/v2/object/urn%3Auuid%3A9ffec04c-7e2d-41dd-9e88-b6c2e8c4375e")
head(predator_data)
predator_species_join <- left_join(predator_data,species,by = c("species"="alpha_code")) %>% select(year, species, count)
nest_species_join <- left_join(nest_data, species, by = c("species"="alpha_code")) %>% select(common_name, )
#' Function to rename spp code to common names
#'
#' @param df
#' @param species
#'
#' @return common names
#' @export
#'
#' @examples
#'
assign_species_name <- function(df, species){
if (!("alpha_code" %in% names(species)) |
!("species" %in% names(df)) |
!("common_name" %in% names(species))){
stop("Tables appear to be formatted incorrectly.")
}
return_df <- left_join(df, species, by = c("species" = "alpha_code"))
if (nrow(return_df) > nrow(df)){
warning("Joined table has more rows than original table. Check species table for duplicated code values.")
}
if (length(which(is.na(return_df$common_name))) > 0){
x <- length(which(is.na(return_df$common_name)))
warning(paste("Common name has", x, "rows containing NA"))
}
return(return_df)
}
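# Example use of the helper above (a sketch; assumes the `species` lookup table
# and the `predator_data`/`nest_data` tables created earlier in this script):
predator_named <- assign_species_name(predator_data, species)
nest_named <- assign_species_name(nest_data, species)
head(predator_named$common_name)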
| /lesson11.R | no_license | tcounihan/training_counihan | R | false | false | 1,753 | r | library(rvest)
library(readr)
library(dplyr)
library(janitor)
#web scraping
webpage <- read_html("https://www.pwrc.usgs.gov/bbl/manual/speclist.cfm")
tbls <- html_nodes(webpage, "table") %>%
html_table(fill = TRUE)
species <- tbls[[1]] %>%
clean_names() %>%
select(alpha_code, common_name) %>%
  mutate(alpha_code = tolower(alpha_code)) # keep the alpha_code name; the joins below expect it
# load data
nest_data <- read.csv ("https://arcticdata.io/metacat/d1/mn/v2/object/urn%3Auuid%3A982bd2fc-4edf-4da7-96ef-0d11b853102d")
head(nest_data)
predator_data <- read.csv("https://arcticdata.io/metacat/d1/mn/v2/object/urn%3Auuid%3A9ffec04c-7e2d-41dd-9e88-b6c2e8c4375e")
head(predator_data)
predator_species_join <- left_join(predator_data,species,by = c("species"="alpha_code")) %>% select(year, species, count)
nest_species_join <- left_join(nest_data, species, by = c("species"="alpha_code")) %>% select(common_name, )
#' Function to rename spp code to common names
#'
#' @param df
#' @param species
#'
#' @return common names
#' @export
#'
#' @examples
#'
assign_species_name <- function(df, species){
if (!("alpha_code" %in% names(species)) |
!("species" %in% names(df)) |
!("common_name" %in% names(species))){
stop("Tables appear to be formatted incorrectly.")
}
return_df <- left_join(df, species, by = c("species" = "alpha_code"))
if (nrow(return_df) > nrow(df)){
warning("Joined table has more rows than original table. Check species table for duplicated code values.")
}
if (length(which(is.na(return_df$common_name))) > 0){
x <- length(which(is.na(return_df$common_name)))
warning(paste("Common name has", x, "rows containing NA"))
}
return(return_df)
}
|
#' Read .gff files
#'
#' Uses `rtracklayer::import`. Assume contig IDs are unique amongst files if
#' genome_ids are not provided
#'
#' @param gff_files to read
#' @param genome_ids for each file. Only necessary of contig_ids are not unique
#' among different genomes.
#' @export
#' @return tibble
read_gffs <- function(gff_files, genome_ids = NULL){
if (!requireNamespace("rtracklayer", quietly = TRUE)) {
stop("Reading .gffs requires package 'rtracklayer' to be installed.",
call. = FALSE)
}
if(!is.null(genome_ids)) names(gff_files) <- genome_ids
TODO("list types, suggest filter")
map_df(gff_files, function(gff){
as_tibble(rtracklayer::import(gff)) %>%
mutate_if(is.factor, as.character)
})
}
#' Read genome_ids, contig_ids and contig lengths from .gff files.
#'
#' Parses `##sequence-region` annotation using `grep`. `rtracklayer` ignores
#' those lines.
#'
#' @param genome_ids to use with each file. If `NULL` infer from file name.
#' @export
#' @return A tibble with columns: genome_id, contig_id, length.
read_gffs_as_contigs <- function(gff_files, genome_ids = NULL){
data <- map(gff_files, function(gff){
data <- read_table(pipe(paste('grep ^##sequence-region ', gff)), col_names = c("contig_id", "from", "to"), col_types = "-cnn") %>%
mutate(length = to - from + 1) %>%
select(-from, -to)
})
# genome ids
if(is.null(genome_ids)){
genome_ids <- sapply(gff_files, basename) %>%
stringr::str_replace(".gff", "")
if(any(duplicated(genome_ids))) stop("Filenames gave non-unique genome IDs, use `genome_id=` to specify manually")
}
names(data) <- genome_ids
TODO("print summary info: read X genomes with y contigs, ...")
# bind
bind_rows(data, .id="genome_id") %>% as_tibble
}
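# Usage sketch (hypothetical file paths): both readers take a vector of .gff
# files plus an optional `genome_ids` vector for when contig IDs are not
# unique across genomes.
# gffs <- c("genomes/eco.gff", "genomes/sty.gff")
# features <- read_gffs(gffs, genome_ids = c("eco", "sty"))
# contigs  <- read_gffs_as_contigs(gffs, genome_ids = c("eco", "sty"))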
#' read a .paf file (minimap/minimap2). Only the first 12 canonical
#' columns. Ignores tagged extra fields.
#'
#' @inheritParams thacklr::read_paf
#' @importFrom readr read_tsv
#' @importFrom thacklr read_paf
#' @export
#' @return tibble
read_paf <- function(file, max_tags=20){
thacklr::read_paf(file, max_tags) %>%
rename(
seq_id1=query_name, seq_id2=target_name,
start1=query_start, start2=target_start,
end1=query_end, end2=target_end,
length1=query_length, length2=target_length
)
}
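# Usage sketch (hypothetical path): a minimap2 .paf alignment read this way can
# be used directly as a links table, since the columns are renamed to the
# seq_id1/start1/end1 ... convention used elsewhere in the package.
# links <- read_paf("alignments/eco_vs_sty.paf")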
#' Read AliTV .json file
#'
#' this file contains sequences, links and (optionally) genes
#'
#' @importFrom tidyr unnest_wider
#' @importFrom tidyr unnest
#' @importFrom jsonlite fromJSON
#' @param file path to json
#' @export
#' @return list with seqs, genes, and links
#' @examples
#' ali <- read_alitv("https://alitvteam.github.io/AliTV/d3/data/chloroplasts.json")
#' gggenomes(ali$seqs, ali$genes, links=ali$links) +
#' geom_seq() +
#' geom_bin_label() +
#' geom_gene(aes(fill=class)) +
#' geom_link()
#' p <- gggenomes(ali$seqs, ali$genes, links=ali$links) +
#' geom_seq() +
#' geom_bin_label() +
#' geom_gene(aes(color=class)) +
#' geom_link(aes(fill=identity)) +
#' scale_fill_distiller(palette="RdYlGn", direction = 1)
#' p %>% flip_seq("Same_gi") %>% pick(1,3,2,4,5,6,7,8)
read_alitv <- function(file){
ali <- jsonlite::fromJSON(file, simplifyDataFrame=TRUE)
seqs <- tibble(seq = ali$data$karyo$chromosome) %>%
mutate(seq_id = names(seq)) %>%
unnest_wider(seq) %>%
rename(bin_id = genome_id)
genes <- tibble(feature = ali$data$feature) %>%
mutate(class = names(feature)) %>%
filter(class != "link") %>%
unnest(feature) %>%
rename(seq_id=karyo)
links <- tibble(links=ali$data$links) %>% unnest(links) %>% unnest(links) %>% unnest_wider(links)
link_pos <- tibble(link=ali$data$features$link) %>% mutate(id=names(link)) %>% unnest_wider(link)
links <- links %>%
left_join(link_pos, by=c("source"="id")) %>%
left_join(link_pos, by=c("target"="id")) %>%
transmute(
seq_id1=karyo.x,
start1=start.x,
end1=end.x,
seq_id2=karyo.y,
start2=start.y,
end2=end.y,
identity=identity
)
return(list(seqs=seqs,genes=genes,links=links))
}
| /R/read.R | permissive | babasaraki/gggenomes | R | false | false | 4,104 | r | #' Read .gff files
#'
#' Uses `rtracklayer::import`. Assume contig IDs are unique amongst files if
#' genome_ids are not provided
#'
#' @param gff_files to read
#' @param genome_ids for each file. Only necessary of contig_ids are not unique
#' among different genomes.
#' @export
#' @return tibble
read_gffs <- function(gff_files, genome_ids = NULL){
if (!requireNamespace("rtracklayer", quietly = TRUE)) {
stop("Reading .gffs requires package 'rtracklayer' to be installed.",
call. = FALSE)
}
if(!is.null(genome_ids)) names(gff_files) <- genome_ids
TODO("list types, suggest filter")
map_df(gff_files, function(gff){
as_tibble(rtracklayer::import(gff)) %>%
mutate_if(is.factor, as.character)
})
}
#' Read genome_ids, contig_ids and contig lengths from .gff files.
#'
#' Parses `##sequence-region` annotation using `grep`. `rtracklayer` ignores
#' those lines.
#'
#' @param genome_ids to use with each file. If `NULL` infer from file name.
#' @export
#' @return A tibble with columns: genome_id, contig_id, length.
read_gffs_as_contigs <- function(gff_files, genome_ids = NULL){
data <- map(gff_files, function(gff){
data <- read_table(pipe(paste('grep ^##sequence-region ', gff)), col_names = c("contig_id", "from", "to"), col_types = "-cnn") %>%
mutate(length = to - from + 1) %>%
select(-from, -to)
})
# genome ids
if(is.null(genome_ids)){
genome_ids <- sapply(gff_files, basename) %>%
stringr::str_replace(".gff", "")
if(any(duplicated(genome_ids))) stop("Filenames gave non-unique genome IDs, use `genome_id=` to specify manually")
}
names(data) <- genome_ids
TODO("print summary info: read X genomes with y contigs, ...")
# bind
bind_rows(data, .id="genome_id") %>% as_tibble
}
#' read a .paf file (minimap/minimap2). Only the first 12 canonical
#' columns. Ignores tagged extra fields.
#'
#' @inheritParams thacklr::read_paf
#' @importFrom readr read_tsv
#' @importFrom thacklr read_paf
#' @export
#' @return tibble
read_paf <- function(file, max_tags=20){
thacklr::read_paf(file, max_tags) %>%
rename(
seq_id1=query_name, seq_id2=target_name,
start1=query_start, start2=target_start,
end1=query_end, end2=target_end,
length1=query_length, length2=target_length
)
}
#' Read AliTV .json file
#'
#' this file contains sequences, links and (optionally) genes
#'
#' @importFrom tidyr unnest_wider
#' @importFrom tidyr unnest
#' @importFrom jsonlite fromJSON
#' @param file path to json
#' @export
#' @return list with seqs, genes, and links
#' @examples
#' ali <- read_alitv("https://alitvteam.github.io/AliTV/d3/data/chloroplasts.json")
#' gggenomes(ali$seqs, ali$genes, links=ali$links) +
#' geom_seq() +
#' geom_bin_label() +
#' geom_gene(aes(fill=class)) +
#' geom_link()
#' p <- gggenomes(ali$seqs, ali$genes, links=ali$links) +
#' geom_seq() +
#' geom_bin_label() +
#' geom_gene(aes(color=class)) +
#' geom_link(aes(fill=identity)) +
#' scale_fill_distiller(palette="RdYlGn", direction = 1)
#' p %>% flip_seq("Same_gi") %>% pick(1,3,2,4,5,6,7,8)
read_alitv <- function(file){
ali <- jsonlite::fromJSON(file, simplifyDataFrame=TRUE)
seqs <- tibble(seq = ali$data$karyo$chromosome) %>%
mutate(seq_id = names(seq)) %>%
unnest_wider(seq) %>%
rename(bin_id = genome_id)
genes <- tibble(feature = ali$data$feature) %>%
mutate(class = names(feature)) %>%
filter(class != "link") %>%
unnest(feature) %>%
rename(seq_id=karyo)
links <- tibble(links=ali$data$links) %>% unnest(links) %>% unnest(links) %>% unnest_wider(links)
link_pos <- tibble(link=ali$data$features$link) %>% mutate(id=names(link)) %>% unnest_wider(link)
links <- links %>%
left_join(link_pos, by=c("source"="id")) %>%
left_join(link_pos, by=c("target"="id")) %>%
transmute(
seq_id1=karyo.x,
start1=start.x,
end1=end.x,
seq_id2=karyo.y,
start2=start.y,
end2=end.y,
identity=identity
)
return(list(seqs=seqs,genes=genes,links=links))
}
|
setwd("C:/Users/vikra/OneDrive/Data Science/Course 4 - Exploratory data analysis/data")
## Read the file
datafile<-read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Select only observations within a date range
datafile_2007<-subset(datafile, Date %in% c("1/2/2007","2/2/2007"))
## Convert the Date column to date format
datafile_2007$Date<-as.Date(datafile_2007$Date, format="%d/%m/%Y")
png("plot1.png", width=480, height = 480)
hist(datafile_2007$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off() | /Plot1.R | no_license | vikramdr/ExData_Plotting1 | R | false | false | 688 | r | setwd("C:/Users/vikra/OneDrive/Data Science/Course 4 - Exploratory data analysis/data")
## Read the file
datafile<-read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Select only observations within a date range
datafile_2007<-subset(datafile, Date %in% c("1/2/2007","2/2/2007"))
## Convert the Date column to date format
datafile_2007$Date<-as.Date(datafile_2007$Date, format="%d/%m/%Y")
png("plot1.png", width=480, height = 480)
hist(datafile_2007$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off() |
#two factors:wool and tension
rm(list=ls())
data("warpbreaks")
head(warpbreaks)
levels(warpbreaks$wool) # A,B
levels(warpbreaks$tension) #"L" "M" "H"
table(warpbreaks$wool, warpbreaks$tension)
#visualize the data with box plots
boxplot(breaks ~ wool + tension, data=warpbreaks)
boxplot(log(breaks) ~ wool + tension, data=warpbreaks)
#The different groups have more similar variance if we use the logarithm of breaks.
#It appears that there is a general decrease in breaks as we move from low to medium to high tension
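# Quick numerical check of the two claims above (a sketch, not part of the
# original analysis): group means of log(breaks) by tension, and mean/SD by
# wool and tension.
aggregate(log(breaks) ~ tension, data = warpbreaks, FUN = mean)
aggregate(log(breaks) ~ wool + tension, data = warpbreaks,
          FUN = function(x) c(mean = mean(x), sd = sd(x)))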
###############one-way ANOVA model using tension only#########################
# Model: y[i] ~ dnorm(mu[group], prec)   (JAGS parameterises dnorm by precision)
# mu[j] ~ dnorm(0.0, 1.0/1.0e6)
# prec ~ dgamma(5/2.0, 5*2.0/2.0), with sig = sqrt(1/prec)
library("rjags")
mod1_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[tensGrp[i]], prec)
}
for (j in 1:3) {
mu[j] ~ dnorm(0.0, 1.0/1.0e6)
}
prec ~ dgamma(5/2.0, 5*2.0/2.0)
sig = sqrt(1.0 / prec)
} "
set.seed(83)
str(warpbreaks)
data1_jags = list(y=log(warpbreaks$breaks), tensGrp=as.numeric(warpbreaks$tension))
params1 = c("mu", "sig")
mod1 = jags.model(textConnection(mod1_string), data=data1_jags, n.chains=3)
update(mod1, 1e3)
mod1_sim = coda.samples(model=mod1,
variable.names=params1,
n.iter=5e3)
## convergence diagnostics
plot(mod1_sim)
gelman.diag(mod1_sim)
autocorr.diag(mod1_sim)
effectiveSize(mod1_sim)
summary(mod1_sim)
#The 95% posterior interval for the mean of mu[2] overlaps with both the mu[1] and mu[3]
# but the intervals for low and high group only slightly overlap.
# the means for low and high tension are different.
# DIC model
dic1 = dic.samples(mod1, n.iter=1e3)
################### Two-way additive model(wool and tension have no interaction: no mu matrix) ################
#multiple factor ANOVA: With two factors, one with two levels and the other with three, we have six treatment groups,
# fit the additive model which treats the two factors separately with no interaction.no mu matrix
X = model.matrix( ~ wool + tension, data=warpbreaks)
head(X)
tail(X)
#By default, R has chosen the mean for wool A and low tension to be the intercept
mod2_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[i], prec)
mu[i] = int + alpha*isWoolB[i] + beta[1]*isTensionM[i] + beta[2]*isTensionH[i]
}
int ~ dnorm(0.0, 1.0/1.0e6)
alpha ~ dnorm(0.0, 1.0/1.0e6)
for (j in 1:2) {
beta[j] ~ dnorm(0.0, 1.0/1.0e6)
}
prec ~ dgamma(3/2.0, 3*1.0/2.0)
sig = sqrt(1.0 / prec)
} "
data2_jags = list(y=log(warpbreaks$breaks), isWoolB=X[,"woolB"], isTensionM=X[,"tensionM"], isTensionH=X[,"tensionH"])
params2 = c("int", "alpha", "beta", "sig")
mod2 = jags.model(textConnection(mod2_string), data=data2_jags, n.chains=3)
update(mod2, 1e3)
mod2_sim = coda.samples(model=mod2,
variable.names=params2,
n.iter=5e3)
## convergence diagnostics
plot(mod2_sim)
gelman.diag(mod2_sim)
autocorr.diag(mod2_sim)
effectiveSize(mod2_sim)
#summarize the results
summary(mod2_sim)
(dic2 = dic.samples(mod2, n.iter=1e3))
dic1
#This suggests there is much to be gained adding the wool factor to the model.
# look again at the box plot with all six treatment groups.
boxplot(log(breaks) ~ wool + tension, data=warpbreaks)
lmod1 = lm(log(breaks)~ wool + tension, data=warpbreaks)
summary(lmod1)
#Our two-way model has a single effect for wool B and the estimate is negative.
#we would expect wool B to be associated with fewer breaks than its wool A counterpart on average
# This is true for low and high tension, but it's breaks are higher for wool B when there is medium tension.
# the effect for wool B is not consistent across tension levels,
# so it may be appropriate to add an interaction term. In R, this would look like:
lmod2 = lm(log(breaks) ~ .^2, data=warpbreaks)
summary(lmod2)
#Adding the interaction, we get an effect for being in wool B and medium tension,
#as well as for being in wool B and high tension.
#There are now six parameters for the mean,
#one for each treatment group, so this model is equivalent to the full cell means model.
##############Two-way cell means model with interaction metrixs####################################
# mu will be a matrix with six entries : rows:woolGrp:2, columns: tensGrp: 3
mod3_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[woolGrp[i], tensGrp[i]], prec)
}
for (j in 1:max(woolGrp)) {
for (k in 1:max(tensGrp)) {
mu[j,k] ~ dnorm(0.0, 1.0/1.0e6)
}
}
prec ~ dgamma(3/2.0, 3*1.0/2.0)
sig = sqrt(1.0 / prec)
} "
str(warpbreaks)
data3_jags = list(y=log(warpbreaks$breaks), woolGrp=as.numeric(warpbreaks$wool), tensGrp=as.numeric(warpbreaks$tension))
params3 = c("mu", "sig")
mod3 = jags.model(textConnection(mod3_string), data=data3_jags, n.chains=3)
update(mod3, 1e3)
mod3_sim = coda.samples(model=mod3,
variable.names=params3,
n.iter=5e3)
mod3_csim = as.mcmc(do.call(rbind, mod3_sim))
plot(mod3_sim, ask=TRUE)
## convergence diagnostics
gelman.diag(mod3_sim)
autocorr.diag(mod3_sim)
effectiveSize(mod3_sim)
raftery.diag(mod3_sim)
# compute the DIC and compare with our previous models.
(dic3 = dic.samples(mod3, n.iter=1e3))
#This suggests that the full model with interaction between wool and tension
#(which is equivalent to the cell means model) is the best for explaining/predicting warp breaks.
summary(mod3_sim)
par(mfrow=c(3,2)) # arrange frame for plots for mu matrixs value distributions
densplot(mod3_csim[,1:6], xlim=c(2.0, 4.5))
#It might be tempting to look at comparisons between each combination of treatments
#Results are most reliable when we determine a relatively small number of hypotheses
#we are interested in beforehand, collect the data, and statistically evaluate the evidence for them.
#go through our posterior samples and for each sample, find out which group has the smallest mean
#posterior probability that each of the treatment groups has the smallest mean
prop.table( table( apply(mod3_csim[,1:6], 1, which.min) ) ) # find rows with min Markov Chain Monte Carlo (MCMC) output
#The evidence supports wool B with high tension as the treatment that produces the fewest breaks. | /Project Code/Two Factor ANOVA.R | no_license | jpark77/Bayesian-Statistics-Techniques-and-Models-from-UCSC-on-Coursera | R | false | false | 6,150 | r | #two factors:wool and tension
rm(list=ls())
data("warpbreaks")
head(warpbreaks)
levels(warpbreaks$wool) # A,B
levels(warpbreaks$tension) #"L" "M" "H"
table(warpbreaks$wool, warpbreaks$tension)
#visualize the data with box plots
boxplot(breaks ~ wool + tension, data=warpbreaks)
boxplot(log(breaks) ~ wool + tension, data=warpbreaks)
#The different groups have more similar variance if we use the logarithm of breaks.
#It appears that there is a general decrease in breaks as we move from low to medium to high tension
###############one-way ANOVA model using tension only#########################
# Model: y[i] ~ dnorm(mu[group], prec)   (JAGS parameterises dnorm by precision)
# mu[j] ~ dnorm(0.0, 1.0/1.0e6)
# prec ~ dgamma(5/2.0, 5*2.0/2.0), with sig = sqrt(1/prec)
library("rjags")
mod1_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[tensGrp[i]], prec)
}
for (j in 1:3) {
mu[j] ~ dnorm(0.0, 1.0/1.0e6)
}
prec ~ dgamma(5/2.0, 5*2.0/2.0)
sig = sqrt(1.0 / prec)
} "
set.seed(83)
str(warpbreaks)
data1_jags = list(y=log(warpbreaks$breaks), tensGrp=as.numeric(warpbreaks$tension))
params1 = c("mu", "sig")
mod1 = jags.model(textConnection(mod1_string), data=data1_jags, n.chains=3)
update(mod1, 1e3)
mod1_sim = coda.samples(model=mod1,
variable.names=params1,
n.iter=5e3)
## convergence diagnostics
plot(mod1_sim)
gelman.diag(mod1_sim)
autocorr.diag(mod1_sim)
effectiveSize(mod1_sim)
summary(mod1_sim)
#The 95% posterior interval for the mean of mu[2] overlaps with both the mu[1] and mu[3]
# but the intervals for low and high group only slightly overlap.
# the means for low and high tension are different.
# DIC model
dic1 = dic.samples(mod1, n.iter=1e3)
################### Two-way additive model(wool and tension have no interaction: no mu matrix) ################
#multiple factor ANOVA: With two factors, one with two levels and the other with three, we have six treatment groups,
# fit the additive model which treats the two factors separately with no interaction.no mu matrix
X = model.matrix( ~ wool + tension, data=warpbreaks)
head(X)
tail(X)
#By default, R has chosen the mean for wool A and low tension to be the intercept
mod2_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[i], prec)
mu[i] = int + alpha*isWoolB[i] + beta[1]*isTensionM[i] + beta[2]*isTensionH[i]
}
int ~ dnorm(0.0, 1.0/1.0e6)
alpha ~ dnorm(0.0, 1.0/1.0e6)
for (j in 1:2) {
beta[j] ~ dnorm(0.0, 1.0/1.0e6)
}
prec ~ dgamma(3/2.0, 3*1.0/2.0)
sig = sqrt(1.0 / prec)
} "
data2_jags = list(y=log(warpbreaks$breaks), isWoolB=X[,"woolB"], isTensionM=X[,"tensionM"], isTensionH=X[,"tensionH"])
params2 = c("int", "alpha", "beta", "sig")
mod2 = jags.model(textConnection(mod2_string), data=data2_jags, n.chains=3)
update(mod2, 1e3)
mod2_sim = coda.samples(model=mod2,
variable.names=params2,
n.iter=5e3)
## convergence diagnostics
plot(mod2_sim)
gelman.diag(mod2_sim)
autocorr.diag(mod2_sim)
effectiveSize(mod2_sim)
#summarize the results
summary(mod2_sim)
(dic2 = dic.samples(mod2, n.iter=1e3))
dic1
#This suggests there is much to be gained adding the wool factor to the model.
# look again at the box plot with all six treatment groups.
boxplot(log(breaks) ~ wool + tension, data=warpbreaks)
lmod1 = lm(log(breaks)~ wool + tension, data=warpbreaks)
summary(lmod1)
#Our two-way model has a single effect for wool B and the estimate is negative.
#we would expect wool B to be associated with fewer breaks than its wool A counterpart on average
# This is true for low and high tension, but it's breaks are higher for wool B when there is medium tension.
# the effect for wool B is not consistent across tension levels,
# so it may be appropriate to add an interaction term. In R, this would look like:
lmod2 = lm(log(breaks) ~ .^2, data=warpbreaks)
summary(lmod2)
#Adding the interaction, we get an effect for being in wool B and medium tension,
#as well as for being in wool B and high tension.
#There are now six parameters for the mean,
#one for each treatment group, so this model is equivalent to the full cell means model.
##############Two-way cell means model with interaction metrixs####################################
# mu will be a matrix with six entries : rows:woolGrp:2, columns: tensGrp: 3
mod3_string = " model {
for( i in 1:length(y)) {
y[i] ~ dnorm(mu[woolGrp[i], tensGrp[i]], prec)
}
for (j in 1:max(woolGrp)) {
for (k in 1:max(tensGrp)) {
mu[j,k] ~ dnorm(0.0, 1.0/1.0e6)
}
}
prec ~ dgamma(3/2.0, 3*1.0/2.0)
sig = sqrt(1.0 / prec)
} "
str(warpbreaks)
data3_jags = list(y=log(warpbreaks$breaks), woolGrp=as.numeric(warpbreaks$wool), tensGrp=as.numeric(warpbreaks$tension))
params3 = c("mu", "sig")
mod3 = jags.model(textConnection(mod3_string), data=data3_jags, n.chains=3)
update(mod3, 1e3)
mod3_sim = coda.samples(model=mod3,
variable.names=params3,
n.iter=5e3)
mod3_csim = as.mcmc(do.call(rbind, mod3_sim))
plot(mod3_sim, ask=TRUE)
## convergence diagnostics
gelman.diag(mod3_sim)
autocorr.diag(mod3_sim)
effectiveSize(mod3_sim)
raftery.diag(mod3_sim)
# compute the DIC and compare with our previous models.
(dic3 = dic.samples(mod3, n.iter=1e3))
#This suggests that the full model with interaction between wool and tension
#(which is equivalent to the cell means model) is the best for explaining/predicting warp breaks.
summary(mod3_sim)
par(mfrow=c(3,2)) # arrange frame for plots for mu matrixs value distributions
densplot(mod3_csim[,1:6], xlim=c(2.0, 4.5))
#It might be tempting to look at comparisons between each combination of treatments
#Results are most reliable when we determine a relatively small number of hypotheses
#we are interested in beforehand, collect the data, and statistically evaluate the evidence for them.
#go through our posterior samples and for each sample, find out which group has the smallest mean
#posterior probability that each of the treatment groups has the smallest mean
prop.table( table( apply(mod3_csim[,1:6], 1, which.min) ) ) # find rows with min Markov Chain Monte Carlo (MCMC) output
#The evidence supports wool B with high tension as the treatment that produces the fewest breaks. |
#"!
~~~~~~~~
myc~
myclusterVec<-append(myclusterVec,c$no)
~
myclusterVec<-append(~
myclus~
myc~~
myclusterVec<-append(myclusterVec,c$no)
print(c$no)
c <- clusters(Gi)
Gi <- subgraph(Gi,vi)
# print(vi)
vi <- V(G)[which(V
L = length(V(Gi)) - 1
for (j in 1:N){
vertexwrite(memberships,G,'memberships')
source("vertexwrite.R")
N= length(memberships$csize)
# save the names of vertices belonging to each cluster in a separate file.
modularity <- results[3]
memberships <- list(membership=results$membership,csize=results$csize)
results <- clustergraph(G)
source("clustergraph.R")
# cluster the base graph and save membership information
sink(file="BalatarinModularity.txt",append=TRUE,type="output")
G<-read.graph("unipartite.txt", format="ncol")
# read graph from file
library(igraph)
# Important Note: Arrays in R are labeled from 1 to n, but vertices in igraph are labled from 0 to n-1. This means V(G)[0]$name = V(G)$name[1]
b0VIM 7.3
roja
starsky
/media/data3/roja/Balatarin/CompleteRun/vertexwrite.R
| /Balatarin/main_balatarin22.R | no_license | Roja-B/Trajectories | R | false | false | 1,120 | r | #"!
~~~~~~~~
myc~
myclusterVec<-append(myclusterVec,c$no)
~
myclusterVec<-append(~
myclus~
myc~~
myclusterVec<-append(myclusterVec,c$no)
print(c$no)
c <- clusters(Gi)
Gi <- subgraph(Gi,vi)
# print(vi)
vi <- V(G)[which(V
L = length(V(Gi)) - 1
for (j in 1:N){
vertexwrite(memberships,G,'memberships')
source("vertexwrite.R")
N= length(memberships$csize)
# save the names of vertices belonging to each cluster in a separate file.
modularity <- results[3]
memberships <- list(membership=results$membership,csize=results$csize)
results <- clustergraph(G)
source("clustergraph.R")
# cluster the base graph and save membership information
sink(file="BalatarinModularity.txt",append=TRUE,type="output")
G<-read.graph("unipartite.txt", format="ncol")
# read graph from file
library(igraph)
# Important Note: Arrays in R are labeled from 1 to n, but vertices in igraph are labled from 0 to n-1. This means V(G)[0]$name = V(G)$name[1]
b0VIM 7.3
roja
starsky
/media/data3/roja/Balatarin/CompleteRun/vertexwrite.R
|
# matching_geos matches cities from DCM and Adwords based on names, impression
# counts
# Author Wil Poole
# Version 0.1
# Libraries
library(tools)
library(stringdist)
# data files
data.directory <- 'data'
# Check correct files
file.check <- function(
# Look in the data
directory.to.check
) {
# List files in the directory, and then see what is happening
file.list <- list.files(directory.to.check)
if ("adwords_city_impressions.csv" %in% file.list) {
data.state <- 1
} else {
stop("Missing adwords data file")
}
return(data.state)
}
dcm.report.file.process <- function(
data.directory,dcm.report.file
) {
# Read original DCM data
dcm.city <- read.csv(
file.path(data.directory,dcm.report.file)
, header = F, as.is = T
)
# Count lines
n.lines <- dim(dcm.city)[1]
# Read the data and look for "Report Fields"
counter <- 1
for (i in 1:n.lines){
if ("Report Fields" %in% dcm.city[i,1]){counter <- counter +2; break}
counter <- counter + 1
}
# Read csv again so that the data is in the correct form
dcm.city <- read.csv(
file.path(data.directory,dcm.report.file)
, skip = counter, header = T, as.is = T
)
}
# Need to first join the cities together
# Load the datasets
# Adwords data
adwords.city <- read.csv(
file.path(data.directory,"adwords_city_impressions.csv")
)
# DCM data - change this to include the main list
dcm.city <- dcm.report.file.process(
data.directory,"dcm_city_impressions.csv"
)
# Match on name
adwords.city.sorted <- sort(unique(adwords.city$City[adwords.city$Country.Territory == "United Kingdom"]))
dcm.city.sorted <- sort(unique(dcm.city$City))
dl <- stringdistmatrix(adwords.city.sorted,dcm.city.sorted, method = 'dl')
jw <- stringdistmatrix(adwords.city.sorted,dcm.city.sorted, method = 'jw')
dcm.matched.to.adwords.dl <- data.frame(
dcm.city.sorted,
apply(dl,2,min),
apply(dl,2,which.min),
apply(dl,2,function(x) length(x[x == min(x)]) ),
adwords.city.sorted[apply(dl,2,which.min)]
)
dcm.matched.to.adwords.dl[apply(dl,2,function(x) length(x[x == min(x)]) )!=1,]
unique(apply(dl,2,function(x) length(x[x == min(x)]) ))
adwords.matched.to.dcm.dl <- data.frame(adwords.city.sorted,dcm.city.sorted[apply(dl,1,which.min)])
dcm.matched.to.adwords.jw <- data.frame(dcm.city.sorted,adwords.city.sorted[apply(jw,2,which.min)])
adwords.matched.to.dcm.jw <- data.frame(adwords.city.sorted,dcm.city.sorted[apply(jw,1,which.min)])
m <- data.frame(dcm.matched.to.adwords.dl,dcm.matched.to.adwords.jw)
write.csv(m, "m.csv")
# City list
list.city <- read.csv(
file.path(data.directory,"cities.csv")
)
# Postalcode data
postal.code.small <- read.csv(file.path(data.directory,"postalcode_small.csv"))
# Postalcode data
postal.code.large <- read.csv(file.path(data.directory,"postalcode_large.csv"))
# Stage 1 do this on names
# Find 5 best matching city names
# Stage 2 do this by impression count
# Of the five, identify those that are closest together
# Process files
# Output is one row per city, with matching:
# city_ids, names, county, and postcodes
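# A rough sketch of Stage 1 (hypothetical helper, not part of the original script):
# for each DCM city keep the 5 Adwords names with the smallest Damerau-Levenshtein
# distance, reusing the 'dl' matrix computed above
top.name.matches <- function(dist.matrix, n.best = 5) {
  do.call(rbind, lapply(seq_len(ncol(dist.matrix)), function(j) {
    ord <- order(dist.matrix[, j])[seq_len(n.best)]
    data.frame(dcm.city = dcm.city.sorted[j],
               adwords.city = adwords.city.sorted[ord],
               distance = dist.matrix[ord, j],
               rank = seq_len(n.best))
  }))
}
stage1.candidates <- top.name.matches(dl, n.best = 5)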
| /matching_geos.r | no_license | wilpoole/matching_cities | R | false | false | 3,081 | r |
## ----echo=FALSE----------------------------------------------------------
runModels <- FALSE # If TRUE, rerun models; otherwise, reload previous output.
## ------------------------------------------------------------------------
## Load package
library(dalmatian)
## ------------------------------------------------------------------------
## Load pied flycatcher data
data(pied_flycatchers_1)
## ------------------------------------------------------------------------
## Create variables bounding the true load
pfdata$lower=ifelse(pfdata$load==0,log(.001),log(pfdata$load-.049))
pfdata$upper=log(pfdata$load+.05)
## ------------------------------------------------------------------------
# Random component of mean
mymean=list(fixed=list(name="alpha",
formula=~ log(IVI) + broodsize + sex,
priors=list(c("dnorm",0,.001))),
random=list(name="epsilon",formula=~-1 + indidx + indidx:log(IVI)))
# Random component of variance
myvar=list(fixed=list(name="psi",
link="log",
formula=~1,
priors=list(c("dnorm",0,.001))))
## ------------------------------------------------------------------------
## Set working directory
## By default uses a system temp directory. You probably want to change this.
workingDir <- tempdir()
## Define list of arguments for jags.model()
jm.args <- list(file=file.path(workingDir,"pied_flycatcher_3_jags.R"),n.adapt=1000)
## Define list of arguments for coda.samples()
cs.args <- list(n.iter=1000)
## Run the model using dalmatian
if(runModels){
pfresults3 <- dalmatian(df=pfdata,
mean.model=mymean,
variance.model=myvar,
jags.model.args=jm.args,
coda.samples.args=cs.args,
rounding=TRUE,
lower="lower",
upper="upper",
debug=FALSE)
save(pfresults3, file = "pfresults3.RData")
}
if(!runModels){
## Load output from previously run chain
load(system.file("Pied_Flycatchers_2","pfresults3.RData",package="dalmatian"))
}
## ------------------------------------------------------------------------
## Compute convergence diagnostics
pfconvergence2 <- convergence(pfresults3,raftery=list(r=.01))
## Gelman-Rubin diagnostics
pfconvergence2$gelman
## Raftery diagnostics
pfconvergence2$raftery
## Effective sample size
pfconvergence2$effectiveSize
## ----fig.width=6,fig.align="center"--------------------------------------
## Generate traceplots
pftraceplots2 <- traceplots(pfresults3,plot = FALSE)
## Fixed effects for mean
pftraceplots2$meanFixed
## Fixed effects for variance
pftraceplots2$varianceFixed
## Random effects variances for mean
pftraceplots2$meanRandom
## Random effects variances for variances
pftraceplots2$varianceRandom
## ------------------------------------------------------------------------
## Compute numerical summaries
summary(pfresults3)
## ----fig.width=6,fig.align="center"--------------------------------------
## Generate caterpillar
pfcaterpillar2 <- caterpillar(pfresults3,plot = FALSE)
## Fixed effects for mean
pfcaterpillar2$meanFixed
## Fixed effects for variance
pfcaterpillar2$varianceFixed
## ----fig.width=6,fig.align="center"--------------------------------------
## Compute summary statistics for random effects
ranef2 <- ranef(pfresults3)
## Load ggplot2
library(ggplot2)
## Identify number of individuals
nind <- nlevels(pfdata$indidx)
## Plot predicted random slopes
ggplot(data=as.data.frame(ranef2$mean[nind+(1:nind),]),aes(x=1:nind,y=Mean)) +
geom_point() +
geom_errorbar(aes(ymin=`Lower 95%`,ymax=`Upper 95%`)) +
geom_abline(intercept=0,slope=0)
| /data/genthat_extracted_code/dalmatian/vignettes/pied-flycatchers-2.R | no_license | surayaaramli/typeRrh | R | false | false | 3,742 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plan.R
\name{write_plan}
\alias{write_plan}
\title{plan}
\usage{
write_plan()
}
\value{
A drake plan
}
\description{
Write basic plan
}
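% A minimal usage sketch added for illustration; it assumes write_plan() takes
% no arguments, as shown in the usage section above.
\examples{
plan <- write_plan()
plan
}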
| /man/write_plan.Rd | permissive | GaelMariani/datatoolboxexos | R | false | true | 214 | rd |
# Reading the data, importing as an object
dados<-read.table("data/fotoquimica.txt", h=T)
View(dados)
# Summarizing the data
summary(dados)
# Creating an ANOVA model
ANOVA_twoway<-aov(Fv.Fm~Especie+Face, data=dados)
# Checking the normality of the data
par(mfrow=c(2,2))
plot(ANOVA_twoway)
require(MASS)
par(mfrow=c(1,1))
boxcox(ANOVA_twoway)
# Data transformation is not necessary
anova(ANOVA_twoway)
# Since there was no difference between the leaf faces, only between the
# species, I will simplify the model
ANOVA1_twoway<-aov(Fv.Fm~Especie, data=dados)
anova(ANOVA1_twoway)
# The p-value < 0.001 (2.022e-06)
# Applying Tukey's test
require(agricolae)
anova_agricolae<-aov(dados$Fv.Fm~dados$Especie, data=dados)
HSD.test(anova_agricolae,"dados$Especie", console=T)
# Making a plot
plot(Fv.Fm~Especie, data=dados, las=1, cex.axis=0.5, xlab="")
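# Complementary check (illustrative addition, not part of the original analysis):
# a Shapiro-Wilk test on the residuals of the simplified model, as a formal
# normality check alongside the diagnostic plots above
shapiro.test(residuals(ANOVA1_twoway))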
| /Documents/ENBT/R/Script.R | no_license | amandalupm/FINAL | R | false | false | 857 | r |
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse of the same matrix can be served from the cache instead
## of being recomputed.
## makeCacheMatrix() creates a special "matrix" object that can set and get
## the matrix itself, and set and get its cached inverse.
## Sample usage:
## m <- matrix(c(-1, -2, 1, 1), 2,2)
## x <- makeCacheMatrix(m)
## x$get()
## [,1] [,2]
## [1,] -1 1
## [2,] -2 1
##
## inv <- cacheSolve(x)
## inv
## [,1] [,2]
## [1,] 1 -1
## [2,] 2 -1
##
## > inv <- cacheSolve(x)
## getting cached data
## > inv
## [,1] [,2]
## [1,] 1 -1
## [2,] 2 -1
makeCacheMatrix <- function(x = matrix()) {
## initialize the inverse to NULL
m <- NULL
## setter function for our matrix
set <- function(y) {
x <<- y
m <<- NULL
}
## getter function does a simple return of the matrix
get <- function() x
## setter for matrix inverse
setinverse <- function(inv) m <<- inv
## getter for matrix inverse
getinverse <- function() m
## we need to bind our new functions
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve() returns the previously cached inverse of the matrix;
## if no cached value exists, it computes the inverse, caches it, and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
# if the inverse of a matrix has previously been computed, return cached value
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# if we do not have a cached value, compute inverse of a matrix
data <- x$get()
m <- solve(data)
x$setinverse(m)
m
}
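## Illustrative addition (not part of the original assignment code): calling
## set() with a new matrix clears the cached inverse, so cacheSolve() recomputes it.
## The matrix m2 below is only an example value.
## m2 <- matrix(c(2, 0, 0, 2), 2, 2)
## x$set(m2)
## cacheSolve(x)   # computes and caches the inverse of m2
## cacheSolve(x)   # prints "getting cached data" and returns the cached inverse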
| /cachematrix.R | no_license | mikezaloznyy/ProgrammingAssignment2 | R | false | false | 1,774 | r |
# functions
conc<-c(1,2,3,4,5,6)
concCuadrado<-conc^2
# to apply a function to each element of the vector
conRaiz<- sapply(conc, name <- function(elem) {sqrt(elem)
})
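# Note (illustrative addition): sqrt() is already vectorized in R, so the same
# result can be obtained without sapply()
conRaizVec <- sqrt(conc)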
n<-1:100
y<-n^2+2*n+2
mediaAritmetica=mean(y)
diferenciasSucesivas=diff(y)
sumasAcum=cumsum(y)
ordenado=sort(y)
ordInvert=rev(y) | /testProject/seccion07/funciones.R | no_license | nicolassnider/udemy_machine_learning_r_python | R | false | false | 302 | r |
# distillTreeRules.R
# ::rtemis::
# 2017 Efstathios D. Gennatas egenn.github.io
#' Distill rules from trained RF and GBM learners
#'
#' Extract rules from RF or GBM model, prune, and remove unnecessary rules using \code{inTrees}
#'
#' Models must be trained with \link{s.RF} or \link{s.GBM} / \link{s.GBM3}
#'
#' @param mod A trained RF or GBM model
#' @param x The training set features
#' @param y The training set outcomes. If NULL, assumed to be last column of \code{x}
#' @param n.trees Integer: Number of trees to extract
#' @param maxdepth Integer: Max depth to consider
#' @param maxDecay Float: See \code{inTrees::pruneRule}
#' @param typeDecay Integer: See \code{inTrees::pruneRule}
#' @param verbose Logical: If TRUE, print messages to output
#' @author Efstathios D. Gennatas
#' @export
distillTreeRules <- function(mod, x, y = NULL,
n.trees = NULL,
maxdepth = 100,
maxDecay = 0.05,
typeDecay = 2,
verbose = TRUE) {
# [ DEPENDENCIES ] ====
if (!depCheck("inTrees", verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
# [ INPUT ] ====
mod.name <- mod$mod.name
mod <- mod$mod
if (class(mod)[1] == "rtMod") mod <- mod$mod
if (is.null(y) & NCOL(x) > 1) {
y <- x[, ncol(x)]
x <- x[, (1:ncol(x)) - 1]
}
if (is.null(n.trees)) {
n.trees <- if (mod.name == "RF") mod$ntree else mod$n.trees
}
if (is.null(colnames(x))) colnames(x) <- paste0("Feature", 1:NCOL(x))
if (verbose) msg("Working on", mod.name, "model; looking at", n.trees, "trees")
# [ Get Rules ] ====
if (mod.name == "RF") {
trees <- inTrees::RF2List(mod)
} else {
trees <- inTrees::GBM2List(mod, X = x)
}
if (verbose) msg("Extracting rules from model...")
rules <- inTrees::extractRules(treeList = trees, X = x,
ntree = n.trees, maxdepth = maxdepth,
random = FALSE)
rules <- unique(rules)
if (verbose) msg("Extracting rule metrics...")
ruleMetrics <- inTrees::getRuleMetric(ruleExec = rules, X = x, target = y)
if (verbose) msg("Pruning rules...")
rules.pruned <- inTrees::pruneRule(rules = ruleMetrics, X = x, target = y,
maxDecay = maxDecay, typeDecay = typeDecay)
if (verbose) msg("Adding variable names to rules...")
rules.names <- inTrees::presentRules(rules = rules.pruned, colN = colnames(x))
if (verbose) msg("Building simplified learner...")
rules.distilled <- inTrees::buildLearner(ruleMetric = rules.pruned, X = x, target = y)
if (verbose) msg("Adding variable names to simplified rules")
rules.distilled.names <- inTrees::presentRules(rules = rules.distilled, colN = colnames(x))
list(trees = trees,
rules = rules,
ruleMetrics = ruleMetrics,
rules.pruned = rules.pruned,
rules.names = rules.names,
rules.distilled = rules.distilled,
rules.distilled.names = rules.distilled.names)
} # rtemis::distillTreeRules
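# Illustrative usage sketch (not part of the package; assumes the rtemis and
# inTrees packages are installed and uses the built-in iris data):
# mod.rf <- s.RF(iris[, 1:4], iris$Species)
# rules <- distillTreeRules(mod.rf, x = iris[, 1:4], y = iris$Species,
#                           n.trees = 50, maxdepth = 5)
# rules$rules.distilled.names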
| /R/distillTreeRules.R | no_license | bakaibaiazbekov/rtemis | R | false | false | 3,125 | r |
#' GCD
#'
#' @description Finds the greatest common divisor of two numbers.
#' @param x1 First number
#' @param x2 Second number
#'
#' @return The greatest common divisor of \code{x1} and \code{x2}
#' @export
#'
euclidean <- function(x1,x2){
is.scalar <- function(femto) is.numeric(femto) && length(femto) == 1L
x3 <- abs(x1)
x4 <- abs(x2)
if(is.scalar(x1)==FALSE || is.scalar(x2)==FALSE){
stop("The input is not correct")}
if(x3>x4){
smallern<-x4
} else {
smallern<-x3
}
for(i in 1:smallern){
if((x3%%i==0)&&(x4%%i==0)){
euclid=i
}}
return(euclid)
}
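# Illustrative usage (simple demonstration values, not from the package):
# euclidean(12, 18)   # returns 6
# euclidean(100, 80)  # returns 20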
| /R/euclidean.R | no_license | casparsylwan/casparnitin3 | R | false | false | 513 | r |
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot annual high and low flows
#'
#' @description Plots annual n-day minimum and maximum values and the day of year of occurrence of daily flow values
#' from a daily streamflow data set. Calculates statistics from all values, unless specified. Returns a tibble with statistics.
#'
#' @inheritParams calc_annual_extremes
#' @inheritParams plot_annual_stats
#'
#' @return A list of ggplot2 objects with the following for each station provided:
#' \item{Annual_Extreme_Flows}{ggplot2 object of annual minimum and maximum flows of selected n-day rolling means}
#' \item{Annual_Extreme_Flows_Dates}{ggplot2 object of the day of years of annual minimum and maximum flows of selected n-day rolling means}
#'
#' @seealso \code{\link{calc_annual_extremes}}
#'
#' @examples
#' # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
#' if (file.exists(tidyhydat::hy_downloaded_db())) {
#'
#' # Plot annual 1-day (default) max/min flow data with
#' # default alignment ('right')
#' plot_annual_extremes(station_number = "08NM116")
#'
#' # Plot custom annual 3-day max and 7-min flow data with 'center' alignment
#' plot_annual_extremes(station_number = "08NM116",
#' roll_days_max = 3,
#' roll_days_min = 7,
#' roll_align = "center")
#'
#' }
#' @export
plot_annual_extremes <- function(data,
dates = Date,
values = Value,
groups = STATION_NUMBER,
station_number,
roll_days = 1,
roll_days_min = NA,
roll_days_max = NA,
roll_align = "right",
water_year_start = 1,
start_year,
end_year,
exclude_years,
months = 1:12,
months_min = NA,
months_max = NA,
complete_years = FALSE,
ignore_missing = FALSE,
allowed_missing = ifelse(ignore_missing,100,0),
include_title = FALSE){
## ARGUMENT CHECKS
## others will be check in calc_ function
## ---------------
if (missing(data)) {
data <- NULL
}
if (missing(station_number)) {
station_number <- NULL
}
if (missing(start_year)) {
start_year <- 0
}
if (missing(end_year)) {
end_year <- 9999
}
if (missing(exclude_years)) {
exclude_years <- NULL
}
logical_arg_check(include_title)
## FLOW DATA CHECKS AND FORMATTING
## -------------------------------
# Check if data is provided and import it
flow_data <- flowdata_import(data = data, station_number = station_number)
# Check and rename columns
flow_data <- format_all_cols(data = flow_data,
dates = as.character(substitute(dates)),
values = as.character(substitute(values)),
groups = as.character(substitute(groups)),
rm_other_cols = TRUE)
## CALC STATS
## ----------
peak_stats <- calc_annual_extremes(data = flow_data,
roll_days = roll_days,
roll_days_min = roll_days_min,
roll_days_max = roll_days_max,
roll_align = roll_align,
water_year_start = water_year_start,
start_year = start_year,
end_year = end_year,
exclude_years = exclude_years,
months = months,
months_min = months_min,
months_max = months_max,
complete_years = complete_years,
ignore_missing = ignore_missing,
allowed_missing = allowed_missing)
# Remove all leading NA years
peak_stats <- dplyr::filter(dplyr::group_by(peak_stats, STATION_NUMBER),
Year >= Year[min(which(!is.na(.data[[names(peak_stats)[3]]])))])
# Gather data and plot the minimums day
peak_doy <- dplyr::select(peak_stats, STATION_NUMBER, Year, dplyr::contains("DoY"))
stat_levels <- names(peak_doy[-(1:2)])
stat_levels <- gsub("_", " ", paste0(gsub("_Day_DoY", "", stat_levels), " Day"))
peak_doy <- tidyr::gather(peak_doy, Statistic, Value, -STATION_NUMBER, -Year)
peak_doy <- dplyr::mutate(peak_doy, Statistic = factor(gsub("_"," ", paste0(gsub("_Day_DoY", "", Statistic), " Day")),
levels = rev(stat_levels)))
# Gather data and plot the minimums values
peak_values <- dplyr::select(peak_stats, STATION_NUMBER, Year, dplyr::contains("Day"),
-dplyr::contains("DoY"), -dplyr::contains("Date"))
peak_values <- tidyr::gather(peak_values, Statistic, Value, -STATION_NUMBER, -Year)
peak_values <- dplyr::mutate(peak_values, Statistic = factor(gsub("_"," ", paste0(gsub("_Day", "", Statistic), " Day")),
levels = rev(stat_levels)))
## PLOT STATS
## ----------
# Create axis label based on input columns
y_axis_title <- ifelse(as.character(substitute(values)) == "Volume_m3", "Volume (cubic metres)", #expression(Volume~(m^3))
ifelse(as.character(substitute(values)) == "Yield_mm", "Yield (mm)",
"Discharge (cms)")) #expression(Discharge~(m^3/s))
# high_col <- "dodgerblue2" #"#440154FF" #
# low_col <- "orange" #"#FDE725FF" #
colour_list <- c("dodgerblue2",
"orange")
# Create plots for each STATION_NUMBER in a tibble
doy_plots <- dplyr::group_by(peak_doy, STATION_NUMBER)
doy_plots <- tidyr::nest(doy_plots)
doy_plots <- dplyr::mutate(
doy_plots,
plot = purrr::map2(
data, STATION_NUMBER,
~ggplot2::ggplot(data = ., ggplot2::aes(x = Year, y = Value, color = Statistic, fill = Statistic)) +
ggplot2::geom_line(alpha = 0.5, na.rm = TRUE)+
ggplot2::geom_point(na.rm = TRUE, shape = 21, colour = "black", size = 2) +
ggplot2::facet_wrap(~Statistic, ncol = 1, strip.position = "top")+
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks(n = 8))+
{if(length(unique(peak_doy$Year)) < 8) ggplot2::scale_x_continuous(breaks = unique(peak_doy$Year))}+
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 6))+
ggplot2::ylab(ifelse(water_year_start == 1, "Day of Year", "Day of Water Year"))+
ggplot2::xlab(ifelse(water_year_start ==1, "Year", "Water Year"))+
ggplot2::scale_color_manual(values = colour_list)+
ggplot2::scale_fill_manual(values = colour_list)+
ggplot2::theme_bw() +
ggplot2::guides(colour = 'none', fill = "none")+
{if (include_title & .y != "XXXXXXX") ggplot2::ggtitle(paste(.y)) } +
ggplot2::theme(panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
panel.grid = ggplot2::element_line(size = .2),
axis.title = ggplot2::element_text(size = 12),
axis.text = ggplot2::element_text(size = 10),
plot.title = ggplot2::element_text(hjust = 1, size = 9, colour = "grey25"),
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(hjust = 0, face = "bold", size = 10))
))
flow_plots <- dplyr::group_by(peak_values, STATION_NUMBER)
flow_plots <- tidyr::nest(flow_plots)
flow_plots <- dplyr::mutate(
flow_plots,
plot = purrr::map2(
data, STATION_NUMBER,
~ggplot2::ggplot(data = ., ggplot2::aes(x = Year, y = Value, color = Statistic, fill = Statistic)) +
ggplot2::geom_line(alpha = 0.5, na.rm = TRUE)+
ggplot2::geom_point(na.rm = TRUE, shape = 21, colour = "black", size = 2) +
ggplot2::facet_wrap(~Statistic, ncol = 1, strip.position = "top", scales = "free_y")+
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks(n = 8))+
{if(length(unique(peak_values$Year)) < 8) ggplot2::scale_x_continuous(breaks = unique(peak_values$Year))}+
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 6),
labels = scales::label_number(scale_cut = scales::cut_short_scale())) +
ggplot2::ylab(y_axis_title)+
ggplot2::xlab("Year")+
ggplot2::scale_color_manual(values = colour_list)+
ggplot2::scale_fill_manual(values = colour_list)+
ggplot2::theme_bw() +
ggplot2::guides(colour = 'none', fill = "none")+
{if (include_title & .y != "XXXXXXX") ggplot2::ggtitle(paste(.y)) } +
ggplot2::theme(panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
panel.grid = ggplot2::element_line(size = .2),
axis.title = ggplot2::element_text(size = 12),
axis.text = ggplot2::element_text(size = 10),
plot.title = ggplot2::element_text(hjust = 1, size = 9, colour = "grey25"),
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(hjust = 0, face = "bold", size = 10))
))
# Create a list of named plots extracted from the tibble
plots_1 <- flow_plots$plot
plots_2 <- doy_plots$plot
if (nrow(flow_plots) == 1) {
names(plots_1) <- "Annual_Extreme_Flows"
names(plots_2) <- "Annual_Extreme_Flows_Dates"
} else {
names(plots_1) <- paste0(flow_plots$STATION_NUMBER, "_Annual_Extreme_Flows")
names(plots_2) <- paste0(doy_plots$STATION_NUMBER, "_Annual_Extreme_Flows_Dates")
}
# Add the plots to the plot list
plots <- c(plots_1, plots_2)
plots
}
| /R/plot_annual_extremes.R | permissive | bcgov/fasstr | R | false | false | 11,018 | r |
#' @title source_dir
#' @description Sources all R scripts (*.R) from the specified directory
#'
#' @importFrom assertthat not_empty
#' @param path character; path to directory
#'
#' @return NULL
#' @export
#'
source_dir <- function(path) {
if (!is.character(path))
stop("'path' is not a character")
if (!file.exists(path))
stop(sprintf("Path '%s' does not exist", path ))
file.sources <- list.files( path, pattern = ".*\\.R$", full.names = TRUE, ignore.case = TRUE)
if (not_empty(file.sources) == F)
stop(sprintf("Directory '%s' is empty.", path))
out <- sapply(file.sources, source, .GlobalEnv)
message("Sourced files:")
print(colnames(out))
}
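# Illustrative usage (the directory path below is hypothetical):
# source_dir("R/helpers")  # sources every *.R file in R/helpers and prints their names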
| /R/21-source_dir.R | no_license | Tazovsky/utilities | R | false | false | 685 | r |
#' @title incidenceMatch - Risk set matching for nested case-control designs
#'
#' @description
#'
#' A case is a person who has an outcome event at the index date. A control is a person who does not yet have the event
#' at the index date of the case. See \code{exposureMatch} for situations where treatment start or onset of
#' a comorbidity define the case and the index date.
#'
#' Risk set matching or incidence density sampling for nested case-control designs targets a Cox regression model
#' with time-dependent covariates. In large-scale registry data the main purpose
#' for risk set matching, instead of standard Cox regression on all data, is to save computation time.
#' See Details and references.
#'
#' Note that the parameter estimates of a conditional logistic regression analysis applied to the
#' output of \code{incidenceMatch} are hazard ratios which should be interpreted in terms of a Cox regression
#' model.
#'
#'
#' To provide the necessary speed for large samples, the general technique used is
#' to work with data.table and to create a series of match groups that have the fixed matching variables
#' identical (such as birthyear and gender).
#'
#' @author Christian Torp-Pedersen & Thomas Alexander Gerds
#' @param ptid Personal ID variable defining participant
#' @param event Name of variable that defines cases. MUST be numeric 0/1 where 0 codes for never-case, and 1 for case.
#' @param terms Vector of variable names specifying the variables that should be matched on.
#' Make sure that
#' appropriate classification is in place for truly continuous variables, such as age. This is to
#' ensure a sufficient number of controls for each case. For example it may be
#' difficult to find controls for cases of very high and very low ages
#' and extreme ages should therefore be further aggregated.
#' @param data The single dataset with all information - coerced to data.table
#' if data.frame
#' @param n.controls Number of controls for each case
#' @param case.index Name of the variable which contains the case index dates.
#' This can be a calendar date variable or a numeric variable, i.e., the time to outcome event
#' from a well defined baseline date. Missing values are interpreted as
#' no event at the end of followup.
#' @param end.followup Name of the variable which defines the date (as date or time)
#' from which a control can no longer be selected due to
#'\itemize{
#' \item{death: }{Nothing happens thereafter}
#' \item{other competing risks: }{Event after which we are not interested in the subject anymore. E.g., the date of an outcome event.}
#' \item{censoring: }{Event after which we do not observe anymore. E.g., emigration, end of study period, end of registry, drop-out}
#' }
#' The end.followup must be larger or equal to
#' the \code{case.index} date.
#' @param date.terms Unclear if useful in this context. But, see description
#' for \code{exposureMatch}.
#' @param duration.terms A list where each element defines a time duration
#' term with two elements:
#' \itemize{
#' \item{start}{Name of a variable which defines a date or time which
#' defines the duration as the difference between this variable and the \code{case.index}.
#' }
#' \item{min}{Numeric value given in days when \code{case.index} is a date and in
#' the same time unit as \code{case.index} otherwise.
#' Cases and controls have either both a duration as least as long as \code{min}
#' or both a duration shorter than min.}
#' }
#' Useful to prepare to summarize the history of exposure for cases and controls in an equally long period
#' looking back in time from the \code{case.index}.
#' @param output.count.controls Logical. If \code{TRUE} add number of found controls to each case/control set.
#' @param cores number of cores to use in the calculation.
#' @param seed Random seed to make results reproducible
#' @param progressbar set to \code{FALSE} to avoid progressbar
#' @details
#'
#' The function performs exact matching and hence
#' all matching variables must be factor variables or character.
#'
#' It may appear tempting always to use multiple cores, but this comes at the
#' cost of copying the data to the cores.
#'
#' This function prepares the data for fitting a Cox regression model via \code{survival::clogit} or directly
#' via \code{survival::coxph} or equivalent routine. The regression parameters are hazard ratios.
#' The matching variables are allowed to have a time-dependent non-proportional
#' effect on the hazard rate of the outcome very much in the same way as would be obtained without matching
#' by a strata statement to stratify the baseline hazard function. The original motivation for the nested case-control
#' design is when it is difficult, expensive or time-consuming to measure the exposure variables.
#'
#'
#' The function matchReport may afterwards be used to provide simple summaries
#' of use of cases and controls
#' @return data.table with cases and controls. After matching, a the variable
#' "case.id" identifies sets which include 1 case and x matched controls.
#'
#' Variables in the original dataset are preserved. The final dataset includes
#' all original cases but only the controls that were selected.
#' @seealso exposureMatch clogit matchReport Matchit
#' @references
#'
#' Bryan Langholz and Larry Goldstein. Risk set sampling in epidemiologic
#' cohort studies. Statistical Science, pages 35--53, 1996.
#'
#' Ornulf Borgan, Larry Goldstein, Bryan Langholz, et al. Methods for the
#' analysis of sampled cohort data in the cox proportional hazards model. The
#' Annals of Statistics, 23(5):1749--1778, 1995.
#'
#' Vidal Essebag, Robert W Platt, Michal Abrahamowicz, and Louise Pilote.
#' Comparison of nested case-control and survival analysis methodologies for
#' analysis of time-dependent exposure. BMC medical research methodology, 5
#' (1):5, 2005.
#'
#' @examples
#' require(data.table)
#' case <- c(rep(0,40),rep(1,15))
#' ptid <- paste0("P",1:55)
#' sex <- c(rep("fem",20),rep("mal",20),rep("fem",8),rep("mal",7))
#' byear <- c(rep(c(2020,2030),20),rep(2020,7),rep(2030,8))
#' case.Index <- c(seq(1,40,1),seq(5,47,3))
#' startDisease <- rep(10,55)
#' control.Index <- case.Index
#' diabetes <- seq(2,110,2)
#' heartdis <- seq(110,2,-2)
#' diabetes <- c(rep(1,55))
#' heartdis <- c(rep(100,55))
#' library(data.table)
#' dat <- data.table(case,ptid,sex,byear,diabetes,heartdis,case.Index,
#' control.Index,startDisease)
#' # Risk set matching
#' matchdat <- incidenceMatch(ptid="ptid",event="case",
#' terms=c("byear","sex"),data=dat,n.controls=2,
#' case.index="case.Index",
#' end.followup="control.Index",seed=8)
#' matchdat
#' matchReport(matchdat)
#' # Same with 2 cores
#' library(parallel)
#' library(foreach)
#' \dontrun{
#' matchdat2 <- incidenceMatch("ptid","case",c("byear","sex"),data=dat,
#' n.controls=2,case.index="case.Index",end.followup="control.Index"
#' ,cores=2,seed=8)
#' matchdat2
#' all.equal(matchdat,matchdat2)
#' }
#'
#' # Case control matching with requirement of minimum exposure time in each
#' # group
#' ew <- incidenceMatch(ptid="ptid",event="case",terms=c("byear","sex"),
#' data=dat,n.controls=2,case.index="case.Index",
#' end.followup="control.Index",cores=1,
#' duration.terms=list(list(start="startDisease",min=15)))
#' ew
#'
#' @include riskSetMatch.R
#' @export
incidenceMatch <- riskSetMatch
| /R/incidenceMatch.R | no_license | tagteam/heaven | R | false | false | 7,426 | r | #' @title incidenceMatch - Risk set matching for nested case-control designs
#'
#' @description
#'
#' A case is a person who has an outcome event at the index date. A control is a person who does not yet have the event
#' at the index date of the case. See \code{exposureMatch} for situations where treatment start or onset of
#' a comorbidity define the case and the index date.
#'
#' Risk set matching or incidence density sampling for nested case-control designs targets a a Cox regression model.
#' with time-dependent covariates. In large-scale registry data the main purpose
#' for risk set matching, instead of standard Cox regression on all data, is to save computation time.
#' See Details and references.
#'
#' Note that the parameter estimates of a conditional logistic regression analysis applied to the
#' output of \code{incidenceMatch} are hazard ratios which should be interpreted in terms of a Cox regression
#' model.
#'
#'
#' To provide necessary speed for large samples the general technique used is
#' work with data.table and to create a series of match groups that have the fixed matching variables
#' identical (such as birthyear and gender).
#'
#' @author Christian Torp-Pedersen & Thomas Alexander Gerds
#' @param ptid Personal ID variable defining participant
#' @param event Name of variable that defines cases. MUST be numeric 0/1 where 0 codes for never-case, and 1 for case.
#' @param terms Vector of variable names specifying the variables that should be matched on.
#' Make sure that
#' appropriate classification is in place for truly continuous variables, such as age. This is to
#' ensure a sufficient number of controls for each case. For example it may be
#' difficult to find controls for cases of very high and very low ages
#' and extreme ages should therefor further aggregated.
#' @param data The single dataset with all information - coerced to data.table
#' if data.frame
#' @param n.controls Number of controls for each case
#' @param case.index Name of the variable which contains the case index dates.
#' This can be a calendar date variable or a numeric variable, i.e., the time to outcome event
#' from a well defined baseline date. Missing values are interpreted as
#' no event at the end of followup.
#' @param end.followup Name of the variable which defines the date (as date or time)
#' from which a control can no longer be selected due to
#'\itemize{
#' \item{death: }{Nothing happens thereafter}
#' \item{other competing risks: }{Event after which we are not interested in the subject anymore. E.g., the date of an outcome event.}
#' \item{censoring: }{Event after which we do not observe anymore. E.g., emmigration, end of study period, end of registry, drop-out}
#' }
#' The end.followup must be larger or equal to
#' the \code{case.index} date.
#' @param date.terms Unclear if useful in this context. But, see description
#' for \code{exposureMatch}.
#' @param duration.terms A list where each element defines a time duration
#' term with two elements:
#' \itemize{
#' \item{start}{Name of a variable which defines a date or time which
#' defines the duration as the difference between this variable and the \code{case.index}.
#' }
#' \item{min}{Numeric value given in days when \code{case.index} is a date and in
#' the same time unit as \code{case.index} otherwise.
#' Cases and controls have either both a duration as least as long as \code{min}
#' or both a duration shorter than min.}
#' }
#' Useful to prepare to summarize the history of exposure for cases and controls in an equally long period
#' looking back in time from the \code{case.index}.
#' @param output.count.controls Logical. If \code{TRUE} add number of found controls to each case/control set.
#' @param cores number of cores to use in the calculation.
#' @param seed Random seed to make results reproducible
#' @param progressbar set to \code{FALSE} to avoid progressbar
#' @details
#'
#' The function performs exact matching and hence
#' all matching variables must be factor variables or character.
#'
#' It may appear tempting always to use multiple cores, but this comes at the
#' cost of copying the data to the cores.
#'
#' This function prepares the data for fitting a Cox regression model via \code{survival::clogit} or directly
#' via \code{survival::coxph} or equivalent routine. The regression parameters are hazard ratios.
#' The matching variables are allowed to have a time-dependent non-proportional
#' effect on the hazard rate of the outcome very much in the same way as would be obtained without matching
#' by a strata statement to stratify the baseline hazard function. The original motivation for the nested case-control
#' design is when it is difficult, expensive or time-consuming to measure the exposure variables.
#'
#'
#' The function matchReport may afterwards be used to provide simple summaries
#' of use of cases and controls
#' @return data.table with cases and controls. After matching, a the variable
#' "case.id" identifies sets which include 1 case and x matched controls.
#'
#' Variables in the original dataset are preserved. The final dataset includes
#' all original cases but only the controls that were selected.
#' @seealso exposureMatch clogit matchReport Matchit
#' @references
#'
#' Bryan Langholz and Larry Goldstein. Risk set sampling in epidemiologic
#' cohort studies. Statistical Science, pages 35--53, 1996.
#'
#' Ornulf Borgan, Larry Goldstein, Bryan Langholz, et al. Methods for the
#' analysis of sampled cohort data in the cox proportional hazards model. The
#' Annals of Statistics, 23(5):1749--1778, 1995.
#'
#' Vidal Essebag, Robert W Platt, Michal Abrahamowicz, and Louise Pilote.
#' Comparison of nested case-control and survival analysis methodologies for
#' analysis of time-dependent exposure. BMC medical research methodology, 5
#' (1):5, 2005.
#'
#' @examples
#' require(data.table)
#' case <- c(rep(0,40),rep(1,15))
#' ptid <- paste0("P",1:55)
#' sex <- c(rep("fem",20),rep("mal",20),rep("fem",8),rep("mal",7))
#' byear <- c(rep(c(2020,2030),20),rep(2020,7),rep(2030,8))
#' case.Index <- c(seq(1,40,1),seq(5,47,3))
#' startDisease <- rep(10,55)
#' control.Index <- case.Index
#' diabetes <- seq(2,110,2)
#' heartdis <- seq(110,2,-2)
#' diabetes <- c(rep(1,55))
#' heartdis <- c(rep(100,55))
#' library(data.table)
#' dat <- data.table(case,ptid,sex,byear,diabetes,heartdis,case.Index,
#' control.Index,startDisease)
#' # Risk set matching
#' matchdat <- incidenceMatch(ptid="ptid",event="case",
#' terms=c("byear","sex"),data=dat,n.controls=2,
#' case.index="case.Index",
#' end.followup="control.Index",seed=8)
#' matchdat
#' matchReport(matchdat)
#' # Same with 2 cores
#' library(parallel)
#' library(foreach)
#' \dontrun{
#' matchdat2 <- incidenceMatch("ptid","case",c("byear","sex"),data=dat,
#' n.controls=2,case.index="case.Index",end.followup="control.Index"
#' ,cores=2,seed=8)
#' matchdat2
#' all.equal(matchdat,matchdat2)
#' }
#'
#' # Case control matching with requirement of minimum exposure time in each
#' # group
#' ew <- incidenceMatch(ptid="ptid",event="case",terms=c("byear","sex"),
#' data=dat,n.controls=2,case.index="case.Index",
#' end.followup="control.Index",cores=1,
#' duration.terms=list(list(start="startDisease",min=15)))
#' ew
#'
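#' # The matched sets are typically analysed with conditional logistic
#' # regression. A minimal sketch (not run), assuming an exposure variable
#' # `exposure` has been added to `matchdat`:
#' \dontrun{
#' library(survival)
#' fit <- clogit(case ~ exposure + strata(case.id), data = matchdat)
#' summary(fit)
#' }
#'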
#' @include riskSetMatch.R
#' @export
incidenceMatch <- riskSetMatch
|
# Global functions
source("scripts/funs.R")
# Define model parameters
source("scripts/params3.R")
prod <- read.xlsx("data_raw/Productos_Candidatos_30_corregidos.xlsx", "Todos") %>%
cleanNames() %>%
mutate(product = descripcion_del_producto,
row_id = row_number(),
internal = codigo_producto_solo_elemetrica)
# Run predictions
new <- h2o_word2vec.predict(
prod$product, w2v, model$model,
params = params, clean = TRUE, top = 1)
freqs(new$category)
hist(new$probability)
summary(new$probability)
head(new)
# Check random products
new %>% ungroup() %>% sample_n(20) %>% arrange(desc(probability))
# Check specific inputs
product_name <- "mortero"
h2o_word2vec.predict(product_name, w2v, model$model, params = params, clean = TRUE, top = 5)
# Search for products/categories containing product_name
df %>% clean_label("category") %>%
filter(grepl(product_name, paste(category, product))) %>%
select(source, category, product)
# Check close words or synonyms
predict(w2v, product_name, type = "nearest", top_n = 8)
# Top N Predictors
gs <- h2o_word2vec.predict(
prod$product, w2v, model$model,
params = params, clean = TRUE, top = 2) %>%
group_by(id) %>% mutate(tied = length(unique(probability)) == 1) %>%
left_join(select(prod, row_id:internal), by = c("id" = "row_id"))
# For how many products is the model uncertain (tied) between the top labels?
gs %>% slice(1) %>% freqs(tied)
# Distributions
gs %>% ungroup() %>% mutate(range = quants(100*probability, 5, "labels")) %>% freqs(range, abc=T)
# Upload to GSheets
writeGS(gs, "Elemétrica: Catálogo Disensa", "TopProducts2")
############
logs <- read.csv("resultslog.csv")
# Accuracy-Coverage Curve
logs[7:13,] %>%
ggplot(aes(x = categories_used, y = ACC, group = 1)) +
geom_line() + geom_point() +
labs(title = "Accuracy-Coverage Curve",
y = "Prediction accuracy",
x = "Number of categories used",
caption = "Using 70% of the database + tested with 30% untrained data") +
theme_lares() +
scale_y_percent()
# USE MOJO FILES TO PREDICT
temp <- h2o_word2vec.predict(
x = prod$product[1:100],
w2v = w2v.model$model,
model = "MOJOs/DRF_1_AutoML_20201009_163058",
params = params,
clean = TRUE)
hist(temp$probability)
h2o::h2o.findSynonyms(w2v.model$model, as.h2o("mortero"), 10)
# ADD INTERNAL CODES
gs <- readGS("Elemétrica: Catálogo Disensa", "TopProducts", email = "laresbernardo@gmail.com")
labeled_gs <- gs %>% left_join(
cats %>% mutate(chain4 = cleanText(cats$chain4, spaces = ".")) %>%
clean_label("chain4") %>%
select(chain4, code) %>% distinct(),
by = c("Predicción" = "chain4"))
writeGS(select(labeled_gs, code), "Elemétrica: Catálogo Disensa", "Temp")
# JOIN RESULTS TO COMPARE
gs1 <- readGS("Elemétrica: Catálogo Disensa", "TopProducts", email = "laresbernardo@gmail.com")
gs2 <- readGS("Elemétrica: Catálogo Disensa", "TopProducts2", email = "laresbernardo@gmail.com")
gs1 %>% left_join(gs2 %>% filter(rank == 1), by = c(`Codigo Elemetrica` = "internal")) %>%
filter(!is.na(category)) %>%
mutate(unchanged = `Predicción` == category) %>%
filter(!unchanged) %>%
sample_n(20) %>%
select(label, `Predicción`, `Certeza`, category, probability)
| /scripts/check3.R | no_license | jfontestad/elemetrica | R | false | false | 3,220 | r |
#library(devtools)
#install_github("Rfacebook", "pablobarbera", subdir="Rfacebook")
library(Rfacebook)
token <- ""
user_name = ""
df <- getUsers(user_name, token, private_info = TRUE)
df$name #name
df$hometown # my hometown
my_likes <- getLikes(user="#", token=token)
my_friends <- getFriends(token, simplify = TRUE)
my_friends_info <- getUsers(my_friends$id, token, private_info = TRUE)
table(my_friends_info$gender) # gender
table(substr(my_friends_info$locale, 1, 2)) # language
table(substr(my_friends_info$locale, 4, 5)) # country
mat <- getNetwork(token, format = "adj.matrix")
dim(mat)
# get page info
#True will make it pull user info
# account info
pg.posts <- getPage(user_name, token, n=100, feed=TRUE)
blah <- getUsers(pg.posts$from_id, token, private_info = TRUE)
fix(blah)
getCheckins(562008467, n = 10, token, tags = FALSE)
#pulled this from above
## Getting information about Facebook's Facebook Page
fb_page <- getPage(page=user_name, token=token)
## Getting information and likes/comments about most recent post
post <- getPost(post=fb_page$id[1], n=200, token=token)
## End(Not run)
post
## Searching 100 public posts that mention the user
posts <- searchFacebook( string=user_name, token=token, n=100 )
## Searching 100 public posts that mention the user, from yesterday
posts <- searchFacebook( string=user_name, token=token, n=100 ,
since = "yesterday 00:00", until = "yesterday 23:59")
#graph social network on FB
me <- getUsers("me", token=token)
my_friends <- getFriends(token=token, simplify=TRUE)
my_friends_info <- getUsers(my_friends$id, token=token, private_info=TRUE)
my_network <- getNetwork(token, format="adj.matrix")
singletons <- rowSums(my_network)==0 # friends who are friends with me alone
install.packages("igraph")
library(igraph)
my_graph <- graph.adjacency(my_network[!singletons,!singletons])
layout <- layout.drl(my_graph,options=list(simmer.attraction=0))
plot(my_graph, vertex.size=2,
#vertex.label=NA,
vertex.label.cex=0.5,
     edge.arrow.size=0, edge.curved=TRUE,layout=layout)
| /Using_R_In_FB.R | no_license | smart-patrol/Useful_Code | R | false | false | 2,086 | r |
#load packages
library(affy)
library(limma)
library(simpleaffy)
#set graphical parameters
par(cex=0.5, las=2, cex.axis=0.8) #make axis labels perpendicular to the axes (las=2) and smaller
par(mar=c(9,5,5,2)) #set the margins bigger so text stays on the screen
par(mgp=c(7,1,0)) #push the y axis label to the bottom of the plot
| /PhD/120518-initialize.R | no_license | dvbrown/Rscripts | R | false | false | 306 | r |
#' ICA Signal Extraction
#'
#' `step_ica` creates a *specification* of a recipe step
#' that will convert numeric data into one or more independent
#' components.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute the components. See
#' [selections()] for more details. For the `tidy`
#' method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new independent component columns created by the
#' original variables will be used as predictors in a model.
#' @param num_comp The number of ICA components to retain as new
#' predictors. If `num_comp` is greater than the number of columns
#' or the number of possible components, a smaller value will be
#' used.
#' @param options A list of options to
#' [fastICA::fastICA()]. No defaults are set here.
#' **Note** that the arguments `X` and `n.comp` should
#' not be passed here.
#' @param res The [fastICA::fastICA()] object is stored
#' here once this preprocessing step has be trained by
#' [prep.recipe()].
#' @param num The number of components to retain (this will be
#' deprecated in favor of `num_comp` in version 0.1.5). `num_comp`
#' will override this option.
#' @param prefix A character string that will be the prefix to the
#' resulting new variables. See notes below.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected), `value` (the loading),
#' and `component`.
#' @keywords datagen
#' @concept preprocessing ica projection_methods
#' @export
#' @details Independent component analysis (ICA) is a
#' transformation of a group of variables that produces a new set
#' of artificial features or components. ICA assumes that the
#' variables are mixtures of a set of distinct, non-Gaussian
#' signals and attempts to transform the data to isolate these
#' signals. Like PCA, the components are statistically independent
#' from one another. This means that they can be used to combat
#' large inter-variables correlations in a data set. Also like PCA,
#' it is advisable to center and scale the variables prior to
#' running ICA.
#'
#' This package produces components using the "FastICA"
#' methodology (see reference below). This step requires the
#' \pkg{dimRed} and \pkg{fastICA} packages. If not installed, the
#' step will stop with a note about installing these packages.
#'
#' The argument `num_comp` controls the number of components that
#' will be retained (the original variables that are used to derive
#' the components are removed from the data). The new components
#' will have names that begin with `prefix` and a sequence of
#' numbers. The variable names are padded with zeros. For example,
#' if `num_comp < 10`, their names will be `IC1` - `IC9`.
#' If `num_comp = 101`, the names would be `IC001` -
#' `IC101`.
#'
#' @references Hyvarinen, A., and Oja, E. (2000). Independent
#' component analysis: algorithms and applications. *Neural
#' Networks*, 13(4-5), 411-430.
#'
#' @examples
#' # from fastICA::fastICA
#' set.seed(131)
#' S <- matrix(runif(400), 200, 2)
#' A <- matrix(c(1, 1, -1, 3), 2, 2, byrow = TRUE)
#' X <- as.data.frame(S %*% A)
#'
#' tr <- X[1:100, ]
#' te <- X[101:200, ]
#'
#' rec <- recipe( ~ ., data = tr)
#'
#' ica_trans <- step_center(rec, V1, V2)
#' ica_trans <- step_scale(ica_trans, V1, V2)
#' ica_trans <- step_ica(ica_trans, V1, V2, num_comp = 2)
#'
#' if (require(dimRed) & require(fastICA)) {
#' ica_estimates <- prep(ica_trans, training = tr)
#' ica_data <- bake(ica_estimates, te)
#'
#' plot(te$V1, te$V2)
#' plot(ica_data$IC1, ica_data$IC2)
#'
#' tidy(ica_trans, number = 3)
#' tidy(ica_estimates, number = 3)
#' }
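#'
#' # A hedged variation (sketch, not part of the original example): the
#' # `prefix` argument only changes the names of the new columns, e.g.
#' # "ica_1"/"ica_2" instead of "IC1"/"IC2".
#' if (require(dimRed) & require(fastICA)) {
#'   rec2 <- step_center(rec, V1, V2)
#'   rec2 <- step_scale(rec2, V1, V2)
#'   rec2 <- step_ica(rec2, V1, V2, num_comp = 2, prefix = "ica_")
#'   names(bake(prep(rec2, training = tr), te))
#' }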
#' @seealso [step_pca()] [step_kpca()]
#' [step_isomap()] [recipe()] [prep.recipe()]
#' [bake.recipe()]
step_ica <-
function(recipe,
...,
role = "predictor",
trained = FALSE,
num_comp = 5,
options = list(),
res = NULL,
num = NULL,
prefix = "IC",
skip = FALSE,
id = rand_id("ica")) {
recipes_pkg_check(c("dimRed", "fastICA"))
  if (!is.null(num))
    message("The argument `num` is deprecated in favor of `num_comp`. ",
            "`num` will be removed in the next version.")
add_step(
recipe,
step_ica_new(
terms = ellipse_check(...),
role = role,
trained = trained,
num_comp = num_comp,
options = options,
res = res,
num = num,
prefix = prefix,
skip = skip,
id = id
)
)
}
step_ica_new <-
function(terms, role, trained, num_comp, options, res, num, prefix, skip, id) {
step(
subclass = "ica",
terms = terms,
role = role,
trained = trained,
num_comp = num_comp,
options = options,
res = res,
num = num,
prefix = prefix,
skip = skip,
id = id
)
}
#' @export
prep.step_ica <- function(x, training, info = NULL, ...) {
col_names <- terms_select(x$terms, info = info)
check_type(training[, col_names])
x$num_comp <- min(x$num_comp, length(col_names))
indc <- dimRed::FastICA(stdpars = x$options)
indc <-
indc@fun(
dimRed::dimRedData(as.data.frame(training[, col_names, drop = FALSE])),
list(ndim = x$num_comp)
)
step_ica_new(
terms = x$terms,
role = x$role,
trained = TRUE,
num_comp = x$num_comp,
options = x$options,
res = indc,
num = x$num_comp,
prefix = x$prefix,
skip = x$skip,
id = x$id
)
}
#' @export
bake.step_ica <- function(object, new_data, ...) {
ica_vars <- colnames(environment(object$res@apply)$indata)
comps <-
object$res@apply(
dimRed::dimRedData(
as.data.frame(new_data[, ica_vars, drop = FALSE])
)
)@data
comps <- comps[, 1:object$num_comp, drop = FALSE]
colnames(comps) <- names0(ncol(comps), object$prefix)
new_data <- bind_cols(new_data, as_tibble(comps))
new_data <-
new_data[, !(colnames(new_data) %in% ica_vars), drop = FALSE]
as_tibble(new_data)
}
print.step_ica <-
function(x, width = max(20, options()$width - 29), ...) {
cat("ICA extraction with ")
printer(colnames(x$res@org.data), x$terms, x$trained, width = width)
invisible(x)
}
#' @importFrom utils stack
#' @rdname step_ica
#' @param x A `step_ica` object.
#' @export
tidy.step_ica <- function(x, ...) {
if (is_trained(x)) {
rot <- dimRed::getRotationMatrix(x$res)
colnames(rot) <- names0(ncol(rot), x$prefix)
rot <- as.data.frame(rot)
vars <- colnames(x$res@org.data)
npc <- ncol(rot)
res <- utils::stack(rot)
colnames(res) <- c("value", "component")
res$component <- as.character(res$component)
res$terms <- rep(vars, npc)
res <- as_tibble(res)
} else {
term_names <- sel2char(x$terms)
comp_names <- names0(x$num_comp, x$prefix)
res <- expand.grid(terms = term_names,
value = na_dbl,
component = comp_names)
res$terms <- as.character(res$terms)
res$component <- as.character(res$component)
res <- as_tibble(res)
}
res$id <- x$id
res
}
| /R/ica.R | no_license | Athospd/recipes | R | false | false | 7,504 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeSource.R
\name{removeSource}
\alias{removeSource}
\title{Backport of removeSource for R < 3.6.0}
\usage{
removeSource(fn)
}
\description{
See the original description in \code{base::removeSource}.
}
\examples{
# get function from namespace instead of possibly getting
# implementation shipped with recent R versions:
bp_removeSource = getFromNamespace("removeSource", "backports")
bp_removeSource(mean)
}
\keyword{internal}
| /man/removeSource.Rd | no_license | Tubbz-alt/backports | R | false | true | 509 | rd |
context("basic functionality")
test_that("we can do something", {
expect_that(geocode(c("Portland", "Berwick", "Alfred"), "ME"), is_a("data.frame"))
})
| /tests/testthat/test-localgeo.R | no_license | hrbrmstr/localgeo | R | false | false | 156 | r |
## File Name: anova.tam.R
## File Version: 9.12
#-- Likelihood ratio test for tam objects
#-- Function is copied from the CDM package
anova.tam <- function( object, ... )
{
cl2 <- paste(match.call())[-1]
if (length(list(object, ...)) !=2){
stop("anova method can only be applied for comparison of two models.\n")
}
objects <- list(object, ...)
model1 <- objects[[1]]
model2 <- objects[[2]]
# define some necessary parameters
model1$AIC <- model1$ic$AIC
model1$BIC <- model1$ic$BIC
model1$loglike <- model1$deviance / (-2)
model1$Npars <- model1$ic$Npars
model2$AIC <- model2$ic$AIC
model2$BIC <- model2$ic$BIC
model2$loglike <- model2$deviance / (-2)
model2$Npars <- model2$ic$Npars
# test
dfr1 <- data.frame( "Model"=cl2[1],
"loglike"=model1$loglike,
"Deviance"=-2*model1$loglike )
dfr1$Npars <- sum(model1$Npars)
dfr1$AIC <- model1$AIC
dfr1$BIC <- model1$BIC
dfr2 <- data.frame( "Model"=cl2[2],
"loglike"=model2$loglike,
"Deviance"=-2*model2$loglike )
dfr2$Npars <- sum(model2$Npars)
dfr2$AIC <- model2$AIC
dfr2$BIC <- model2$BIC
dfr <- rbind( dfr1, dfr2 )
dfr <- dfr[ order( dfr$Npars ), ]
dfr$Chisq <- NA
dfr$df <- NA
dfr$p <- NA
digits <- 5
dfr[1,"Chisq"] <- dfr[1,"Deviance"] - dfr[2,"Deviance"]
dfr[1,"df"] <- abs( dfr[1,"Npars"] - dfr[2,"Npars"] )
dfr[1, "p"] <- round( 1 - stats::pchisq( dfr[1,"Chisq"], df=dfr[1,"df"] ), digits)
tam_round_data_frame_print(obji=dfr, from=2, digits=digits, rownames_null=TRUE)
invisible(dfr)
}
anova.tam.mml <- anova.tam
anova.tam.mml.3pl <- anova.tam.mml
anova.tamaan <- anova.tam.mml
anova.tam.latreg <- anova.tam
anova.tam.np <- anova.tam
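## Usage sketch (illustrative only, not run): compare two fitted TAM models,
## e.g. a Rasch model against a 2PL model fitted on the same response data `resp`:
## mod1 <- TAM::tam.mml(resp)
## mod2 <- TAM::tam.mml.2pl(resp)
## anova(mod1, mod2)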
| /R/anova.tam.R | no_license | cran/TAM | R | false | false | 1,824 | r |
require(ggplot2)
# Loading provided datasets - loading from local machine
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Subset NEI data by Baltimore's fips code.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
png("plot3.png",width=800, height=500,units="px")
ggp <- ggplot(baltimoreNEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
theme_bw() + guides(fill=FALSE)+
facet_grid(.~type,scales = "free",space="free") +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(ggp)
dev.off()
| /plot3.R | no_license | xiaohan98/ExData_Project2 | R | false | false | 778 | r |
#' @title Utility Computing Function
#'
#' @description
#' Computing Utility.
#'
#' @param x_vec a vector of scaled hyperparameters
#' @param GP an object of class GP
#' @param acq Acquisition function type to be used
#' @param y_max The current maximum known value of the target utility function
#' @param kappa tunable parameter kappa to balance exploitation against exploration
#' @param eps tunable parameter epsilon to balance exploitation against exploration
#' @return negative utility to be minimized
#' @importFrom stats pnorm dnorm
#' @importFrom magrittr %>%
#' @keywords internal
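#' @examples
#' # Minimal sketch (not run), assuming a GP fitted with GPfit::GP_fit on
#' # hyperparameters scaled to [0, 1]; evaluates the (negative) UCB utility
#' # at one candidate point.
#' \dontrun{
#' X <- matrix(runif(20), ncol = 2)
#' y <- apply(X, 1, function(z) -sum((z - 0.5)^2))
#' GP <- GPfit::GP_fit(X, y)
#' Utility(c(0.4, 0.6), GP, acq = "ucb", y_max = max(y), kappa = 2.576, eps = 0)
#' }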
#' @export
Utility <- function(x_vec, GP, acq = "ucb", y_max, kappa, eps) {
# Gaussian Process Prediction
GP_Pred <- GPfit::predict.GP(object = GP, xnew = matrix(x_vec, nrow = 1))
GP_Mean <- GP_Pred$Y_hat
GP_MSE <- GP_Pred$MSE %>% pmax(., 1e-9)
# Utility Function Type
if (acq == "ucb") {
Utility <- GP_Mean + kappa * sqrt(GP_MSE)
} else if (acq == "ei") {
z <- (GP_Mean - y_max - eps) / sqrt(GP_MSE)
Utility <- (GP_Mean - y_max - eps) * pnorm(z) + sqrt(GP_MSE) * dnorm(z)
} else if (acq == "poi") {
z <- (GP_Mean - y_max - eps) / sqrt(GP_MSE)
Utility <- pnorm(z)
}
return(-Utility)
}
| /R/Utility.R | no_license | yonghuajiang/rBayesianOptimization | R | false | false | 1,212 | r |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
markerinsert <- function(mp, geno, map, cl, gg1, gg2, gg0, flagRIL) {
.Call(`_QTL_gCIMapping_GUI_markerinsert`, mp, geno, map, cl, gg1, gg2, gg0, flagRIL)
}
| /QTL.gCIMapping.GUI/R/RcppExports.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 289 | r |
library(RColorBrewer)
cols <- brewer.pal(11,"Spectral")
##### SITE-SPECIFIC TRANSMISSIBILITIES: UNPACK beta.estimates.zip
load("beta1.matrix.RData")
load("beta2.matrix.RData")
names.arg = c(
"SIS","SISPS","SISPminRS","SISPmaxRS",
"SIRS","SIS10RS","SIS33RS",
"SIRlocalS","SIS10RlocalS","SIS33RlocalS",
"SIRanalS","SIS10RanalS","SIS33RanalS",
"SIRpenileS","SIS10RpenileS","SIS33RpenileS",
"SIL","SIS10L","SIS33L","SIR33L"
)
table.order <- c(1,2,5,7,6,3,4,8,10,9,14,16,15,11,13,12,17,19,18,20)
modcols <- c(rep(5,2),rep(2,5),rep(8,9),rep(10,3),rep(11,1))
### PLOT ESTIMATES
x11()
boxplot(t(beta1.matrix[table.order,]), xaxt="n", ylab="", col=cols[modcols])
text(x=c(1,5,12,18), y=-0.13, c("SIS","systemic\nimmunity","local\nimmunity", "latency"), cex=0.9, xpd=TRUE)
title("Penile-to-anal transmissibility")
x11()
boxplot(t(beta2.matrix[table.order,]), xaxt="n",ylab="", col=cols[modcols])
text(x=c(1,5,12,18), y=-0.033, c("SIS","systemic\nimmunity","local\nimmunity", "latency"), cex=0.9, xpd=TRUE)
title("Anal-to-penile transmissibility")
modcols <- c(sort(rep(1:3,2),decreasing=T),sort(rep(4:6,2),decreasing=T),sort(rep(9:11,2)))
x11()
boxplot(beta1.matrix[1:20,], xaxt="n", ylab="", col=cols[modcols])
text(x=-0.5, y=-0.1, labels="Assortativity", cex=0.8, srt=0, xpd=TRUE)
text(x=c(1.5+2*(0:8)), y=-0.1, labels=c("0","1/3","2/3"), cex=0.8, xpd=TRUE)
text(x=c(6.5,12.5), y=-0.1, labels="|", xpd=TRUE)
text(x=0, y=-0.15, labels="Activity", cex=0.8, srt=0, xpd=TRUE)
text(x=c(3.5, 9.5, 15.5), y=-0.15, labels=c("80-20%","90-10%","60-30-10%"), cex=0.8, xpd=TRUE)
text(x=c(6.5,12.5), y=-0.15, labels="|", xpd=TRUE)
title("Penile-to-anal transmissibility")
x11()
boxplot(beta2.matrix[1:20,], xaxt="n", ylab="", col=cols[modcols])
text(x=-0.5, y=-0.026, labels="Assortativity", cex=0.8, srt=0, xpd=TRUE)
text(x=c(1.5+2*(0:8)), y=-0.026, labels=c("0","1/3","2/3"), cex=0.8, xpd=TRUE)
text(x=c(6.5,12.5), y=-0.026, labels="|", xpd=TRUE)
text(x=0, y=-0.0375, labels="Activity", cex=0.8, srt=0, xpd=TRUE)
text(x=c(3.5, 9.5, 15.5), y=-0.0375, labels=c("80-20%","90-10%","60-30-10%"), cex=0.8, xpd=TRUE)
text(x=c(6.5,12.5), y=-0.0375, labels="|", xpd=TRUE)
title("Anal-to-penile transmissibility")
| /R-scripts/FigS7.R | no_license | hansbogaards/HPV-immunization-for-MSM-multimodel-approach | R | false | false | 2,257 | r |
#Precision
coref_p=c(0.049259562,0.050491319,0.053675641,0.053688315,0.05848086,0.061600881,0.081521995,0.07497863,0.067508733)
cochg_p=c(0.121067604,0.131267257,0.143525871,0.181228905,0.268274008,0.259508491,0.342892887,0.455,0.291666667)
#Recall
coref_r=c(0.150128129,0.147436298,0.137103184,0.129494687,0.12295152,0.104789257,0.086645924,0.083679745,0.078624069)
cochg_r=c(0.013691642,0.013691642,0.013659673,0.011424085,0.01041725,0.010051532,0.007757513,0.0068497,0.00471189)
#F-measure
coref_f=c(0.074179563,0.075221976,0.077147988,0.075906077,0.079261603,0.077590062,0.084005899,0.079090595,0.072643667)
cochg_f=c(0.024601122,0.024796882,0.024945252,0.021493301,0.020055724,0.019353448,0.015171784,0.013496223,0.009273959)
wilcox.test(coref_p,cochg_p,alternative='greater')
wilcox.test(coref_r,cochg_r,alternative='greater')
wilcox.test(coref_f,cochg_f,alternative='greater')
| /paper/data/wilcox_method_0002_cost.r | no_license | gecko655/yamamori-mthesis | R | false | false | 886 | r |
#' Display seasonal data collection progress
#'
#' Display an interactive visualization of data collection progress for the
#' season. All adult females caught in the given year are included provided
#' their most recent trapping record has fate 1-3.
#'
#' The status of a female is based on the most recent trapping record and the
#' litter table. If the most recent trapping record is dated prior to the date
#' the litter table was updated it is ignored, and only the litter table is
#' used. If nipple condition is 5, the status is LL (Lost Litter). Otherwise,
#' status is Parturition, N1, or N2 (completed) if the fieldBDate, date1, or tagDt fields
#' in the litter table are filled in, respectively. Finally, if the litter table dates
#' are empty, the rep_con field in the trapping record is used and status P0, P1, P2,
#' or P3 is assigned for rep_con = 1, 2, 3, 4, respectively.
#'
#' @param con Connection to KRSP database
#' @param grid character; a single grid to map
#' @param year integer; defaults to current year
#' @param data logical; if TRUE return data frame instead of displaying the table
#'
#' @return Displays and returns an interactive \code{DT} table of seasonal workflow
#'   progress for all females, unless \code{data} is TRUE, in which case a data
#'   frame is returned and no table is displayed.
#' @export
#' @examples
#' con <- krsp_connect()
#' krsp_progress(con, "JO", 2015, data = TRUE) %>%
#' head()
#' krsp_progress(con, "KL", 2011)
krsp_progress <- function(con, grid, year, data) {
UseMethod("krsp_progress")
}
#' @export
krsp_progress.krsp <- function(con, grid, year = current_year(), data = FALSE) {
# assertions on arguments
assert_that(inherits(con, "src_mysql"),
valid_year(year, single = TRUE),
valid_grid(grid, single = TRUE))
year <- as.integer(year)
grid_choice <- grid
# query for most recent trapping record
female_query <- sprintf(
"SELECT
t.squirrel_id, t.date AS trap_date,
t.taglft, t.tagrt,
t.color_left, t.color_right,
t.locx, t.locy,
t.ft, t.rep_con, t.nipple
FROM
trapping t
INNER JOIN squirrel s
ON t.squirrel_id = s.id
WHERE
s.sex = 'F'
AND s.gr = '%s'
AND t.rep_con IS NOT NULL
AND (t.squirrel_id, t.date) IN (
SELECT squirrel_id, MAX(date) as max_date
FROM trapping
WHERE YEAR(date) = %i AND rep_con IS NOT NULL
GROUP BY squirrel_id)
AND t.squirrel_id NOT IN (
SELECT j.squirrel_id
FROM JUVENILE j
LEFT JOIN LITTER l
ON j.litter_id = l.id
WHERE
YEAR(COALESCE(fieldBDate, date1, tagDt)) = %i
AND GRID = '%s'
);", grid_choice, year, year, grid_choice)
# suppressWarnings to avoid typcasting warnings
suppressWarnings({
females <- krsp_sql(con, female_query)
litter <- tbl(con, "litter") %>%
filter_(~ yr == year) %>%
select_("id", "squirrel_id", "br", "ln",
"fieldBDate", "date1", "tagDt") %>%
collect()
})
# remove multiple trapping records from same date
females <- females %>%
# remove dead squirrels
filter_(~ ft %in% 1:3) %>%
group_by_("squirrel_id") %>%
filter_(~ row_number() == 1) %>%
ungroup()
# bring in litter data
rep_con_map <- c("P0", "P1", "P2", "P3")
females <- left_join(females, litter, by = "squirrel_id") %>%
arrange_("squirrel_id", "ln") %>%
# sort out the various permutations of data - messy!
mutate_(
trap_date = ~ suppressWarnings(as.Date(lubridate::ymd(trap_date))),
ln = ~ as.character(ifelse(ln %in% 1:3, ln, NA)),
ln = ~ ifelse(is.na(id), "-", ln),
litter_date = ~ pmax(fieldBDate, date1, tagDt, na.rm = TRUE),
litter_date = ~ suppressWarnings(as.Date(lubridate::ymd(litter_date))),
litter_status = ~ ifelse(!is.na(tagDt), "N2",
ifelse(!is.na(date1), "N1",
ifelse(!is.na(fieldBDate), "Parturition", NA))),
# use breeding status to assess non-breeders and lost litters
litter_status = ~ ifelse(br == 0 & is.na(litter_status), "Non-breeder",
litter_status),
litter_status = ~ ifelse(br %in% c(2, 4, 7) & litter_status != "N2",
"LL", litter_status),
trap_status = ~ ifelse(!is.na(nipple) & nipple == 5, "LL",
ifelse(is.na(rep_con) | !rep_con %in% 1:4, NA,
rep_con_map[rep_con])),
# nest record more recent than trap record
status = ~ ifelse(is.na(litter_status), trap_status, litter_status),
# if trap later than nest and lost litter or pregnant, takes precedence
trap_precedence = ~ (is.na(litter_date) | litter_date < trap_date),
status = ~ ifelse(trap_precedence & trap_status != "P0", trap_status, status),
# status = ~ ifelse(trap_precedence & !is.na(litter_date) & litter_status == "N2",
# trap_status, status),
status = ~ factor(status,
levels = c("P3", "P2", "P1",
"Parturition", "N1",
"LL", "P0", "N2", "Non-breeder")),
completion = ~ ifelse(status %in% c("LL", "N2", "P0", "Non-breeder"),
as.character(status), "In Progress"),
completion = ~ factor(completion,
levels = c("In Progress", "LL", "P0", "Non-breeder",
"N2")),
# convert trap and litter status to factor
trap_status = ~ factor(trap_status, levels = c(rev(rep_con_map), "LL")),
litter_status = ~ factor(litter_status,
levels = c("Parturition", "N1", "N2",
"LL", "Non-breeder"))) %>%
# prepare tags, colours, and locs
mutate_(
color_left = ~ ifelse(is.na(color_left) | color_left == "",
"-", color_left),
color_right = ~ ifelse(is.na(color_right) | color_right == "",
"-", color_right),
taglft = ~ ifelse(is.na(taglft) | taglft == "", "-", taglft),
tagrt = ~ ifelse(is.na(tagrt) | tagrt == "", "-", tagrt),
locx = ~ ifelse(is.na(locx) | locx == "", "-", locx),
locy = ~ ifelse(is.na(locy) | locy == "", "-", locy),
colours = ~ paste(color_left, color_right, sep = "/"),
tags = ~ paste(taglft, tagrt, sep = "/"),
loc = ~ paste(locx, locy, sep = "/"))
# target trap date
females <- females %>%
mutate_(target_trap_date = ~ next_trap(as.character(status), trap_date))
# sensible ordering
females <- females %>%
group_by_("squirrel_id") %>%
summarize_(arr_comp = ~ min(as.integer(completion), na.rm = TRUE),
arr_status = ~ min(as.integer(status), na.rm = TRUE)) %>%
inner_join(females, by = "squirrel_id") %>%
arrange_("arr_comp", "arr_status", "squirrel_id", "ln") %>%
select_("squirrel_id", "tags", "colours", "loc", litter_number = "ln",
"status", "litter_status", "litter_date",
"trap_status", "trap_date", "target_trap_date")
# return raw data frame or DataTable
if (data) {
return(females)
} else {
progress_datatable(females)
}
}
progress_datatable <- function(df) {
col_names <- c("ID", "Tags", "Colours", "Loc", "Litter",
"Status", "Litter Status", "Litter Date",
"Trap Status", "Last Trapped", "Trap By")
dt <- DT::datatable(df,
rownames = FALSE,
colnames = col_names,
class = "nowrap stripe compact",
options = list(
paging = FALSE,
searching = FALSE,
info = FALSE,
columnDefs = list(list(className = 'dt-center',
targets = c(2:10)))))
# highlight based on status
clr_lvl <- c("LL", "Non-breeder",
"P0", "P1", "P2", "P3",
"Parturition", "N1", "N2")
clr_bg <- c("#555555", "#FFFFFF",
"#FFFFFF", "#4DAF4A", "#4DAF4A", "#4DAF4A",
"#E41A1C", "#FF7F00", "#377EB8")
clr_txt <- c("#FFFFFF", "#000000",
"#000000", "#FFFFFF", "#FFFFFF", "#FFFFFF",
"#FFFFFF", "#FFFFFF", "#FFFFFF")
dt <- dt %>%
DT::formatStyle("status",
color = DT::styleEqual(clr_lvl, clr_txt),
textAlign = "center",
fontWeight = "bold",
backgroundColor = DT::styleEqual(clr_lvl, clr_bg))
return(dt)
}
| /R/krsp-progress.r | no_license | mstrimas/krsp | R | false | false | 8,678 | r |
rollingVolatilityContribution = function(contribution_breakdown, width = 63)
{
#contribution_breakdown = coe.cbd_sector
tslices = createTimeSlices2(data = contribution_breakdown, initialWindow = width, fixedWindow = T, on = "days")
return(as.xts(t(sapply(tslices, FUN = function(t)return(volatilityContribution(contribution_breakdown[t, ]))))))
}
ewmaVolatilityContribution = function(contribution_breakdown, lambda, width = 63)
{
  #contribution_breakdown = coe.cbd_sector
tslices = createTimeSlices2(data = contribution_breakdown, initialWindow = width, fixedWindow = T, on = "days")
return(as.xts(t(sapply(tslices, FUN = function(t)return(volatilityBreakdown(contribution_breakdown[t, ]))))))
} | /Functions/rollingVolatilityContribution_test.R | no_license | bplloyd/R-risk-mgmt | R | false | false | 696 | r |
setwd("E:/R_Projects/stat545_Practces_cms/PCA")
rm(list = ls())
library(tidyverse)
library(stringr)
library(forcats)
Mutate_VarCollapse_criteria <- function(data, collapsing_vars, ...){
library(tidyverse)
library(stringr)
library(forcats)
data <- mutate(data,
criteria = data[[collapsing_vars[[1]]]],
criteria = as.factor(criteria),
...)
if (length(collapsing_vars)>1){
for (i in 1:(length(collapsing_vars)-1)) {
data <- mutate(data,
criteria = str_c(criteria, data[[collapsing_vars[[i+1]]]],
sep = "_", ...),
criteria = as.factor(criteria),
...)
}
}
return(data)
}
Mutate_VarCollapse_facet <- function(data, collapsing_vars, ...){
library(tidyverse)
library(stringr)
library(forcats)
data <- mutate(data,
facet = data[[collapsing_vars[[1]]]],
facet = as.factor(facet),
...)
if (length(collapsing_vars)>1){
for (i in 1:(length(collapsing_vars)-1)) {
data <- mutate(data,
facet = str_c(facet, data[[collapsing_vars[[i+1]]]],
sep = "_", ...),
facet = as.factor(facet),
...)
}
}
return(data)
}
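# Usage sketch for the two helpers above (hypothetical toy data, not part of
# the analysis below): the selected columns are pasted together with "_" and
# returned as a single factor column named "criteria" (or "facet").
# toy <- data.frame(Material = c("W", "R"), Coating = c("GC", "NG"))
# Mutate_VarCollapse_criteria(toy, collapsing_vars = c("Material", "Coating"))$criteria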
Exposed <- readRDS("Exposed.rds")
colour <- readRDS("colour.rds")
crit_vars <- c("Material", "Coating", "Cure")
AnalCrit <- c("Material")
FacetCrit <- c("Coating","Cure")
colour_scheme <- str_c(AnalCrit, collapse = "_")
Colour <- unique(colour[[colour_scheme]])
E0 <- Exposed %>%
Mutate_VarCollapse_criteria(collapsing_vars = AnalCrit) %>%
Mutate_VarCollapse_facet(collapsing_vars = FacetCrit)
E0$criteria
E0$Material
E0$facet
E0_crit <- E0 %>%
group_by(Material, Coating, Cure, criteria) %>%
summarize() %>%
mutate(groups = criteria, criteria = NULL)
Exposed.pca <- E0 %>%
select(Time, Hardness, Roughness) %>%
filter(!is.na(Hardness)) %>%
  prcomp(center = TRUE, scale = TRUE)
types <- E0 %>%
filter(!is.na(Hardness)) %>%
.$criteria
# library(ggbiplot)
#
# # E0 %>%
# # filter(!is.na(Hardness)) %>%
# # select(Time, Hardness, Roughness, criteria) %>%
# # with(ggbiplot(Exposed.pca, obs.scale = 1, var.scale = 1,
# # groups = criteria, ellipse = TRUE,
# # circle = TRUE) +
# # scale_colour_manual(values = Colour) +
# # theme(plot.subtitle = element_text(vjust = 1),
# # plot.caption = element_text(vjust = 1),
# # panel.grid.major = element_line(colour = "gray5",
# # linetype = "longdash"),
# # panel.grid.minor = element_line(colour = "gray5",
# # linetype = "dotdash"),
# # panel.background = element_rect(fill = "gray100"),
# # axis.text = element_text(colour = "gray5"))
# # # facet_wrap(~ Coating)
# # )
#
# (plot <- ggbiplot(Exposed.pca, obs.scale = 1, var.scale = 1,
# groups = types, ellipse = TRUE,
# circle = TRUE) +
# scale_colour_manual(values = Colour) +
# theme(plot.subtitle = element_text(vjust = 1),
# plot.caption = element_text(vjust = 1),
# panel.grid.major = element_line(colour = "gray5",
# linetype = "longdash"),
# panel.grid.minor = element_line(colour = "gray5",
# linetype = "dotdash"),
# panel.background = element_rect(fill = "gray100"),
# axis.text = element_text(colour = "gray5")))
# # facet_wrap(~ str_split_fixed(groups, pattern= "_", n = 2)[,1])
#
#
#
# # (plot$data <- inner_join(plot$data, E0_crit))
#
# plot + facet_wrap(~ Coating)
#
# (str_split_fixed(plot$data$groups, pattern= "_", n = 2)[,1])
# dim(plot$data)
# install.packages("ggfortify")
library(ggfortify)
library(cluster)
EE0 <- E0 %>%
filter(!is.na(Hardness))
EE <- select(EE0,Time, Roughness, Hardness)
(p <- autoplot(prcomp(EE, center = TRUE, scale = TRUE), x= 1, y= 2,
data = EE0,
colour = "criteria", loadings = TRUE, loadings.label = TRUE,
frame = TRUE, frame.type = 'norm',
loadings.colour = "#000000", loadings.label.colour = "#000000",
alpha = 1/3, size = 3) +
facet_wrap(~ facet) +
coord_fixed()+
scale_colour_manual(values = Colour) +
scale_fill_manual(values = Colour) +
theme(plot.subtitle = element_text(vjust = 1),
plot.caption = element_text(vjust = 1),
panel.grid.major = element_line(colour = "gray5",
linetype = "longdash"),
panel.grid.minor = element_line(colour = "gray5",
linetype = "dotdash"),
panel.background = element_rect(fill = "gray100"),
axis.text = element_text(colour = "gray5")))
ggsave("C:/additional/UBC/MENG_Papers/PCA/Material_CoatingCureFacet.png",p, scale = 2)
p_d <- select(p$data, PC1, PC2, Time, Roughness,Hardness,
Material, Coating, Cure, criteria)
# mutate(Coating = as.character(Coating),
# criteria = as.character(criteria))
# p_d %>%
# with(autoplot(pam(p_d, 6), frame = TRUE, frame.type = 'norm', colour = "criteria") +
# aes(group = criteria))
autoplot(pam(p_d, 3), frame = TRUE, frame.type = 'norm', colour = "criteria",
groups = "criteria") +
aes(group = "criteria", colour = "criteria")
# facet_wrap(~ Coating)
p_d$Coating
# , data = EE0, colour = "criteria"
EE2 <- select(EE0,Time, Roughness, Hardness, criteria)
autoplot(kmeans(EE,6), data = EE0,
colour = "criteria", loadings = TRUE, loadings.label = TRUE,
loadings.colour = "blue")
autoplot(fanny(EE,6), frame = TRUE)
autoplot(pam(EE, 6), frame = TRUE, frame.type = 'norm')
# Coding to find correlation of material, coating, cure with physical properties
x0 <- scale(EE, center = TRUE, scale = TRUE)
pca <- prcomp(EE, center = TRUE, scale = TRUE)
x1 <- pca$x
pca2 <- prcomp(EE, center = TRUE, scale = TRUE, retx = TRUE)
x2 <- pca2$x
x2==x1
diag(t(x2%*%t(pca$rotation))%*%(x2%*%t(pca$rotation)))
pca3 <- prcomp(EE, center = TRUE, scale = TRUE, rank=1, retx = TRUE)
x3 <- pca3$x
pca3$rotation
diag(t(x3%*%t(pca3$rotation))%*%(x3%*%t(pca3$rotation)))/diag(t(x0)%*%x0)
pca$sdev/sum(pca$sdev)
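# Note (assumption: the ratio above is meant as "proportion of variance
# explained"): prcomp() returns standard deviations, so the usual proportions
# use the squared sdev.
# prop_var <- pca$sdev^2 / sum(pca$sdev^2)
# cumsum(prop_var)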
MaterialNo <- data_frame(Material = as.factor(c("W", "R", "C")),
Material_No = c(1,2,3))
CoatingNo <- data_frame(Coating = as.factor(c("GC", "NG")),
Coating_No = c(1,2))
CureNo <- data_frame(Cure = as.factor(c("FC", "PC")),
Cure_No = c(1,2))
select(inner_join(E0,MaterialNo), Material , Material_No) %>%
print(n = Inf)
e01 <- inner_join(E0, MaterialNo)
e02 <- inner_join(e01, CoatingNo)
E_types <- inner_join(e02, CureNo)
E_types_pca <- E_types %>%
filter(!is.na(Hardness),
!is.na(Roughness)) %>%
select(Material_No, Coating_No, Cure_No, Time, Hardness, Roughness)
library(Matrix)
rankMatrix(as.matrix.data.frame(E_types_pca))
pca_model <- prcomp(E_types_pca, center = TRUE, scale = TRUE)
autoplot(pca_model, x= 1, y= 2,
loadings = TRUE, loadings.label = TRUE)
| /PCA_Exposed_revisedFS/PCA_Exposed.R | no_license | sinaneza/stat545_Practces_cms | R | false | false | 6,793 | r |
install.packages("plyr")
install.packages("dplyr")
install.packages("ggplot2")
install.packages("knitr")
library("plyr")
library("dplyr")
library("ggplot2")
library("knitr")
library("markdown")
download.file("https://d396qusza40orc.cloudfront.net/repdata/data/StormData.csv.bz2", "StormData.csv.bz2")
stormData <- read.csv("StormData.csv.bz2")
head(stormData)
casualties <- ddply(stormData, .(EVTYPE), summarize, fatalities = sum(FATALITIES), injuries = sum(INJURIES))
casualties$totalCasualties <- casualties$fatalities + casualties$injuries
casualties <- casualties[with(casualties, order( - totalCasualties)),][1:5,]
ggplot(casualties, aes(x = EVTYPE, y = totalCasualties)) + geom_bar(stat = "identity", fill = "light blue") + xlab("Event type") + ylab("Total casualties")
damages <- ddply(stormData, .(EVTYPE), summarize, property = sum(PROPDMG), crop = sum(CROPDMG))
damages$totalDamages <- damages$property + damages$crop
damages <- damages[with(damages, order( - totalDamages)),][1:5,]
ggplot(damages, aes(x = EVTYPE, y = totalDamages)) + geom_bar(stat = "identity", fill = "light blue") + xlab("Event type") + ylab("Total damages ($)")
knit("Markdown.rmd")
markdownToHTML('Markdown.md', 'Markdown.html', options = c("use_xhml"))
system("pandoc -s Markdown.html -o Markdown.pdf") | /Assignment/Assignment.Project/script.R | no_license | Elmesp/RepData_Assignment2 | R | false | false | 1,291 | r | install.packages("plyr")
install.packages("dplyr")
install.packages("ggplot2")
install.packages("knitr")
library("plyr")
library("dplyr")
library("ggplot2")
library("knitr")
library("markdown")
download.file("https://d396qusza40orc.cloudfront.net/repdata/data/StormData.csv.bz2", "StormData.csv.bz2")
stormData <- read.csv("StormData.csv.bz2")
head(stormData)
casualties <- ddply(stormData, .(EVTYPE), summarize, fatalities = sum(FATALITIES), injuries = sum(INJURIES))
casualties$totalCasualties <- casualties$fatalities + casualties$injuries
casualties <- casualties[with(casualties, order( - totalCasualties)),][1:5,]
ggplot(casualties, aes(x = EVTYPE, y = totalCasualties)) + geom_bar(stat = "identity", fill = "light blue") + xlab("Event type") + ylab("Total casualties")
damages <- ddply(stormData, .(EVTYPE), summarize, property = sum(PROPDMG), crop = sum(CROPDMG))
damages$totalDamages <- damages$property + damages$crop
damages <- damages[with(damages, order( - totalDamages)),][1:5,]
ggplot(damages, aes(x = EVTYPE, y = totalDamages)) + geom_bar(stat = "identity", fill = "light blue") + xlab("Event type") + ylab("Total damages ($)")
knit("Markdown.rmd")
markdownToHTML('Markdown.md', 'Markdown.html', options = c("use_xhml"))
system("pandoc -s Markdown.html -o Markdown.pdf") |
## Assume matrix supplied is always invertible
## Makes matrix x and caches its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInv <- function(solveMatrix) inv <<- solveMatrix
getInv <- function() inv
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
## Compute inverse of matrix returned by makeCacheMatrix()
## Else, retrieve inverse if already calculated
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInv()
if(!is.null(inv)){
message("retrieving from cache")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInv(inv)
inv
}
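## Usage sketch (hypothetical 2x2 matrix): the second call to cacheSolve()
## returns the cached inverse instead of recomputing it
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(m)  ## computes, caches and returns the inverse
## cacheSolve(m)  ## prints "retrieving from cache" and returns the cached inverse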
| /cachematrix.R | no_license | gerpang/ProgrammingAssignment2 | R | false | false | 788 | r |
library(pairwiseCI)
### Name: QBmover
### Title: Confidence intervals for ratios of proportions based on the
### quasibinomial assumption
### Aliases: QBmover
### Keywords: htest
### ** Examples
QBmover(succ=c(0,0,1, 0,6,8), fail=c(20,20,18, 20,14,12),
trt=factor(rep(c("A", "B"), c(3,3))), conf.level = 0.95,
alternative = "two.sided", grid = NULL)
| /data/genthat_extracted_code/pairwiseCI/examples/QBmover.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 364 | r |
## File Name: sampling_hrm_mu_1dim.R
## File Version: 0.10
##########################################################
# sampling theta one dimension
sampling_hrm_mu_1dim <- function( theta, prior, N ){
m1 <- mean( theta )
m2 <- prior$mu$M
w1 <- N / stats::var(theta)
w2 <- 1 / prior$mu$SD^2
m0 <- ( w1*m1 + w2*m2 ) / (w1 + w2 )
s0 <- 1 / sqrt( w1 + w2 )
mu_new <- stats::rnorm( 1, mean=m0, sd=s0 )
return(mu_new)
}
###########################################################
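## usage sketch (hypothetical inputs): mu is drawn from its normal full
## conditional, precision-weighting mean(theta) against the prior mean;
## assumes prior has the form list( mu=list(M=..., SD=...) ) and N is the
## number of theta values
## theta <- stats::rnorm(50, mean=1, sd=0.8)
## prior <- list( mu=list(M=0, SD=1) )
## sampling_hrm_mu_1dim( theta=theta, prior=prior, N=length(theta) )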
| /immer/R/sampling_hrm_mu_1dim.R | no_license | akhikolla/InformationHouse | R | false | false | 524 | r |
\name{LP.comean}
\alias{LP.comean}
\title{Function to find LP-comeans}
\description{
The function computes the LP comeans between \code{x} and \code{y}.
}
\usage{
LP.comean(x, y, perm=0)
}
\arguments{
\item{x}{vector, observations of an univariate random variable}
\item{y}{vector, observations of another univariate random variable}
\item{perm}{ Number of permutations for approximating p-value, set to 0 to use asymptotic p-value. }
}
\value{
A list containing:
\item{LPINFOR }{The test statistics based on LP comeans}
\item{p.val}{Test p-value}
\item{LP.matrix}{LP comean matrix}
}
\references{Mukhopadhyay, S. and Wang, K. (2020), "A Nonparametric Approach to High-dimensional K-sample Comparison Problem", arXiv:1810.01724.
Parzen, E. and Mukhopadhyay, S. (2012) "Modeling, Dependence, Classification, United Statistical Science, Many Cultures".
}
\author{ Mukhopadhyay, S. and Wang, K.}
\examples{
#example: LP-comean for two simple vectors:
y<-c(1,2,3,4,5)
z<-c(0,-1,-1,3,4)
comeanYZ=LP.comean(y,z)
#sum square statistics of LP comean:
comeanYZ$LPINFOR
#p-value:
comeanYZ$p.val
#comean matrix:
comeanYZ$LP.matrix
}
\keyword{ GLP other functions }
| /man/LP.comean.Rd | no_license | cran/LPKsample | R | false | false | 1,224 | rd |
# WQtauCens.R
WQtauCens <- function(ri,qi,w,lgrid,c1=1.547647,c2=6.08,N=100,maxit=750,tol=1e-6,Qrefine=TRUE) {
# Weighted Qtau estimate (grid optimization)
m <- length(ri)
X <- matrix(0,ncol=2,nrow=m)
nl <- length(lgrid)
sL1 <- sL2 <- aL1 <- aL2 <- bL1 <- bL2 <- lgrid
qq <- list(beta = 0, Tscale = 0, nit=0)
b1 <- integrate(rhoBWdnorm,lower=-10,upper=10,k=c1)$value
b2 <- integrate(rhoBWdnorm,lower=-10,upper=10,k=c2)$value
for (i in 1:nl) {
lam <- lgrid[i]
ql <- qloggamma(qi,lambda=lam)
z <- RegtauW.f(x=ql,y=ri,w=w,
b1=b1,c1=c1,b2=b2,c2=c2,N=N,tol=tol,seed=567) # Fortran
# z <- RegtauW(ql,ri,w,b1,c1,b2,c2,N) # S
X <- cbind(1,ql)
B0 <- c(z$ao,z$bo)
s0 <- z$to
if (Qrefine) {
qq <- IRLStauW(X=X,y=ri,w=w,inib=B0,iniscale=s0,
b1=b1,c1=c1,b2=b2,c2=c2,maxit=maxit,tol=tol)
} else {
qq$Tscale <- s0
qq$beta <- B0
}
#cat(i,lgrid[i],z$to,qq$Tscale,qq$nit,"\n")
sL1[i] <- z$to; sL2[i] <- qq$Tscale
aL1[i] <- z$ao; aL2[i] <- qq$beta[1]
bL1[i] <- z$bo; bL2[i] <- qq$beta[2]
}
io1 <- (1:nl)[sL1 == min(sL1)]
io1 <- min(io1)
lam.est1 <- lgrid[io1]
s.est1 <- sL1[io1]
a.est1 <- aL1[io1]
b.est1 <- bL1[io1]
io2 <- (1:nl)[sL2 == min(sL2)]
io2 <- min(io2)
lam.est2 <- lgrid[io2]
s.est2 <- sL2[io2]
a.est2 <- aL2[io2]
b.est2 <- bL2[io2]
res <- list(lam1=lam.est1,Stau1=s.est1,mu1=a.est1,sig1=b.est1,
lam2=lam.est2,Stau2=s.est2,mu2=a.est2,sig2=b.est2,sL1=sL1,sL2=sL2)
return(res)
}
#######################
# auxiliary functions #
#######################
KpMrG <- function(t,x) {
# Kaplan-Meier with Greenwood's estimate
# t: sorted survival times
# x: censored times
if (length(x)==0) {
k <- length(t)
tu <- unique(t)
pt <- rep(1/k,k)
pt[k+1] <- 0
Ft <- cumsum(pt[1:k])
Vt <- Ft*(1-Ft)/k
if (is.na(Vt[k])) Vt[k] =Vt[k-1]}
else {
n0 <- length(t)+length(x)
n1 <- n0-sum(x<min(t))
tu <- unique(t)
d <- table(t)
k <- length(tu)
m <- rep(0,k-1)
for (i in 1:(k-1)) m[i] <- sum(tu[i] <= x & x < tu[i+1])
nj <- rep(n1,k)
ds <- cumsum(c(0,d)[1:k])
nj <- nj-ds
cs <- cumsum(c(0,m)[1:k])
nj <- nj-cs
Ft <- cumprod((nj-d)/nj)
pt <- as.numeric(diff(c(0,1-Ft,1)))
    # Greenwood's estimate   # to check
gt <- rep(0,k)
gt <- (d/(nj*(nj-d)))
Vt <- (Ft^2)*cumsum( gt )
if (is.na(Vt[k])) Vt[k] =Vt[k-1] } # ???
# k : number of different survival times
# tu[1:k] : unique survival times
# Ft[1:k] : Kaplan Meier survival function at tu (length=k)
# pt[1:k+1]: point masses at tu;
# the support of pt[k+1] is undefined
# Vt[1:k] : variance of survival function
list(k=k,tu=tu,Ft=Ft,pt=pt,Vt=Vt)}
F.KM <- function(x,tu,wkm){ # KM cdf at x
sum(wkm[tu <= x])}
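# usage sketch (hypothetical toy data; wkm taken as the point masses pt[1:k],
# an assumption about the intended weights)
# km <- KpMrG(t=sort(c(2,3,5,8)), x=c(4,6))
# F.KM(5, tu=km$tu, wkm=km$pt[1:km$k])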
# Biweight functions
rhoBW <- function(x,k){
k2 <- k*k; k4 <- k2*k2; k6 <- k2*k4
x2 <- x*x; x4 <- x2*x2; x6 <- x4*x2
(3*x2/k2-3*x4/k4+x6/k6)*(abs(x)<k)+(abs(x)>=k)}
psiBW <- function(x,k){
(6/k)*(x/k)*(1-(x/k)^2)^2*(abs(x)<k)}
pspBW <- function(x,k){
k2 <- k*k; k4 <- k2*k2; k6 <- k2*k4
x2 <- x*x; x4 <- x2*x2
(6/k2-36*x2/k4+30*x4/k6)*(abs(x)<k)}
## rhoBW <- function(x,k){
## Mchi(x,k,psi="optimal")
## }
## psiBW <- function(x,k){
## Mpsi(x,k,psi="optimal")
## }
## pspBW <- function(x,k){
## NA
## }
rhoBWdnorm <- function(x,k) {
rhoBW(x,k)*dnorm(x)
}
MscaleW <- function(u,w,b1,c1,tol) {
h <- 1
it <- 0
s0 <- median(abs(u*w))/.6745
if (s0 > tol) {
while((h > tol) & (it < 50)) {
it <- it+1
s1 <- (s0^2)*mean(rhoBW((u*w/s0),c1)) / b1
s1 <- s1^(1/2)
h <- abs(s1-s0)/s0
s0 <- s1
}
}
return(s0)
}
TauscaleW <- function(u,w,b1,c1,b2,c2,tol) {
tau <- tol
s0 <- MscaleW(u,w,b1,c1,tol)
if (s0 > tol)
tau <- sqrt(s0^2*mean(rhoBW(u*w/s0,c2)) / b2)
return(tau)
}
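# usage sketch (hypothetical residuals, unit weights): weighted tau-scale with
# the same biweight constants and consistency terms used in WQtauCens()
# c1 <- 1.547647; c2 <- 6.08
# b1 <- integrate(rhoBWdnorm,lower=-10,upper=10,k=c1)$value
# b2 <- integrate(rhoBWdnorm,lower=-10,upper=10,k=c2)$value
# TauscaleW(u=rnorm(100), w=rep(1,100), b1=b1, c1=c1, b2=b2, c2=c2, tol=1e-6)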
| /R/WQtauCens.R | no_license | cran/robustloggamma | R | false | false | 3,994 | r |