content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
theme_aapre0 <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
axis.text.x = element_text(angle = 60, hjust = 1),
legend.position = "top", legend.justification = "right",
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = -5),
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"))
theme_aapre <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(linetype = "dotted"),
panel.grid.major.x = element_line(linetype = "dashed"),
axis.text.x = element_text(angle = 60, hjust = 1),
legend.position = "top", legend.justification = "right",
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = 0),
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"))
theme_dens <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
# panel.grid.major = element_blank(),
panel.grid.major.y = element_line(linetype = "dotted"),
panel.grid.major.x = element_line(linetype = "dashed"),
legend.position = "top", legend.justification = "right",
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"),
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = 0),
axis.text.x = element_text(
hjust = -0.2,
vjust = 6
),
# text = element_text(size = 15),
axis.ticks.length.x = unit(0.5, "cm"))
# axis.text.x = element_text(, hjust = 1),
#https://scottishsnow.wordpress.com/2020/04/24/lubridate-ggplot-date-helpers/
theme_dens1_legend <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
# panel.grid.major = element_blank(),
panel.grid.major = element_line(linetype = "dotted"),
legend.position = "top", legend.justification = "right",
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = -7),
axis.text.x = element_text(angle = 60, hjust = 1),
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"))
theme_dens1 <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
# panel.grid.major = element_blank(),
panel.grid.major = element_line(linetype = "dotted"),
legend.position = "top", legend.justification = "right",
plot.title = element_text(family = 'Helvetica',
face = 'bold'
# hjust = 0, vjust = -7
),
axis.text.x = element_text(angle = 60, hjust = 1),
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"))
theme_dens2 <- theme_bw(base_size = 12) +
theme(
panel.grid.minor = element_blank(),
# panel.grid.major = element_blank(),
panel.grid.major = element_line(linetype = "dotted"),
legend.position = "top", legend.justification = "right",
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = -10),
plot.caption = element_text(hjust = 0,
color = "gray30", face = "italic"),
plot.margin=grid::unit(c(-1,1,0,1), "mm"))
theme_bw2 <- theme_bw()+
theme(panel.grid.major= element_line(color = gray(0.5),
linetype = "dashed", size = 0.05),
panel.grid.minor= element_blank(),
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = 0),
)
theme_juan <- function (base_size = 12, legen_pos = "top") {
theme_bw(base_size = base_size) %+replace%
theme(
plot.title = element_text(family = 'Helvetica',
face = 'bold',
hjust = 0,
vjust = 0),
axis.text = element_text(colour = "black"),
axis.title.x = element_text(colour = "black", size=rel(1)),
axis.title.y = element_text(colour = "black", angle=90),
strip.background = element_blank(),
strip.text = element_text(size = rel(1.1)),#,face = "bold"),
# panel.border = element_blank(),
axis.line = element_line(color='grey'),
panel.grid.minor = element_blank(),
# panel.grid.major = element_blank(),
panel.grid.major = element_line(linetype = "dotted"),
# panel.grid.major.y = element_line(linetype = "dotted"),
# panel.grid.major.x = element_line(linetype = "dotted") ,
# legend
legend.position=legen_pos,
panel.spacing = unit(1,"lines")
)
}
| /0 themes.R | no_license | juanchiem/trial_analysis | R | false | false | 5,481 | r |
##########################
# Merge tables in complexList format:
##########################
merge_complexes_lists = function(list_of_complexLists) {
# 1. Making unique tables for each complexList:
n = 0; new_table = list(); new_list_of_complexLists = list()
for (i in list_of_complexLists) {
n = n + 1
colnames(i) = NULL
list_unique_complexes = unique(i[,2])
for (j in 1:length(list_unique_complexes)) {
tmp = i[which(i[,2]==list_unique_complexes[j]),]
if (!is.matrix(tmp)) {
new_table[[j]] = tmp[1:2]
} else {
new_table[[j]] = tmp[1,]
}
}
new_list_of_complexLists[[n]] = do.call(rbind, new_table)
}
# 2. Function to remove from less prioritized list:
remotionator = function(list1, list2) {
matches = which(list2[,2] %in% list1[,2])
if (length(matches)>0) {
new_list2 = list2[-matches,]
} else {
new_list2 = list2
}
new_list2
}
# 3. Actual comparison and prioritized removal of repeated complexes:
n = length(new_list_of_complexLists)
for (i in 1:(n-1)) {
for (j in (i+1):n) {
new_list_of_complexLists[[j]] = remotionator(new_list_of_complexLists[[i]], new_list_of_complexLists[[j]])
}
}
merged_table = do.call(rbind, new_list_of_complexLists)
colnames(merged_table) = c("complex ID", "subunits")
result = merged_table
}
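# A minimal usage sketch (hypothetical two-column complexList matrices, not part
# of the iRefR data): the first list passed in is prioritized, so the complex
# shared by both lists ("P1,P2") keeps only the ID from list_a.
list_a = cbind(c("c1", "c2"), c("P1,P2", "P3,P4"))
list_b = cbind(c("x1", "x2"), c("P1,P2", "P5,P6"))
merged = merge_complexes_lists(list(list_a, list_b))
merged  # three rows: c1 and c2 from list_a, plus x2 ("P5,P6") from list_b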
| /R/merge_complexes_lists.R | permissive | antonio-mora/iRefR | R | false | false | 1,313 | r |
test_that("conversion between matrix and data.frame", {
mm = matrix(c(1:8,NA),ncol = 3,dimnames = list(letters[1:3],LETTERS[1:3]))
tdf = data.frame(
row = as.factor(c("a",
"b","c","a","b","c","a",
"b","c")),
col = as.factor(c("A",
"A","A","B","B","B","C",
"C","C")),
value = c(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, NA)
)
expect_equal(mat_df(mm),tdf)
expect_equal(mm,df_mat(tdf,row,col,value))
})
test_that("transpose of data.frame",{
expect_equal(
transpose(iris),
t_dt(iris) %>% setDT %>% setDF()
)
})
| /tests/testthat/test-tidymat.R | permissive | hope-data-science/tidyfst | R | false | false | 647 | r |
\name{ls_fit_ultrametric}
\encoding{UTF-8}
\alias{ls_fit_ultrametric}
\title{Least Squares Fit of Ultrametrics to Dissimilarities}
\description{
Find the ultrametric with minimal square distance (Euclidean
dissimilarity) to given dissimilarity objects.
}
\usage{
ls_fit_ultrametric(x, method = c("SUMT", "IP", "IR"), weights = 1,
control = list())
}
\arguments{
\item{x}{a dissimilarity object inheriting from or coercible to class
\code{"\link{dist}"}, or an ensemble of such objects.}
\item{method}{a character string indicating the fitting method to be
employed. Must be one of \code{"SUMT"} (default), \code{"IP"}, or
\code{"IR"}, or a unique abbreviation thereof.}
\item{weights}{a numeric vector or matrix with non-negative weights
for obtaining a weighted least squares fit. If a matrix, its
numbers of rows and columns must be the same as the number of
objects in \code{x}, and the lower diagonal part is used.
Otherwise, it is recycled to the number of elements in \code{x}.}
\item{control}{a list of control parameters. See \bold{Details}.}
}
\value{
An object of class \code{"\link{cl_ultrametric}"} containing the
fitted ultrametric distances.
}
\details{
For a single dissimilarity object \code{x}, the problem to be solved
is minimizing
\deqn{L(u) = \sum_{i,j} w_{ij} (x_{ij} - u_{ij})^2}
over all \eqn{u} satisfying the ultrametric constraints (i.e., for all
\eqn{i, j, k}, \eqn{u_{ij} \le \max(u_{ik}, u_{jk})}). This problem
is known to be NP hard (Krivanek and Moravek, 1986).
For an ensemble of dissimilarity objects, the criterion function is
\deqn{L(u) = \sum_b w_b \sum_{i,j} w_{ij} (x_{ij}(b) - u_{ij})^2,}
where \eqn{w_b} is the weight given to element \eqn{x_b} of the
ensemble and can be specified via control parameter \code{weights}
(default: all ones). This problem reduces to the above basic problem
with \eqn{x} as the \eqn{w_b}-weighted mean of the \eqn{x_b}.
We provide three heuristics for solving the basic problem.
Method \code{"SUMT"} implements the \acronym{SUMT} (Sequential
Unconstrained Minimization Technique, Fiacco and McCormick, 1968)
approach of de Soete (1986) which in turn simplifies the suggestions
in Carroll and Pruzansky (1980). (See \code{\link{sumt}} for more
information on the \acronym{SUMT} approach.) We then use a final
single linkage hierarchical clustering step to ensure that the
returned object exactly satisfies the ultrametric constraints. The
starting value \eqn{u_0} is obtained by \dQuote{random shaking} of the
given dissimilarity object (if not given). If there are missing
values in \code{x}, i.e., the given dissimilarities are
\emph{incomplete}, we follow a suggestion of de Soete (1984), imputing
the missing values by the weighted mean of the non-missing ones, and
setting the corresponding weights to zero.
Available control parameters are \code{method}, \code{control},
\code{eps}, \code{q}, and \code{verbose}, which have the same roles as
for \code{\link{sumt}}, and the following.
\describe{
\item{\code{nruns}}{an integer giving the number of runs to be
performed. Defaults to 1.}
\item{\code{start}}{a single dissimilarity, or a list of
dissimilarities to be employed as starting values.}
}
The default optimization using conjugate gradients should work
reasonably well for medium to large size problems. For \dQuote{small}
ones, using \code{nlm} is usually faster. Note that the number of
ultrametric constraints is of the order \eqn{n^3}, where \eqn{n} is
the number of objects in the dissimilarity object, suggesting to use
the \acronym{SUMT} approach in favor of
\code{\link[stats]{constrOptim}}.
If starting values for the \acronym{SUMT} are provided via
\code{start}, the number of starting values gives the number of runs
to be performed, and control option \code{nruns} is ignored.
Otherwise, \code{nruns} starting values are obtained by random shaking
of the dissimilarity to be fitted. In the case of multiple
\acronym{SUMT} runs, the (first) best solution found is returned.
Method \code{"IP"} implements the Iterative Projection approach of
Hubert and Arabie (1995). This iteratively projects the current
dissimilarities to the closed convex set given by the ultrametric
constraints (3-point conditions) for a single index triple \eqn{(i, j,
k)}, in fact replacing the two largest values among \eqn{d_{ij},
d_{ik}, d_{jk}} by their mean. The following control parameters can
be provided via the \code{control} argument.
\describe{
\item{\code{nruns}}{an integer giving the number of runs to be
performed. Defaults to 1.}
\item{\code{order}}{a permutation of the numbers from 1 to the
number of objects in \code{x}, specifying the order in which the
ultrametric constraints are considered, or a list of such
permutations.}
\item{\code{maxiter}}{an integer giving the maximal number of
iterations to be employed.}
\item{\code{tol}}{a double indicating the maximal convergence
tolerance. The algorithm stops if the total absolute change in
the dissimilarities in an iteration is less than \code{tol}.}
\item{\code{verbose}}{a logical indicating whether to provide some
output on minimization progress. Defaults to
\code{getOption("verbose")}.}
}
If permutations are provided via \code{order}, the number of these
gives the number of runs to be performed, and control option
\code{nruns} is ignored. Otherwise, \code{nruns} randomly generated
orders are tried. In the case of multiple runs, the (first) best
solution found is returned.
Non-identical weights and incomplete dissimilarities are currently not
supported.
Method \code{"IR"} implements the Iterative Reduction approach
suggested by Roux (1988), see also Barthélémy and Guénoche (1991).
This is similar to the Iterative Projection method, but modifies the
dissimilarities between objects proportionally to the aggregated
change incurred from the ultrametric projections. Available control
parameters are identical to those of method \code{"IP"}.
Non-identical weights and incomplete dissimilarities are currently not
supported.
It should be noted that all methods are heuristics which can not be
guaranteed to find the global minimum. Standard practice would
recommend to use the best solution found in \dQuote{sufficiently many}
replications of the base algorithm.
}
\references{
J.-P. Barthélémy and A. Guénoche (1991).
\emph{Trees and proximity representations}.
Chichester: John Wiley & Sons.
ISBN 0-471-92263-3.
J. D. Carroll and S. Pruzansky (1980).
Discrete and hybrid scaling models.
In E. D. Lantermann and H. Feger (eds.), \emph{Similarity and Choice}.
Bern (Switzerland): Huber.
L. Hubert and P. Arabie (1995).
Iterative projection strategies for the least squares fitting of tree
structures to proximity data.
\emph{British Journal of Mathematical and Statistical Psychology},
\bold{48}, 281--317.
\doi{10.1111/j.2044-8317.1995.tb01065.x}.
M. Krivanek and J. Moravek (1986).
NP-hard problems in hierarchical tree clustering.
\emph{Acta Informatica}, \bold{23}, 311--323.
\doi{10.1007/BF00289116}.
M. Roux (1988).
Techniques of approximation for building two tree structures.
In C. Hayashi and E. Diday and M. Jambu and N. Ohsumi (Eds.),
\emph{Recent Developments in Clustering and Data Analysis}, pages
151--170.
New York: Academic Press.
G. de Soete (1984).
Ultrametric tree representations of incomplete dissimilarity data.
\emph{Journal of Classification}, \bold{1}, 235--242.
\doi{10.1007/BF01890124}.
G. de Soete (1986).
A least squares algorithm for fitting an ultrametric tree to a
dissimilarity matrix.
\emph{Pattern Recognition Letters}, \bold{2}, 133--137.
\doi{10.1016/0167-8655(84)90036-9}.
}
\seealso{
\code{\link{cl_consensus}} for computing least squares (Euclidean)
consensus hierarchies by least squares fitting of average ultrametric
distances;
\code{\link{l1_fit_ultrametric}}.
}
\examples{
## Least squares fit of an ultrametric to the Miller-Nicely consonant
## phoneme confusion data.
data("Phonemes")
## Note that the Phonemes data set has the consonant misclassification
## probabilities, i.e., the similarities between the phonemes.
d <- as.dist(1 - Phonemes)
u <- ls_fit_ultrametric(d, control = list(verbose = TRUE))
## Cophenetic correlation:
cor(d, u)
## Plot:
plot(u)
## ("Basically" the same as Figure 1 in de Soete (1986).)
}
\keyword{cluster}
\keyword{optimize}
| /man/ls_fit_ultrametric.Rd | no_license | cran/clue | R | false | false | 8,695 | rd |
# View coverage for this file using
# library(testthat); library(FeatureExtraction)
# covr::file_report(covr::file_coverage("R/Normalization.R", "tests/testthat/test-tidyCovariates.R"))
test_that("Test exit conditions ", {
# Covariate Data object check
expect_error(tidyCovariateData(covariateData = list()))
# CovariateData object closed
cvData <- FeatureExtraction:::createEmptyCovariateData(
cohortId = 1,
aggregated = FALSE,
temporal = FALSE
)
Andromeda::close(cvData)
expect_error(tidyCovariateData(covariateData = cvData))
# CovariateData aggregated
cvData <- FeatureExtraction:::createEmptyCovariateData(
cohortId = 1,
aggregated = TRUE,
temporal = FALSE
)
expect_error(tidyCovariateData(covariateData = cvData))
})
test_that("Test empty covariateData", {
cvData <- FeatureExtraction:::createEmptyCovariateData(
cohortId = 1,
aggregated = FALSE,
temporal = FALSE
)
result <- tidyCovariateData(covariateData = cvData)
expect_equal(length(result$covariates$covariateId), length(cvData$covariates$covariateId))
})
test_that("tidyCovariates works", {
# Generate some data:
createCovariate <- function(i, analysisId) {
return(tibble(
covariateId = rep(i * 1000 + analysisId, i),
covariateValue = rep(1, i)
))
}
covariates <- lapply(1:10, createCovariate, analysisId = 1)
covariates <- do.call("rbind", covariates)
covariates$rowId <- 1:nrow(covariates)
metaData <- list(populationSize = nrow(covariates))
frequentCovariate <- createCovariate(40, analysisId = 2)
frequentCovariate$rowId <- sample.int(metaData$populationSize, nrow(frequentCovariate), replace = FALSE)
infrequentCovariate <- createCovariate(1, analysisId = 3)
infrequentCovariate$rowId <- sample.int(metaData$populationSize, nrow(infrequentCovariate), replace = FALSE)
covariates <- rbind(covariates, frequentCovariate, infrequentCovariate)
covariateRef <- tibble(
covariateId = c(1:10 * 1000 + 1, 40002, 1003),
analysisId = c(rep(1, 10), 2, 3)
)
covariateData <- Andromeda::andromeda(
covariates = covariates,
covariateRef = covariateRef
)
attr(covariateData, "metaData") <- metaData
class(covariateData) <- "CovariateData"
tidy <- tidyCovariateData(covariateData, minFraction = 0.1, normalize = TRUE, removeRedundancy = TRUE)
# Test: most prevalent covariate in analysis 1 is dropped:
expect_true(nrow(filter(tidy$covariates, covariateId == 10001) %>% collect()) == 0)
# Test: infrequent covariate in analysis 1 isn't dropped:
expect_true(nrow(filter(tidy$covariates, covariateId == 1001) %>% collect()) != 0)
# Test: infrequent covariate is dropped:
expect_true(nrow(filter(tidy$covariates, covariateId == 1003) %>% collect()) == 0)
# Test: frequent covariate isn't dropped:
expect_true(nrow(filter(tidy$covariates, covariateId == 40002) %>% collect()) != 0)
})
test_that("tidyCovariateData on Temporal Data", {
skip_if_not(runTestsOnEunomia)
covariateSettings <- createTemporalCovariateSettings(
useDrugExposure = TRUE,
temporalStartDays = -2:-1,
temporalEndDays = -2:-1
)
covariateData <- getDbCovariateData(
connection = eunomiaConnection,
cdmDatabaseSchema = eunomiaCdmDatabaseSchema,
cohortId = 1,
covariateSettings = covariateSettings
)
tidy <- tidyCovariateData(covariateData)
expect_equal(length(tidy$analysisRef$analysisId), length(covariateData$analysisRef$analysisId))
})
| /tests/testthat/test-tidyCovariates.R | permissive | OHDSI/FeatureExtraction | R | false | false | 3,462 | r |
#' M squared of the return distribution
#'
#' M squared is a risk adjusted return useful to judge the size of relative
#' performance between differents portfolios. With it you can compare portfolios
#' with different levels of risk.
#'
#' \deqn{M^2 = r_P + SR * (\sigma_M - \sigma_P) = (r_P - r_F) * \frac{\sigma_M}{\sigma_P} + r_F}{M squared = Rp + SR * (Market risk - Portfolio risk) = (Rp - Rf) * Market risk / Portfolio risk + Rf}
#'
#' where \eqn{r_P} is the portfolio return annualized, \eqn{\sigma_M} is the market
#' risk and \eqn{\sigma_P} is the portfolio risk
#'
#' @aliases MSquared
#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of
#' asset return
#' @param Rb return vector of the benchmark asset
#' @param Rf risk free rate, in same period as your returns
#' @param \dots any other passthru parameters
#' @author Matthieu Lestel
#' @references Carl Bacon, \emph{Practical portfolio performance measurement
#' and attribution}, second edition 2008 p.67-68
#'
#' @keywords ts multivariate distribution models
#' @examples
#'
#' data(portfolio_bacon)
#' print(MSquared(portfolio_bacon[,1], portfolio_bacon[,2])) #expected 0.10062
#'
#' data(managers)
#' print(MSquared(managers['1996',1], managers['1996',8]))
#' print(MSquared(managers['1996',1:5], managers['1996',8]))
#'
#' @export
MSquared <-
function (Ra, Rb, Rf = 0, ...)
{
Ra = checkData(Ra)
Rb = checkData(Rb)
if (ncol(Ra)==1 || is.null(Ra) || is.vector(Ra)) {
calcul = FALSE
for (i in (1:length(Ra))) {
if (!is.na(Ra[i])) {
calcul = TRUE
}
}
if (calcul) {
Period = Frequency(Ra)
Rp = (prod(1 + Ra))^(Period / length(Ra)) - 1
sigp = sqrt(var(Ra)*(length(Ra)-1)/length(Ra))*sqrt(Period)
sigm = sqrt(var(Rb)*(length(Rb)-1)/length(Rb))*sqrt(Period)
result = (Rp - Rf) * sigm / sigp + Rf
}
else {
result = NA
}
return(result)
}
else {
result = apply(Ra, MARGIN = 2, MSquared, Rb = Rb, Rf = Rf, ...)
result<-t(result)
colnames(result) = colnames(Ra)
rownames(result) = paste("MSquared (Risk free = ",Rf,")", sep="")
return(result)
}
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2014 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
| /R/MSquared.R | no_license | guillermozbta/portafolio-master | R | false | false | 2,658 | r |
# Wrapping: main wrapper script
# QDT 2015 April 09
# Tim Szewczyk
# This script sets the parameters and runs the simulation function.
##########
## set up workspace
##########
setwd("~/Desktop/QDT_wrap/")
source("02-FnSim.R")
source("03-FnAux.R")
set.seed(172) # for maximum reproducibility
##########
## storage switches
##########
storeSims <- TRUE # write abundances, lambdas, parameters to files
##########
## set parameters
##########
###--- main parameters
parList <- list(N0=100, # initial population size
K=100, # carrying capacity
r=0.75, # average log(proportional growth rate)
s2=0.5, # variance in log(proportional growth rate)
Ne=0.5, # quasi-extinction threshold
sims=1000, # number of simulations
maxt=100) # maximum time
###--- meta-parameters
param <- "r" # parameter to vary
low <- 0.01 # low value for parameter range
high <- 2 # high value for parameter range
parLen <- 6 # number of values for varied parameter
logSeq <- FALSE # make parameter values distributed along a log scale
##########
## run simulation
##########
# make a parameter sequence to vary across simulation sets
parSeq <- makeParSeq(param=param, low=low, high=high,
len=parLen, logSeq=logSeq)
# simulation loop
for(p in 1:parLen) {
# update varied parameter
parList[names(parList)==param] <- parSeq[p]
# simulate
sim.out <- popSim(parList)
# write data to files
if(storeSims) {
writeOutputAndPars(sim.out=sim.out,
param=param,
parList=parList,
dirNum=p)
}
# progress
cat("Finished parameter set", p, "of", parLen, "\n")
}
| /01-Main.R | no_license | Sz-Tim/QDT_wrapping | R | false | false | 1,807 | r |
## The following functions are used to create a special object that
## stores a matrix and caches its inverse. The first function,
## makeCacheMatrix creates a special “matrix”, which is really a list
## containing a function to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse
## 4. get the value of the inverse
## This function creates a special object that stores a matrix and
## caches its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special “matrix” returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then cacheSolve should retrieve the
## inverse from the cache.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
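## A minimal usage sketch (hypothetical values, not part of the assignment code):
## the second cacheSolve() call should print "getting cached data", because the
## inverse computed by the first call was stored in the cache.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)   # computes the inverse and caches it
cacheSolve(m)   # retrieves the cached inverse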
| /cachematrix.R | no_license | amjonas/ProgrammingAssignment2 | R | false | false | 1,228 | r |
# File name: g_1_PennWorldTable_gdppc_QoQ
# Description: Calculate population growth rate and GDP growth rate using
# constant samples between two periods.
# Author: Shijie Shi
# Last updated: 08/06/2018
rm(list = ls())
library(xlsx)
library(foreign)
setwd("R:/Shi/Project_AK's book update_CollapseAndRevival/output")
calc_gr <- function( inFile, outFile, gap) { # need to get qoq, gap = 1
df <- read.csv( inFile )
df <- df[, -1]
vdf <- !is.na(df)
v <- vdf
v [v==1] = 0
col = dim(outFile)[2]
nloop <- dim(df)[[2]]
for ( i in 1 : nloop ) {
if ( i-gap>=1 ) { #make sure i-gap is within the bound of v0
v[, i] = vdf[,i] * vdf[,i-gap]
}
}
for ( i in 1 : gap) {
v[, i] = v[, i+gap]
}
for ( i in 1 : nloop ) {
outFile[i, col + 1] = sum(v[, i])
}
for ( i in 1 : nloop ) {
if ( i-gap>=1 ) {
gr = sum( df[, i] * v[, i], na.rm = TRUE ) /
sum( df[, i-gap] * v[, i], na.rm = TRUE ) - 1
outFile[i, col + 2] = gr * 100
}
}
return(outFile)
}
calc_gr_weight <- function( inFile, inFile_weight, outFile, gap) { # need to get qoq, gap = 1
df1 <- read.csv( inFile )
df1 <- df1[, -1]
df2 <- read.csv( inFile_weight )
df2 <- df2[, -1]
v1 <- !is.na(df1)
v2 <- !is.na(df2)
v <- v1
v [v==1] = 0
col = dim(outFile)[2]
nloop <- dim(df1)[[2]] # equal to dim(df2)[[2]]
for ( i in 1 : nloop ) {
if ( i-gap>=1 ) { #make sure i-gap is within the bound
v[, i] = v1[,i] * v1[,i-gap] * v2[,i] * v2[,i-gap]
}
}
for ( i in 1 : gap) {
v[, i] = v[, i+gap]
}
for ( i in 1 : nloop ) {
outFile[i, col + 1] = sum(v[, i])
}
for ( i in 1 : nloop ) {
if ( i-gap>=1 ) {
value = ( df1[, i] * v[ , i] ) / ( df1[, i-gap] * v[ , i] ) - 1
weight = df2[, i] * v[ , i]
outFile[i, col+2] = weighted.mean(value, weight, na.rm = TRUE) * 100
}
}
return (outFile)
}
# Construct a data frame to store results
results <- data.frame( matrix( ncol = 1 ) )
names(results) <- c("Date")
row = 1
for ( i in 1960:2018 ) {
for ( j in 1:4){
results[row, 1] = paste(i, j, sep = "Q")
row = row + 1
}
}
# World
results <- calc_gr("pop_PWT.csv", results, 1)
results <- calc_gr_weight("gdp.csv", "pppwgt_PWT.csv", results, 1)
names(results) <- c("Date", "pop_count", "pop_gr_a", "gdp_count", "gdp_gr_q")
name <- names(results)
write.dta(results, "g_1_PennWorldTrade_gdppc_QoQ_201902.dta")
| /R/g_1_PennWorldTable_gdppc_QoQ.R | no_license | fagan2888/Sample_Code | R | false | false | 2,880 | r |
library(TSdist)
### Name: IntPerDistance
### Title: Integrated Periodogram based dissimilarity
### Aliases: IntPerDistance
### ** Examples
# The objects example.series1 and example.series2 are two
# numeric series of length 100.
data(example.series1)
data(example.series2)
# For information on their generation and shape see
# help page of example.series.
help(example.series)
# Calculate the integrated periodogram based distance between the two series
# using the default parameters.
IntPerDistance(example.series1, example.series2)
| /data/genthat_extracted_code/TSdist/examples/IntPerDistance.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 531 | r |
#Load xlsx
library(xlsx)
# Load the readxl package
library(readxl)
#Getting the right working directory
getwd()
#To observe the frequency; a great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
# Read the sheets, one by one
LS <- read_excel('Lawsuits.xlsx', sheet = 'Lawsuits')
LS
#No missing value found in the entire dataset
sum(is.na(LS))
library(moments)
library(ggplot2)
library(dplyr)
glimpse(LS)
dim(LS)
str(LS)
#Gender
Sex<-LS$Gender
Sex
Sex<- table(Sex)
Sex
barplot(Sex,main='Gender Distribution',xlab='Gender',ylab='Frequency',col=c('beige','bisque4'))
#Pie chart
pielabels <- sprintf("%s = %3.1f%s", Sex,
100*Sex/sum(Sex), "%")
pie(Sex, labels=pielabels,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Gender distribution")
str(LS)
#Specialty frequency barplot
Sp<-ggplot(data = LS) +geom_bar(mapping = aes(x = Specialty))
Sp + theme(axis.text.x = element_text(angle = 45, hjust = 1))
LS %>%
count(Specialty)
#71 are married
LS %>%
count(`Marital Status`)
#Marital Status
Ms<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Marital Status` )) +scale_colour_brewer(palette = "Set2")
MS<-Ms + theme(axis.text.x = element_text(angle = 45, hjust = 5))
Ms<-Ms+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))
Ms
#Private Attorney frequency
PA<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Private Attorney` ))
PA + theme(axis.text.x = element_text(angle = 45, hjust = 1))
PrivateAt<-LS$`Private Attorney`
PrivateAt
PrivateAt<- table(PrivateAt)
PrivateAt
#barplot(PrivateAt,main='Private Attorney Distribution',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#Insurance
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Count of different insurance types; 30% is unknown
LS%>%
count(Insurance)
#pielabels <- sprintf("%s = %3.1f%s", Ins,
# 100*Ins/sum(Ins), "%")
#pie(Ins, labels=pielabels,
# clockwise=TRUE,
# radius=1,
# border="red",
# cex=0.8,
# main="Insurance distribution")
library(dplyr)
LS%>%
filter(Severity>=6)
#Group by gender
Gender<-LS%>%
group_by(Gender)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Gender
#below we combine 'Unknown' and 'unknown' because of a capitalisation typo
levels(LS$Insurance) <- c(levels(LS$Insurance),'unknown')
LS$Insurance[LS$Insurance=='Unknown'] <- 'unknown'
LS
#Insurance bar plot after correcting 'unknown'
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#B0AFED"), legend.background = element_rect(fill = "#C8EDAF"))
#Summary of Payment by Insurance
Insu <- LS%>%
group_by(Insurance) %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Insu
#Need to make a boxplot of the Insurance
Private <- LS%>%
group_by(`Private Attorney`) %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment)
)
Private
#Checking for private attorney
#Private attorneys are associated with much higher mean and median payments
PRi <-LS%>%
group_by(`Private Attorney`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
PRi
#Group by Specialty
SPEC <-LS%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
#group by severity
SEVEE <-LS%>%
group_by(Severity)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment),
)
SEVEE
kurtosis(LS$Payment)
skewness(LS$Payment)
###########have to see#########
freq(LS$Severity)
PAA<-table(LS$`Private Attorney`)
pielab <- sprintf("%s = %3.1f%s", PAA,
100*PAA/sum(PAA), "%")
#Pie chart of Private Attorney
pie(PAA, labels=pielab,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Private Attorney",
col=c("Green","Pink")
)
legend(1.3, .1, c("Non-Private","Private"), cex = 0.9, fill = c("Green","Pink"))
MARII <-LS%>%
group_by(`Marital Status`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
MARII
LS%>%
count(`Private Attorney`)
#Checking which records have the highest payments
HigestTOLowest<- LS%>%
arrange(desc(Payment))
HigestTOLowest
#Gender: create a box plot
ggplot(LS, aes(Gender, Payment)) +
geom_point() +
geom_smooth()
# box plot of the gender to see outliers and others
g<- ggplot(LS, aes(Gender, Payment)) +
geom_boxplot() +
geom_smooth()
g
#boxplot for insurance
ggplotInsurance <- ggplot(LS, aes(Insurance, Payment)) +
geom_boxplot() +
geom_smooth() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#EDAFEC"), legend.background = element_rect(fill = "#EDAFEC"))
ggplotInsurance
#Hard to read because of the many categories; redraw it later
# Group by age and Payment
by_Age<- LS %>%
group_by(Age,Specialty)
by_Age
#We have to correct the typo 'ObGyn' to 'OBGYN'
levels(LS$Specialty) <- c(levels(LS$Specialty), 'OBGYN')
LS$Specialty[LS$Specialty == 'ObGyn'] <- 'OBGYN'
LS
# Vector of Specialty to examine
specialty1 <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
# Filter
filteredSpecialty <- by_Age %>%
filter(Specialty %in% specialty1)
# Line plot
ggplot(filteredSpecialty, aes(Age, Payment, color = Specialty)) +
geom_line()
#Gender
by_G<- LS %>%
  group_by(Age,Gender)
# Vector of gender levels to examine
genders <- c('Male','Female')
# Filter
filtered_genders <- by_G %>%
  filter(Gender %in% genders)
# Line plot
ggplot(filtered_genders, aes(Age, Payment, color = Gender)) +
  geom_line()
#Faceted line plots of Payment vs Age, one panel per Specialty
by_Se<- LS %>%
  group_by(Age,Gender)
specialties <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
                 'Urological Surgery', 'General Surgery', 'OBGYN',
                 'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
                 'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
                 'Family Practice', 'Dermatology', 'Physical Medicine',
                 'Cardiology', 'Resident', 'Pathology', 'Radiology',
                 'Thoracic Surgery', 'Occupational Medicine')
f <- by_Se %>%
  filter(Specialty %in% specialties)
ggplot(f, aes(Age,Payment)) +
geom_line() +
facet_wrap(~ Specialty,scales = "free_y")
library(moments)
skewness(LS$Payment)
kurtosis(LS$Payment)
jarque.test(LS$Payment)
#count private attorney
LS %>%
count(`Private Attorney`)
#Observe the extreme Payment values, but we can't simply treat them as outliers
ggplot(data = LS) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
#The 3 largest values would count as outliers under a z-score rule (z > 3)
LS %>%
count(cut_width(Payment, 500))
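#Sketch of the z-score check mentioned above (assumes Payment is numeric):
#flag observations more than 3 standard deviations from the mean
z <- (LS$Payment - mean(LS$Payment)) / sd(LS$Payment)
LS[abs(z) > 3, ]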
# Helper that maps an age to a coarse age-group label
AGE <- function(age){
  if (age <= 18){
    return('small')
  } else if (age > 18 & age <= 35){
    return('young')
  } else if (age > 35 & age <= 60){
    return('old')
  } else {
    return('seniorCitizen')
  }
}
fun <- sapply(LS$Age, AGE)
library(psych)
skew(LS$Payment)
quantile(LS$Payment)
#quantile(LS$Age)
#Total payment in the dataset
Total=sum(LS$Payment)
Total
#Family Practice
FamilyP<-LS %>%
filter(Specialty=='Family Practice')
FamilyP
sum(FamilyP$Payment)
MedFP<- median(FamilyP$Payment)
MedFP
MeanFP<-mean(FamilyP$Payment)
MeanFP
#print(paste('Average Family Practice:',MedFP/17))
#General Surgery
GeneralS<-LS %>%
filter(Specialty=='General Surgery')
GeneralS
MedGS<- median(GeneralS$Payment)
MedGS
MeanGS<-mean(GeneralS$Payment)
MeanGS
sum(GeneralS$Payment)
#print(paste('Average General Surgery:',MedGS/14))
#Anesthesiology
Anes<-LS %>%
filter(Specialty=='Anesthesiology')
Anes
MedAN<- median(Anes$Payment)
MedAN
MeanAN<-mean(Anes$Payment)
MeanAN
sum(Anes$Payment)
#Orthopedic Surgery
ORT<-LS %>%
filter(Specialty=='Orthopedic Surgery')
ORT
MedOR<- median(ORT$Payment)
MedOR
MeanOR<-mean(ORT$Payment)
MeanOR
sum(ORT$Payment)
#OBGYN
OBGYN<-LS %>%
filter(Specialty=='OBGYN')
OBGYN
MedOBGYN<- median(OBGYN$Payment)
MedOBGYN
MeanOBGYN<-mean(OBGYN$Payment)
MeanOBGYN
sum(OBGYN$Payment)
#The top 5 specialties account for about two thirds of total payments, but they also include more cases
ProportionofTop5<-(sum(FamilyP$Payment)+sum(GeneralS$Payment)+sum(Anes$Payment)+sum(ORT$Payment)+sum(OBGYN$Payment))/Total
ProportionofTop5
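#Equivalent check via dplyr (sketch; needs dplyr >= 1.0): payment share by
#specialty, then the combined share of the five largest
LS %>%
  count(Specialty, wt = Payment, name = 'pay') %>%
  mutate(share = pay / sum(pay)) %>%
  slice_max(share, n = 5) %>%
  summarise(Top5Share = sum(share))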
#Function to check median,mean,total_sum
SpecialtyFunction <- function(a){
Med<-median(a$Payment)
Mean<-mean(a$Payment)
SUM<-sum(a$Payment)
return (list(Med,Mean,SUM))
}
#Checking statistics for Internal Medicine
IM<-LS%>%
filter(Specialty=='Internal Medicine')
IM
I<-SpecialtyFunction(IM)
I
#checking statistics for Neurology/Neurosurgery
Neuro <- LS%>%
filter(Specialty=='Neurology/Neurosurgery')
Neuro
Neurolo<-SpecialtyFunction(Neuro)
Neurolo
#checking statistics for Emergency Medicine
Emergency <- LS%>%
filter(Specialty=='Emergency Medicine')
Emergency
EMERGEN<- SpecialtyFunction(Emergency)
EMERGEN
#combination of Private attorney and Private insurance
CombinationAllPrivate<-LS%>%
group_by(Insurance,Specialty,`Private Attorney`) %>%
filter(`Private Attorney`==1,Insurance=='Private')
glimpse(CombinationAllPrivate)
#Median and mean is higher than normal
SpecialtyFunction(CombinationAllPrivate)
#Comparing Private to all the values including Private
#hist for All
hist(LS$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of all values vs Payment for Private ")
#hist for combinationAllPrivate
hist(CombinationAllPrivate$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(LS$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(CombinationAllPrivate$Payment),
col = "#99000050", lwd = 2)
#have to remove the y axis as density
#Private attorney
PrivateAttorney1<-LS%>%
filter(`Private Attorney`==1)
PrivateAttorney1
NonPrivateAttorney<-LS%>%
filter(`Private Attorney`==0)
NonPrivateAttorney
#Make PrivateAttorney vs Non-Private
dat <- PrivateAttorney1$Payment
extra_dat <- NonPrivateAttorney$Payment
#Plot
plot(density(dat),col="blue")
lines(density(extra_dat),col="red")
#Histogram of Private vs Non-Private
hist(PrivateAttorney1$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of Private Attorney vs Payment for Non-Private Attorney")
#hist for combinationAllPrivate
hist(NonPrivateAttorney$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(PrivateAttorney1$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(NonPrivateAttorney$Payment),
col = "#99000050", lwd = 2)
#To observe the frequencies; funModeling provides a handy visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
freq(LS)
#basic eda done in one function
basic_eda <- function(data)
{
glimpse(data)
df_status(data)
freq(data)
profiling_num(data)
plot_num(data)
describe(data)
}
basic_eda(LS)
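#Note: expressions inside a function body are not auto-printed, so steps that only
#return a value (rather than printing or plotting) show nothing when basic_eda() runs;
#wrapping those calls in print() would display every result.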
#checking metrics
df_status(LS)
#Not useful now, but it profiles the numerical variables
#data_prof=profiling_num(LS)
#data_prof
#contingency table we can create to see for any two variable
#describe(LS)
#Checking for specialty that is Surgery
Surgery <- LS%>%
filter(Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
Surgery
hist(Surgery$Payment)
SurgeryStats<-SpecialtyFunction(Surgery)
SurgeryStats
#checking for specialty that is medicine
Medicine <- LS%>%
filter(Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
hist(Medicine$Payment)
Medicine
Medicinestats <- SpecialtyFunction(Medicine)
Medicinestats
#Histogram of Surgery vs Medicine
hist(Surgery$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Surgery vs Medicine for Payment")
#hist for Medicine
hist(Medicine$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for surgery,we may take median
abline(v = median(Surgery$Payment),
col = "#00009950", lwd = 2)
#vertical line for Medicine for median ,we may take median
abline(v = median(Medicine$Payment),
col = "#99000050", lwd = 2)
# Histogram Grey Color
hist(Medicine$Payment, col=rgb(0.1,0.1,0.1,0.5), main="Overlapping Histogram", xlab="Payment")
hist(Surgery$Payment, col=rgb(0.8,0.8,0.8,0.5), add=T)
# Histogram Colored (blue and red)
hist(Medicine$Payment, col=rgb(1,0,0,0.5), main="Overlapping Histogram", xlab="Payment")
hist(Surgery$Payment, col=rgb(0,0,1,0.5), add=T)
#Have to make a two histogram together
#Surgery and Private
SurgeryPrivate <- LS%>%
filter(`Private Attorney`==1,Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
SurgeryPrivate
SpecialtyFunction(SurgeryPrivate)
#Medicine and Private not needed
MedicinePrivate <- LS%>%
filter(Insurance=='Private',`Private Attorney`==1,Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
MedicinePrivate
SpecialtyFunction(MedicinePrivate)
#All specialties with a private attorney and private insurance
SpecialityPrivateALL<- LS%>%
filter(`Private Attorney`==1,Insurance=='Private')
SpecialityPrivateALL
SpecialtyFunction(SpecialityPrivateALL)
table(LS$Specialty,LS$Insurance)
#We should merge the two unknown levels into a single 'unknown' level
#Severity
SEVE <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6)
SEVE
#The top 4 severity levels account for 63.9% of total payment; high severity means high payment
sum(SEVE$Payment)/sum(LS$Payment)
SEVELESS <-LS%>%
filter(Severity==1 | Severity==2 | Severity==3 | Severity==4 | Severity==5)
SEVELESS
#The majority of high-severity cases involve a private attorney
SEVE1 <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6,`Private Attorney`==1)
SEVE1
HighSeverityPrivategraph<-table(SEVE$`Private Attorney`)
barplot(HighSeverityPrivategraph,main='Private Attorney Distribution in High Severity',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#SS<- data.frame("HighSeverityAll":HighSeverityAll,"HighSeverityPrivate":HighSeverityPrivate)
#SS
#############Boxplot of less severity and high severity with respect to payment
boxplot(SEVE$Payment,SEVELESS$Payment , xlab="High Severity vs Low Severity",
main="boxplot of High Severity VS Less Severity ",ylab='Payment'
)
plot(density(SEVELESS$Payment))
plot(density(SEVE$Payment))
#See the graph for the relation between pairs of variables (work to be done)
library("DataExplorer")
plot_correlation(LS)
#
library(vcd)
#mosaic(LS, shade=TRUE, legend=TRUE)
#assoc(LS, shade=TRUE)
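#A two-way mosaic is more readable than one for the whole table (sketch, assumes vcd):
#mosaic(~ Gender + `Private Attorney`, data = LS, shade = TRUE)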
#We are grouping ages
AGE1<-LS%>%
filter(Age<18)
AGE1
AGE2 <- LS%>%
filter(Age>=18 & Age<40)
AGE2
AGE3<-LS%>%
filter(Age>=40 & Age<60)
AGE3
AGE4 <- LS%>%
filter(Age>=60)
AGE4
#Taking the median as the average, payments are highest for ages 60 and above
SpecialtyFunction(AGE1)
SpecialtyFunction(AGE2)
SpecialtyFunction(AGE3)
SpecialtyFunction(AGE4)
#High severity is most common for ages 60 and above, followed by the next-oldest group
table(AGE1$Severity)
table(AGE2$Severity)
table(AGE3$Severity)
table(AGE4$Severity)
#AGE and specialty
table(AGE1$Specialty)
table(AGE2$Specialty)
table(AGE3$Specialty)
table(AGE4$Specialty)
freq(AGE1$Specialty)
freq(AGE2$Specialty)
freq(AGE3$Specialty)
freq(AGE4$Specialty)
ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
AAAG1<-ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG1<-AAAG1 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG1<-AAAG1+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 13), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG1
AAAG2<-ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG2<-AAAG2 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG2<-AAAG2+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG2
AAAG3<-ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG3<-AAAG3 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG3<-AAAG3+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.2, colour = "grey10"),
plot.background = element_rect(fill = "#edafaf"), legend.background = element_rect(fill = "#edafaf"))
AAAG3
AAAG4<-ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG4<-AAAG4 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG4<-AAAG4+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 16), axis.title = element_text(size = 14), axis.line = element_line(size = 0.6, colour = "grey10"),
plot.background = element_rect(fill = "#eaafed"), legend.background = element_rect(fill = "#eaafed"))
AAAG4
AAGE1 <-AGE1%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE1
AAGE2 <-AGE2%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE2
AAGE3 <-AGE3%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE3
AAGE4 <-AGE4%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE4
AAGE11 <-AGE1%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE11
AAGE12 <-AGE2%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE12
AAGE13 <-AGE3%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE13
AAGE14 <-AGE4%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE14
#The insurance which is Private and Gender is Female
PF <- LS%>%
filter(Insurance=='Private',Gender=='Female')
PF
dim(PF)
#47.8 percent is private and female
print(34/71)
# Combine both unknown levels together
UNKOINSU<-LS%>%
filter(Insurance=='Unknown'| Insurance=='unknown')
UNKOINSU
count(UNKOINSU)
#36 of the 118 Insurance values are unknown
36/118
#About 30% are unknown
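#Computed directly (sketch): proportion of unknown insurance, robust to either spelling
mean(LS$Insurance %in% c('Unknown', 'unknown'))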
#ggplot(LS,aes(Gender,Payment))+theme(plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
ggplot(LS,aes(Gender,Payment))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
#We have to see mainly which are the columns we have to focus
#hist(LS$Payment,
#col = "#00009950", freq = FALSE, xlab = "Payment",
#main = "Payment vs count")
#abline(v = mean(LS$Payment),
#col = "#00009950", lwd = 2)
#just
#male
Mapay <- LS%>%
filter(Gender=='Male')
#female
Fepay <-LS%>%
filter(Gender=='Female')
#hist for male
hist(Mapay$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment vs count")
#hist for female
hist(Fepay$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for male,we may take median
abline(v = mean(Mapay$Payment),
col = "#00009950", lwd = 2)
#vertical line for female for mean ,we may take median
abline(v = mean(Fepay$Payment),
col = "#99000050", lwd = 2)
##Severity and Gender
LS$Severity<-as.factor(LS$Severity)
LS$Gender<-as.factor(LS$Gender)
LS$Severity
spineplot(LS$Severity,LS$Gender,
xlab = "severity",ylab = "Gender",col=c("blue","green"))
#Insurance and Gender
#There are many unknowns within the male gender; in total about 30% are unknown
LS$Insurance<-as.factor(LS$Insurance)
LS$Insurance
spineplot(LS$Gender,LS$Insurance,ylab = "Insurance",
xlab = "Gender",
col =c("red","green","yellow","purple","orange","blue"))
| /Lawsuits Project- R/R Analysis.R | no_license | AnamikaProjects/Lawsuits-Project-R | R | false | false | 25,421 | r | #Load xlsx
library(xlsx)
# Load the readxl package
library(readxl)
#Getting the right
getwd()
#To oberve the frequency great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
# Read the sheets, one by one
LS <- read_excel('Lawsuits.xlsx', sheet = 'Lawsuits')
LS
#No missing value found in the entire dataset
sum(is.na(LS))
library(moments)
library(ggplot2)
library(dplyr)
glimpse(LS)
dim(LS)
str(LS)
#Gender
Sex<-LS$Gender
Sex
Sex<- table(Sex)
Sex
barplot(Sex,main='Gender Distribution',xlab='Gender',ylab='Frequency',col=c('beige','bisque4'))
#Pie chart
pielabels <- sprintf("%s = %3.1f%s", Sex,
100*Sex/sum(Sex), "%")
pie(Sex, labels=pielabels,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Gender distribution")
str(LS)
#Specialty frequency barplot
Sp<-ggplot(data = LS) +geom_bar(mapping = aes(x = Specialty))
Sp + theme(axis.text.x = element_text(angle = 45, hjust = 1))
LS %>%
count(Specialty)
#71 are married
LS %>%
count(`Marital Status`)
#Maritial Status
Ms<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Marital Status` )) +scale_colour_brewer(palette = "Set2")
MS<-Ms + theme(axis.text.x = element_text(angle = 45, hjust = 5))
Ms<-Ms+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))
Ms
#Private Attory frequency
PA<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Private Attorney` ))
PA + theme(axis.text.x = element_text(angle = 45, hjust = 1))
PrivateAt<-LS$`Private Attorney`
PrivateAt
PrivateAt<- table(PrivateAt)
PrivateAt
#barplot(PrivateAt,main='Private Attorney Distribution',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#Insurance
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Count of different insurance 30% is unkown
LS%>%
count(Insurance)
#pielabels <- sprintf("%s = %3.1f%s", Ins,
# 100*Ins/sum(Ins), "%")
#pie(Ins, labels=pielabels,
# clockwise=TRUE,
# radius=1,
# border="red",
# cex=0.8,
# main="Insurance distribution")
library(dplyr)
LS%>%
filter(Severity>=6)
#Group by gender
Gender<-LS%>%
group_by(Gender)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Gender
#below we combine unkown and Unkown together because there is a typo of capital letters
levels(LS$Insurance) <- c(levels(LS$Insurance),'unkown')
LS$Insurance[LS$Insurance=='Unknown'] <- 'unknown'
LS
#Insurance boxplot after correcting unkown
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#B0AFED"), legend.background = element_rect(fill = "#C8EDAF"))
#Graph of Insurance
Insu <- LS%>%
group_by(Insurance) %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Insu
#Need to make a boxplot of the Insurance
Private <- LS%>%
group_by('Private Attorney') %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment)
)
Private
#Checking for private attorney
#Private is way more in terms of mean and median for payment
PRi <-LS%>%
group_by(`Private Attorney`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
PRi
#Group by Specialty
SPEC <-LS%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
#group by severity
SEVEE <-LS%>%
group_by(Severity)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment),
)
SEVEE
kurtosis(LS$Payment)
skewness(LS$Payment)
###########have to see#########
freq(LS$Severity)
PAA<-table(LS$`Private Attorney`)
pielab <- sprintf("%s = %3.1f%s", PAA,
100*PAA/sum(PAA), "%")
#Pie chart of Private Attorney
pie(PAA, labels=pielab,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Private Attorney",
col=c("Green","Pink")
)
legend(1.3, .1, c("Non-Private","Private"), cex = 0.9, fill = PAA)
MARII <-LS%>%
group_by(`Marital Status`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
MARII
LS%>%
count(`Private Attorney`)
#Checking which are the highest payment
HigestTOLowest<- LS%>%
arrange(desc(Payment))
HigestTOLowest
#Gender i have to create a box plot
ggplot(LS, aes(Gender, Payment)) +
geom_point() +
geom_smooth()
# box plot of the gender to see outliers and others
g<- ggplot(LS, aes(Gender, Payment)) +
geom_boxplot() +
geom_smooth()
g
#boxplot for insurance
ggplotInsurance <- ggplot(LS, aes(Insurance, Payment)) +
geom_boxplot() +
geom_smooth() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#EDAFEC"), legend.background = element_rect(fill = "#EDAFEC"))
ggplotInsurance
#Bad graph because of many categories .redraw it
# Group by age and Payment
by_Age<- LS %>%
group_by(Age,Specialty)
by_Age
#We have to correct the typos for obGyN to OBGYN
levels(LS$Specialty) <- c(levels(LS$Specialty),'ObGyN')
LS$Specialty[LS$Specialty=='ObGyn'] <- 'OBGYN'
LS
# Vector of Specialty to examine
specialty1 <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
# Filter
filteredSpecialty <- by_Age %>%
filter(Specialty %in% specialty1)
# Line plot
ggplot(filteredSpecialty, aes(Age, Payment, color = Specialty)) +
geom_line()
#Gender
by_G<- LS %>%
group_by(Age,Gender)
# Vector of four countries to examine
countries <- c('Male','Female')
# Filter
filtered_4_countries <- by_G %>%
filter(Gender %in% countries)
# Line plot
ggplot(filtered_4_countries, aes(Age, Payment, color = Gender)) +
geom_line()
#F
by_Se<- LS %>%
group_by(Age,Gender)
countries <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
f <- by_Se %>%
filter(Specialty %in% countries)
ggplot(f, aes(Age,Payment)) +
geom_line() +
facet_wrap(~ Specialty,scales = "free_y")
library(moments)
skewness(LS$Payment)
kurtosis(LS$Payment)
jarque.test(LS$Payment)
#count private attorney
LS %>%
count(`Private Attorney`)
#observe the Payment but we can't treat them as outliers
ggplot(data = LS) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
#The 3 numbers in the last are outliers if we do z score and >3
LS %>%
count(cut_width(Payment, 500))
# not getting executed but will try to
#AGE<-function(age){
# if (age<=18){
# return (small)
#}else if(age>18 & age <=35){
# retun (young)
#}else if(age>35 & age <=60){
# return (old)
#}else {
# return(seniorCitzen)
#}
#}
#fun<-lapply(LS$Age,AGE)
library(psych)
skew(LS$Payment)
quantile(LS$Payment)
#quantile(LS$Age)
#Total payment in the dataset
Total=sum(LS$Payment)
Total
#Famiy Practice
FamilyP<-LS %>%
filter(Specialty=='Family Practice')
FamilyP
sum(FamilyP$Payment)
MedFP<- median(FamilyP$Payment)
MedFP
MeanFP<-mean(FamilyP$Payment)
MeanFP
#print(paste('Average Family Practice:',MedFP/17))
#General Surgery
GeneralS<-LS %>%
filter(Specialty=='General Surgery')
GeneralS
MedGS<- median(GeneralS$Payment)
MedGS
MeanGS<-mean(GeneralS$Payment)
MeanGS
sum(GeneralS$Payment)
#print(paste('Average General Surgery:',MedGS/14))
#Anesthesiology
Anes<-LS %>%
filter(Specialty=='Anesthesiology')
Anes
MedAN<- median(Anes$Payment)
MedAN
MeanAN<-mean(Anes$Payment)
MeanAN
sum(Anes$Payment)
#Orthopedic Surgery
ORT<-LS %>%
filter(Specialty=='Orthopedic Surgery')
ORT
MedOR<- median(ORT$Payment)
MedOR
MeanOR<-mean(ORT$Payment)
MeanOR
sum(ORT$Payment)
#OBGYN
OBGYN<-LS %>%
filter(Specialty=='OBGYN')
OBGYN
MedOBGYN<- median(OBGYN$Payment)
MedOBGYN
MeanOBGYN<-mean(OBGYN$Payment)
MeanOBGYN
sum(OBGYN$Payment)
#Top 5 speciality consisits of 67.23 % Payment but they are more in number
ProportionofTop5<-(sum(FamilyP$Payment)+sum(GeneralS$Payment)+sum(OBGYN$Payment)+sum(ORT$Payment)+sum(OBGYN$Payment))/Total
ProportionofTop5
#Function to check median,mean,total_sum
SpecialtyFunction <- function(a){
Med<-median(a$Payment)
Mean<-mean(a$Payment)
SUM<-sum(a$Payment)
return (list(Med,Mean,SUM))
}
#Checking statistics for Internal Medicine
IM<-LS%>%
filter(Specialty=='Internal Medicine')
IM
I<-SpecialtyFunction(IM)
I
#checking statistics for Neurology/Neurosurgery
Neuro <- LS%>%
filter(Specialty=='Neurology/Neurosurgery')
Neuro
Neurolo<-SpecialtyFunction(Neuro)
Neurolo
#checking statistics for Emergency Medicine
Emergency <- LS%>%
filter(Specialty=='Emergency Medicine')
Emergency
EMERGEN<- SpecialtyFunction(Emergency)
EMERGEN
#combination of Private attorney and Private insurance
CombinationAllPrivate<-LS%>%
group_by(Insurance,Specialty,`Private Attorney`) %>%
filter(`Private Attorney`==1,Insurance=='Private')
glimpse(CombinationAllPrivate)
#Median and mean is higher than normal
SpecialtyFunction(CombinationAllPrivate)
#Comparing Private to all the values including Private
#hist for All
hist(LS$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of all values vs Payment for Private ")
#hist for combinationAllPrivate
hist(CombinationAllPrivate$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(LS$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(CombinationAllPrivate$Payment),
col = "#99000050", lwd = 2)
#have to remove the y axis as density
#Private attorney
PrivateAttorney1<-LS%>%
filter(`Private Attorney`==1)
PrivateAttorney1
NonPrivateAttorney<-LS%>%
filter(`Private Attorney`==0)
NonPrivateAttorney
#Make PrivateAttorney vs Non-Private
dat <- PrivateAttorney1$Payment
extra_dat <- NonPrivateAttorney$Payment
#Plot
plot(density(dat),col="blue")
lines(density(extra_dat),col="red")
#Histogram of Private vs Non-Private
hist(PrivateAttorney1$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of Private Attorney vs Payment for Non-Private Attorney")
#hist for combinationAllPrivate
hist(NonPrivateAttorney$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(PrivateAttorney1$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(NonPrivateAttorney$Payment),
col = "#99000050", lwd = 2)
#To oberve the frequency great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
freq(LS)
#basic eda done in one function
basic_eda <- function(data)
{
glimpse(data)
df_status(data)
freq(data)
profiling_num(data)
plot_num(data)
describe(data)
}
basic_eda(LS)
#checking metrices
df_status(LS)
#Not usefull now but is used for numerical variable
#data_prof=profiling_num(LS)
#data_prof
#contingency table we can create to see for any two variable
#describe(LS)
#Checking for specialty that is Surgery
Surgery <- LS%>%
filter(Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
Surgery
hist(Surgery$Payment)
Surgerysats<-SpecialtyFunction(Surgery)
Surgerysats
#checking for specialty that is medicine
Medicine <- LS%>%
filter(Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
hist(Medicine$Payment)
Medicine
Medicinestats <- SpecialtyFunction(Medicine)
Medicinestats
#Histogram of Surgery vs Medicine
hist(Surgery$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Surgery vs Medicine for Payment")
#hist for Medicine
hist(Medicine$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for surgery,we may take median
abline(v = median(Surgery$Payment),
col = "#00009950", lwd = 2)
#vertical line for Medicine for median ,we may take median
abline(v = median(Medicine$Payment),
col = "#99000050", lwd = 2)
# Histogram Grey Color
hist(Medicine, col=rgb(0.1,0.1,0.1,0.5),xlim=c(0,10), ylim=c(0,200), main="Overlapping Histogram")
hist(Surgery, col=rgb(0.8,0.8,0.8,0.5), add=T)
# Histogram Colored (blue and red)
hist(Medicine, col=rgb(1,0,0,0.5),xlim=c(0,10), ylim=c(0,200), main="Overlapping Histogram", xlab="Variable")
hist(Surgery, col=rgb(0,0,1,0.5), add=T)
#Have to make a two histogram together
#Surgery and Private
SurgeryPrivate <- LS%>%
filter(`Private Attorney`==1,Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
SurgeryPrivate
SpecialtyFunction(SurgeryPrivate)
#Medicine and Private not needed
MedicinePrivate <- LS%>%
filter(Insurance=='Private',`Private Attorney`==1,Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
MedicinePrivate
SpecialtyFunction(MedicinePrivate)
#whole prive Speciality
SpecialityPrivateALL<- LS%>%
filter(`Private Attorney`==1,Insurance=='Private')
SpecialityPrivateALL
SpecialtyFunction(SpecialityPrivateALL)
table(LS$Specialty,LS$Insurance)
#We should merge the two unkown columns into one column unknown
#Severity
SEVE <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6)
SEVE
#Proportion of top 4 severity consist of 63.9 % of the payment..High severity means high payment
sum(SEVE$Payment)/sum(LS$Payment)
SEVELESS <-LS%>%
filter(Severity==1 | Severity==2 | Severity==3 | Severity==4 | Severity==5)
SEVELESS
#Majority of high severity is done by private attorney
SEVE1 <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6,`Private Attorney`==1)
SEVE1
HighSeverityPrivategraph<-table(SEVE$`Private Attorney`)
barplot(HighSeverityPrivategraph,main='Private Attorney Distribution in High Severity',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#SS<- data.frame("HighSeverityAll":HighSeverityAll,"HighSeverityPrivate":HighSeverityPrivate)
#SS
#############Boxplot of less severity and high severity with respect to payment
boxplot(SEVE$Payment,SEVELESS$Payment , xlab="High Severity vs Low Severity",
main="boxplot of High Severity VS Less Severity ",ylab='Payment'
)
plot(density(SEVELESS$Payment))
plot(density(SEVE$Payment))
#See the graph to see relation between two variabe (work to be done)
library("DataExplorer")
plot_correlation(LS)
#
library(vcd)
#mosaic(LS, shade=TRUE, legend=TRUE)
#ssoc(LS, shade=TRUE)
#We are grouping ages
AGE1<-LS%>%
filter(Age<18)
AGE1
AGE2 <- LS%>%
filter(Age>=18 & Age<40)
AGE2
AGE3<-LS%>%
filter(Age>=40 & Age<60)
AGE3
AGE4 <- LS%>%
filter(Age>=60)
AGE4
#Here average we are taking median is more in 60 and above
SpecialtyFunction(AGE1)
SpecialtyFunction(AGE2)
SpecialtyFunction(AGE3)
SpecialtyFunction(AGE4)
#More severity is more in 60 and above followed by 35+
table(AGE1$Severity)
table(AGE2$Severity)
table(AGE3$Severity)
table(AGE4$Severity)
#AGE and specialty
table(AGE1$Specialty)
table(AGE2$Specialty)
table(AGE3$Specialty)
table(AGE4$Specialty)
freq(AGE1$Specialty)
freq(AGE2$Specialty)
freq(AGE3$Specialty)
freq(AGE4$Specialty)
ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
AAAG1<-ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG1<-AAAG1 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG1<-AAAG1+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 13), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG1
AAAG2<-ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG2<-AAAG2 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG2<-AAAG2+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG2
AAAG3<-ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG3<-AAAG3 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG3<-AAAG3+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.2, colour = "grey10"),
plot.background = element_rect(fill = "#edafaf"), legend.background = element_rect(fill = "#edafaf"))
AAAG3
AAAG4<-ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG4<-AAAG4 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG4<-AAAG4+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 16), axis.title = element_text(size = 14), axis.line = element_line(size = 0.6, colour = "grey10"),
plot.background = element_rect(fill = "#eaafed"), legend.background = element_rect(fill = "#eaafed"))
AAAG4
AAGE1 <-AGE1%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE1
AAGE2 <-AGE2%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE2
AAGE3 <-AGE3%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE3
AAGE4 <-AGE4%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE4
AAGE11 <-AGE1%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE11
AAGE12 <-AGE2%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE12
AAGE13 <-AGE3%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE13
AAGE14 <-AGE4%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE14
#The insurance which is Private and Gender is Female
PF <- LS%>%
filter(Insurance=='Private',Gender=='Female')
PF
dim(PF)
#47.8 percent is private and female
print(34/71)
# Mix both unkown together
UNKOINSU<-LS%>%
filter(Insurance=='Unknown'| Insurance=='unknown')
UNKOINSU
count(UNKOINSU)
#36 values are unkown for Insurance ,out of 118
36/118
#30% are unkown
#ggplot(LS,aes(Gender,Payment))+theme(plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
ggplot(LS,aes(Gender,Payment))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
#We have to see mainly which are the columns we have to focus
#hist(LS$Payment,
#col = "#00009950", freq = FALSE, xlab = "Payment",
#main = "Payment vs count")
#abline(v = mean(LS$Payment),
#col = "#00009950", lwd = 2)
#just
#male
Mapay <- LS%>%
filter(Gender=='Male')
#female
Fepay <-LS%>%
filter(Gender=='Female')
#hist for male
hist(Mapay$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment vs count")
#hist for female
hist(Fepay$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for male,we may take median
abline(v = mean(Mapay$Payment),
col = "#00009950", lwd = 2)
#vertical line for female for mean ,we may take median
abline(v = mean(Fepay$Payment),
col = "#99000050", lwd = 2)
##Severity and Gender
LS$Severity<-as.factor(LS$Severity)
LS$Gender<-as.factor(LS$Gender)
LS$Severity
spineplot(LS$Severity,LS$Gender,
xlab = "severity",ylab = "Gender",col=c("blue","green"))
#Insurance and Gender
#There are many unknowns in Male gender In total 30 % are unknown
LS$Insurance<-as.factor(LS$Insurance)
LS$Insurance
spineplot(LS$Gender,LS$Insurance,ylab = "Insurance",
xlab = "Gender",
col =c("red","green","yellow","purple","orange","blue"))
|
#' @title Party System Nationalization Score
#'
#' @description Party System Nationalization Score Mainwaring and Jones (2003) and Chhibber and Kollman (2004)
#'
#' @param tidy_data data.frame that contains the following variables with these names:
#' \itemize{
#' \item{\code{election}: year of election.}
#' \item{\code{unit}: the unit of analysis (province, department ...)}
#' \item{\code{party}: name of the political parties that obtain votes.}
#' \item{\code{votes}: votes obtained by each party.}
#' \item{\code{votes_nac}: votes at national level for each party.}
#' }
#' If the data is not structured in this way you can order it with: \code{\link{convert_esaps}}.
#'
#' @param method Method to calculate Party System Nationalization Score:
#' \itemize{
#' \item{\code{method = "Mainwaring and Jones"} or \code{method = 1}.}
#' \item{\code{method = "Chhibber and Kollman"} or \code{method = 2}.}
#' }
#'
#' @param pns by default it is \code{FALSE}. If \code{TRUE}, the Party Nationalization Score
#' is calculated. In method, you must indicate: \code{method = 1}.
#'
#' @param scale By default it is \code{100}, the indices will vary between 0 and 100.
#' If \code{scale = 1} the variation will be between 0 and 1.
#'
#'
#'
#' @return if \code{pns = FALSE,} return data.frame.
#'
#' if \code{pns = TRUE}, return a list with two data.frame.
#' \itemize{
#' \item{\code{list[[1]]}} {PSNS: Party System Nationalization Score}
#' \item{\code{list[[2]]}} {PNS: Party Nationalization Score}
#' }
#'
#' @author Nicolas Schmidt \email{nschmidt@cienciassociales.edu.uy}
#'
#'
#'
#' @examples
#' votes <- data.frame(election = rep(2000,4),
#' unit = rep(c("District_1", "District_2"), each = 2),
#' party = rep(c("party_A", "party_B"), 2),
#' votes = c(0.60,0.40, 0.30, 0.70),
#' votes_nac = rep(c(0.55,0.45),2)
#' )
#' psns(tidy_data = votes, method = 1)
#' psns(tidy_data = votes, method = 1, pns = TRUE)
#'
#' @export
#'
psns <- function(tidy_data,
method,
pns = FALSE,
scale = 100){
tidy_data <- tidy_data[, names(tidy_data) %in% c('election','unit', 'party', 'votes', 'votes_nac')]
tidy_data <- stats::na.omit(tidy_data)
if(!is.data.frame(tidy_data)){stop("'tidy_data' must be a 'data.frame'.", call. = FALSE)}
if(missing(method)){stop("You must select a method.", call. = FALSE)}
if(sum(is.na(tidy_data[, 1:2])) != 0){stop("The variables 'election' and 'unit' must not have NA values.", call. = FALSE)}
if(length(method) > 1){stop("You must select only one method.", call. = FALSE)}
ch.met <- c("Mainwaring and Jones", "Chhibber and Kollman")
nu.met <- c(1, 2)
if(!any(method == ch.met | method == nu.met)){stop("the selected method does not exist.", call. = FALSE)}
if(scale != 100 && scale != 1){stop("The value of 'scale' is not correct.", call. = FALSE)}
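# Split the data by election and unit, then rescale votes and national votes so
# that the shares within each unit sum to `scale` (100 by default, or 1).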
vscale <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$unit)}), recursive = FALSE)
tidy_data <- lapply(vscale, function(x){cbind.data.frame(x,
t.votes = (x$votes/sum(x$votes, na.rm=TRUE))*scale,
t.votes_nac = (x$votes_nac/sum(x$votes_nac, na.rm=TRUE))*scale )})
tidy_data <- do.call(rbind, lapply(tidy_data, "[", -c(4:5)))
rownames(tidy_data) <- NULL
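# Mainwaring & Jones: a party-level nationalization score is obtained from the Gini
# of each party's vote shares across units (via gini_esaps); the system-level score
# (PSNS) then weights each party's score by its national vote share and sums.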
if(method == "Mainwaring and Jones" || method == 1){
v1 <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$party)}), recursive = FALSE)
v2 <- lapply(v1, function(x){cbind(x, pns = apply(x[4], 2, gini_esaps))})
pns1 <- do.call(rbind, lapply(v2, function(x){x[1,-2]}))
psns <- lapply(split(pns1, pns1$election), function(x){cbind(x, psns = round(sum(x$t.votes_nac*x$pns),3))})
psns <- do.call(rbind,lapply(psns, "[", 1, c(1, 6)))
rownames(psns) <- NULL
if(isTRUE(pns)){
pn <- pns1[order(pns1$party), c(2,1,5)]
rownames(pn) <- NULL
return(list(PSNS = psns, PNS = pn))
}
return(psns)
}
if(method == "Chibber and Kollman" || method == 2){
nep_nac <- lapply(lapply(split(tidy_data, tidy_data$election),
function(x){x[duplicated(x$party) == FALSE, ]}),
function(x){ENP(x$t.votes_nac)})
nep_loc <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$unit)}), recursive = FALSE)
nep_loc <- do.call(rbind,lapply(nep_loc, function(x){cbind(x, nepl = ENP(x$t.votes))}))
out <- lapply(split(nep_loc, nep_loc$election), function(x){mean(x[duplicated(x$unit) == FALSE, "nepl"])})
output <- data.frame(CH_K = round(unlist(nep_nac)-unlist(out),3))
output
}
}
| /R/psns.R | no_license | Nicolas-Schmidt/esaps | R | false | false | 5,388 | r | #' @title Party System Nationalization Score
#'
#' @description Party System Nationalization Score Mainwaring and Jones (2003) and Chhibber and Kollman (2004)
#'
#' @param tidy_data data.frame that contains the following variables with these names:
#' \itemize{
#' \item{\code{election}: year of election.}
#' \item{\code{unit}: the unit of analysis (province, department ...)}
#' \item{\code{party}: name of the political parties that obtain votes.}
#' \item{\code{votes}: votes obtained by each party.}
#' \item{\code{votes_nac}: votes at national level for each party.}
#' }
#' If the data is not structured in this way you can order it with: \code{\link{convert_esaps}}.
#'
#' @param method Method to calculate Party System Nationalization Score:
#' \itemize{
#' \item{\code{method = "Mainwaring and Jones"} or \code{method = 1}.}
#' \item{\code{method = "Chhibber and Kollman"} or \code{method = 2}.}
#' }
#'
#' @param pns by default it is \code{FALSE}. If \code{TRUE}, the Party Nationalization Score
#' is calculated. In method, you must indicate: \code{method = 1}.
#'
#' @param scale By default it is \code{100}, the indices will vary between 0 and 100.
#' If \code{scale = 1} the variation will be between 0 and 1.
#'
#'
#'
#' @return if \code{pns = FALSE,} return data.frame.
#'
#' if \code{pns = TRUE}, return a list with two data.frame.
#' \itemize{
#' \item{\code{list[[1]]}} {PSNS: Party System Nationalization Score}
#' \item{\code{list[[2]]}} {PNS: Party Nationalization Score}
#' }
#'
#' @author Nicolas Schmidt \email{nschmidt@cienciassociales.edu.uy}
#'
#'
#'
#' @examples
#' votes <- data.frame(election = rep(2000,4),
#' unit = rep(c("District_1", "District_2"), each = 2),
#' party = rep(c("party_A", "party_B"), 2),
#' votes = c(0.60,0.40, 0.30, 0.70),
#' votes_nac = rep(c(0.55,0.45),2)
#' )
#' psns(tidy_data = votes, method = 1)
#' psns(tidy_data = votes, method = 1, pns = TRUE)
#'
#' @export
#'
psns <- function(tidy_data,
method,
pns = FALSE,
scale = 100){
tidy_data <- tidy_data[, names(tidy_data) %in% c('election','unit', 'party', 'votes', 'votes_nac')]
tidy_data <- stats::na.omit(tidy_data)
if(!is.data.frame(tidy_data)){stop("'tidy_data' must be a 'data.frame'.", call. = FALSE)}
if(missing(method)){stop("You must select only one method.", call. = FALSE)}
if(sum(is.na(tidy_data[, 1:2])) != 0){stop("The variable 'election'and 'unit' must not have NA values.", call. = FALSE)}
if(length(method) > 1){stop("you must select only one method.", call. = FALSE)}
ch.met <- c("Mainwaring and Jones", "Chibber and Kollman")
nu.met <- c(1, 2)
if(!any(method == ch.met | method == nu.met)){stop("the selected method does not exist.", call. = FALSE)}
if(scale != 100 && scale != 1){stop("The value of 'scale' is not correct.", call. = FALSE)}
vscale <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$unit)}), recursive = FALSE)
tidy_data <- lapply(vscale, function(x){cbind.data.frame(x,
t.votes = (x$votes/sum(x$votes, na.rm=TRUE))*scale,
t.votes_nac = (x$votes_nac/sum(x$votes_nac, na.rm=TRUE))*scale )})
tidy_data <- do.call(rbind, lapply(tidy_data, "[", -c(4:5)))
rownames(tidy_data) <- NULL
if(method == "Mainwaring and Jones" || method == 1){
v1 <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$party)}), recursive = FALSE)
v2 <- lapply(v1, function(x){cbind(x, pns = apply(x[4], 2, gini_esaps))})
pns1 <- do.call(rbind, lapply(v2, function(x){x[1,-2]}))
psns <- lapply(split(pns1, pns1$election), function(x){cbind(x, psns = round(sum(x$t.votes_nac*x$pns),3))})
psns <- do.call(rbind,lapply(psns, "[", 1, c(1, 6)))
rownames(psns) <- NULL
if(isTRUE(pns)){
pn <- pns1[order(pns1$party), c(2,1,5)]
rownames(pn) <- NULL
return(list(PSNS = psns, PNS = pn))
}
return(psns)
}
if(method == "Chibber and Kollman" || method == 2){
nep_nac <- lapply(lapply(split(tidy_data, tidy_data$election),
function(x){x[duplicated(x$party) == FALSE, ]}),
function(x){ENP(x$t.votes_nac)})
nep_loc <- unlist(lapply(split(tidy_data, tidy_data$election), function(x){split(x, x$unit)}), recursive = FALSE)
nep_loc <- do.call(rbind,lapply(nep_loc, function(x){cbind(x, nepl = ENP(x$t.votes))}))
out <- lapply(split(nep_loc, nep_loc$election), function(x){mean(x[duplicated(x$unit) == FALSE, "nepl"])})
output <- data.frame(CH_K = round(unlist(nep_nac)-unlist(out),3))
output
}
}
|
#' Simple Perceptron Model
#'
#' This function implements the simple perceptron algorithm on 2D, linearly separable data
#' @param features data.frame consisting of x1 and x2
#' @param class data.frame/vector of classes/labels
#' @param eta numerical value of the learning rate
#' @keywords perceptron
#' @keywords ml
#' @export
#' @examples
#' df <- generate.perceptron.data(100,0)
#' simple.perceptron(df[,c(3,4)], df$class, 1)
simple.perceptron <- function(features, classes, eta) {
# create an initial weight vector of 0's for x0, x1, and x2
weight <- rep(0, dim(features)[2] + 1)
# by default, the perceptron has not converged
nonconvergence <- TRUE
# keep track of number of epochs
num.epochs <- 0
# loop until the perceptron converges, that is
# when all samples in an epoch produce no error
while(nonconvergence) {
# keep track of errors during each epoch
errors <- rep(0, length(classes))
num.epochs <- num.epochs + 1
for (i in 1:length(classes)) {
y.target <- classes[i]
# Find the sum of x0w0 + x1w1 + x2w2
x0w0 <- weight[1]
train.sample <- as.numeric(features[i, ])
train.weight <- weight[2:length(weight)]
x1w1_x2w2 <- sum(train.weight * train.sample)
z <- sum(x0w0, x1w1_x2w2)
if (z < 0) {
y.classifier <- -1
} else {
y.classifier <- 1
}
# compare y.target and y.classifier
if (y.target != y.classifier) {
# change weight vector -> eta * delta * train.sample
delta <- y.target - y.classifier
weight.delta <- eta * delta * c(1, train.sample)
weight <- weight + weight.delta
errors[i] <- 1
}
}
if (sum(errors) == 0) {
break
}
}
print(paste0('The weights of convergence: ', paste0(weight, collapse=',')))
print(paste0('Convergence achieved in ', num.epochs, ' epochs.'))
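  # returning the learned weights invisibly lets callers reuse them,
  # e.g. to plot the decision boundary afterwards
  invisible(weight)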
}
#
# plot(df[, 3:4], xlab = "x1", ylab = "x2",
# pch = ifelse(df$class == 1, 2, 8),
# col = ifelse(df$class == 1, "blue", "red"))
# abline(0/9.86696618323493, -9.7127537728112/9.86696618323493)
| /simple.perceptron.R | no_license | Meznah89/R_HW1 | R | false | false | 2,082 | r | #' Simple Perceptron Model
#'
#' This function implements the simple perceptron algorithm on 2D, linearly separable data
#' @param features data.frame consisting of x1 and x2
#' @param class data.frame/vector of classes/labels
#' @param eta numerical value of the learning rate
#' @keywords perceptron
#' @keywords ml
#' @export
#' @examples
#' df <- generate.perceptron.data(100,0)
#' simple.perceptron(df[,c(3,4)], df$class, 1)
simple.perceptron <- function(features, classes, eta) {
# create an initial weight vector of 0's for x0, x1, and x2
weight <- rep(0, dim(features)[2] + 1)
# by default, the percetron has not converged
nonconvergence <- TRUE
# keep track of number of epochs
num.epochs <- 0
# loop until the perceptron converges, that is
# when all samples in an epoch produce no error
while(nonconvergence) {
# keep track of errors during each epoch
errors <- rep(0, length(classes))
num.epochs <- num.epochs + 1
for (i in 1:length(classes)) {
y.target <- classes[i]
# Find the sum of x0w0 + x1w1 + x2w2
x0w0 <- weight[1]
train.sample <- as.numeric(features[i, ])
train.weight <- weight[2:length(weight)]
x1w1_x2w2 <- sum(train.weight * train.sample)
z <- sum(x0w0, x1w1_x2w2)
if (z < 0) {
y.classifier <- -1
} else {
y.classifier <- 1
}
# compare y.target and y.classifier
if (y.target != y.classifier) {
# change weight vector -> eta * delta * train.sample
delta <- y.target - y.classifier
weight.delta <- eta * delta * c(1, train.sample)
weight <- weight + weight.delta
errors[i] <- 1
}
}
if (sum(errors) == 0) {
break
}
}
print(paste0('The weights of convergence: ', paste0(weight, collapse=',')))
print(paste0('Convergence achieved in ', num.epochs, ' epochs.'))
}
#
# plot(df[, 3:4], xlab = "x1", ylab = "x2",
# pch = ifelse(df$class == 1, 2, 8),
# col = ifelse(df$class == 1, "blue", "red"))
# abline(0/9.86696618323493, -9.7127537728112/9.86696618323493)
|
#' @export
#' @rdname slice
slice_head <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
UseMethod("slice_head")
}
#' @export
slice_head.tidytable <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
n <- enquo(n)
dt_env <- get_dt_env(n)
n <- prep_expr(n)
.by <- tidyselect_names(.df, c({{ .by }}, {{ by }}))
i <- expr(seq_len(min(!!n, .N)))
dt_expr <- call2_i(.df, i, .by)
eval_tidy(dt_expr, env = dt_env)
}
#' @export
slice_head.grouped_tt <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.by <- group_vars(.df)
out <- ungroup(.df)
out <- slice_head(out, {{ n }}, .by = any_of(.by))
group_by(out, any_of(.by))
}
#' @export
slice_head.data.frame <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.df <- as_tidytable(.df)
slice_head(.df, {{ n }}, .by = {{ .by }}, by = {{ by }})
}
#' @export
#' @rdname slice
slice_tail <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
UseMethod("slice_tail")
}
#' @export
slice_tail.tidytable <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
n <- enquo(n)
dt_env <- get_dt_env(n)
n <- prep_expr(n)
.by <- tidyselect_names(.df, c({{ .by }}, {{ by }}))
i <- expr(rlang::seq2(.N - min(!!n, .N) + 1, .N))
dt_expr <- call2_i(.df, i, .by)
eval_tidy(dt_expr, env = dt_env)
}
#' @export
slice_tail.grouped_tt <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.by <- group_vars(.df)
out <- ungroup(.df)
out <- slice_tail(out, {{ n }}, .by = any_of(.by))
group_by(out, any_of(.by))
}
#' @export
slice_tail.data.frame <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.df <- as_tidytable(.df)
slice_tail(.df, {{ n }}, .by = {{ .by }}, by = {{ by }})
}
| /R/slice-head-tail.R | permissive | markfairbanks/tidytable | R | false | false | 1,683 | r | #' @export
#' @rdname slice
slice_head <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
UseMethod("slice_head")
}
#' @export
slice_head.tidytable <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
n <- enquo(n)
dt_env <- get_dt_env(n)
n <- prep_expr(n)
.by <- tidyselect_names(.df, c({{ .by }}, {{ by }}))
i <- expr(seq_len(min(!!n, .N)))
dt_expr <- call2_i(.df, i, .by)
eval_tidy(dt_expr, env = dt_env)
}
#' @export
slice_head.grouped_tt <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.by <- group_vars(.df)
out <- ungroup(.df)
out <- slice_head(out, {{ n }}, .by = any_of(.by))
group_by(out, any_of(.by))
}
#' @export
slice_head.data.frame <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.df <- as_tidytable(.df)
slice_head(.df, {{ n }}, .by = {{ .by }}, by = {{ by }})
}
#' @export
#' @rdname slice
slice_tail <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
UseMethod("slice_tail")
}
#' @export
slice_tail.tidytable <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
n <- enquo(n)
dt_env <- get_dt_env(n)
n <- prep_expr(n)
.by <- tidyselect_names(.df, c({{ .by }}, {{ by }}))
i <- expr(rlang::seq2(.N - min(!!n, .N) + 1, .N))
dt_expr <- call2_i(.df, i, .by)
eval_tidy(dt_expr, env = dt_env)
}
#' @export
slice_tail.grouped_tt <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.by <- group_vars(.df)
out <- ungroup(.df)
out <- slice_tail(out, {{ n }}, .by = any_of(.by))
group_by(out, any_of(.by))
}
#' @export
slice_tail.data.frame <- function(.df, n = 5, ..., .by = NULL, by = NULL) {
.df <- as_tidytable(.df)
slice_tail(.df, {{ n }}, .by = {{ .by }}, by = {{ by }})
}
|
\name{filterNasFromMatrix}
\alias{filterNasFromMatrix}
\title{removes rows and or columns from a matrix that contain NA values}
\usage{
filterNasFromMatrix(dataMatrix, filterBy = "rows")
}
\description{
removes rows and or columns from a matrix that contain NA
values
}
\author{
Adam Margolin
}
| /man/filterNasFromMatrix.Rd | permissive | anlopezl/predictiveModeling | R | false | false | 304 | rd | \name{filterNasFromMatrix}
#' Compute predicted values
#'
#' Computes predicted values of interval incidence, cumulative incidence,
#' and the per capita growth rate, conditional on observed data and a fitted
#' nonlinear mixed effects model of epidemic growth.
#'
#' @param object
#' An \code{"\link{egf}"} object.
#' @param what
#' A character vector listing one or more variables for which
#' predicted values are sought.
#' @param time
#' A numeric vector supplying time points at which predicted
#' values are sought. \link{Date} and \link{POSIXt} vectors are
#' tolerated and coerced to numeric with \code{\link{julian}(time)}.
#' When \link{time} is missing, time points stored in
#' \code{\link[=model.frame.egf]{model.frame}(object)} are reused,
#' and \code{window} is ignored.
#' @param window
#' A factor of length \code{length(time)} grouping the elements
#' of \code{time} by fitting window.
#' Levels not found in
#' \code{levels(\link[=model.frame.egf]{model.frame}(object)$window)}
#' are ignored.
#' @param log
#' A logical flag. If \code{FALSE},
#' then inverse log-transformed predicted values are returned.
#' @param se
#' A logical flag. If \code{se = TRUE} and \code{log = TRUE},
#' then approximate delta method standard errors on predicted values
#' are reported.
#' Standard errors are required for subsequent use
#' of \code{\link{confint.egf_predict}}.
#' Setting \code{se = TRUE} and \code{log = FALSE} is an error,
#' as standard errors are not available for inverse log-transformed
#' predicted values.
#' @param ...
#' Unused optional arguments.
#'
#' @details
#' In the result, \code{estimate[i]} can be interpreted as follows,
#' for \code{log = FALSE}:
#' \describe{
#' \item{\code{interval}}{
#' The expected number of cases observed from \code{time[i-1]}
#' to \code{time[i]} in \code{window[i]} (interval incidence).
#' }
#' \item{\code{cumulative}}{
#' The expected number of cases observed up to \code{time[i]}
#' in \code{window[i]} (cumulative incidence).
#' }
#' \item{\code{rt}}{
#' The predicted per capita growth rate at \code{time[i]}.
#' This is computed exactly from the differential equation model
#' associated with \code{object$model$curve}.
#' }
#' }
#'
#' @return
#' A data frame inheriting from class \code{"egf_predict"}, with variables:
#' \item{var}{
#' Predicted variable, from \code{what}.
#' }
#' \item{ts}{
#' Time series, from
#' \code{levels(\link[=model.frame.egf]{model.frame}(object)$ts)}.
#' }
#' \item{window}{
#' Fitting window, from
#' \code{levels(\link[=model.frame.egf]{model.frame}(object)$window)}.
#' }
#' \item{time}{
#' Time, after possible coercion to numeric.
#' }
#' \item{estimate}{
#' Predicted value of \code{var} at \code{time} in \code{window},
#' conditional on the mixed effects data and fitted model.
#' }
#' \item{se}{
#' Approximate delta method standard error on \code{estimate}.
#' Absent except for calls matching \code{predict(log = TRUE, se = TRUE)}.
#' }
#'
#' @examples
#' object <- egf_cache("egf-1.rds")
#' zz <- egf_cache("predict-egf-1.rds", predict(object, se = TRUE))
#' str(zz)
#'
#' @export
#' @importFrom TMB MakeADFun sdreport
#' @importFrom stats model.frame
predict.egf <- function(object,
what = c("interval", "cumulative", "rt"),
time,
window,
log = TRUE,
se = FALSE,
...) {
what <- unique(match.arg(what, several.ok = TRUE))
stopifnot(is_true_or_false(log), is_true_or_false(se))
if (se && !log) {
stop1("Standard errors are not available for inverse log-transformed ",
"predicted values.")
}
frame_windows <- model.frame(object, "windows")
start <- frame_windows$start
end <- frame_windows$end
day1 <- object$tmb_out$env$data$day1
do_day_of_week <- object$model$day_of_week > 0L
if (missing_time <- missing(time)) {
len <- object$tmb_out$env$data$time_seg_len
subset <- seq_along(len)
time <- object$tmb_out$env$data$time + rep.int(start, len)
time_split <- split(time, rep.int(subset, len))
} else {
if (inherits(time, c("Date", "POSIXct", "POSIXlt"))) {
time <- julian(time)
}
stopifnot(is.numeric(time), length(time) > 0L, is.finite(time))
if (do_day_of_week) {
stopifnot(all.equal(time, z <- round(time)))
time <- z
}
stopifnot(is.factor(window), length(window) == length(time))
subset <- which(levels(frame_windows$window)%in%levels(factor(window)))
window <- factor(window, levels = levels(frame_windows$window)[subset])
if (nlevels(window) == 0L) {
stop("'window' must have at least one valid level.")
}
len <- c(table(window))
min_len <- 1L + as.integer("interval" %in% what)
if (any(len < min_len)) {
stop("'time' must have length ", min_len, " or greater ",
"in each level of 'window'.")
}
time_split <- split(time, window)
t0 <- vapply(time_split, min, 0)
t1 <- vapply(time_split, max, 0)
start <- start[subset]
end <- end[subset]
day1 <- day1[subset]
if (any(t0 < start | t1 > end)) {
stop("'time[i]' must not occur before (after) the start (end) ",
"of 'window[i]'.")
}
if (do_day_of_week) {
check_ok_diff_time <- function(x) all(diff(x) == 1)
} else {
check_ok_diff_time <- function(x) all(diff(x) > 0)
}
if (!all(vapply(time_split, check_ok_diff_time, FALSE))) {
stop1("'time' must be increasing ",
if (do_day_of_week) "with one day spacing ",
"in each level of 'window'.")
}
time <- unlist1(time_split)
if (do_day_of_week) {
day1 <- as.integer((day1 + (t0 - start)) %% 7)
}
}
tmb_args <- egf_tmb_remake_args(object$tmb_out, par = object$best)
tmb_args$data$flags$predict <- 1L
tmb_args$data$what <- as.integer(eval(formals(sys.function())$what)%in%what)
tmb_args$data$subset <- subset - 1L
tmb_args$data$new_time <- time - rep.int(start, len)
tmb_args$data$new_time_seg_len <- len
tmb_args$data$new_day1 <- day1
tmb_out_retape <- do.call(MakeADFun, tmb_args)
if (se) {
sdr <- sdreport(tmb_out_retape,
par.fixed = object$best[!object$random],
getReportCovariance = FALSE)
ssdr <- summary(sdr, select = "report")
index <- factor(rownames(ssdr),
levels = sprintf("log_%s", what),
labels = sprintf("log(%s)", what))
report <- split(unname(ssdr[, "Estimate"]), index)
report_se <- split(unname(ssdr[, "Std. Error"]), index)
} else {
report <- tmb_out_retape$report(object$best)[sprintf("log_%s", what)]
names(report) <- sprintf("log(%s)", what)
}
last <- cumsum(len)
first <- c(0L, last[-length(last)]) + 1L
x <- rep.int(NA_real_, length(time))
ix <- list(interval = -first,
cumulative = seq_along(time),
rt = seq_along(time))
res <- data.frame(var = gl(length(report), length(time),
labels = names(report)),
ts = rep.int(frame_windows$ts[subset], len),
window = rep.int(frame_windows$window[subset], len),
time = time,
estimate =
unlist1(Map(replace, list(x), ix[what], report)))
if (se) {
res$se <- unlist1(Map(replace, list(x), ix[what], report_se))
}
if (!log) {
res$estimate <- exp(res$estimate)
levels(res$var) <- what
}
attr(res, "se") <- se
class(res) <- c("egf_predict", oldClass(res))
res
}
#' Confidence intervals on predicted values
#'
#' Computes confidence intervals on predicted values of interval incidence,
#' cumulative incidence, and the per capita growth rate.
#'
#' @param object
#' An \code{"\link[=predict.egf]{egf_predict}"} object.
#' Must supply log scale predicted values and corresponding standard errors.
#' @param parm
#' Unused argument included for generic consistency.
#' @param level
#' A number in the interval (0,1) indicating a confidence level.
#' @param log
#' A logical flag. If \code{FALSE}, then confidence intervals
#' on inverse log-transformed predicted values are returned.
#' @param ...
#' Unused optional arguments.
#'
#' @details
#' Confidence limits on predicted values (log scale) are computed
#' as \code{estimate + c(-1, 1) * sqrt(q) * se},
#' with \code{estimate} and \code{se} as in \code{object} and
#' \code{q = \link{qchisq}(level, df = 1)}.
#'
#' @return
#' If \code{log = TRUE}, then \code{object} but with variable
#' \code{se} replaced with variables \code{lower} and \code{upper}
#' supplying confidence limits on log predicted values.
#'
#' Otherwise, the same result but with variables \code{estimate},
#' \code{lower}, and \code{upper} inverse log-transformed and
#' the \link{levels} of variable \code{var} modified accordingly.
#'
#' \code{level} is retained as an attribute.
#'
#' @examples
#' object <- egf_cache("predict-egf-1.rds")
#' confint(object, log = TRUE)
#' confint(object, log = FALSE)
#'
#' @export
confint.egf_predict <- function(object, parm, level = 0.95, log = TRUE, ...) {
if (!isTRUE(attr(object, "se"))) {
stop1("'object' must supply log scale predicted values ",
"and corresponding standard errors. Retry with ",
"'object = predict(<\"egf\" object>, log = TRUE, se = TRUE)'.")
}
stopifnot(is_number_in_interval(level, 0, 1, "()"), is_true_or_false(log))
res <- data.frame(object[-match("se", names(object), 0L)],
wald(estimate = object$estimate, se = object$se,
level = level))
attr(res, "level") <- level
if (log) {
return(res)
}
elu <- c("estimate", "lower", "upper")
res[elu] <- exp(res[elu])
levels(res$var) <- sub("^log\\((.+)\\)$", "\\1", levels(res$var))
res
}
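# --- Hedged illustration, not part of the original file ---
# Worked example of the confidence limit formula quoted in the @details of
# confint.egf_predict() above: estimate + c(-1, 1) * sqrt(q) * se with
# q = qchisq(level, df = 1). The numbers are invented; the package's internal
# wald() helper is not reproduced here.
level <- 0.95
estimate <- 1.2  # a log scale predicted value
se <- 0.3        # its delta method standard error
q <- qchisq(level, df = 1)
c(lower = estimate - sqrt(q) * se, upper = estimate + sqrt(q) * se)
# exp() of these limits gives the interval on the natural scale (log = FALSE)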
| /R/predict.R | no_license | davidearn/epigrowthfit | R | false | false | 10,326 | r |
run_analysis<-function(wd){
setwd(wd)
#-----------------------------------------------------------------
# 0. Downloding and extracting raw data
#-----------------------------------------------------------------
url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zip_name<-"UCI HAR Dataset.zip"
path<-file.path(getwd(),zip_name)
if (!file.exists(path)){
download.file(url,path)
unzip(zip_name)
}
#-----------------------------------------------------------------
# 1. Merges the training and the test sets to create one data set.
#-----------------------------------------------------------------
###Getting Train Set tables
x_trainSet<-read.table("UCI HAR Dataset/train/X_train.txt")
y_trainSet<-read.table("UCI HAR Dataset/train/y_train.txt")
sub_trainSet<-read.table("UCI HAR Dataset/train/subject_train.txt")
###Getting Test Set tables
x_testSet<-read.table("UCI HAR Dataset/test/X_test.txt")
y_testSet<-read.table("UCI HAR Dataset/test/y_test.txt")
sub_testSet<-read.table("UCI HAR Dataset/test/subject_test.txt")
###Merging the data sets into one and adding the
###"activity" and "subject" data per observation.
x_final_set<-rbind(x_testSet,x_trainSet)
y_final_set<-rbind(y_testSet,y_trainSet)
sub_final_set<-rbind(sub_testSet,sub_trainSet)
merge_set<-cbind(x_final_set,y_final_set,sub_final_set)
##Getting feature names
featuresNames<-read.table(("UCI HAR Dataset/features.txt"))
##Getting a correspondence table between activity IDs and names.
activityName<- read.table("UCI HAR Dataset/activity_labels.txt")
#-----------------------------------------------------------------
#2. Extracts only the measurements on the mean and standard
# deviation for each measurement.
#-----------------------------------------------------------------
#Getting a logical vector that identifies the elements with "mean" and "SD"
validFeatures<-grepl("mean\\(\\)|std\\(\\)", featuresNames[,2])
#Extracting the columns of the data set that have "mean" and "SD" info
merge_fltr_set<-merge_set[,validFeatures]
#Matching each activity ID to its name in order to
#replace the ID value in the "activity" column with its name.
merge_fltr_set[,67]<-activityName[,2][match(merge_fltr_set[,67],activityName[,1])]
#-----------------------------------------------------------------
#3. Uses descriptive activity names to name the activities in the
# data set.
#-----------------------------------------------------------------
filter_feat<-as.character(featuresNames[validFeatures,2])
filter_feat <- gsub('Mag',"Magnitude",filter_feat)
filter_feat <- gsub('Acc',"Acceleration",filter_feat)
filter_feat <- gsub('Gyro',"AngularSpeed",filter_feat)
filter_feat <- gsub('^t',"Time.",filter_feat)
filter_feat <- gsub('\\.mean',".Mean",filter_feat)
filter_feat <- gsub('Freq\\.',"Frequency.",filter_feat)
filter_feat <- gsub('Freq$',"Frequency",filter_feat)
filter_feat <- gsub('GyroJerk',"AngularAcceleration",filter_feat)
filter_feat <- gsub('\\.std',".StandardDeviation",filter_feat)
filter_feat <- gsub('^f',"Frequency.",filter_feat)
#-----------------------------------------------------------------
#4. Appropriately labels the data set with descriptive variable
# names.
#-----------------------------------------------------------------
names(merge_fltr_set)<-c(filter_feat,"Activity","Subject")
#-----------------------------------------------------------------
#5. Creates a second, independent tidy data set with the average
# of each variable for each activity and each subject
#-----------------------------------------------------------------
tidyData <- aggregate(. ~ Subject + Activity, merge_fltr_set, mean)
write.table(tidyData, file = "tidydata.txt",row.name=FALSE)
}
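# --- Hedged usage sketch, not part of the original file ---
# How run_analysis() above would typically be called and how to read the tidy
# data set it writes. The directory path is a placeholder; the call is left
# commented out because it downloads the raw data.
# run_analysis("path/to/project/directory")
# tidy <- read.table("tidydata.txt", header = TRUE)
# str(tidy)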
| /run_analysis.R | no_license | hender1288/Getting_and_Cleaning_Data_Course_Project | R | false | false | 4,008 | r |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{take_samp}
\alias{take_samp}
\title{Sample from two data frames}
\usage{
take_samp(df1, df2, N, p = 1 - q, q = 1 - p)
}
\description{
Sample from two data frames
}
| /man/take_samp.Rd | no_license | JoFrhwld/phoneticChange | R | false | false | 224 | rd |
# Cost function and gradient for Linear Regression algorithm
# X is the Feature Matrix plus one additional column of 1's for theta_0; columns j=0,...,n
# Y is the Dependent Variable (Y_i, i=1,...,m)
# X and Y are defined globally
# theta (theta_j, j=0,...,n) is the Parameter Vector of the Linear Regression model
# Function calls are counted by global variables count_f, count_g
#lin_reg_cost <- function(X,Y,theta){
lin_reg_cost <- function(theta){
# Increment function call counter
count_f <<- count_f + 1
# Dimensions of the matrices
np1 = dim(X)[2] # n plus 1
m = dim(X)[1]
# Auxiliary vector, residual res=X*theta-Y
res = X %*% theta - Y
# Cost function
J = 1/(2*m) * t(res) %*% res
} # end linear regression cost function
#lin_reg_grad <- function(X,Y,theta){
lin_reg_grad <- function(theta){
# Increment gradient call counter
count_g <<- count_g + 1
# Dimensions of the matrices
np1 = dim(X)[2]
m = dim(X)[1]
temp_vec = rep(0,np1)
# Auxiliary vector, residual res=X*theta-Y
res = X %*% theta - Y # length m
#print(X)
#print(theta)
#print(Y)
#print(X%*%theta)
#print(X%*%theta-Y)
#Sys.sleep(3)
# Gradient
for (i in 1:m) {
temp = res[i]
for (j in 1:np1) {
temp_vec[j]=temp_vec[j]+temp*X[i,j]
}
}
temp_vec = 1/m * temp_vec
#cat("Theta", theta,"\n")
#cat("temp_vec", temp_vec,"\n")
#print(temp_vec)
#Sys.sleep(0.2)
grad = temp_vec
} # end linear regression gradient
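# --- Hedged usage sketch, not part of the original file ---
# Minimizing the cost with optim() using the functions above. As the comments
# state, X, Y and the call counters are globals; the data are simulated here
# purely for illustration.
set.seed(1)
m <- 50
X <- cbind(1, runif(m))                  # column of 1's for theta_0 plus one feature
Y <- X %*% c(2, 3) + rnorm(m, sd = 0.1)  # true theta = (2, 3)
count_f <- 0; count_g <- 0
fit <- optim(par = c(0, 0), fn = function(th) drop(lin_reg_cost(th)),
             gr = lin_reg_grad, method = "BFGS")
fit$par             # should be close to c(2, 3)
c(count_f, count_g) # number of cost and gradient evaluations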
| /lin_reg_cost_grad.R | no_license | cc-skuehn/Optimization | R | false | false | 1,484 | r |
\name{pdfsq}
\alias{pdfsq}
\title{Calculate \eqn{f^2(x)}}
\description{Calculates the square of a density.}
\usage{pdfsq(s,dist, p1,p2)}
\arguments{
\item{s}{A scalar or vector: the x-axis grid points where the probability density function will be evaluated.}
\item{dist}{Character string, used as a switch to the user selected distribution function (see details below).}
\item{p1}{A scalar. Parameter 1 (vector or object) of the selected density.}
\item{p2}{A scalar. Parameter 2 (vector or object) of the selected density.}
}
\details{Based on user-specified argument \code{dist}, the function returns the value of \eqn{f^2(x)dx}, used in the definitions of \eqn{\rho_p^*}, \eqn{\rho_p} and their exact versions.
Supported distributions (along with the corresponding \code{dist} values) are:
\itemize{
\item{weib: }{The weibull distribution is implemented as \deqn{f(s;p_1,p_2)= \frac{p_1}{p_2} \left (\frac{s}{p_2}\right )^{p_1-1} \exp \left \{- \left (\frac{s}{p_2}\right )^{p_1} \right \} } with \eqn{ s \ge 0} where \eqn{p_1} is the shape parameter and \eqn{p_2} the scale parameter.}
\item{lognorm: }{The lognormal distribution is implemented as \deqn{f(s) = \frac{1}{p_2s\sqrt{2\pi}}e^{-\frac{(log s -p_1)^2}{2p_2^2}}} where \eqn{p_1} is the mean and \eqn{p_2} is the standard deviation of the distribution.}
\item{norm: }{The normal distribution is implemented as \deqn{f(s) = \frac{1}{p_2\sqrt{2 \pi}}e^{-\frac{ (s - p_1)^2 }{ 2p_2^2 }}} where \eqn{p_1} is the mean and \eqn{p_2} is the standard deviation of the distribution.}
\item{uni: }{The uniform distribution is implemented as \deqn{f(s) = \frac{1}{p_2-p_1}} for \eqn{ p_1 \le s \le p_2}.}
\item{cauchy: }{The cauchy distribution is implemented as
\deqn{f(s)=\frac{1}{\pi p_2 \left \{1+( \frac{s-p_1}{p_2})^2\right \} } }
where \eqn{p_1} is the location parameter and \eqn{p_2} the scale parameter.}
\item{fnorm: }{The half normal distribution is implemented as \deqn{2 f(s)-1} where \deqn{f(s) = \frac{1}{sd\sqrt{2 \pi} }e^{-\frac{s^2}{2 sd^2 }},} and \eqn{sd=\sqrt{\pi/2}/p_1}.}
\item{normmixt:}{The normal mixture distribution is implemented as
\deqn{f(s)=p_1\frac{1}{p_2[2] \sqrt{2\pi} } e^{- \frac{ (s - p_2[1])^2}{2p_2[2]^2}} +(1-p_1)\frac{1}{p_2[4]\sqrt{2\pi}} e^{-\frac{(s - p_2[3])^2}{2p_2[4]^2 }} }
where \eqn{p1} is a mixture component(scalar) and \eqn{p_2} a vector of parameters for the mean and variance of the two mixture components \eqn{p_2= c(mean1, sd1, mean2, sd2)}.}
\item{skewnorm: }{The skew normal distribution with parameter \eqn{p_1} is implemented as \deqn{f(s)=2\phi(s)\Phi(p_1s)}.}
\item{fas: }{The Fernandez and Steel distribution is implemented as
\deqn{f(s; p_1, p_2) = \frac{2}{p_1+\frac{1}{p_1}} \left \{ f_t(s/p_1; p_2) I_{\{s \ge 0\}} + f_t(p_1s; p_2)I_{\{s<0 \}}\right \} }
where \eqn{f_t(x;\nu)} is the p.d.f. of the \eqn{t} distribution with \eqn{\nu = 5} degrees of freedom. \eqn{p_1} controls the skewness of the distribution with values between \eqn{(0, +\infty)} and \eqn{p_2} denotes the degrees of freedom.}
\item{shash: }{The Sinh-Arcsinh distribution is implemented as
\deqn{f(s;\mu, p_1, p_2, \tau) = \frac{ce^{-r^2/2}}{\sqrt{2\pi }} \frac{1}{p_2} \frac{1}{2} \sqrt{1+z^2} }
where \eqn{r=\sinh(\sinh(z)-(-p_1))}, \eqn{c=\cosh(\sinh(z)-(-p_1))} and \eqn{z=((s-\mu)/p2)}. \eqn{p_1} is the vector of skewness, \eqn{p_2} is the scale parameter, \eqn{\mu=0} is the location parameter and \eqn{\tau=1} the kurtosis parameter.}
}
}
\value{A vector containing the user selected density values at the user specified points \code{s}.}
\references{
\href{https://link.springer.com/chapter/10.1007/978-3-319-41582-6_1}{Bagkavos D., Patil P.N., Wood A.T.A. (2016), A Numerical Study of the Power Function of a New Symmetry Test. In: Cao R., Gonzalez Manteiga W., Romo J. (eds) Nonparametric Statistics. Springer Proceedings in Mathematics and Statistics, vol 175, Springer.}
}
\author{
Dimitrios Bagkavos and Lucia Gamez Gallardo
R implementation and documentation: Dimitrios Bagkavos <dimitrios.bagkavos@gmail.com>, Lucia Gamez Gallardo <gamezgallardolucia@gmail.com>
}
\seealso{ \code{\link{r.sample}, \link{q.sample}, \link{p.sample} } }
\examples{
selected.dens <- "weib" #select Weibull
shape <- 2 # specify shape parameter
scale <- 1 # specify scale parameter
xout <- seq(0.1,5,length=50) #design point
pdfsq(xout,selected.dens,shape,scale) # calculate the square density at xout
}
| /man/pdfsq.Rd | no_license | cran/asymmetry.measures | R | false | false | 4,529 | rd |
#download data and unzip
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, destfile = "/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset.zip")
unzip(zipfile = "/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset.zip")
#read data
x_train <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/features.txt")
activityLabels <- read.table("/Users/mcheng/Documents/getting-and-cleaning-data-week-4-project/UCI HAR Dataset/activity_labels.txt")
#label the data set with descriptive variable names
colnames(x_train) <- features[ , 2]
colnames(x_test) <- features[ , 2]
colnames(y_train) <- "activityID"
colnames(y_test) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(subject_test) <- "subjectID"
colnames(activityLabels) <- c("activityID", "activityType")
#merge data sets
train <- cbind(x_train, y_train, subject_train)
test <- cbind(x_test, y_test, subject_test)
onedata <- rbind(train, test)
#use descriptive activity names to name the activities in the data set
onedata1 <- merge(onedata, activityLabels, by = "activityID")
#extract the mean and standard deviation for each measurement
colnames <- colnames(onedata1)
mean_and_sd <- grepl("activityID", colnames) |
grepl("subjectID", colnames) |
grepl("mean", colnames) |
grepl("std", colnames)
setformean_and_sd <- onedata1[ , mean_and_sd == TRUE]
#creates a second, independent tidy data set with the average of each variable for each activity and each subject
tidyData<- aggregate(. ~ subjectID + activityID, data = setformean_and_sd, mean)
tidyData <- tidyData[order(tidyData$subjectID, tidyData$activityID), ]
#write the second data set into a txt file
write.table(tidyData, "tidyData.txt", row.names = FALSE)
knitr::knit("README.Rmd")
| /run_analysis.R | no_license | mcheng23/getting-and-cleaning-data-week-4-project | R | false | false | 2,695 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plotDiffHeatmap.R
\docType{methods}
\name{plotDiffHeatmap}
\alias{plotDiffHeatmap}
\alias{plotDiffHeatmap,matrix,SummarizedExperiment-method}
\alias{plotDiffHeatmap,daFrame,SummarizedExperiment-method}
\alias{plotDiffHeatmap,SummarizedExperiment,SummarizedExperiment-method}
\alias{plotDiffHeatmap,ANY,list-method}
\title{Plot differential heatmap}
\usage{
plotDiffHeatmap(x, y, ...)
\S4method{plotDiffHeatmap}{matrix,SummarizedExperiment}(x, y, top_n = 20,
all = FALSE, order = TRUE, th = 0.1, hm1 = TRUE,
normalize = TRUE, row_anno = TRUE, ...)
\S4method{plotDiffHeatmap}{daFrame,SummarizedExperiment}(x, y,
top_n = 20, all = FALSE, order = TRUE, th = 0.1, hm1 = TRUE,
normalize = TRUE, row_anno = TRUE, ...)
\S4method{plotDiffHeatmap}{SummarizedExperiment,SummarizedExperiment}(x, y,
top_n = 20, all = FALSE, order = TRUE, th = 0.1, hm1 = TRUE,
normalize = TRUE, row_anno = TRUE, ...)
\S4method{plotDiffHeatmap}{ANY,list}(x, y, top_n = 20, all = FALSE,
order = TRUE, th = 0.1, hm1 = TRUE, normalize = TRUE,
row_anno = TRUE, ...)
}
\arguments{
\item{x}{a \code{\link{daFrame}} or \code{SummarizedExperiment}.}
\item{y}{a \code{SummarizedExperiment} containing differential testing
results as returned by one of \code{\link[diffcyt]{testDA_edgeR}},
\code{\link[diffcyt]{testDA_voom}}, \code{\link[diffcyt]{testDA_GLMM}},
\code{\link[diffcyt]{testDS_limma}}, or \code{\link[diffcyt]{testDS_LMM}}.
Alternatively, a list as returned by \code{\link[diffcyt]{diffcyt}}.}
\item{...}{optional arguments.}
\item{top_n}{numeric. Number of top clusters (if \code{type = "DA"}) or
cluster-marker combinations (if \code{type = "DS"}) to display.}
\item{all}{logical. Specifies whether all clusters or cluster-marker combinations
should be displayed. If \code{TRUE}, \code{top_n} will be ignored.}
\item{order}{logical. Should results be ordered by significance?}
\item{th}{numeric. Threshold on adjusted p-values below which clusters (DA)
or cluster-marker combinations (DS) should be considered significant.}
\item{hm1}{logical. Specifies whether the left-hand side heatmap should be plotted.}
\item{normalize}{logical. Specifies whether Z-score normalized values should be plotted
in the right-hand side heatmap. If \code{y} contains DA analysis results,
relative population abundances will be arcsine-square-root scaled
prior to normalization.}
\item{row_anno}{logical. Should a row annotation indicating whether cluster (DA)
or cluster-marker combinations (DS) are significant,
as well as adjusted p-values be included?}
}
\value{
a \code{\link{HeatmapList-class}} object.
}
\description{
Heatmaps summarizing differential abundance
& differential state testing results.
}
\details{
For DA tests, \code{plotDiffHeatmap} will display
\itemize{
\item{median (arcsinh-transformed)
cell-type marker expressions (across all samples)}
\item{cluster abundances by samples}
\item{row annotations indicating if detected clusters
are significant (i.e. adj. p-value <= \code{th})}
}
For DS tests, \code{plotDiffHeatmap} will display
\itemize{
\item{median (arcsinh-transformed)
cell-type marker expressions (across all samples)}
\item{median (arcsinh-transformed)
cell-state marker expressions by sample}
\item{row annotations indicating if detected cluster-marker combinations
are significant (i.e. adj. p-value <= \code{th})}
}
}
\examples{
# construct daFrame
data(PBMC_fs, PBMC_panel, PBMC_md)
re <- daFrame(PBMC_fs, PBMC_panel, PBMC_md)
# run clustering
re <- cluster(re)
## differential analysis
library(diffcyt)
# create design & constrast matrix
design <- createDesignMatrix(PBMC_md, cols_design=3:4)
contrast <- createContrast(c(0, 1, 0, 0, 0))
# test for
# - differential abundance (DA) of clusters
# - differential states (DS) within clusters
da <- diffcyt(re, design = design, contrast = contrast,
analysis_type = "DA", method_DA = "diffcyt-DA-edgeR",
clustering_to_use = "meta20")
ds <- diffcyt(re, design = design, contrast = contrast,
analysis_type = "DS", method_DS = "diffcyt-DS-limma",
clustering_to_use = "meta20")
# display test results for
# - top DA clusters
# - top DS cluster-marker combintations
plotDiffHeatmap(re, da)
plotDiffHeatmap(re, ds)
}
\author{
Lukas M Weber and
Helena Lucia Crowell \email{helena.crowell@uzh.ch}
}
| /man/plotDiffHeatmap.Rd | no_license | almutlue/CATALYST | R | false | true | 4,433 | rd |
#Farnaz Fouladi
#04-10-2020
##Functions for generating taxonomic count tables in the Taxonomy script
getTaxaTable<-function(svTable,taxaTable,taxa){
colnames(svTable)<-taxaTable[,taxa]
svTable<-t(svTable)
tab<-rowsum(svTable,group=rownames(svTable))
tab<-t(tab)
colnames(tab)[ncol(tab)]<-"others"
return(tab)
}
norm<-function(table){
table<-table[rowSums(table)>1000,]
average<-sum(rowSums(table))/nrow(table)
table<-sweep(table,1,rowSums(table),"/")
table<-log10(table*average + 1)
return(table)
}
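# --- Hedged usage sketch, not part of the original file ---
# Illustrates the intended inputs: svTable is a sample-by-sequence-variant
# count matrix and taxaTable has one row per sequence variant with a column
# per taxonomic rank. Values are invented; note that getTaxaTable() renames
# the alphabetically last group to "others" (presumably the unclassified bin
# in the original data).
set.seed(7)
svTable <- matrix(rpois(5 * 4, lambda = 500), nrow = 5,
                  dimnames = list(paste0("sample", 1:5), paste0("SV", 1:4)))
taxaTable <- data.frame(Genus = c("Bacteroides", "Prevotella",
                                  "Bacteroides", "unclassified"))
genusCounts <- getTaxaTable(svTable, taxaTable, "Genus")  # counts summed per genus
normCounts <- norm(genusCounts)                           # filter, normalize, log10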
| /functions.R | no_license | FarnazFouladi/AnorexiaMicrobiota | R | false | false | 520 | r |
#' Empirical Cumulative Percent
#'
#' Computes the empirical cumulative percent or percent exceedance of observed data
#' for specific values.\cr
#'
#'
#' @aliases percentile percentile.default
#' @param x a numeric vector representing the observed data.
#' @param q a vector of quantiles for which the cumulative percent or percent
#' exceedence is desired.
#' @param test a character string indicating the test. The default value, '>=,'
#' is the percent equalling or exceeding the quantile and '<' would return the
#' cumulative percent below the quantile.
#' @param na.rm a logical value indicating whether missing values (NAs) should
#' be removed or not. If na.rm is \code{FALSE} and there are missing values in
#' \code{x}, then the result will be NA. The default value is \code{TRUE}.
#' @param percent a logical value indicating whether the result should be
#' expressed as a percent or proportion. The default value, \code{TRUE}, will
#' express the result as a percent.
#' @param \dots not used, required for method function
#' @return A named vector as long as \code{q} corresponding to the requested
#' value.
#' @note The stats package contains the \code{ecdf} function that performs a
#' similar function when \code{test} is "<=."
#' @seealso \code{\link{ecdf}}
#' @keywords univar math manip
#' @examples
#'
#' set.seed(2342)
#' Xr <- rlnorm(24)
#' # The percentage of the observations greater than or equal to each of 1 through 5
#' percentile(Xr, 1:5)
#'
#' @export percentile
percentile <- function(x, q, test='>=', na.rm=TRUE,
percent=TRUE, ...) {
# Coding history:
# 2007Oct12 DLLorenz Initial Coding
# 2011Aug09 DLLorenz Conversion to R and create generic function
# 2011Oct25 DLLorenz Update for package
# 2013Apr16 DLLorenz Named percentile
# 2014Dec22 DLLorenz Roxygen headers
##
UseMethod("percentile")
}
#' @rdname percentile
#' @export
#' @method percentile default
percentile.default <- function(x, q, test='>=', na.rm=TRUE,
percent=TRUE, ...) {
## Arguments:
## x (numeric vector) the values to test
## q (numeric vector) The numeric criterion
## test (character scalar) the test
## na.rm (logical scalar) remove missing values
## percent (logical scalar) express result in percent
## ... (dots) not used, required for method function
##
retval <- double(length(q))
check <- test
test <- get(test)
if(na.rm)
x <- x[!is.na(x)]
N <- length(x)
for(i in seq(along=q)) {
Ntest <- sum(test(x, q[i]))
retval[i] <- Ntest/N
}
if(percent) {
retval <- retval *100
names(retval) <- paste("Percent", check, q, sep=' ')
}
else
names(retval) <- paste("Proportion", check, q, sep=' ')
return(retval)
}
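# --- Hedged illustration, not part of the original file ---
# Relationship with stats::ecdf() mentioned in the @note above: with
# test = "<=", percentile() is the empirical CDF expressed as a percent.
set.seed(2342)
Xr <- rlnorm(24)
percentile(Xr, 2, test = "<=")  # percent of observations <= 2
100 * ecdf(Xr)(2)               # same value via stats::ecdf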
| /R/percentile.R | permissive | oceanspace/smwrStats | R | false | false | 2,755 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasim3.R
\docType{data}
\name{data_example3}
\alias{data_example3}
\title{Simulated data example 3}
\format{A list with multiple elements}
\description{
The confounder is unobserved and we only know the primary variable of interest (the biological conditions).
There are 10 biological conditions, each with 3 replicates.
The variation is shared among replicates for half of the genes and not shared for the other genes.
Here are details for the data:
}
\details{
\itemize{
\item X the N by p data matrix, number of samples=30, number of variables=400
\item Y the N by q confounder matrix, q=30. For a biological condition,
treating the 3 replicates as 3 groups, it can be shown that the penalty term
equals the summation of the between groups sum of squares over the biological conditions.
\item lab labels for the biological conditions.
\item true_pattern the true underlying latent pattern
}
}
\examples{
library(acPCA)
data(data_example3)
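# a quick, illustrative look at the components described in Details
str(data_example3$X)     # 30 x 400 data matrix
dim(data_example3$Y)     # 30 x 30 confounder matrix
table(data_example3$lab) # 10 biological conditions, 3 replicates each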
}
\keyword{datasets}
| /R_package/acPCA/man/data_example3.Rd | no_license | vgainullin/AC-PCA | R | false | true | 1,056 | rd | % Generated by roxygen2: do not edit by hand
|
rm(list=ls()) ### Clean the R workspace
sp500 <- read.csv('sp500.csv', header=T) ### Load the daily return data of SP500 index
price <- sp500$Close ### Extract the price information
T <- length(price)
ret <- log(price[2:T]) - log(price[1:(T-1)]) ### Calculate the log return
ind <- which(ret!=0) ### Only keep those returns that are not zero (i.e. not on holidays)
ret <- ret[ind]
T <- length(ret)
#### Historical simulation with a 250-day rolling window
var1_250 <- numeric(T)
var5_250 <- numeric(T)
for (i in 251:T){
var1_250[i] <- -quantile(ret[(i-250):(i-1)], probs=0.01) # 99% VaR
# the VaR for each date is estimated from the previous 250 return scenarios (the current day is excluded to avoid look-ahead)
var5_250[i] <- -quantile(ret[(i-250):(i-1)], probs=0.05) # 95% VaR
}
plot(var1_250, col='red', type='l', ylim=c(0,0.1))
points(var5_250, col='blue', type='l')
#### Historical simulation with a 1000-day rolling window
var1_1000 <- numeric(T)
for (i in 1001:T){
var1_1000[i] <- -quantile(ret[(i-1000):(i-1)], probs=0.01) # 99% VaR
}
plot(var1_250, col='red', type='l', ylim=c(0,0.1))
points(var1_1000, col='blue', type='l') # overlay the 1000-day VaR on top of the 250-day VaR
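#### Hedged addition (not in the original script): a minimal backtest sketch that
#### reuses `ret`, `var1_250` and `T` from above and counts how often the realized
#### loss exceeds the 99% one-day VaR estimate.
idx <- 251:T
exceed_99 <- sum(-ret[idx] > var1_250[idx])
cat("99% VaR exceedances:", exceed_99, "out of", length(idx),
    "days; expected about", round(0.01*length(idx)), "\n")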
| /week2/HistoricalSimulation.R | no_license | Karagul/risk-modeling-2-course-material | R | false | false | 1,256 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runApp.R
\name{explore_wateRuse}
\alias{explore_wateRuse}
\title{Run water use application}
\usage{
explore_wateRuse(browse = TRUE)
}
\arguments{
\item{browse}{use browser for map rendering}
}
\description{
Run water use application
}
\examples{
\dontrun{
explore_wateRuse()
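# illustrative: run the application without opening the system browser
explore_wateRuse(browse = FALSE)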
}
}
| /man/explore_wateRuse.Rd | permissive | cadieter-usgs/wateRuse | R | false | true | 358 | rd | % Generated by roxygen2: do not edit by hand
|
\name{searchInterval}
\alias{searchInterval}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Search interval (internal use, SMC)}
\description{
Search for intervals with non-empty intersection.
}
\usage{
searchInterval(bbalist, c)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{bbalist}{
List of BBAS
}
\item{c}{
Vector containing the indices of selected elements}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
%\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{N. Maillet, B. Charnomordic, S. Destercke
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function(bbalist,c){ #bbalist=set of bbas, c=selection vector for each bba
l=c() # function used for SMC
L=length(bbalist) #searches for intersecting intervals
for(i in 1:L){
k=bbalist[[i]]@group[c[i],]
p=length(bbalist[[i]]@group[c[i],])
p=1:p
p=p*k
p=p[p!=0]
if(sum(p)!=0){
l=c(l,min(p),max(p))
}
else{
l=c(l)
}
}
return(l)
}
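## Illustrative toy call (a sketch only): a minimal stand-in S4 class with a
## 'group' slot is enough to show how the min/max positions of each selected
## row are collected; real BBA objects from this package work the same way.
setClass("toyBBA", representation(group = "matrix"))
b1 <- new("toyBBA", group = rbind(c(0, 1, 1, 0)))
b2 <- new("toyBBA", group = rbind(c(1, 1, 0, 0)))
searchInterval(list(b1, b2), c(1, 1)) # expected: 2 3 1 2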
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/searchInterval.Rd | no_license | sdestercke/Belief-R-Package | R | false | false | 1,659 | rd | \name{searchInterval}
|
library(rstan)
library(latex2exp)
data <- read.csv(file="C:/GABRIEL_20192301/Proyecto-GAMs/Basisfunctions/BF-1D/Birthday_project/births_usa_1969.csv")
str(data)
# input
x <- scale(data$id[], center=TRUE, scale=TRUE)
# response variable
y <- scale(data$births[], center=TRUE, scale=TRUE)
# std and mean of the response variable
std_y <- attr(y,"scaled:scale")
m_y <- attr(y,"scaled:center")
# std of the input
std_x <- attr(x,"scaled:scale")
# period for year and week
period_year <- 365.25/std_x
period_week <- 7/std_x
M_f1 <- 30 #num basis functions for f1= smooth trend
c_f1 <- 1.5 #factor c for f1= smooth trend
J_f3 <- 6 #num basis functions for f3= year effect
J_f4 <- 4 #num basis functions for f4= week effect
standata <- list(M_f1= M_f1, L_f1= c_f1*max(abs(x)), J_f3= J_f3, J_f4= J_f4, x= x[,1], y= y[,1], N= length(x), period_year= period_year, period_week= period_week)
str(standata)
# Run Stan
stanout <- stan(file = "C:/GABRIEL_20180206/GIFLE/Proyecto-GAMs/Basisfunctions/BF-1D/Birthday_project/stancode_v3.stan", data= standata, iter= 50, warmup= 20, chains= 1, thin= 1, algorithm= "NUTS", verbose= FALSE, control=list(adapt_delta =0.99, max_treedepth= 15))
# Load the output
load("C:/GABRIEL_20192301/Proyecto-GAMs/Basisfunctions/BF-1D/Birthday_project/stanout_100iter.rData")
ls()
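# Hedged addition (not in the original script): quick MCMC health checks on the
# loaded fit; with such a short run these are expected to raise warnings.
check_hmc_diagnostics(stanout)
max(summary(stanout)$summary[, "Rhat"], na.rm=TRUE) # should be close to 1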
#Storing the results
f <- summary(stanout, pars = c("f"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
f1 <- summary(stanout, pars = c("f1"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
f3 <- summary(stanout, pars = c("f3"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
f4 <- summary(stanout, pars = c("f4"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
f5 <- summary(stanout, pars = c("f5"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
rho <- summary(stanout, pars = c("rho"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
alpha <- summary(stanout, pars = c("alpha"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
sigma <- summary(stanout, pars = c("sigma"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
tau <- summary(stanout, pars = c("tau"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
lambda_h <- summary(stanout, pars = c("lambda_h"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
z <- summary(stanout, pars = c("z"), probs = c(0.025, 0.5, 0.975), digits_summary = 4)$summary
#Printing the results
print(rho[,c(1,3,7,8)])
print(alpha[,c(1,3,7,8)])
print(sigma[,c(1,3,7,8)])
print(head(f[,c(1,3,7,8)]))
print(head(f1[,c(1,3,7,8)]))
print(head(f3[,c(1,3,7,8)]))
print(head(f4[,c(1,3,7,8)]))
print(head(f5[,c(1,3,7,8)]))
print(tau[,c(1,3,7,8)])
print(head(lambda_h[,c(1,3,7,8)]))
print(head(z[,c(1,3,7,8)]))
### PLOT OF ONLY ONE YEAR
dev.new()
par(mai=c(1.02, 1.10, 0.82, 0.42))
data_year <- data[data$year==1972,]
ind <- data_year$id
axis_labels_at <- aggregate(data_year, by=list(data_year$month), FUN=min)$id
plot(ind, (y[ind]*std_y+m_y)/m_y, type="p", pch=21, bg=grey(0.7), cex=0.9, col=grey(0.4), xlab="", ylab="", lwd=1, ylim=c(0.7,1.2), mgp= c(3.5, 1, 0), frame.plot = TRUE, yaxs="r", cex.lab=2.5, las=1, xaxt="n", yaxt="n",fg=grey(0.5), family="serif")
axis(1, at = axis_labels_at, labels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"), tick = TRUE, lty = 1, mgp= c(3, 1.4, 0), las=1, cex.axis=2.5, font=1, col=grey(0.5), col.ticks=grey(0.3), family="")
axis(2, at = NULL, labels = TRUE, tick = TRUE, lty = 1, mgp= c(3, 0.7, 0), las=1, cex.axis=2.5, font=5, col=grey(0.5), col.ticks=grey(0.3)) #mgp= c(2.5, 0.7, 0)
title(xlab ="Month", mgp= c(3.7, 1, 0), cex.lab=2, las=1)
title(ylab ="Proportion of births over the mean", mgp= c(4.1, 0.7, 0), cex.lab=2, las=1)
lines(ind, (f[ind,1]*std_y+m_y)/m_y, col="black", lwd=1) # f
# lines(range(ind), c(1,1), lty=2)
abline(h=1, lty=2) # mean
lines(ind, (f1[ind,1]*std_y+m_y)/m_y, col=2, lwd=2) # f1 smooth trend
lines(ind, (f3[ind,1]*std_y+m_y)/m_y, col=3, lwd=2) # f3 year effect
lines(ind, (f5[ind,1]*std_y+m_y)/m_y, col=6, lwd=2) # f5 horseshoe
#labels special days
text(ind[data_year$month==1&data_year$day==1], y = 0.85, labels = "New year", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==2&data_year$day==14], y = 1.04, labels = "Valentine's day", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==2&data_year$day==29], y = 0.98, labels = "Leap day", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==4&data_year$day==1], y = 0.985, labels = "April 1st", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==5&data_year$day==27], y = 0.98, labels = "Memorial day", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==7&data_year$day==4], y = 0.86, labels = "Independence day", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==9&data_year$day==2], y = 0.94, labels = "Labor day", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==10&data_year$day==30], y = 0.99, labels = "Halloween", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==11&data_year$day==25], y = 0.94, labels = "Thanks-giving", pos =NULL, offset = 0, family="serif", cex=1.3)
text(ind[data_year$month==12&data_year$day==25], y = 0.82, labels = "Christmas", pos =NULL, offset = 0, family="serif", cex=1.3)
# abline(v= ind[data_year$day==13], lty=2, col="grey")
legend("topleft",inset=c(0.1,0.05),legend=c("Observations",TeX('Long-term trend ($f_1$)'),TeX('Year effects ($f_2$)'),TeX('Special days effects ($f_4$)'),TeX('$\\mu$=f_1+f_2+f_3+f_4$')), col=c(grey(0.4),2,3,6,"black"), lty=c(NA,1,1,1,1), pch=c(20,NA,NA,NA,NA), lwd=c(2,3,3,3,3), cex=1.7, xpd=TRUE, bty="n", y.intersp=1, x.intersp=0.8, text.font=1, ncol=2, seg.len=1.3)
### PLOT OF ALL THE YEARS
dev.new()
par(mai=c(1.02, 1.10, 0.82, 0.42))
str(data[,])
ind <- data$id
labels_at = aggregate(data, by=list(data$year), FUN=min)$id
plot(ind, (y[ind]*std_y+m_y)/m_y, type="p", pch=20, bg=grey(0.4), cex=0.6, col=grey(0.5), xlab="", ylab="", lwd=1, ylim=c(0.7,1.3), mgp= c(3.5, 1, 0), frame.plot = TRUE, yaxs="r", cex.lab=2.5, las=1, xaxt="n", yaxt="n",fg=grey(0.5), family="serif")
axis(1, at = labels_at, c("1969","1970","1971","1972","1973","1974","1975","1976","1977","1978","1979","1980","1981","1982","1983","1984","1985","1986","1987","1988"), tick = TRUE, lty = 1, mgp= c(3, 1.4, 0), las=1, cex.axis=2.5, font=1, col=grey(0.5), col.ticks=grey(0.3), family="")
axis(2, at = NULL, labels = TRUE, tick = TRUE, lty = 1, mgp= c(3, 0.7, 0), las=1, cex.axis=2.5, font=5, col=grey(0.5), col.ticks=grey(0.3)) #mgp= c(2.5, 0.7, 0)
title(xlab ="Year", mgp= c(3.7, 1, 0), cex.lab=2, las=1)
title(ylab ="Proportion of births over the mean", mgp= c(4.1, 0.7, 0), cex.lab=2, las=1)
# lines(ind, (f[ind,1]*std_y+m_y)/m_y, col="grey", lwd=1) # f
# lines(c(0,7305), c(1,1), lty=2, lwd=2)
abline(h=1, lty=2) # mean
lines(ind, (f1[ind,1]*std_y+m_y)/m_y, col=2, lwd=2) # smooth trend
lines(ind, (f3[ind,1]*std_y+m_y)/m_y, col=3, lwd=2) # year effect
# lines(ind1[], (f5_BF[[1]][,1]*sd_y+m_y)/m_y, col=6, lwd=1) # horseshoe
legend("topleft",inset=c(0.25,0.03),legend=c("Observations",TeX('Long-term trend ($f_1$)'),TeX('Year effects ($f_2$)')), col=c(grey(0.4),2,3,"grey"), lty=c(NA,1,1,1), pch=c(20,NA,NA,NA), lwd=c(1,3,3,3), cex=1.7, xpd=TRUE, bty="n", y.intersp=1, x.intersp=0.8, text.font=1, ncol=1, seg.len=1.5)
### PLOT OF ONLY THE FIRST MONTH
dev.new()
par(mai=c(1.02, 1.10, 0.82, 0.42))
data_month <- data[data$month==1&data$year==1972,]
ind <- data_month$id
axis_labels_at <- aggregate(data_month, by=list(data_month$day), FUN=min)$id
# id_week <- data_month[3:9,]$id
id_week <- data_month$id[data_month$day_of_week==1]
plot(ind, (y[ind]*std_y+m_y)/m_y, type="p", pch=21, bg=grey(0.7), cex=1.2, col=grey(0.4), xlab="", ylab="", lwd=1, ylim=c(0.7,1.2), mgp= c(3.5, 1, 0), frame.plot = TRUE, yaxs="r", cex.lab=2.5, las=1, xaxt="n", yaxt="n",fg=grey(0.5), family="serif")
axis(1, at = axis_labels_at, labels = as.character(1:31), tick = TRUE, lty = 1, mgp= c(3, 1.4, 0), las=1, cex.axis=2.5, font=1, col=grey(0.5), col.ticks=grey(0.3), family="")
axis(1, at = id_week, labels = rep(c("Monday"),5), tick = TRUE, lty = 1, mgp= c(-1, -1.2, 0), las=1, cex.axis=1.5, font=1, col=grey(0.5), col.ticks=grey(0.3), family="")
axis(2, at = NULL, labels = TRUE, tick = TRUE, lty = 1, mgp= c(3, 0.7, 0), las=1, cex.axis=2.5, font=5, col=grey(0.5), col.ticks=grey(0.3)) #mgp= c(2.5, 0.7, 0)
title(xlab ="Day", mgp= c(3.7, 1, 0), cex.lab=2, las=1)
title(ylab ="Proportion of births over the mean", mgp= c(4.1, 0.7, 0), cex.lab=2, las=1)
lines(ind, (f[ind,1]*std_y+m_y)/m_y, col="black", lwd=2) # f
# lines(range(ind), c(1,1), lty=2)
abline(h=1, lty=2) # mean
lines(ind, (f1[ind,1]*std_y+m_y)/m_y, col=2, lwd=2) # f1 smooth trend
lines(ind, (f3[ind,1]*std_y+m_y)/m_y, col=3, lwd=2) # f3 year effect
lines(ind, (f4[ind,1]*std_y+m_y)/m_y, col=4, lwd=2) # f4 week effect
lines(ind, (f5[ind,1]*std_y+m_y)/m_y, col=6, lwd=2) # f5 horseshoe
abline(v=id_week, lty=2, col="grey")
#labels special days
text(ind[data_month$month==1&data_month$day==1], y = 0.85, labels = "New year", pos =NULL, offset = 0, family="serif", cex=1.3)
legend("topleft",inset=c(0.12,0.03),legend=c("Observations",TeX('Long-term trend ($f_1$)'),TeX('Year effects ($f_2$)'),TeX('Week effects ($f_3$)'),TeX('Special-days effects ($f_4$)'),TeX('$\\mu$=f_1+f_2+f_3+f_4$')), col=c(grey(0.4),2,3,4,6,"black"), lty=c(NA,1,1,1,1,1), pch=c(20,NA,NA,NA,NA,NA), lwd=c(1,3,3,3,3,3), cex=1.7, xpd=TRUE, bty="n", y.intersp=1, x.intersp=0.8, text.font=1, ncol=2, seg.len=1.3)
### PLOT THE WEEK EFFECT
dev.new()
str(data[,])
ind <- data$id
plot(ind[6:12], (f4[6:12,1]*std_y+m_y)/m_y, type="b", lty=1, pch=1, cex=1, col=4, xlab= "", ylab= "births/mean_births",cex.lab=1.5, cex.axis=1.5, xaxt="n", ylim=c(0.7,1.3))
axis(1, at = c(6:12), labels = c("Mon","Tue","Wed","Thu","Fri","Sat","Sun"), tick = TRUE)
abline(h=1, lty=2)
legend("topleft",legend=c("Week effect (W)"), col=c(4), lty=c(1), lwd=c(2), cex= 1.3)
### PLOT THE FIRST FOUR YEARS
dev.new()
data_year <- data[data$year==1969 | data$year==1970 | data$year==1971 | data$year==1972,]
ind <- data_year$id
axis_labels_at <- aggregate(data_year, by=list(data_year$year), FUN=min)$id
plot(ind, (y[ind]*std_y+m_y)/m_y, type="p", lty=1, pch=18, cex=0.4, col="black", xlab= "", ylab= "births/mean_births",cex.lab=1.5, cex.axis=1.5, xaxt="n", ylim=c(0.7,1.3))
axis(1, at = axis_labels_at, labels = c("1969","1970","1971","1972"), tick = TRUE, cex.axis=1.5)
lines(ind, (f[ind,1]*std_y+m_y)/m_y, col="grey", lwd=1) # f
lines(range(ind), c(1,1), lty=2, lwd=2) # mean
lines(ind, (f1[ind,1]*std_y+m_y)/m_y, col=2, lwd=1) # f1
lines(ind, (f3[ind,1]*std_y+m_y)/m_y, col=3, lwd=1) # f3
lines(ind, (f5[ind,1]*std_y+m_y)/m_y, col=6, lwd=1) # f5
legend("topleft",legend=c("Observations","Mean","Smooth trend (S)","Year effect (Y)","Special days effect (E)","S + Y + E + Week effect"), col=c(1,1,2,3,6,"grey"), lty=c(NA,2,1,1,1,1), pch=c(18,NA,NA,NA,NA,NA), lwd=c(1,2,3,3,3,3), cex=1.3)
| /uni_dimensional/birthday-dataset/script_1.r | no_license | gabriuma/basis_functions_approach_to_GP | R | false | false | 11,412 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hpp_outcomes.R
\docType{data}
\name{hpp_outcomes}
\alias{hpp_outcomes}
\title{Outcomes in the analysis}
\format{
An object of class \code{character} of length 9.
}
\usage{
hpp_outcomes
}
\description{
Character vector of outcomes, in the order in which the pipeline processes them.
}
\keyword{datasets}
| /man/hpp_outcomes.Rd | no_license | softloud/hppapp | R | false | true | 369 | rd | % Generated by roxygen2: do not edit by hand
|
this <- system('hostname', TRUE)
if (this == "LAPTOP-IVSPBGCA") {
dp <- "C:/github/diets/scripts"
} else {
dp <- "C:/Users/jccaro/diets/"
}
# now with a function by country
# that is cleaner, and it
# makes it easy to parallelize, if necessary (a hedged parallel sketch is given at the end of this script)
country_intake <- function(cons, cont) {
years <- unique(cons$year)
yout <- list()
for (j in 1:length(years)) {
year_cons <- cons[cons$year == years[j], c("group", "value")]
intake <- nutrientIntake(year_cons, cont, verbose=FALSE) # use the function argument rather than the global 'content'
# adjust intake stuff here
intake <- aggregate(intake[, "intake", drop=FALSE], intake[, c("tag", "unit", "desc")], sum, na.rm=TRUE)
intake$year <- years[j]
yout[[j]] <- intake
}
do.call(rbind, yout)
}
library(diets)
consumption <- readRDS(file.path(dp, "FBS.rds"))
consumption <- consumption[consumption$Element == "Food supply (kcal/capita/day)", c("ISO3", "Year", "Item", "Value")]
colnames(consumption) <- c("country", "year", "group", "value")
content <- nutrientContent(continent="", redpalmoil=0.5, orangesweetpot=0.2)
fort <- readRDS(system.file("ex/fortification.rds", package="diets"))
#fcontent <- fortify(content, fort)
countries <- unique(consumption$country)
##for testing
#countries <- countries[1:5]
s <- Sys.time()
out <- list()
for (i in 1:length(countries)){
print(countries[i]); flush.console()
country_cons <- consumption[consumption$country == countries[i], ]
# here some logic to select the content based on the country (continent membership)
out[[i]] <- data.frame(country=countries[i], country_intake(country_cons, content))
}
e <- Sys.time()
(e - s) / length(countries)
lapply(out[1:4], head)
x <- do.call(rbind, out)
head(x)
tail(x)
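# --- Hedged addition (not part of the original script): a sketch of the same country
# loop run in parallel with base R's 'parallel' package. It reuses `countries`,
# `consumption`, `content` and `country_intake()` defined above; on Windows it
# falls back to serial execution (mc.cores = 1).
library(parallel)
n_cores <- if (.Platform$OS.type == "windows") 1L else 2L
out_par <- mclapply(countries, function(cc) {
  country_cons <- consumption[consumption$country == cc, ]
  data.frame(country=cc, country_intake(country_cons, content))
}, mc.cores = n_cores)
x_par <- do.call(rbind, out_par)
head(x_par)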
| /scripts/R/test3.R | no_license | juancaros/diets | R | false | false | 1,671 | r |
|
plnormAlt <-
function (q, mean = exp(1/2), cv = sqrt(exp(1) - 1), lower.tail = TRUE,
log.p = FALSE)
{
names.q <- names(q)
arg.mat <- cbind.no.warn(q = as.vector(q), mean = as.vector(mean),
cv = as.vector(cv))
for (i in c("q", "mean", "cv")) assign(i, arg.mat[, i])
na.index <- is_na_matrix(arg.mat)
if (all(na.index))
p <- rep(NA, length(q))
else {
if (any(c(mean[!na.index], cv[!na.index]) < .Machine$double.eps))
stop("All values of 'mean' and 'cv' must be positive.")
sdlog <- sqrt(log(1 + cv^2))
meanlog <- log(mean) - (sdlog^2)/2
p <- plnorm(q = q, meanlog = meanlog, sdlog = sdlog,
lower.tail = lower.tail, log.p = log.p)
}
if (!is.null(names.q))
names(p) <- rep(names.q, length = length(p))
else names(p) <- NULL
p
}
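# Hedged usage sketch (not part of the original file); it assumes the rest of the
# EnvStats package (internal helpers such as cbind.no.warn and is_na_matrix) is
# available. The mean/CV parameterisation should agree with stats::plnorm() after
# converting to meanlog/sdlog, exactly as done inside plnormAlt().
local({
  m <- 3; cv <- 0.5
  sdlog <- sqrt(log(1 + cv^2))
  meanlog <- log(m) - sdlog^2/2
  all.equal(plnormAlt(2, mean = m, cv = cv), plnorm(2, meanlog, sdlog))
})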
| /R/plnormAlt.R | no_license | alexkowa/EnvStats | R | false | false | 848 | r | plnormAlt <-
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comps.R
\name{mma_ind_effects}
\alias{mma_ind_effects}
\title{Indirect Effects Extraction for MMA}
\usage{
mma_ind_effects(model)
}
\arguments{
\item{model}{mma fit object}
}
\description{
Extracts the indirect effects from an mma fit object
}
| /man/mma_ind_effects.Rd | no_license | TysonStanley/MarginalMediation | R | false | true | 309 | rd | % Generated by roxygen2: do not edit by hand
|
testlist <- list(mu = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0), impl = NULL, sigma_impl = 0)
result <- do.call(metafolio::impl_error,testlist)
str(result) | /metafolio/inst/testfiles/impl_error/libFuzzer_impl_error/impl_error_valgrind_files/1612989284-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 145 | r | testlist <- list(mu = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0), impl = NULL, sigma_impl = 0)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rwe_tools.R
\name{rweGetLambda}
\alias{rweGetLambda}
\title{Get weights}
\usage{
rweGetLambda(
A,
rs = NULL,
ns1.trt = NULL,
ns1.ctl = NULL,
ns0,
m.lambda = c("rs", "even", "inverse"),
...
)
}
\arguments{
\item{A}{target number of subjects to be borrowed}
\item{m.lambda}{method to split A. rs: by overlapping coefficient; even: by
minimizing trt and control imbalance in numbers}
}
\value{
power parameter before standardization
}
\description{
Get weights
}
| /man/rweGetLambda.Rd | no_license | olssol/rwetools | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
|
library(dplyr)
skip_if_not_installed("withr")
# ------------------------------------------------------------------------------
# dplyr_reconstruct()
test_that("dplyr_reconstruct() returns an rset subclass if `x` retains rset structure", {
for (x in rset_subclasses) {
expect_identical(dplyr_reconstruct(x, x), x)
expect_s3_class_rset(dplyr_reconstruct(x, x))
}
})
test_that("dplyr_reconstruct() returns bare tibble if `x` loses rset structure", {
for (x in rset_subclasses) {
col <- x[1]
row <- x[0, ]
expect_s3_class_bare_tibble(dplyr_reconstruct(col, x))
expect_s3_class_bare_tibble(dplyr_reconstruct(row, x))
}
})
test_that("dplyr_reconstruct() retains extra attributes of `to` when not falling back", {
for (x in rset_subclasses) {
to <- x
attr(to, "foo") <- "bar"
x_tbl <- x[1]
expect_identical(attr(dplyr_reconstruct(x, to), "foo"), "bar")
expect_identical(attr(dplyr_reconstruct(x_tbl, to), "foo"), NULL)
expect_s3_class_rset(dplyr_reconstruct(x, to))
expect_s3_class_bare_tibble(dplyr_reconstruct(x_tbl, to))
}
})
# ------------------------------------------------------------------------------
# dplyr_col_modify()
test_that("can add columns and retain rset class", {
for (x in rset_subclasses) {
cols <- list(x = rep(1, vec_size(x)))
result <- dplyr_col_modify(x, cols)
expect_s3_class_rset(result)
expect_identical(result$x, cols$x)
}
})
test_that("modifying rset columns removes rset class", {
for (x in rset_subclasses) {
cols <- list(splits = rep(1, vec_size(x)))
result <- dplyr_col_modify(x, cols)
expect_s3_class_bare_tibble(result)
expect_identical(result$splits, cols$splits)
}
for (x in rset_subclasses) {
cols <- list(id = rep(1, vec_size(x)))
result <- dplyr_col_modify(x, cols)
expect_s3_class_bare_tibble(result)
expect_identical(result$id, cols$id)
}
})
test_that("replacing rset columns with the exact same column retains rset class", {
for (x in rset_subclasses) {
cols <- list(splits = x$splits)
result <- dplyr_col_modify(x, cols)
expect_s3_class_rset(result)
expect_identical(result, x)
}
})
test_that("for nested_cv, `inner_resamples` is also a protected column", {
x <- rset_subclasses$nested_cv
cols <- list(inner_resamples = rep(1, vec_size(x)))
expect_s3_class_bare_tibble(dplyr_col_modify(x, cols))
})
# ------------------------------------------------------------------------------
# dplyr_row_slice()
test_that("row slicing generally removes the rset subclass", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(dplyr_row_slice(x, 0))
}
})
test_that("row slicing and duplicating any rows removes the rset subclass", {
# Remove rsets with only 1 row
subclasses <- rset_subclasses
subclasses$apparent <- NULL
subclasses$validation_split <- NULL
subclasses$validation_time_split <- NULL
subclasses$group_validation_split <- NULL
subclasses$validation_set <- NULL
for (x in subclasses) {
loc <- seq_len(nrow(x))
loc[length(loc)] <- 1L
expect_s3_class_bare_tibble(dplyr_row_slice(x, loc))
}
})
test_that("row slicing and selecting everything keeps the rset subclass", {
for (x in rset_subclasses) {
loc <- seq_len(nrow(x))
expect_s3_class_rset(dplyr_row_slice(x, loc))
}
})
test_that("rset subclass is kept if row order is changed but all rows are present", {
for (x in rset_subclasses) {
loc <- rev(seq_len(nrow(x)))
expect_s3_class_rset(dplyr_row_slice(x, loc))
}
})
# ------------------------------------------------------------------------------
# mutate()
test_that("mutate() can keep rset class", {
for (x in rset_subclasses) {
expect_s3_class_rset(mutate(x, x = 1))
expect_identical(mutate(x, x = 1)$x, rep(1, vec_size(x)))
}
})
test_that("mutate() drops rset class if any rset columns are touched", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(mutate(x, splits = 1))
expect_s3_class_bare_tibble(mutate(x, id = 1))
expect_identical(mutate(x, splits = 1)$splits, rep(1, vec_size(x)))
expect_identical(mutate(x, id = 1)$id, rep(1, vec_size(x)))
}
})
test_that("mutate() keeps rset class if replacement rset column is same as original", {
for (x in rset_subclasses) {
expect_s3_class_rset(mutate(x, splits = splits))
expect_s3_class_rset(mutate(x, id = id))
}
})
test_that("adding a column that looks like an `id` drops the class", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(mutate(x, id9 = 1))
}
})
# ------------------------------------------------------------------------------
# arrange()
test_that("arrange() keeps rset class when row order is modified", {
for (x in rset_subclasses) {
x <- mutate(x, rn = row_number())
expect_s3_class_rset(arrange(x, desc(rn)))
}
})
test_that("arrange() keeps rset class when row order is untouched", {
for (x in rset_subclasses) {
expect_s3_class_rset(arrange(x))
x <- mutate(x, rn = row_number())
expect_s3_class_rset(arrange(x, rn))
}
})
# ------------------------------------------------------------------------------
# filter()
test_that("filter() drops rset class when rows are modified", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(filter(x, 0 == 1))
expect_s3_class_bare_tibble(filter(x, is.numeric(id)))
}
})
test_that("filter() keeps rset class if row order is untouched", {
for (x in rset_subclasses) {
expect_s3_class_rset(filter(x))
expect_s3_class_rset(filter(x, is.character(id)))
}
})
# ------------------------------------------------------------------------------
# rename()
test_that("renaming can keep the rset class", {
for (x in rset_subclasses) {
x <- mutate(x, a = 1)
x <- rename(x, b = a)
expect_s3_class_rset(x)
}
})
test_that("renaming `id` at all drops the rset class", {
for (x in rset_subclasses) {
x <- rename(x, id9 = id)
expect_s3_class_bare_tibble(x)
}
})
test_that("renaming `id` to a non-id name drops the rset class", {
for (x in rset_subclasses) {
x <- rename(x, stuff = id)
expect_s3_class_bare_tibble(x)
}
})
test_that("for nested_cv, renaming `inner_resamples` drops the rset class", {
x <- rset_subclasses$nested_cv
x <- rename(x, inner_stuff = inner_resamples)
expect_s3_class_bare_tibble(x)
})
# ------------------------------------------------------------------------------
# select()
test_that("select() can keep rset class", {
for (x in rset_subclasses) {
expect_s3_class_rset(select(x, everything()))
}
})
test_that("select() drops rset class if any rset columns aren't selected", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(select(x, id))
expect_s3_class_bare_tibble(select(x, splits))
}
})
# ------------------------------------------------------------------------------
# slice()
test_that("slice() drops rset class when rows are modified", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(slice(x, 0))
}
})
test_that("slice() keeps rset class when rows are untouched", {
for (x in rset_subclasses) {
expect_s3_class_rset(slice(x, seq_len(nrow(x))))
}
})
# ------------------------------------------------------------------------------
# summarise()
test_that("summarise() always drops the rset class", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(summarise(x, y = 1))
expect_s3_class_bare_tibble(summarise(x, splits = splits[1], id = id[1]))
}
})
# ------------------------------------------------------------------------------
# group_by()
test_that("group_by() always returns a bare grouped-df or bare tibble", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(group_by(x))
expect_s3_class(group_by(x, splits), c("grouped_df", "tbl_df", "tbl", "data.frame"), exact = TRUE)
}
})
# ------------------------------------------------------------------------------
# ungroup()
test_that("ungroup() returns a rset", {
for (x in rset_subclasses) {
expect_s3_class_rset(ungroup(x))
}
})
# ------------------------------------------------------------------------------
# relocate()
test_that("can relocate() and keep the class", {
for (x in rset_subclasses) {
x <- relocate(x, id)
expect_s3_class_rset(x)
}
})
# ------------------------------------------------------------------------------
# distinct()
test_that("distinct() keeps the class if everything is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(distinct(x))
}
})
test_that("distinct() drops the class if any rset columns are lost", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(distinct(x, splits))
}
})
# ------------------------------------------------------------------------------
# left_join()
test_that("left_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(left_join(x, x, by = names(x)))
y <- tibble(id = x$id[[1]], x = 1)
expect_s3_class_rset(left_join(x, y, by = "id"))
}
})
test_that("left_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1:2)
expect_s3_class_bare_tibble(left_join(x, y, by = "id", multiple = "all",
relationship = "many-to-many"))
}
})
# ------------------------------------------------------------------------------
# right_join()
test_that("right_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(right_join(x, x, by = names(x)))
x_names <- names(x)
id_names <- x_names[col_starts_with_id(x_names)]
y <- mutate(select(x, all_of(id_names)), x = 1)
expect_s3_class_rset(right_join(x, y, by = id_names))
}
})
test_that("right_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1:2)
expect_s3_class_bare_tibble(right_join(x, y, by = "id", multiple = "all",
relationship = "many-to-many"))
}
})
test_that("right_join() restores to the type of first input", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1)
# technically rset structure is intact, but `y` is a bare tibble!
expect_s3_class_bare_tibble(right_join(y, x, by = "id", multiple = "all"))
}
})
# ------------------------------------------------------------------------------
# full_join()
test_that("full_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(full_join(x, x, by = names(x)))
}
})
test_that("full_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = "foo", x = 1)
expect_s3_class_bare_tibble(full_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# anti_join()
test_that("anti_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
y <- tibble(id = "foo")
expect_s3_class_rset(anti_join(x, y, by = "id"))
}
})
test_that("anti_join() can lose rset class if rows are removed", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1)
expect_s3_class_bare_tibble(anti_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# semi_join()
test_that("semi_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(semi_join(x, x, by = names(x)))
}
})
test_that("semi_join() can lose rset class if rows are removed", {
for (x in rset_subclasses) {
y <- tibble(id = "foo", x = 1)
expect_s3_class_bare_tibble(semi_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# nest_join()
test_that("nest_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
y <- mutate(x, foo = "bar")
expect_s3_class_rset(nest_join(x, y, by = names(x)))
}
})
# ------------------------------------------------------------------------------
# bind_rows()
test_that("bind_rows() keeps the class if there are no new rows/cols and the first object is an rset subclass", {
for (x in rset_subclasses) {
expect_s3_class_rset(bind_rows(x))
expect_s3_class_rset(bind_rows(x, tibble()))
expect_s3_class_bare_tibble(bind_rows(tibble(), x))
}
})
test_that("bind_rows() drops the class with new rows", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(bind_rows(x, x))
}
})
# ------------------------------------------------------------------------------
# bind_cols()
test_that("bind_cols() keeps the class if there are no new rows and the first object is an rset subclass", {
for (x in rset_subclasses) {
expect_s3_class_rset(bind_cols(x))
expect_s3_class_rset(bind_cols(x, tibble(x = 1)))
expect_s3_class_bare_tibble(bind_cols(tibble(x = 1), x))
}
})
test_that("bind_cols() drops the class with new rows", {
# Use rset subclass with 1 row, these get recycled
x <- rset_subclasses$apparent
expect_s3_class_bare_tibble(bind_cols(x, tibble(x = 1:2)))
})
| /tests/testthat/test-compat-dplyr.R | permissive | tidymodels/rsample | R | false | false | 13,312 | r | library(dplyr)
}
})
# ------------------------------------------------------------------------------
# ungroup()
test_that("ungroup() returns a rset", {
for (x in rset_subclasses) {
expect_s3_class_rset(ungroup(x))
}
})
# ------------------------------------------------------------------------------
# relocate()
test_that("can relocate() and keep the class", {
for (x in rset_subclasses) {
x <- relocate(x, id)
expect_s3_class_rset(x)
}
})
# ------------------------------------------------------------------------------
# distinct()
test_that("distinct() keeps the class if everything is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(distinct(x))
}
})
test_that("distinct() drops the class if any rset columns are lost", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(distinct(x, splits))
}
})
# ------------------------------------------------------------------------------
# left_join()
test_that("left_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(left_join(x, x, by = names(x)))
y <- tibble(id = x$id[[1]], x = 1)
expect_s3_class_rset(left_join(x, y, by = "id"))
}
})
test_that("left_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1:2)
expect_s3_class_bare_tibble(left_join(x, y, by = "id", multiple = "all",
relationship = "many-to-many"))
}
})
# ------------------------------------------------------------------------------
# right_join()
test_that("right_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(right_join(x, x, by = names(x)))
x_names <- names(x)
id_names <- x_names[col_starts_with_id(x_names)]
y <- mutate(select(x, all_of(id_names)), x = 1)
expect_s3_class_rset(right_join(x, y, by = id_names))
}
})
test_that("right_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1:2)
expect_s3_class_bare_tibble(right_join(x, y, by = "id", multiple = "all",
relationship = "many-to-many"))
}
})
test_that("right_join() restores to the type of first input", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1)
# technically rset structure is intact, but `y` is a bare tibble!
expect_s3_class_bare_tibble(right_join(y, x, by = "id", multiple = "all"))
}
})
# ------------------------------------------------------------------------------
# full_join()
test_that("full_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(full_join(x, x, by = names(x)))
}
})
test_that("full_join() can lose rset class if rows are added", {
for (x in rset_subclasses) {
y <- tibble(id = "foo", x = 1)
expect_s3_class_bare_tibble(full_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# anti_join()
test_that("anti_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
y <- tibble(id = "foo")
expect_s3_class_rset(anti_join(x, y, by = "id"))
}
})
test_that("anti_join() can lose rset class if rows are removed", {
for (x in rset_subclasses) {
y <- tibble(id = x$id[[1]], x = 1)
expect_s3_class_bare_tibble(anti_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# semi_join()
test_that("semi_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
expect_s3_class_rset(semi_join(x, x, by = names(x)))
}
})
test_that("semi_join() can lose rset class if rows are removed", {
for (x in rset_subclasses) {
y <- tibble(id = "foo", x = 1)
expect_s3_class_bare_tibble(semi_join(x, y, by = "id"))
}
})
# ------------------------------------------------------------------------------
# nest_join()
test_that("nest_join() can keep rset class if rset structure is intact", {
for (x in rset_subclasses) {
y <- mutate(x, foo = "bar")
expect_s3_class_rset(nest_join(x, y, by = names(x)))
}
})
# ------------------------------------------------------------------------------
# bind_rows()
test_that("bind_rows() keeps the class if there are no new rows/cols and the first object is an rset subclass", {
for (x in rset_subclasses) {
expect_s3_class_rset(bind_rows(x))
expect_s3_class_rset(bind_rows(x, tibble()))
expect_s3_class_bare_tibble(bind_rows(tibble(), x))
}
})
test_that("bind_rows() drops the class with new rows", {
for (x in rset_subclasses) {
expect_s3_class_bare_tibble(bind_rows(x, x))
}
})
# ------------------------------------------------------------------------------
# bind_cols()
test_that("bind_cols() keeps the class if there are no new rows and the first object is an rset subclass", {
for (x in rset_subclasses) {
expect_s3_class_rset(bind_cols(x))
expect_s3_class_rset(bind_cols(x, tibble(x = 1)))
expect_s3_class_bare_tibble(bind_cols(tibble(x = 1), x))
}
})
test_that("bind_cols() drops the class with new rows", {
# Use rset subclass with 1 row, these get recycled
x <- rset_subclasses$apparent
expect_s3_class_bare_tibble(bind_cols(x, tibble(x = 1:2)))
})
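# ------------------------------------------------------------------------------
# Illustrative addition (not from the original test file): the mutate() rules
# asserted above, spelled out on one concrete rset built with bootstraps().
test_that("illustration: mutate() on a single bootstraps() rset", {
  boots <- bootstraps(mtcars, times = 5)
  # adding an unrelated column keeps the rset subclass
  expect_s3_class_rset(mutate(boots, extra = 1))
  # overwriting a protected rset column falls back to a bare tibble
  expect_s3_class_bare_tibble(mutate(boots, splits = 1))
})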
|
# setClass for "fdPar"
# setClass("fdPar", representation(fd = "fd",
# Lfd = "Lfd",
# lambda = "numeric",
# estimate = "logical",
# penmat = "matrix",))
# Generator function of class fdPar
fdPar <- function(fdobj=NULL, Lfdobj=NULL, lambda=0, estimate=TRUE,
penmat=NULL){
# Sets up a functional parameter object
# Arguments:
# FDOBJ ... A functional data object.
# The basis for this object is used to define
# the functional parameter, or functional
#               parameters if FDOBJ has replications.
# When an initial value is required for iterative
# estimation of a functional parameter, the coefficients
# will give the initial values for the iteration.
# LFDOBJ ... A linear differential operator value or a derivative
# value for penalizing the roughness of the object.
# By default, this is 0.
# LAMBDA ... The penalty parameter controlling the smoothness of
# the estimated parameter. By default this is 0.
# ESTIMATE ... If nonzero, the parameter is estimated; if zero, the
# parameter is held fixed at this value.
# By default, this is 1.
# PENMAT ... The penalty matrix.
# In repeated calls to SMOOTH_BASIS, if this is
# saved, then the penalty does not need evaluating
# repeatedly. Don't use, though, if LFDOBJ or LAMBDA
# are changed in the calculation.
#
# An alternative argument list:
# The first argument can also be a basis object. In this case, an
# FD object is set up with an empty coefficient matrix.
# For many purposes, the coefficient array is either not needed, or
# supplied later.
#
# Return:
# FDPAROBJ ... A functional parameter object
# Last modified 16 April 2021 by Jim Ramsay
# ----------------------------------------------------------------------
# Default fdPar objects
# ----------------------------------------------------------------------
if(!inherits(fdobj, 'fd')) {
# the first argument is not an fd object
if (is.null(fdobj)) {
# fdPar called without arguments
fdobj = fd()
} else {
if (inherits(fdobj, "basisfd")) {
# if the first argument is a basis object, convert it to
#       a default FD object with a zero square coefficient matrix.
nbasis <- fdobj$nbasis
dropind <- fdobj$dropind
nbasis <- nbasis - length(dropind)
coefs <- matrix(0,nbasis,nbasis)
fdnames <- list('time', 'reps 1', 'values')
if(!is.null(fdobj$names)){
basisnames <- {
if(length(dropind)>0)
fdobj$names[-dropind]
else
fdobj$names
}
dimnames(coefs) <- list(basisnames, NULL)
fdnames[[1]] <- basisnames
}
fdobj <- fd(coefs, fdobj, fdnames)
}
else if(is.numeric(fdobj))fdobj <- fd(fdobj)
else stop("First argument is neither a functional data object ",
"nor a basis object.")
}
} else {
# the first object is an fd object, and we need nbasis later
nbasis <- fdobj$basis$nbasis
}
# ----------------------------------------------------------------------
# Check parameters
# ----------------------------------------------------------------------
# check Lfdobj
{
if (is.null(Lfdobj)) {
if(fdobj$basis$type=='fourier'){
rng <- fdobj$basis$rangeval
Lfdobj <- vec2Lfd(c(0,(2*pi/diff(rng))^2,0), rng)
#      warning("Providing default Lfdobj = harmonic acceleration ",
# "operator on c(", rng[1], ', ', rng[2],
# ') = vec2Lfd(c(0,(2*pi/diff(rng))^2,0), rng);',
# ' [default prior to fda 2.1.0: int2Lfd(0)].')
} else {
norder <- {
if (fdobj$basis$type=='bspline') norder.bspline(fdobj$basis)
else 2
}
Lfdobj <- int2Lfd(max(0, norder-2))
}
}
else
Lfdobj <- int2Lfd(Lfdobj)
}
if (!inherits(Lfdobj, "Lfd"))
stop("'Lfdobj' is not a linear differential operator object.")
# check lambda
if (!is.numeric(lambda)) stop("Class of LAMBDA is not numeric.")
if (lambda < 0) stop("LAMBDA is negative.")
# check estimate
if (!is.logical(estimate)) stop("Class of ESTIMATE is not logical.")
# check penmat
if (!is.null(penmat)) {
if (!is.numeric(penmat)) stop("PENMAT is not numeric.")
# penmatsize <- size(penmat)
penmatsize <- dim(penmat)
if (any(penmatsize != nbasis)) stop("Dimensions of PENMAT are not correct.")
}
# ----------------------------------------------------------------------
# set up the fdPar object
# ----------------------------------------------------------------------
# S4 definition
# fdParobj <- new("fdPar", fd=fdobj, Lfd=Lfdobj, lambda=lambda, estimate=estimate,
# penmat=penmat)
# S3 definition
fdParobj <- list(fd=fdobj, Lfd=Lfdobj, lambda=lambda, estimate=estimate,
penmat=penmat)
oldClass(fdParobj) <- "fdPar"
fdParobj
}
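#  ----------------------------------------------------------------------
#  Illustrative usage (added; not part of the original source, and kept as
#  comments so nothing runs when the package is loaded): a minimal sketch of
#  building an fdPar object from a B-spline basis. The range, number of basis
#  functions and smoothing level below are arbitrary choices.
#
#    bspl13    <- create.bspline.basis(rangeval = c(0, 10), nbasis = 13)
#    growthPar <- fdPar(bspl13, Lfdobj = 2, lambda = 1e-2)
#  ----------------------------------------------------------------------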
# ----------------------------------------------------------------------
# "print" method for "fdPar"
print.fdPar <- function(x, ...)
{
object <- x
cat("Functional parameter object:\n\n")
print("Functional data object:")
print.fd(object$fd)
print("Linear differential operator object:")
print.Lfd(object$Lfd)
cat(paste("\nSmoothing parameter =",object$lambda,"\n"))
cat(paste("\nEstimation status =",object$estimate,"\n"))
if (!is.null(object$penmat)) {
print("Penalty matrix:")
print(object$penmat)
}
}
# ----------------------------------------------------------------------
# "summary" method for "fdPar"
summary.fdPar <- function(object, ...)
{
cat("Functional parameter object:\n\n")
print("Functional data object:")
summary.fd(object$fd)
print("Linear differential operator object:")
summary.Lfd(object$Lfd)
cat(paste("\nSmoothing parameter =",object$lambda,"\n"))
cat(paste("\nEstimation status =",object$estimate,"\n"))
if (!is.null(object$penmat))
print(paste("Penalty matrix dimensions:",dim(object$penmat)))
}
 | /R/fdPar.R | no_license | jfontestad/fda | R | false | false | 6,437 | r |
discard <- function(treat, pscore, option, X) {
n.obs <- length(treat)
pmax0 <- max(pscore[treat==0])
pmax1 <- max(pscore[treat==1])
pmin0 <- min(pscore[treat==0])
pmin1 <- min(pscore[treat==1])
if (is.logical(option)) # user input
return(option)
else if (option == "none") # keep all units
discarded <- rep(FALSE, n.obs)
else if (option == "both") # discard units outside of common support
discarded <- (pscore < max(pmin0, pmin1) | pscore > min(pmax0, pmax1))
else if (option == "control") # discard control units only
discarded <- (pscore < pmin1 | pscore > pmax1)
else if (option == "treat") # discard treated units only
discarded <- (pscore < pmin0 | pscore > pmax0)
else if (any(grep(option, c("hull.control", "hull.treat", "hull.both")))) {
    ## convex hull stuff
# if (!("WhatIf" %in% .packages(all = TRUE)))
# install.packages("WhatIf")
# if (!("lpSolve" %in% .packages(all = TRUE)))
# install.packages("lpSolve")
require(WhatIf)
# require(lpSolve)
discarded <- rep(FALSE, n.obs)
if (option == "hull.control"){ # discard units not in T convex hull
wif <- whatif(cfact = X[treat==0,], data = X[treat==1,])
discarded[treat==0] <- !wif$in.hull
} else if (option == "hull.treat") {
wif <- whatif(cfact = X[treat==1,], data = X[treat==0,])
discarded[treat==1] <- !wif$in.hull
} else if (option == "hull.both"){ # discard units not in T&C convex hull
wif <- whatif(cfact = cbind(1-treat, X), data = cbind(treat, X))
discarded <- !wif$in.hull
}
else
stop("invalid input for `discard'")
} else
stop("invalid input for `discard'")
names(discarded) <- names(treat)
return(discarded)
}
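# Illustrative usage (added; not part of the original source, kept as comments
# so nothing runs at package load): a hypothetical call using a logistic
# regression propensity score on MatchIt's `lalonde` data.
#
#   data(lalonde)
#   ps   <- glm(treat ~ age + educ, data = lalonde, family = binomial)$fitted.values
#   drop <- discard(lalonde$treat, ps, "both", lalonde[, c("age", "educ")])
#   table(drop)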
 | /MatchIt/R/discard.R | no_license | ingted/R-Examples | R | false | false | 1,742 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neuro_surface.R
\docType{methods}
\name{series,NeuroSurfaceVector,numeric-method}
\alias{series,NeuroSurfaceVector,numeric-method}
\alias{series,NeuroSurfaceVector,integer-method}
\alias{series,NeuroSurfaceVector,ROISurface-method}
\alias{series,NeuroSurface,numeric-method}
\title{extract a series of values for a surface vector}
\usage{
\S4method{series}{NeuroSurfaceVector,numeric}(x, i)
\S4method{series}{NeuroSurfaceVector,integer}(x, i)
\S4method{series}{NeuroSurfaceVector,ROISurface}(x, i)
\S4method{series}{NeuroSurface,numeric}(x, i)
}
\arguments{
\item{x}{the object to extract series from}
\item{i}{the indices of the series set}
}
\value{
a class of type \code{Matrix}
}
\description{
extract a series of values for a surface vector
}
 | /man/series.Rd | no_license | bbuchsbaum/neurosurf | R | false | true | 830 | rd |
#### practice with probability distributions ####
# calculating the likelihood requires understanding how to work
# with probability distributions. let's start with the binomial
# distribution.
# the binomial distribution describes the number of "success" in
# some number of "trials" given some probability of success. it
# is the "coin flipping" distribution.
# in R, we can draw a sample from a binomial distribution with rbinom()
# this isn't the first time you've seen rbinom, but here it is again,
# with the parameters spelled out. you can try changing these to explore
# how the function works.
number.of.samples <- 2
number.of.trials.per.sample <- 10
probability.of.success <- 0.7
rbinom(number.of.samples, number.of.trials.per.sample, probability.of.success)
# below, use rbinom to create sample data for the following scenario.
# an experiment tests whether subjects have ESP. 100 subjects are each given
# the opportunity to predict whether a randomly generated number is odd or even.
# each subject makes 20 guesses.
# of course, ESP doesn't exist, so the probability of a successful guess is 0.50.
# store the result in a vector called esp.data
esp.data <- rbinom(100, 20, .50)
# a quick way to visualize a distribution is with the hist() function:
hist(esp.data)
# what if we want to know the probability of someone getting exactly 10 guesses
# correct out of 20, if they are randomly guessing? for that we use the density
# function of the binomial: dbinom()
value.to.check <- 10
number.of.trials <- 20
probability.of.success <- 0.5
dbinom(value.to.check, number.of.trials, probability.of.success)
# use dbinom to find out the probability of someone answering 87 out of 100
# questions correctly, if they have a 0.9 probability of giving a correct answer
# for each individual question.
dbinom(87, 100, .9)
# with dbinom, you can use a vector as the first argument, to check the probability
# of multiple values at the same time:
values <- c(3, 4, 5)
dbinom(values, 8, 0.5)
# using this shortcut, *plot* the full distribution (probability mass function)
# for flipping 16 fair coins, counting the total number of heads.
# hint: create one vector for the different possible outcomes
# then use dbinom to calculate the probability of all of the elements in the vector
?plot
x <- 0:16
y <- dbinom(x, 16, .5)
plot(x,y)
# quick detour #
# here's a quick tip about plot() or hist()
# if you want to change the range on the x-axis, you can use the xlim argument:
hist.sample <- rbinom(100, 100, 0.5)
hist(hist.sample)
hist(hist.sample, xlim=c(0,100)) # compare this plot to the line above.
# the normal distribution ##
# normal (gaussian) distributions are characterized by two parameters: mean and standard deviation
# the mean is the location of the peak of the distribution, and the sd controls the width.
# the smaller the sd, the more peaked the distribution is in the center.
# like the binomial, there are rnorm() and dnorm() functions.
# generate 100 samples from a normal distribution with mean 0 and standard deviation 10.
# then use hist() to create a histogram of these samples.
hist.set <- rnorm(100, mean = 0, sd = 10)
hist(hist.set)
?rnorm
# now plot the probability density function of this distribution.
# use the same strategy as you did above with the binomial to find the density of the normal
# distribution with mean 0 and sd 10 for values between -50 and 50. the distribution is continuous
# so, choose a reasonably small step size between values (remember the seq() function).
hist.set2 <- seq(-50,50, .5)
prob.density <- dnorm(hist.set2, mean = 0, sd = 10)
plot(hist.set2, prob.density)
#### practice calculating likelihoods ####
# here's some data from 10 participants in an ESP experiment like the one described
# above. each subject had 20 guesses. the number of correct guesses is reported.
esp.practice.data <- data.frame(subject=1:10, n.correct=c(11,10,6,10,6,12,10,8,9,11))
# calculate the likelihood (regular, not log) for this data for three different values
# of the probability of success parameter: 0.4, 0.5, and 0.6.
# hint: prod() will multiple all elements of a vector together.
a <- prod(dbinom(esp.practice.data$n.correct, 20, .4))
b <- prod(dbinom(esp.practice.data$n.correct, 20, .5))
c <- prod(dbinom(esp.practice.data$n.correct, 20, .6))
which.max(c(a,b,c))
b>c
# which parameter value of those options is most likely?
0.5
# here is a sample of response times for a single subject from a rapid decision making experiment.
rt.sample <- c(391.5845, 411.9970, 358.6373, 505.3099, 616.2892, 481.0751, 422.3132, 511.7213, 205.2692, 522.3433, 370.1850,
517.4617, 332.3344, 316.8760, 395.5431, 231.7831, 399.8646, 238.9064, 299.7924, 474.7512, 271.6326, 423.4861,
379.7867, 212.7789, 233.8291, 472.4591, 534.2131, 453.9655, 408.3443, 352.3001)
# calculate the **log** likelihood for this data assuming that it is generated by a normal
# distribution for each of the following parameters.
# hint: sum() adds the numbers in a vector. log() is the natural log function, or log=T for dnorm().
?rnorm
# 1) mean 350, sd 50
aa <- sum(dnorm((rt.sample), mean = 350, sd = 50, log=TRUE))
# 2) mean 400, sd 50
bb <- sum(dnorm((rt.sample), mean = 400, sd = 50, log=TRUE))
# 3) mean 450, sd 50
cc <- sum(dnorm((rt.sample), mean = 450, sd = 50, log=TRUE))
# 4) mean 350, sd 100
dd <- sum(dnorm((rt.sample), mean = 350, sd = 100, log=TRUE))
# 5) mean 400, sd 100
ee <- sum(dnorm((rt.sample), mean = 400, sd = 100, log=TRUE))
# 6) mean 450, sd 100
ff <- sum(dnorm((rt.sample), mean = 450, sd = 100, log=TRUE))
# which parameter set has the highest likelihood?
which.max(c(aa,bb,cc,dd,ee,ff))
ee #5
# here is a set of data for a subject in a categorization experiment, modeled with GCM.
# calculate the log likelihood of the parameters in the model (which i am not showing you).
# the point here is to know what to do when the model gives you a predicted probability
# of a response, and the data is either that response or not that response.
# hint: add a new column indicating the likelihood of each response, using mapply. then convert to log and add.
# if you do this correctly, the answer is -10.84249.
gcm.practice.data <- data.frame(correct.response = c(T, T, T, T, F, T, T, F, T, T, T, F, F, T, T, F, T, T, T, T),
gcm.probability.correct = c(0.84, 0.80, 0.84, 0.80, 0.79, 0.86, 0.89, 0.87, 0.69, 0.85, 0.75,
0.74, 0.82, 0.85, 0.87, 0.69, 0.83, 0.87, 0.80, 0.76))
gcm.practice.data$likelihood <- mapply(function(response, prob){
if(response == T){
return(prob)}
else{
return(1-prob)}
},
gcm.practice.data$correct.response, gcm.practice.data$gcm.probability.correct)
gcmloglikelihood <- sum(log(gcm.practice.data$likelihood))
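# sanity check (added): this should reproduce the value given in the hint above
gcmloglikelihood   # approximately -10.84249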
#### maximum likelihood estimation ####
# the same search strategies we used for parameter fitting with rmse can be used with likelihoods,
# including grid search.
# here are the number of correct responses each subject gave in an experiment in which they had to
# decide if two images were the same or different. there were 40 trials for each subject
same.diff.data <- c(32, 29, 31, 34, 26, 29, 31, 34, 29, 31, 30, 29, 31, 34, 33, 27, 32, 29, 29, 27)
# we can model this experiment's data as 40 coin flips for each subject. use grid search to plot the likelihood
# function for values of theta (probability of a correct response) between 0.5 and 0.9, in steps of 0.01.
# start by writing a function that calculates the likelihood (not log) for the entire set of data given a value of theta.
likelihood.grid <- function(theta){
like.coin <- dbinom(same.diff.data, 40, theta)
return(prod(like.coin))
}
# then use sapply to run the function for each possible value of theta in the set. use seq() to generate the
# set of possible values. plot the set of values on the x axis and the corresponding likelihoods on the y axis.
theta.grid <- seq(0.5, 0.9, 0.01)
parameters <- expand.grid(list(theta=theta.grid))
parameters$likelihoods <- sapply(theta.grid, function(x) {return(likelihood.grid(x)) })
best.theta <- theta.grid[which.max(parameters$likelihoods)]   # theta with the highest likelihood
plot(theta.grid, parameters$likelihoods)
# the "true" underlying value i used to generate the data was 0.75. does that match up with the grid search?
## mle with optim()
# in this section, you'll do model recovery for a descriptive model of the linear relationship
# between two continuous variables.
# let's assume that variable y is a linear function of variable x, such that:
# y = 4 + 0.8x
# create a vector of x values from 0 to 100, and the corresponding vector of y values,
# then plot these with x values on the x axis, and y values on the y axis.
x <- 0:100
y <- 4 + 0.8*x
plot(x,y)
# now let's assume that the relationship between x and y isn't perfect. there's a bit of random
# noise. add a random sample from a normal distribution with mean 0 and sd 10 to each y value.
# hint: there are 101 y values, so you need 101 samples.
# plot the data again, with the new noisy y values.
x <- 0:100
y <- 4 + 0.8*x + rnorm(length(x), mean = 0, sd = 10)
plot(x,y)
# there are three parameter values that control this plot,
# the intercept of the line: 4
# the slope of the line: 0.8
# the sd of the normal distribution: 10
# say that we observe a point of data, x = 20 and y = 27.
# the linear equation, y <- 4 + 0.8*x, predicts that when x is 20 y should also be 20.
4 + 0.8*20
# our model of this data assumes that the relationship is not perfect.
# we assume that there is noise so that when x is 20, the most likely value of y should be 4 + 0.8*x,
# but other values of y are possible. we describe the probability of different values of y with a normal
# distribution. when x is 20, then the normal distribution of y values is centered on 20, because the line
# says that y is 4 + 0.8x. using the normal distribution is helpful because it gives us a way to quantify
# how likely a value of 19 is compared to a value of 16, when x is 20.
# here's an example
x.observed <- 20
y.predicted <- 4 + 0.8*x.observed
# to find out the probability of y.observed=20, use dnorm()
# the mean of the normal distribution should be y.predicted (the distribution is centered on the line)
# and the sd of the normal is a parameter we will estimate, but for the demo below, is set to 10.
y.observed <- 20
dnorm(y.observed, y.predicted, 10)
# write the code to see how likely it is that y will be 33 when x is 29? (assuming sd = 10)
# the correct answer is 0.03371799...
x.observed <- 29
y.predicted <- 4 + 0.8*x.observed
y.observed <- 33
dnorm(y.observed, y.predicted, 10)
# now generalize your solution to compute the likelihood of each value of y that you generated above.
# in other words, write the code that takes a vector of x and y values, and returns the probability
# of each pair given that the relationship between x and y is y <- 4 + 0.8*x and the normal distribution has an sd of 10.
gen.likelihood <- function(x,y){
x.observed <- x
y.predicted <- 4 + 0.8*x.observed
y.observed <- y
dnorm(y.observed, y.predicted, 10)}
# now generalize your solution one step further. write a function that takes in a vector of parameters,
# where parameters[1] is the intercept, parameters[2] is the slope, and parameters[3] is the sd of the normal,
# and returns the total **negative log likelihood**. remember, we want the negative log likelihood because
# optim() will find the set of parameters that minimizes a function.
x.observed <- seq(0,100,1)
y.observed <- 4 + 0.8*x.observed + (rnorm(x.observed , mean = 0, sd = 10))
par.likelihood <- function(parameters){
i <- parameters[1]
s <- parameters[2]
sd <- parameters[3]
y.predicted <- s*x.observed + i
if(sd <= 0){
return(NA)}
else{
neg.log.likelihood <- -sum(dnorm(y.observed, mean = y.predicted, sd=sd, log=TRUE))
return(neg.log.likelihood)}}
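# quick sanity check (added, not part of the original lab): the negative log
# likelihood should be smaller (better) near the generating parameters than at
# a clearly wrong parameter set
par.likelihood(c(4, 0.8, 10)) < par.likelihood(c(0, 0, 30))   # expect TRUE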
# use optim() and Nelder-Mead to search for the best fitting parameters. remember to ensure that sd > 0
# and return NA if it is not.
fit <- optim(c(1,1,1), par.likelihood, method="Nelder-Mead")
?optim
# finally, plot the best fitting line on your points by using the abline() function, and the parameters that optim() found.
plot(x.observed, y.observed)
abline(a=4, b=.8, col='red')
abline(a=fit$par[1], b=fit$par[2], col='blue') | /lab-4.R | no_license | Vassar-COGS282-2016/lab-4-thelittledipster | R | false | false | 12,717 | r |
library(devFunc)
### Name: checkRanges
### Title: Checking if the value of a numeric or integer variable (of
### length 1) is located within a certain range.
### Aliases: checkRanges
### ** Examples
someValue <- 2
checkRanges(list(someValue), list(c('<', 3)))
## No test:
someValue <- '2'
checkRanges(list(someValue), list(c('<', 3)))
checkRanges(list(someValue), list(c(1.5, 3)))
## End(No test)
someValue <- 6
someOtherValue <- 5
checkRanges(list(someValue, someOtherValue), list(c('>=', 2.5), c('>=', 2.5, '<=', 5)))
## No test:
checkRanges(list(someValue, someOtherValue), list(c('>=', 2.5), c('>=', 2.5, '<', 5)))
checkRanges(list(someValue, someOtherValue), list(c('>=', 2.5, '<=', 5), c('>=', 2.5, '<', 5)))
## End(No test)
 | /data/genthat_extracted_code/devFunc/examples/checkRanges.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 744 | r |
library("igraph")
#(a)
NodesNum=1000
g = barabasi.game(NodesNum,directed=FALSE)
par(mfrow=c(1,2))
plot(degree.distribution(g), main='Degree Distribution',xlab = "degree",ylab="density")
plot(degree.distribution(g),log='xy', main='Degree Distribution in log',xlab = "degree",ylab="density")
Connectivity=Diameter=numeric(0)
for (i in 1:100){
g = barabasi.game(NodesNum,directed=FALSE)
Connectivity = c(Connectivity, is.connected(g))
Diameter = c(Diameter, diameter(g))
}
Con_avg = mean(Connectivity)
Dia_avg = mean(Diameter)
#(b)
cl = clusters(g)
gccIndex = which.max(cl$csize)
nonGccNodes = (1:vcount(g))[cl$membership != gccIndex]
gcc = delete.vertices(g,nonGccNodes)
struct = fastgreedy.community(gcc)
mod = modularity(struct)
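# illustrative inspection (added, not in the original): number of communities
# found in the giant component, alongside the modularity computed above
length(struct)
mod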
#(c)
NodesNum2 = 10000
g2 = barabasi.game(NodesNum2,directed=FALSE)
#par(mfrow=c(1,2))
#plot(degree.distribution(g2), main='Degree Distribution',xlab = "degree",ylab="density")
#plot(degree.distribution(g2),log='xy', main='Degree Distribution in log',xlab = "degree",ylab="density")
cl2 = clusters(g2)
gccIndex2 = which.max(cl2$csize)
nonGccNodes2 = (1:vcount(g2))[cl2$membership != gccIndex2]
gcc2 = delete.vertices(g2,nonGccNodes2)
struct2 = fastgreedy.community(gcc2)
mod2 = modularity(struct2)
#(d)
DG=numeric(0)
for(i in 1:1000){
rand = sample(1000,1)
neib = neighbors(g,rand)
if (length(neib)==1)
picked = neib
else
picked = sample(neib, 1)
DG = c(DG,degree(g,picked))
}
plot(density(DG),main='Degree distribution', xlab='degree', ylab='density')
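# illustrative comparison (added, not in the original): the degrees of randomly
# sampled neighbours in DG tend to exceed the overall mean degree of the graph
mean(DG)
mean(degree(g))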
 | /hw1/problem2.R | no_license | realmichaelzyy/EE232_SocialNetworkGraph | R | false | false | 1,513 | r |
#######################################################################################
# only Original data
rm(list=ls())
library(keras)
library(MASS)
library(caret)
use_session_with_seed(1, disable_parallel_cpu = FALSE)
## read csv file for model
d_score <- read.csv("driving_score_180ea.csv")
## create folds , 10-fold Cross Validation
fld <- createFolds(d_score$result, k=10)
## loss for each fold
temp_loss <- c()
for(i in 1:10) {
x_train <- as.matrix(d_score[-fld[[i]], -4])
y_train <- as.matrix(d_score[-fld[[i]], 4])
x_test <- as.matrix(d_score[fld[[i]], -4])
y_test <- as.matrix(d_score[fld[[i]], 4])
# create model
model = keras_model_sequential()
model %>%
layer_dense(input_shape = ncol(x_train), units = 128, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 128, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 64, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 32, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 16, activation = "relu") %>%
layer_dense(units = 1)
summary(model)
# add a loss function and optimizer
model %>%
compile(
loss = "logcosh",
optimizer = "Nadam",
metrics = list("mean_absolute_error")
)
fit = model %>%
fit(
x = x_train,
y = y_train,
batch_size = 128,
epochs = 5000
)
# Training and evaluation
model %>% evaluate(x_test, y_test, verbose = 0)
# print predicted value
pred = model %>% predict(x_test)
compare <- cbind(y_test, pred, pred-y_test)
## calculate RMSE
RMSE <- sqrt( sum( ( pred-y_test )^2 ) / nrow(compare) )
temp_loss[i] <- RMSE
}
temp_loss
mean(temp_loss)
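## per-fold RMSE alongside the mean above (illustrative addition, not in the original script)
data.frame(fold = 1:10, RMSE = temp_loss)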
#######################################################################################
# only Original data
rm(list=ls())
library(keras)
library(MASS)
library(caret)
use_session_with_seed(1, disable_parallel_cpu = FALSE)
## read csv file for model
d_score <- read.csv("cluster_origin.csv")
## create folds , 10-fold Cross Validation
fld <- createFolds(d_score$result, k=10)
temp_loss <- c() # loss for each fold
for(i in 1:10) {
x_train <- as.matrix(d_score[-fld[[i]], -4])
y_train <- as.matrix(d_score[-fld[[i]], 4])
x_test <- as.matrix(d_score[fld[[i]], -4])
y_test <- as.matrix(d_score[fld[[i]], 4])
# create model
model = keras_model_sequential()
model %>%
layer_dense(input_shape = ncol(x_train), units = 128, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 128, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 64, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 32, activation = "relu") %>%
layer_dropout(rate = 0.05) %>%
layer_dense(units = 16, activation = "relu") %>%
layer_dense(units = 1)
summary(model)
# add a loss function and optimizer
model %>%
compile(
loss = "logcosh",
optimizer = "Nadam",
metrics = list("mean_absolute_error")
)
fit = model %>%
fit(
x = x_train,
y = y_train,
batch_size = 256,
epochs = 1000
)
# Training and evaluation
model %>% evaluate(x_test, y_test, verbose = 0)
# print predicted value
pred = model %>% predict(x_test)
compare <- cbind(y_test, pred, pred-y_test)
## calculate RMSE
lm_loss_avg <- sqrt( sum( ( pred-y_test )^2 ) / nrow(compare) )
temp_loss[i] <- lm_loss_avg
}
temp_loss
mean(temp_loss)
#######################################################################################
| /R_code/Multi-layer Perception_10_fold.R | no_license | achieve0410/R | R | false | false | 3,825 | r |
# File: xgapui/R/generated\java\xgap4brassica\R/Strain.R
# Copyright: GBIC 2000-2009, all rights reserved
# Date: June 2, 2009
#
# generator: org.molgenis.generators.R.REntityGen 3.3.0-testing
#
# This file provides action methods to MOLGENIS for entity Strain
#
# THIS FILE HAS BEEN GENERATED, PLEASE DO NOT EDIT!
#
#create valid data_frame for Strain
.create.strain <- function(data_frame, value_list, .usesession=T, .verbose=T)
{
#convert to data_frame, remove null columns
if(!is.data.frame(data_frame))
{
if(is.matrix(data_frame))
{
data_frame <- as.data.frame(data_frame)
}
else if(is.list(data_frame))
{
data_frame <- as.data.frame(data_frame[!sapply(data_frame, is.null)])
}
#transform non-null values into data.frame
else
{
data_frame <- as.data.frame(value_list[!sapply(value_list, is.null)])
}
}
#add missing xref values from session parameters (optional)
if(.usesession && is.null(data_frame$investigation__id) && !is.null(.MOLGENIS$session.investigation.id))
{
data_frame$investigation = .MOLGENIS$session.investigation.id
if(.verbose)
{
cat("Using investigation (id='",.MOLGENIS$session.investigation.id,"'", sep="")
cat(", name='",.MOLGENIS$session.investigation.name,"'", sep="")
cat(") from session (.usession = T).\n")
}
}
return(data_frame)
}
#freely find Strain
find.strain <- function( type=NULL , id=NULL , name=NULL , investigation_id=NULL, investigation_name=NULL , species_id=NULL, species_name=NULL , straintype=NULL , founderstrains=NULL , .usesession = T, .verbose=T )
{
#add session parameters
if(.usesession && is.null(investigation_id) && !is.null(.MOLGENIS$session.investigation.id))
{
investigation_id = .MOLGENIS$session.investigation.id
cat("Using investigation_id (id='",.MOLGENIS$session.investigation.id,"'", sep="")
cat(", name='",.MOLGENIS$session.investigation.name,"'", sep="")
cat(") from session (.usession = T).\n")
}
result <- MOLGENIS.find( "xgap4brassica.data.types.Strain", mget(ls(),environment()), .verbose=.verbose)
#use secondary key as rownames
#rownames(result)<-result$name
return(result)
}
#add data.frame of Strain or each column individually
#note: each column must have the same length
add.strain <- function(.data_frame=NULL, name=NULL, investigation_id=NULL, investigation_name=NULL, species_id=NULL, species_name=NULL, straintype=NULL, founderstrains=NULL, .usesession = T, .verbose=T )
{
.data_frame = .create.strain(.data_frame, mget(ls(),environment()), .usesession = .usesession, .verbose = .verbose)
return( MOLGENIS.update("xgap4brassica.data.types.Strain", .data_frame, "ADD", .verbose=.verbose) )
}
#remove data.frame of Strain or just one row using named arguments.
remove.strain <- function( .data_frame=NULL, id=NULL, name=NULL, investigation=NULL, type=NULL, .usesession = T )
{
#todo: translate skey to pkey
.data_frame = .create.strain(.data_frame, mget(ls(),environment()), .usesession = .usesession)
return( MOLGENIS.update("xgap4brassica.data.types.Strain", .data_frame, "REMOVE") )
}
use.strain<-function(id=NULL, name=NULL, investigation=NULL, type=NULL)
{
#add session parameters
if(is.null(investigation) && !is.null(.MOLGENIS$session.investigation.id))
{
investigation = .MOLGENIS$session.investigation.id
cat("Using investigation (id='",.MOLGENIS$session.investigation.id,"'", sep="")
cat(", name='",.MOLGENIS$session.investigation.name,"'", sep="")
cat(") from session.\n")
}
#retrieve the strain by pkey or skey
row<-F
if(!is.null(id))
{
row<-find.strain(id=id)
}
else if( !(is.null(name) ||is.null(investigation) ||is.null(type)) )
{
row<-find.strain(name=name,investigation=investigation,type=type)
}
else
{
stop('you need to provide {id} or {name and investigation and type}')
}
#if exists, put in session
if(!is.logical(row) && nrow(row) == 1)
{
cat("Using strain with:\n")
cat("\tid=",row$id,"\n")
.MOLGENIS$session.strain.id<<-row$id
cat("\tname=",row$name,"\n")
.MOLGENIS$session.strain.name<<-row$name
cat("\tinvestigation=",row$investigation,"\n")
.MOLGENIS$session.strain.investigation<<-row$investigation
cat("\ttype=",row$type,"\n")
.MOLGENIS$session.strain.type<<-row$type
}
else
{
cat("Did not find strain using ","id=",id,"name=",name,"investigation=",investigation,"type=",type,"\n")
}
} | /R/Strain.R | no_license | joerivandervelde/MolgenisInterface | R | false | false | 4,583 | r |
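# --- Editor's note: hypothetical usage sketch for the generated accessors in
# Strain.R above; it is not part of the generated file. The calls need a live
# MOLGENIS session (the MOLGENIS.find / MOLGENIS.update helpers used above are
# presumably defined elsewhere in the generated API) and real ids, so they are
# left commented out and the argument values are made up:
# strains <- find.strain(investigation_id = 1)                       # query by investigation
# add.strain(name = "BR-001", investigation_id = 1, species_id = 3)  # insert one row
# use.strain(name = "BR-001", investigation = 1, type = "Strain")    # put it in the session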
install.packages("devtools")
install.packages("rjson")
install.packages("bit64")
install.packages("httr")
install.packages("plyr")
install.packages("twitteR")
# sessionInfo()
api_key <- "BSLD13fGUpdsQQUQ2r1I5qR4y"
api_secret <- "6KK6cSounHxgJCwnh2cI5p1g2q9RzF5cGZI0h3q1uquSvXWCG1"
access_token <- "47717736-IF0PhKjohLlJEd6xK2yWY5H2FQH3vRvd7neT9dGjz"
access_token_secret <- "pJLM1zeiAzlVWVCGaLSMltDAfvEzdsYpRntiUoNW4ENEg"
#INSTALL RTOOLS
# find_rtools()
install.packages("rtools")
# devtools::install_github("jrowen/twitteR", ref = "oauth_httr_1_0",version="0.6.1")
library(devtools)
library(plyr)
library(twitteR)
install.packages("httr")
install.packages("twitteR")
library(twitteR)
library(devtools) #if not installed, do that obviously
#A restart of R might be necessary if you previously had httr installed.
library(httr)
# setup_twitter_oauth(consumerKey, consumerSecret, accessKey, accessSecret)
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
tweets=searchTwitter('#OmPuri',n=100)
df <- do.call("rbind", lapply(tweets, as.data.frame))
# install.packages("C:/Users/Admin/Downloads/twitteR_1.1.8.tar.gz",repos=NULL, type="source",dependencies = TRUE)
# install.packages("C:/Users/Admin/Downloads/plyr_1.8.2.tar.gz",repos=NULL, type="source",dependencies = TRUE)
# install.packages("C:/Users/Admin/Downloads/httr_0.6.1.tar.gz",repos=NULL, type="source",dependencies = TRUE)
install.packages('tm')
library(tm)
text <- df$text
review_source <- VectorSource(text)
corpus <- Corpus(review_source)
#corpus <- tm_map(corpus,
# content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')),
# mc.cores=1)
corpus <- tm_map(corpus,
content_transformer(function(x) iconv(x, to='UTF-8', sub='byte')),
mc.cores=1)
corpus <- tm_map(corpus,removePunctuation)
corpus <- tm_map(corpus,content_transformer(tolower))
corpus <- tm_map(corpus,stripWhitespace)
corpus <- tm_map(corpus,removeWords,stopwords("english"))
t.tdm <- TermDocumentMatrix(corpus)
t.m <- as.matrix(t.tdm)
t.v <- sort(rowSums(t.m),decreasing=TRUE)
t.d <- data.frame(word = names(t.v),freq=t.v)
head(t.d)
write.csv(t.d,"C:/Users/Gagan/Desktop/MUIT/Data Mining/textminingcodespptoutputplots/testmiFirst.csv")
#install.packages("wordcloud")
library("wordcloud")
library("RColorBrewer")
pal2 <- brewer.pal(8,"Dark2")
png("C:/Users/Gagan/Desktop/MUIT/Data Mining/textminingcodespptoutputplots/wordcloud_om.png", width=3280,height=2800)
wordcloud(t.d$word,t.d$freq, scale=c(8,.2),min.freq=3,
max.words=Inf, random.order=FALSE, rot.per=.15, colors=pal2)
dev.off()
| /TwitterTextMining.R | no_license | gogiraj/Sentimental-Analysis | R | false | false | 2,726 | r |
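# --- Editor's note: illustrative sketch, not part of the scripts above or
# below. The tm pipeline above ends in a word-frequency table built from a
# TermDocumentMatrix; the same idea on a toy character vector using base R
# only (no Twitter credentials required):
txt   <- c("om puri was a great actor", "a great great loss for cinema")
words <- unlist(strsplit(tolower(txt), "[^a-z]+"))
words <- words[nchar(words) > 0]
freq  <- sort(table(words), decreasing = TRUE)
data.frame(word = names(freq), freq = as.integer(freq))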
install.packages("rjson")
install.packages("bit64")
install.packages("httr")
install.packages("plyr")
install.packages("twitteR")
# sessionInfo()
api_key <- "BSLD13fGUpdsQQUQ2r1I5qR4y"
api_secret <- "6KK6cSounHxgJCwnh2cI5p1g2q9RzF5cGZI0h3q1uquSvXWCG1"
access_token <- "47717736-IF0PhKjohLlJEd6xK2yWY5H2FQH3vRvd7neT9dGjz"
access_token_secret <- "pJLM1zeiAzlVWVCGaLSMltDAfvEzdsYpRntiUoNW4ENEg"
#INSTALL RTOOLS
# find_rtools()
install.packages("rtools")
# devtools::install_github("jrowen/twitteR", ref = "oauth_httr_1_0",version="0.6.1")
library(devtools)
library(plyr)
library(twitteR)
install.packages("httr")
install.packages("twitteR")
library(twitteR)
library(devtools) #if not installed, do that obviously
#A restart of R might be necessary if you previously had httr installed.
library(httr)
# setup_twitter_oauth(consumerKey, consumerSecret, accessKey, accessSecret)
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
tweets=searchTwitter('#OmPuri',n=100)
df <- do.call("rbind", lapply(tweets, as.data.frame))
# install.packages("C:/Users/Admin/Downloads/twitteR_1.1.8.tar.gz",repos=NULL, type="source",dependencies = TRUE)
# install.packages("C:/Users/Admin/Downloads/plyr_1.8.2.tar.gz",repos=NULL, type="source",dependencies = TRUE)
# install.packages("C:/Users/Admin/Downloads/httr_0.6.1.tar.gz",repos=NULL, type="source",dependencies = TRUE)
install.packages('tm')
library(tm)
text <- df$text
review_source <- VectorSource(text)
corpus <- Corpus(review_source)
#corpus <- tm_map(corpus,
# content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')),
# mc.cores=1)
corpus <- tm_map(corpus,
content_transformer(function(x) iconv(x, to='UTF-8', sub='byte')),
mc.cores=1)
corpus <- tm_map(corpus,removePunctuation)
corpus <- tm_map(corpus,content_transformer(tolower))
corpus <- tm_map(corpus,stripWhitespace)
corpus <- tm_map(corpus,removeWords,stopwords("english"))
t.tdm <- TermDocumentMatrix(corpus)
t.m <- as.matrix(t.tdm)
t.v <- sort(rowSums(t.m),decreasing=TRUE)
t.d <- data.frame(word = names(t.v),freq=t.v)
head(t.d)
write.csv(t.d,"C:/Users/Gagan/Desktop/MUIT/Data Mining/textminingcodespptoutputplots/testmiFirst.csv")
#install.packages("wordcloud")
library("wordcloud")
library("RColorBrewer")
pal2 <- brewer.pal(8,"Dark2")
png("C:/Users/Gagan/Desktop/MUIT/Data Mining/textminingcodespptoutputplots/wordcloud_om.png", width=3280,height=2800)
wordcloud(t.d$word,t.d$freq, scale=c(8,.2),min.freq=3,
max.words=Inf, random.order=FALSE, rot.per=.15, colors=pal2)
dev.off()
|
context("collect_parameters")
if ((!on_cran()) || interactive()) {load(test_path("helper_data.Rda"))}
test_that("collect_parameters dispatch works", {
skip_on_cran()
expect_error(
1 %>% collect_parameters(),
"currently implemented for numeric objects"
)
expect_error(
mtcars %>% collect_parameters(),
"currently implemented for data.frame objects"
)
})
test_that("collect_parameters errors informatively with bad arguments", {
skip_on_cran()
expect_error(
st_reg_1 %>% collect_parameters("the first one"),
"must be the name given"
)
expect_error(
stacks() %>% collect_parameters("all of them"),
"must be the name given"
)
})
test_that("collect_parameters on a data stack works (regression)", {
skip_on_cran()
res <- collect_parameters(st_reg_1, "reg_res_svm")
res2 <- collect_parameters(st_reg_2, "reg_res_sp")
res3 <-
collect_parameters(
stacks() %>% add_candidates(reg_res_lr, name = "lr"),
"lr"
)
expect_true(check_inherits(res, "tbl_df"))
expect_true(check_inherits(res2, "tbl_df"))
expect_true(check_inherits(res3, "tbl_df"))
expect_equal(ncol(res), 3)
expect_equal(nrow(res), 5)
expect_equal(ncol(res2), 2)
expect_equal(nrow(res2), 9)
expect_equal(ncol(res3), 1)
expect_equal(nrow(res3), 1)
})
test_that("collect_parameters on a model stack works (regression)", {
skip_on_cran()
res <- collect_parameters(st_reg_1_, "reg_res_svm")
res2 <- collect_parameters(st_reg_2 %>% blend_predictions(), "reg_res_sp")
expect_true(check_inherits(res, "tbl_df"))
expect_equal(ncol(res), 4)
expect_equal(nrow(res), 5)
expect_equal(ncol(res2), 3)
expect_equal(nrow(res2), 9)
expect_true(
all(
c("member",
dials::parameters(st_reg_1_$model_defs$reg_res_svm) %>% pull(id),
"coef") %in%
colnames(res)
)
)
})
# collecting parameters on a classification stack is a bit
# trickier, so test separately
test_that("collect_parameters works (classification)", {
skip_on_cran()
res <- collect_parameters(st_class_1, "class_res_rf")
res2 <- collect_parameters(st_class_1 %>% blend_predictions(), "class_res_rf")
expect_true(check_inherits(res, "tbl_df"))
expect_true(check_inherits(res2, "tbl_df"))
expect_equal(ncol(res), 3)
expect_equal(nrow(res), 10)
expect_equal(ncol(res2), 6)
expect_equal(nrow(res2), 60)
})
| /tests/testthat/test_collect_parameters.R | permissive | yadevi/stacks | R | false | false | 2,428 | r |
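# --- Editor's note: hypothetical sketch, not part of the test file above.
# The tuning results (reg_res_svm, etc.) live in the package's helper data and
# are not defined here, so the calls are left commented out; the names are
# taken from the tests above:
# st <- stacks() %>% add_candidates(reg_res_svm, name = "svm")
# collect_parameters(st, "svm")                      # candidate hyperparameters
# collect_parameters(blend_predictions(st), "svm")   # same, plus a stacking "coef" column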
library(parallel)
load("sims-case1.gzip")
n.cores <- 10
(n.datasets <- length(sims.case1))
(n.batches <- ceiling(n.datasets/n.cores))
(batch.size <- n.datasets/n.batches)
cl <- makeCluster(n.cores)
## This function will fit stage 1 to each dataset when called from a unique core
do.jags.stage1 <- function(dataset) {
jd <- with(dataset,
list(K = dim(u)[2],
J=nrow(n.all), T=ncol(n.all),
xlim = xlim, ylim = ylim,
x = x, n.marked=n.marked,
y=y.marked[1:max(dataset$n.marked),,],
u=u[1:max(dataset$n.marked),,,]))
ji <- function() {
## Initialize activity centers for marked guys
## si <- dataset$latent$s
si <- array(NA, c(max(jd$n.marked), 2, jd$T))
for(t in 1:jd$T) {
for(i in 1:dataset$n.marked[t]) {
trps <- jd$y[i,,t]>0
if(any(trps))
si[i,,t] <- colMeans(jd$x[trps,,drop=FALSE])
else {
si[i,,t] <- colMeans(jd$u[i,,t,])
## Sometimes u is outside S
si[i,1,t] <- max(si[i,1,t], jd$xlim[1])
si[i,1,t] <- min(si[i,1,t], jd$xlim[2])
si[i,2,t] <- max(si[i,2,t], jd$ylim[1])
si[i,2,t] <- min(si[i,2,t], jd$ylim[2])
}
}
}
list(s=si[1:max(jd$n.marked),,],
sigmaMean=runif(1, 600, 700),
lam0Mean=runif(1, 0.02, 0.03))
}
jp <- c("sigmaMean", "lam0Mean")
library(rjags)
jm <- jags.model(file="gsmr-stage1.jag", data=jd, inits=ji, n.adapt=100)
jc <- coda.samples(jm, jp, n.iter=12000)
return(jc)
}
## This function will fit stage 2 to each dataset when called from a unique core
do.jags.stage2 <- function(dataset) {
M <- 175 ## You can change M to anything less than the M used to simulate data
jd2 <- with(dataset,
list(M=M,
K = dim(u)[2],
J=nrow(n.all), T=ncol(n.all),
xlim = xlim, ylim = ylim,
Area = Area, x = x,
## n=n.unmarked, ## BUG FIX: 2021-08-10
n=n.all,
mean.log.sigma.lam0=mean.log.sigma.lam0,
vcov.log.sigma.lam0=vcov.log.sigma.lam0))
ji2 <- function() {
## Initialize activity centers for marked guys
si <- dataset$latent$s
list(s=si[1:jd2$M,,], z=matrix(1,jd2$M,jd2$T),
beta0.ED = runif(1, 0.1, 0.5), beta1.ED = 0,
log.sigma.lam0=c(log(500), log(0.05)),
alpha=0,
eps.sd=0.1, epsilon=rep(0, jd2$T) )
}
jp2 <- c("sigmaMean", "lam0Mean", "beta0.ED", "beta1.ED", "alpha", "N", "ED")
library(rjags)
jm2 <- jags.model(file="gsmr-stage2.jag", data=jd2, inits=ji2, n.adapt=100)
jc2 <- coda.samples(jm2, jp2, n.iter=12000)
return(jc2)
}
## Lists to hold posterior samples for each dataset
samples.stage1 <- vector(mode="list", length=n.datasets)
samples.stage2 <- vector(mode="list", length=n.datasets)
## Loop over batches
for(i in 1:n.batches) {
library(coda)
cat("Doing batch", i, format(Sys.time()), "\n")
batch <- seq(1+batch.size*(i-1), length.out=batch.size)
samples.stage1[batch] <- parSapply(cl=cl, X=sims.case1[batch],
FUN=do.jags.stage1)
if(i == 1)
save(samples.stage1, file="samples_stage1.gzip")
for(j in batch) {
samps <- log(as.matrix(samples.stage1[[j]]))
## Next line is a bug fix. Order was wrong before 2021-07-06
samps <- samps[,c("sigmaMean", "lam0Mean")]
xbar <- colMeans(samps)
xvar <- var(samps)
sims.case1[[j]]$mean.log.sigma.lam0 <- xbar
sims.case1[[j]]$vcov.log.sigma.lam0 <- xvar
}
samples.stage2[batch] <- parSapply(cl=cl, X=sims.case1[batch],
FUN=do.jags.stage2)
if(i==1)
save(samples.stage2, file="samples_stage2.gzip")
gc()
}
save(samples.stage1, file="samples_stage1.gzip")
save(samples.stage2, file="samples_stage2.gzip")
cat("Done", format(Sys.time()), "\n")
## Doing batch 1 2021-09-13 11:35:59
## Doing batch 2 2021-09-14 10:51:18
## Doing batch 3 2021-09-15 10:24:43
## Doing batch 4 2021-09-16 09:27:51
## Doing batch 5 2021-09-17 08:05:15
## Doing batch 6 2021-09-18 11:00:07
## Doing batch 7 2021-09-19 09:42:16
## Doing batch 8 2021-09-20 06:06:00
## Doing batch 9 2021-09-21 05:58:25
## Doing batch 10 2021-09-22 02:51:05
## Done 2021-09-23 01:46:30
## Doing batch 1 2021-06-09 16:06:33
## cat("Done", format(Sys.time()), "\n")
## Doing batch 2 2021-06-10 13:03:53
## Doing batch 3 2021-06-11 09:14:23
## Doing batch 4 2021-06-12 03:53:16
## Doing batch 5 2021-06-13 01:06:20
## Doing batch 6 2021-06-14 12:08:49
## Doing batch 7 2021-06-16 03:09:37
## Doing batch 8 2021-06-17 02:45:22
## Doing batch 9 2021-06-17 21:29:46
## Doing batch 10 2021-06-18 19:44:27
## > Done 2021-06-19 15:10:14
## > source("fit_case1_two-stage.R")
## Doing batch 1 2021-07-06 11:52:33
## Doing batch 2 2021-07-07 12:21:00
## Doing batch 3 2021-07-08 12:39:58
## Doing batch 4 2021-07-09 13:09:19
## Doing batch 5 2021-07-10 13:33:46
## Doing batch 6 2021-07-11 14:05:51
## Doing batch 7 2021-07-12 17:54:47
## Doing batch 8 2021-07-13 19:55:52
## Doing batch 9 2021-07-14 20:42:30
## Doing batch 10 2021-07-16 13:29:31
## Done 2021-07-18 04:21:47
## Doing batch 1 2021-08-11 08:24:55
## save(samples.stage2, file="samples_stage2.gzip")
## cat("Done", format(Sys.time()), "\n")
## Doing batch 2 2021-08-11 23:17:00
## Doing batch 3 2021-08-12 14:03:58
## Doing batch 4 2021-08-13 04:57:20
## Doing batch 5 2021-08-13 19:39:11
## Doing batch 6 2021-08-14 10:10:28
## Doing batch 7 2021-08-15 00:50:28
## Doing batch 8 2021-08-15 15:45:19
## Doing batch 9 2021-08-16 06:31:28
## Doing batch 10 2021-08-16 21:19:42
## > > Done 2021-08-17 12:08:01
| /supp/sim/fit_case1_two-stage.R | permissive | rbchan/monitor-cam-telem | R | false | false | 6,087 | r |
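# --- Editor's note: illustrative sketch, not part of the simulation script
# above. Its two-stage fit condenses the stage-1 posterior draws of
# (sigmaMean, lam0Mean) into a mean vector and covariance matrix on the log
# scale, which are then passed to stage 2 as data (presumably to define a
# prior; the gsmr-stage2.jag model itself is not shown). The same reduction
# on toy draws:
set.seed(1)
draws <- cbind(sigmaMean = rlnorm(1000, log(600), 0.10),
               lam0Mean  = rlnorm(1000, log(0.05), 0.20))
log.draws <- log(draws)[, c("sigmaMean", "lam0Mean")]
mean.log.sigma.lam0 <- colMeans(log.draws)  # handed to stage 2 as the prior mean
vcov.log.sigma.lam0 <- var(log.draws)       # handed to stage 2 as the prior covariance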
load("sims-case1.gzip")
n.cores <- 10
(n.datasets <- length(sims.case1))
(n.batches <- ceiling(n.datasets/n.cores))
(batch.size <- n.datasets/n.batches)
cl <- makeCluster(n.cores)
## This function will fit stage 1 to each dataset when called from a unique core
do.jags.stage1 <- function(dataset) {
jd <- with(dataset,
list(K = dim(u)[2],
J=nrow(n.all), T=ncol(n.all),
xlim = xlim, ylim = ylim,
x = x, n.marked=n.marked,
y=y.marked[1:max(dataset$n.marked),,],
u=u[1:max(dataset$n.marked),,,]))
ji <- function() {
## Initialize activity centers for marked guys
## si <- dataset$latent$s
si <- array(NA, c(max(jd$n.marked), 2, jd$T))
for(t in 1:jd$T) {
for(i in 1:dataset$n.marked[t]) {
trps <- jd$y[i,,t]>0
if(any(trps))
si[i,,t] <- colMeans(jd$x[trps,,drop=FALSE])
else {
si[i,,t] <- colMeans(jd$u[i,,t,])
## Sometimes u is outside S
si[i,1,t] <- max(si[i,1,t], jd$xlim[1])
si[i,1,t] <- min(si[i,1,t], jd$xlim[2])
si[i,2,t] <- max(si[i,2,t], jd$ylim[1])
si[i,2,t] <- min(si[i,2,t], jd$ylim[2])
}
}
}
list(s=si[1:max(jd$n.marked),,],
sigmaMean=runif(1, 600, 700),
lam0Mean=runif(1, 0.02, 0.03))
}
jp <- c("sigmaMean", "lam0Mean")
library(rjags)
jm <- jags.model(file="gsmr-stage1.jag", data=jd, inits=ji, n.adapt=100)
jc <- coda.samples(jm, jp, n.iter=12000)
return(jc)
}
## This function will fit stage 2 to each dataset when called from a unique core
do.jags.stage2 <- function(dataset) {
M <- 175 ## You can change M to anything less than the M used to simulate data
jd2 <- with(dataset,
list(M=M,
K = dim(u)[2],
J=nrow(n.all), T=ncol(n.all),
xlim = xlim, ylim = ylim,
Area = Area, x = x,
## n=n.unmarked, ## BUG FIX: 2021-08-10
n=n.all,
mean.log.sigma.lam0=mean.log.sigma.lam0,
vcov.log.sigma.lam0=vcov.log.sigma.lam0))
ji2 <- function() {
## Initialize activity centers for marked guys
si <- dataset$latent$s
list(s=si[1:jd2$M,,], z=matrix(1,jd2$M,jd2$T),
beta0.ED = runif(1, 0.1, 0.5), beta1.ED = 0,
log.sigma.lam0=c(log(500), log(0.05)),
alpha=0,
eps.sd=0.1, epsilon=rep(0, jd2$T) )
}
jp2 <- c("sigmaMean", "lam0Mean", "beta0.ED", "beta1.ED", "alpha", "N", "ED")
library(rjags)
jm2 <- jags.model(file="gsmr-stage2.jag", data=jd2, inits=ji2, n.adapt=100)
jc2 <- coda.samples(jm2, jp2, n.iter=12000)
return(jc2)
}
## Lists to hold posterior samples for each dataset
samples.stage1 <- vector(mode="list", length=n.datasets)
samples.stage2 <- vector(mode="list", length=n.datasets)
## Loop over batches
for(i in 1:n.batches) {
library(coda)
cat("Doing batch", i, format(Sys.time()), "\n")
batch <- seq(1+batch.size*(i-1), length.out=batch.size)
samples.stage1[batch] <- parSapply(cl=cl, X=sims.case1[batch],
FUN=do.jags.stage1)
if(i == 1)
save(samples.stage1, file="samples_stage1.gzip")
for(j in batch) {
samps <- log(as.matrix(samples.stage1[[j]]))
## Next line is a bug fix. Order was wrong before 2021-07-06
samps <- samps[,c("sigmaMean", "lam0Mean")]
xbar <- colMeans(samps)
xvar <- var(samps)
sims.case1[[j]]$mean.log.sigma.lam0 <- xbar
sims.case1[[j]]$vcov.log.sigma.lam0 <- xvar
}
samples.stage2[batch] <- parSapply(cl=cl, X=sims.case1[batch],
FUN=do.jags.stage2)
if(i==1)
save(samples.stage2, file="samples_stage2.gzip")
gc()
}
save(samples.stage1, file="samples_stage1.gzip")
save(samples.stage2, file="samples_stage2.gzip")
cat("Done", format(Sys.time()), "\n")
## Doing batch 1 2021-09-13 11:35:59
## Doing batch 2 2021-09-14 10:51:18
## Doing batch 3 2021-09-15 10:24:43
## Doing batch 4 2021-09-16 09:27:51
## Doing batch 5 2021-09-17 08:05:15
## Doing batch 6 2021-09-18 11:00:07
## Doing batch 7 2021-09-19 09:42:16
## Doing batch 8 2021-09-20 06:06:00
## Doing batch 9 2021-09-21 05:58:25
## Doing batch 10 2021-09-22 02:51:05
## Done 2021-09-23 01:46:30
## Doing batch 1 2021-06-09 16:06:33
## cat("Done", format(Sys.time()), "\n")
## Doing batch 2 2021-06-10 13:03:53
## Doing batch 3 2021-06-11 09:14:23
## Doing batch 4 2021-06-12 03:53:16
## Doing batch 5 2021-06-13 01:06:20
## Doing batch 6 2021-06-14 12:08:49
## Doing batch 7 2021-06-16 03:09:37
## Doing batch 8 2021-06-17 02:45:22
## Doing batch 9 2021-06-17 21:29:46
## Doing batch 10 2021-06-18 19:44:27
## > Done 2021-06-19 15:10:14
## > source("fit_case1_two-stage.R")
## Doing batch 1 2021-07-06 11:52:33
## Doing batch 2 2021-07-07 12:21:00
## Doing batch 3 2021-07-08 12:39:58
## Doing batch 4 2021-07-09 13:09:19
## Doing batch 5 2021-07-10 13:33:46
## Doing batch 6 2021-07-11 14:05:51
## Doing batch 7 2021-07-12 17:54:47
## Doing batch 8 2021-07-13 19:55:52
## Doing batch 9 2021-07-14 20:42:30
## Doing batch 10 2021-07-16 13:29:31
## Done 2021-07-18 04:21:47
## Doing batch 1 2021-08-11 08:24:55
## save(samples.stage2, file="samples_stage2.gzip")
## cat("Done", format(Sys.time()), "\n")
## Doing batch 2 2021-08-11 23:17:00
## Doing batch 3 2021-08-12 14:03:58
## Doing batch 4 2021-08-13 04:57:20
## Doing batch 5 2021-08-13 19:39:11
## Doing batch 6 2021-08-14 10:10:28
## Doing batch 7 2021-08-15 00:50:28
## Doing batch 8 2021-08-15 15:45:19
## Doing batch 9 2021-08-16 06:31:28
## Doing batch 10 2021-08-16 21:19:42
## > > Done 2021-08-17 12:08:01
|
##############################------------PCA-----------##########################################
##############################------------PCA-----------##########################################
getwd() # Check the working directory. You should keep your data file in this folder
# setwd("mention the path of your folder") # this allows you to set a folder of your choice
# as the working directory
setwd("/Users/farshad/Documents/github/Dataregression-scripts") # this allows you to set a folder of your choice
# Griliches Wages data
Grilches <- read.csv("Griliches.csv") # read data from the file in the working directory
gril_subset <- Grilches[,c("iq" , "age80", "school80", "expr80", "tenure80")] # select only these variables for
# further analysis
pca_grils <- prcomp(gril_subset, center = TRUE, scale. = TRUE)
summary(pca_grils)# output of PCA model
# Proportion of Variance explained by 5 PCs 0.3969 0.2760 0.1495 0.1184 0.05907
######################################XXXXXX--END--XXXXXXXXXXXXX################################### | /example_practice_pca_subset.R | no_license | fuadar/Dataregression-scripts | R | false | false | 1,051 | r |
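# --- Editor's note: illustrative sketch, not part of the script above. The
# "Proportion of Variance" quoted in the closing comment comes straight from
# the prcomp summary; it can also be recomputed from the component standard
# deviations, shown here on a built-in data set:
p <- prcomp(USArrests, center = TRUE, scale. = TRUE)
summary(p)$importance["Proportion of Variance", ]
p$sdev^2 / sum(p$sdev^2)  # same proportions, computed by hand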
\name{Levene.Tests}
\alias{Levene.Tests}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Levene Tests
}
\description{
Levene Tests for a matrix of continuous variables and a grouping factor.
}
\usage{
Levene.Tests(X, groups = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
The matrix of continuous variables
}
\item{groups}{
The factor with the groups
}
}
\details{
Levene Tests for a matrix of continuous variables and a grouping factor.
}
\value{
The organized output
}
\author{
Jose Luis Vicente Villardon
}
\examples{
data(wine)
Levene.Tests(wine[,4:7], wine$Group)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
| /man/Levene.Tests.Rd | no_license | villardon/MultBiplotR | R | false | false | 978 | rd |
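# --- Editor's note: illustrative sketch, not part of the package file above.
# A Levene-type test is a one-way ANOVA on absolute deviations from the group
# centres; MultBiplotR's Levene.Tests() may differ in details (e.g. means vs
# medians), so this is only a base-R analogue on toy data (median-centred,
# i.e. the Brown-Forsythe variant):
set.seed(1)
g <- gl(3, 30)
x <- rnorm(90, sd = rep(c(1, 1, 2), each = 30))
d <- abs(x - ave(x, g, FUN = median))
anova(lm(d ~ g))  # a small p-value flags unequal group variances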
/libc/etc/locale/man.r | no_license | paulohrpinheiro/tropix-libs | R | false | false | 3,558 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tlmixture.R
\name{tlmixture}
\alias{tlmixture}
\title{Targeted learning for exposure mixtures}
\usage{
tlmixture(
data,
outcome,
exposures,
quantiles_mixtures = 3L,
quantiles_exposures = 4L,
folds_cvtmle = 2L,
folds_sl = 2L,
estimator_outcome = c("SL.mean", "SL.glmnet"),
estimator_propensity = estimator_outcome,
cluster_exposures = FALSE,
mixture_fn = mixture_glm,
refit_mixtures = TRUE,
verbose = FALSE
)
}
\arguments{
\item{data}{Data frame with outcome, exposure, and adjustment variables.}
\item{outcome}{Name of the outcome variable.}
\item{exposures}{A vector of exposure names, or (not yet supported) a list where each element is
a vector of pre-clustered exposures.}
\item{quantiles_mixtures}{Number of quantiles to use for discretizing mixture
(default 3 - low, medium, high).}
\item{quantiles_exposures}{Number of quantiles to use for discretizing continuous exposures
(default 4).}
\item{folds_cvtmle}{Number of CV-TMLE folds (default 2).}
\item{folds_sl}{Number of SL folds during outcome and propensity estimation.}
\item{estimator_outcome}{SuperLearner library for outcome estimation.}
\item{estimator_propensity}{SuperLearner library for propensity estimation.}
\item{cluster_exposures}{Whether to automatically cluster a vector of exposures into
sub-groups (default FALSE; TRUE not yet supported).}
\item{mixture_fn}{Current options: mixture_glm, mixture_pls, or mixture_sl}
\item{refit_mixtures}{After CV-TMEL, refit mixture functions to full dataset.}
\item{verbose}{If TRUE, display more detailed info during execution.}
}
\description{
This is our main function.
}
| /man/tlmixture.Rd | permissive | ck37/tlmixture | R | false | true | 1,703 | rd |
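# --- Editor's note: hypothetical call sketch based only on the usage section
# above; the data frame and exposure names are made up and the call is left
# commented out:
# fit <- tlmixture(
#   data      = my_df,                       # hypothetical data frame
#   outcome   = "y",
#   exposures = c("pm25", "no2", "o3"),      # hypothetical exposure columns
#   quantiles_mixtures = 3, folds_cvtmle = 2,
#   estimator_outcome  = c("SL.mean", "SL.glmnet")
# )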
# The purpose of these tests was initially to see if the installation performed as
# initially planned. It works.
# But we cannot enable this test for CRAN because it will take some time and
# may not work due to the PyTorch installation process.
# The major problem I found with these tests is that the `torch_config`
# objects do not update after issuing a new `install_pytorch`.
#
skip("do nothing")
# skip_if_no_torch()
context("install_pytorch, live, no dry-run")
# devtools::reload(pkg = ".", quiet = FALSE)
# unloadNamespace("rTorch")
# library(rTorch)
test_that("PyTorch 1.6, Python 3.7, pandas", {
res <- install_pytorch(version = "1.6", conda_python_version = "3.7",
extra_packages = "pandas",
dry_run = FALSE)
#detach("package:rTorch", unload=TRUE)
#require(rTorch)
# devtools::reload(pkg = ".", quiet = FALSE)
# use torch_config for live test
# unloadNamespace("rTorch")
# detach("package:rTorch", unload=TRUE)
# library(rTorch)
res <- torch_config()
expect_equal(res$available, TRUE)
expect_equal(res$version_str, "1.6.0")
expect_equal(res$python_version, "3.7")
expect_equal(res$numpy_version, "1.19.1")
expect_equal(res$env_name, "r-torch")
})
test_that("PyTorch 1.4, Python 3.6, pandas, matplotlib install from the console", {
res <- install_pytorch(version = "1.4", conda_python_version = "3.6",
extra_packages = c("pandas", "matplotlib"),
dry_run = FALSE)
# detach("package:rTorch", unload=TRUE)
# require(rTorch)
# devtools::reload(pkg = ".", quiet = FALSE)
# unloadNamespace("rTorch")
# detach("package:rTorch", unload=TRUE)
# library(rTorch)
res <- torch_config()
expect_equal(res$available, TRUE)
expect_equal(res$version_str, "1.4.0")
expect_equal(res$python_version, "3.6")
expect_equal(res$numpy_version, "1.19.1")
expect_equal(res$env_name, "r-torch")
})
# library(rTorch)
# pkg <- "package:rTorch"
# detach(pkg, character.only = TRUE)
# sessionInfo()
# library(rTorch)
# .rs.restartR()
# library(rTorch)
# library(testthat)
res <- install_pytorch(version = "1.3", conda_python_version = "3.6",
extra_packages = c("pandas", "matplotlib"),
dry_run = FALSE)
test_that("PyTorch 1.3, Python 3.6, pandas, matplotlib install from the console", {
res <- torch_config()
expect_equal(res$available, TRUE)
expect_equal(res$version_str, "1.3.0")
expect_equal(res$python_version, "3.6")
expect_equal(res$numpy_version, "1.19.1")
expect_equal(res$env_name, "r-torch")
sessionInfo()
})
| /tests/testthat/test-install_rtorch_live.R | permissive | wvqusrai/rTorch | R | false | false | 2,693 | r |
Renv = new.env(parent = globalenv())
var1 <- rnorm(200)
var2 <- rnorm(200)
var3 <- sample( c(0, 1), 200, replace = TRUE)
dataf<- data.frame(var1 = var1,var2 =var2, var3 = var3,offset=1)
#rownames(var4) <- 1:nrow(var4)
Renv$dataf <- dataf
FLenv = as.FL(Renv)
test_that("glm: execution for binomial ",{
result = eval_expect_equal({
glmobj <- glm(var3 ~ var1 + var2, data=dataf, family = "binomial")
coeffs <- coef(glmobj)
},Renv,FLenv,
expectation = "coeffs",
noexpectation = "glmobj",
check.attributes=F,
tolerance = .000001
)
})
test_that("glm: equality of coefficients, residuals, fitted.values, df.residual for binomial",{
result = eval_expect_equal({
coeffs2 <- glmobj$coefficients
res <- glmobj$residuals
fitteds <- glmobj$fitted.values
dfres <- glmobj$df.residual
},Renv,FLenv,
noexpectation = "glmobj",
tolerance = .000001,
check.attributes = F
)
})
#summary, plot??
| /tests/testthat/test_glm_binomial.R | no_license | amalshri/AdapteR | R | false | false | 959 | r |
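# --- Editor's note: illustrative sketch, not part of the AdapteR test file
# above. The R-side half of the comparison is plain stats::glm; a toy run
# showing the pieces the tests extract (coefficients, residuals, fitted
# values, residual df):
set.seed(1)
df <- data.frame(var1 = rnorm(200), var2 = rnorm(200),
                 var3 = sample(c(0, 1), 200, replace = TRUE))
fit <- glm(var3 ~ var1 + var2, data = df, family = "binomial")
coef(fit)
head(fit$residuals)
head(fit$fitted.values)
fit$df.residual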
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orthonormalise-unorthonormalise.R
\name{unstandardize}
\alias{unstandardize}
\title{Unstandardise Coefficients}
\usage{
unstandardize(beta, centers, scales, cs)
}
\arguments{
\item{beta}{coef matrix (1 row per coef)}
\item{centers}{col means}
\item{scales}{col sds}
\item{cs}{alternative list with elements 'centers' and 'scales'}
}
\description{
Unstandardise Coefficients
}
| /man/unstandardize.Rd | no_license | antiphon/PenGE | R | false | true | 457 | rd |
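# --- Editor's note: illustrative sketch only; the PenGE implementation of
# unstandardize() is not shown here, so this is just the usual
# back-transformation for coefficients fitted on scaled predictors
# (slope_original = slope_standardised / scale), verified against lm():
set.seed(1)
X  <- matrix(rnorm(100 * 2), ncol = 2)
y  <- as.numeric(X %*% c(1.5, -2) + rnorm(100))
Xs <- scale(X)
b_std  <- coef(lm(y ~ Xs))[-1]
b_orig <- b_std / attr(Xs, "scaled:scale")
rbind(b_orig, coef(lm(y ~ X))[-1])  # identical up to numerical error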
# *****************************************************************************
# File : far.R
# ************************************************************
# Description :
# Functional Autoregressive functions and methods
# Version : 2.2
# Date : 2007-10-01
# ************************************************************
# Author : Julien Damon <julien.damon@gmail.com>
# License : LGPL
# URL: https://github.com/Looping027/far
# *****************************************************************************
# *****************************************************************************
# Title : far
# ************************************************************
# Description :
# Modelization of Vectorized Functional Processes
# Version : 2.1
# Date : 2005-01-10
# *****************************************************************************
far <- function(data, y, x, kn, center=TRUE, na.rm=TRUE, joined=FALSE)
{
if ( (!is.null(class(data)))
&& (class((data)) != "fdata")) # test the type of data
stop("data is not of class fdata")
call <- match.call()
# find dimensions
n <- ncol(data[[1]])
# find variables
if (missing(y))
{
if (missing(x))
{
x <- NULL
y <- names(data)
} else {
y <- x
x <- NULL
}
} else {
if (missing(x)) x <- NULL
}
# find dimensions and test
variables <- c(y,x)
nx <- length(x)
ny <- length(y)
r <- nx+ny
if (joined) # if joined estimation
{
if (missing(kn)) kn <- r
if (1 != length(kn)) {
stop("Gives only one kn in joined estimation.")
}
} else {
if (missing(kn)) kn <- rep(1,r)
if (r != length(kn)) {
stop("Gives a kn value for each variable. Dimension are different.")
}
}
# adapt data to the model chosen
data.adapt <- list()
if (nx>0) n <- n-1
for (i in 1:length(y))
data.adapt[[y[i]]] <- (data[[y[i]]])[,1:n,drop=FALSE]
if (nx>0) for (i in 1:length(x))
data.adapt[[x[i]]] <- (data[[x[i]]])[,-1,drop=FALSE]
class(data.adapt) <- "fdata"
# Removing of non available data if required
if (na.rm) {
listobs <- c(apply(!is.na(data.adapt),2,all))
listobs2 <- c(FALSE,listobs) * c(listobs,FALSE) == 1
} else {
listobs <- rep(TRUE,n)
listobs2 <- c(FALSE,rep(TRUE,n-1),FALSE)
}
nbobs <- sum(listobs == TRUE)
nbobs2 <- sum(listobs2 == TRUE)
# centering
if (center) {
f1 <- function(x,listobs) matrix(apply(x[,listobs],1,mean),ncol=1)
databar <- lapply(data.adapt,f1,listobs)
class(databar)<-"fdata"
data <- list()
for (i in 1:length(variables))
data[[variables[i]]] <-
sweep(data.adapt[[variables[i]]],1,databar[[variables[i]]],"-")
class(data) <- "fdata"
} else {
databar <- NULL
data<-data.adapt
}
# Begining of the estimation
# --------------------------
if (joined)
{
eigenvector <- list()
eigenvalues <- list()
length(eigenvector) <- 1
length(eigenvalues) <- 1
# Calculation of the subspaces obtained from the covariance matrix
nrowdata <- c(0,cumsum(unlist(lapply(data,nrow))))
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <-
(data[[i]])[,listobs,drop=FALSE]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[1]] <- sdbase$vectors[, 1:kn,drop=FALSE]
eigenvalues[[1]] <- as.matrix(sdbase$values/nrowdata[r+1])
# Determination of the projection matrix
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=n)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <- data[[i]]
Proj <- t(eigenvector[[1]]) %*% datacent
} else {
eigenvector <- list()
eigenvalues <- list()
Projdata <- list()
length(eigenvector) <- r
length(eigenvalues) <- r
length(Projdata) <- r
# Calculation of the subspaces obtained from the covariance matrix
for (i in 1:r)
{
datacent <- (data[[i]])[,listobs]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[i]] <- sdbase$vectors[, 1:kn[i],drop=FALSE]
eigenvalues[[i]] <- as.matrix(sdbase$values/nrow(datacent))
Projdata[[i]] <- t(eigenvector[[i]]) %*% data[[i]]
}
# Determination of the projection matrix
Proj <- matrix(0,ncol=n,nrow=sum(kn))
kkn <- c(0,kn)
for (k in 1:r)
Proj[sum(kkn[1:k])+(1:kkn[k+1]),] <- Projdata[[k]]
}
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# result
output <- list(
call = call,
data = data,
databar = databar,
y = y,
x = x,
v = eigenvector,
values = eigenvalues,
rho = rho,
nbvar = r,
kn = kn,
joined = joined)
class(output) <- "far"
return(output)
}
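# --- Editor's note: illustrative sketch inserted between functions; it is not
# part of the far package source. The core estimation step in far() above:
# centre the curves, project them on the leading eigenvectors of the empirical
# covariance, and work with the resulting scores (rho is then estimated from
# the lag-1 cross-products of those scores, see the Delta/InvG lines above).
# Wrapped in if (FALSE) so sourcing this file is unaffected; toy data, kn = 3.
if (FALSE) {
  set.seed(42)
  X  <- matrix(rnorm(20 * 50), nrow = 20)   # 20 grid points, 50 curves
  Xc <- sweep(X, 1, rowMeans(X), "-")       # centre each grid point
  C  <- Xc %*% t(Xc) / ncol(Xc)             # empirical covariance matrix
  v  <- eigen(C)$vectors[, 1:3, drop = FALSE]
  scores <- t(v) %*% Xc                     # 3 x 50 matrix of projected scores
}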
# *****************************************************************************
# Title : print.far
# ************************************************************
# Description :
# print method for the 'far' model
# Version : 1.0
# Date : 2001-03-27
# *****************************************************************************
print.far<-function(x, ..., digits = max(3, getOption("digits") - 3),
na.print = "", file="", append=TRUE)
{
variables <- c(x$y,x$x)
cat("Functional Autoregressive Model\n",file=file,append=append)
cat("Call: ", deparse(x$call), "\n\n",file=file,append=append)
if (x$joined)
{
cat("Joined variable\n",file=file,append=append)
cat("Dimension of the subspace: ", format(x$kn, digits = digits),
"\n",file=file,append=append)
var.explained <- (x$values[[1]])^2
cat("Explained Variance: ", format(sum(var.explained[1:x$kn[1]])/
sum(var.explained)*100,
digits = digits), "%\n",file=file,append=append)
cat("Estimated first Eigen values of the Covariance: ",
format((x$values[[1]])[1:x$kn], digits = digits),
"\n\n",file=file,append=append)
} else {
# printed for each variable
for (i in 1:length(x$kn))
{
cat("Variable: ", variables[i], "\n",file=file,append=append)
cat("Dimension of the subspace: ", format(x$kn[[i]],
digits = digits), "\n",file=file,append=append)
var.explained <- (x$values[[i]])^2
cat("Explained Variance: ", format(sum(var.explained[1:x$kn[i]])/
sum(var.explained)*100,
digits = digits), "%\n",file=file,append=append)
cat("Estimated first Eigen values of the Covariance: ",
format((x$values[[i]])[1:x$kn[i]], digits = digits),
"\n\n",file=file,append=append)
}
}
cat("Estimated correlation Matrix in adequate subspace: \n",
file=file,append=append)
if (file=="")
print(round(x$rho,3))
else
for (i in 1:nrow(x$rho))
cat(format(x$rho[i,],digits=digits),"\n",file=file,append=append)
cat("\n",file=file,append=append)
invisible(x)
}
# *****************************************************************************
# Title : coef.far
# ************************************************************
# Description :
# coef method for the 'far' model
# Version : 1.1
# Date : 2003-06-11
# *****************************************************************************
coef.far<-function (object, ...)
{
return(object$rho)
}
# *****************************************************************************
# Title : plot.far
# ************************************************************
# Description :
# plot method for the 'far' model
# Version : 2.0
# Date : 2001-07-06
# *****************************************************************************
plot.far <- function(x,...)
{
xval <- rownames((x$data)[[1]])
kn <- x$kn
n <- length(x$kn)
names <- names(x$data)
if (x$joined)
{
matplot(x=1:nrow(x$v[[1]]),y=x$v[[1]],type='l',xlab="time",
ylab="",main=paste(names,collapse=", "),...)
range.plot <- (par()$usr[c(1,4)])
legend(x=range.plot[1],y=range.plot[2],
legend=paste("v",1:kn[1]),lty=1:kn[1],col=1:kn[1])
} else {
for (i in 1:n)
{
matplot(x=xval,y=x$v[[i]],type='l',xlab="time",
ylab="",main=paste(names[i]),...)
range.plot <- (par()$usr[c(1,4)])
legend(x=range.plot[1],y=range.plot[2],
legend=paste("v",1:kn[i]),lty=1:kn[i],col=1:kn[i])
}
}
invisible()
}
# *****************************************************************************
# Title : predict.far
# ************************************************************
# Description :
# Computation of prediction for the class model "far"
# Version : 2.0
# Date : 2001-07-09
# *****************************************************************************
predict.far<-function(object, ..., newdata = NULL, label, na.rm=TRUE,
positive=FALSE)
{
if ( (!is.null(class(object)))
&& (class((object)) != "far")) # test the type of data
stop("object is not of class far")
if ( (!is.null(class(newdata)))
&& (class((newdata)) != "fdata")) # test the type of data
stop("newdata is not of class fdata")
x <- object$x
y <- object$y
nx <- length(x)
ny <- length(y)
r <- nx+ny
n <- ncol(newdata[[object$y[1]]])
if (nx>0) # if there is auxiliary variables
{
label <- (colnames(newdata[[y[1]]]))[-1]
data <- list()
if (is.null(object$databar))
{
for (i in 1:ny)
data[[y[i]]] <- (newdata[[y[i]]])[,-n,drop=FALSE]
for (i in 1:nx)
data[[x[i]]] <- (newdata[[x[i]]])[,-1,drop=FALSE]
} else {
for (i in 1:ny)
data[[y[i]]] <- sweep((newdata[[y[i]]])[,-n,drop=FALSE],1,
object$databar[[y[i]]],"-")
for (i in 1:nx)
data[[x[i]]] <- sweep((newdata[[x[i]]])[,-1,drop=FALSE],1,
object$databar[[x[i]]],"-")
}
class(data) <- "fdata"
n <- (n-1)
} else {
if (missing(label))
label <- c(colnames(newdata[[y[1]]])[-1],paste(n+1))
else
label <- c(colnames(newdata[[y[1]]])[-1],label)
data <- list()
if (is.null(object$databar))
{
for (i in 1:ny) {
data[[y[i]]] <- newdata[[y[i]]]
}
} else {
for (i in 1:ny) {
data[[y[i]]] <- sweep((newdata[[y[i]]]),1,
object$databar[[y[i]]],"-")
}
}
class(data) <- "fdata"
}
kn <- object$kn
if (na.rm) {
listobs <- c(apply(!is.na(data),2,all))
} else {
listobs <- rep(TRUE,n)
}
nbobs <- sum(listobs==TRUE)
if (object$joined)
{
nrowdata <- c(0,cumsum(unlist(lapply(data,nrow))))
datacent <- matrix(0,ncol=nbobs,nrow=nrowdata[r+1])
for (k in (1:r)) {
datacent[(nrowdata[k]+1):nrowdata[k+1],] <-
(data[[k]])[,listobs,drop=FALSE]
}
datacent <- t(object$v[[1]]) %*% datacent
pred <- list()
length(pred) <- ny
pred2 <- object$v[[1]] %*% (object$rho %*% datacent)
for (i in (1:ny)) {
pred[[i]] <- pred2[(nrowdata[i]+1):nrowdata[i+1],,drop=FALSE]
}
} else {
datacent <- matrix(0,ncol=nbobs,nrow=sum(kn))
kkn <- c(0,kn)
for (k in (1:r)) {
datacent[sum(kkn[1:k])+(1:kkn[k+1]),] <-
t(object$v[[k]]) %*% ((data[[k]])[,listobs,drop=FALSE])
}
pred <- list()
length(pred) <- ny
for (i in (1:ny)) {
pred[[i]] <- object$v[[i]] %*% (object$rho %*%
datacent)[sum(kkn[1:i])+(1:kkn[i+1]),,drop=FALSE]
}
}
for (i in (1:ny))
{
if (!is.null(object$databar))
pred[[i]] <- sweep((pred[[i]]),1,object$databar[[i]],"+")
if (positive)
pred[[i]] <- (pred[[i]]+abs(pred[[i]]))/2
rownames(pred[[i]]) <- rownames(data[[i]])
colnames(pred[[i]]) <- label[listobs]
}
names(pred) <- object$y
class(pred) <- "fdata"
return(pred)
}
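# --- Illustrative usage sketch (editor addition, not part of the original far
# --- package source). Object names ('obs', 'toy', 'fit', 'pred') are
# --- hypothetical; an "fdata" object, as this file assumes throughout, is a
# --- named list of matrices with rows = discretization points and columns =
# --- observation dates.
# obs <- matrix(rnorm(24 * 60), nrow = 24, ncol = 60,
#               dimnames = list(paste0("t", 1:24), paste0("day", 1:60)))
# toy <- structure(list(var1 = obs), class = "fdata")
# fit  <- far(data = toy, y = "var1", kn = 3)   # fit the functional autoregressive model
# pred <- predict(fit, newdata = toy)           # dispatches to predict.far()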
# *****************************************************************************
# Title : far.cv
# ************************************************************
# Description :
# Cross-validation for the Model of Vectorized Functional Processes
# Version : 1.2
# Date : 2007-10-01
# *****************************************************************************
far.cv <- function(data, y, x, kn, ncv, cvcrit, center=TRUE, na.rm=TRUE,
joined=FALSE)
{
if (class(data) != "fdata") # test the type of data
stop("data is not of class fdata")
call <- match.call()
# find dimensions
n <- ncol(data[[1]])
if (missing(ncv)) ncv <- round(n/5)
n1 <- (n-ncv)
# find variables
if (missing(y))
{
if (missing(x))
{
x <- NULL
y <- names(data)
} else {
y <- x
x <- NULL
}
} else {
if (missing(x)) x <- NULL
}
if (missing(cvcrit))
{
cvcrit <- y
}
# find dimensions and test
variables <- c(y,x)
nx <- length(x)
ny <- length(y)
ncrit <- length(cvcrit)
r <- nx+ny
dim1 <- unlist(lapply(data,nrow))
dim1 <- dim1[variables]
if (joined) # if joined estimation
{
if (missing(kn)) kn <- sum(dim1)
if (1 != length(kn)) stop("Give only one kn in joined estimation.")
} else {
if (missing(kn)) kn <- dim1
if (r != length(kn))
stop("Give a kn value for each variable. Dimensions are different.")
}
# adapt data to the model chosen
data.apprent <- list()
data.test <- list()
if (nx>0)
{
n1 <- n1-1
}
for (i in 1:length(y))
{
data.apprent[[y[i]]] <- (data[[y[i]]])[,1:n1,drop=FALSE]
data.test[[y[i]]] <- (data[[y[i]]])[,n1+(1:ncv),drop=FALSE]
}
if (nx>0) for (i in 1:length(x))
{
data.apprent[[x[i]]] <- (data[[x[i]]])[,1+(1:n1),drop=FALSE]
data.test[[x[i]]] <- (data[[x[i]]])[,n1+1+(1:ncv),drop=FALSE]
}
class(data.apprent) <- "fdata"
class(data.test) <- "fdata"
# Removing unavailable data if required
if (na.rm) {
listobs <- c(apply(!is.na(data.apprent),2,all))
listobs2 <- c(FALSE,listobs) * c(listobs,FALSE) == 1
listobs.test <- c(apply(!is.na(data.test),2,all))
} else {
listobs <- rep(TRUE,n1)
listobs2 <- c(FALSE,rep(TRUE,n1-1),FALSE)
listobs.test <- rep(TRUE,ncv)
}
nbobs <- sum(listobs == TRUE)
nbobs2 <- sum(listobs2 == TRUE)
nbobs.test <- sum(listobs.test == TRUE)
# centering
if (center) {
f0 <- function(x,listobs) matrix(apply(x[,listobs],1,mean),ncol=1)
databar <- lapply(data.apprent,f0,listobs)
class(databar)<-"fdata"
data <- list()
for (i in 1:length(variables))
data[[variables[i]]] <-
sweep(data.apprent[[variables[i]]],1,
databar[[variables[i]]],"-")
class(data) <- "fdata"
data2 <- list()
for (i in 1:length(variables))
data2[[variables[i]]] <-
sweep(data.test[[variables[i]]],1,
databar[[variables[i]]],"-")
class(data2) <- "fdata"
} else {
databar <- NULL
data<-data.apprent
data2<-data.test
}
# Beginning the estimation
# -----------------------
if (joined)
{
eigenvector <- list()
eigenvalues <- list()
length(eigenvector) <- 1
length(eigenvalues) <- 1
# Calculation of the subspaces obtained from the covariance matrix
nrowdata <- c(0,cumsum(dim1))
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <-
(data[[i]])[,listobs,drop=FALSE]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[1]] <- sdbase$vectors[, 1:kn,drop=FALSE]
eigenvalues[[1]] <- as.matrix(sdbase$values/nrowdata[r+1])
# Determination of the projection matrix
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=n1)
datacent2 <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs.test)
for (i in 1:r)
{
datacent[(nrowdata[i]+1):nrowdata[i+1],] <- data[[i]]
datacent2[(nrowdata[i]+1):nrowdata[i+1],] <-
data2[[i]][,listobs.test,drop=FALSE]
}
} else {
eigenvector <- list()
eigenvalues <- list()
Projdata <- list()
Projdata2 <- list()
length(eigenvector) <- r
length(eigenvalues) <- r
length(Projdata) <- r
length(Projdata2) <- r
# Calculation of the subspaces obtained from the covariance matrix
for (i in 1:r)
{
datacent <- (data[[i]])[,listobs]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[i]] <- sdbase$vectors[, 1:kn[i],drop=FALSE]
eigenvalues[[i]] <- as.matrix(sdbase$values/nrow(datacent))
Projdata[[i]] <- t(eigenvector[[i]]) %*% data[[i]]
Projdata2[[i]] <- t(eigenvector[[i]]) %*%
data2[[i]][,listobs.test,drop=FALSE]
}
}
# Beginning of the cross-validation
# --------------------------------
output <- matrix(0,ncol=length(kn)+6,nrow=prod(kn))
f1<-function(x) mean(apply(abs(x),2,mean),na.rm=TRUE)
f2<-function(x) mean(sqrt(apply(x^2,2,mean)),na.rm=TRUE)
f3<-function(x) mean(apply(abs(x),2,max),na.rm=TRUE)
f4<-function(x) mean(abs(x),na.rm=TRUE)
f5<-function(x) sqrt(mean(x^2,na.rm=TRUE))
f6<-function(x) max(abs(x),na.rm=TRUE)
pk <- prod(kn)
lk <- length(kn)
if (joined)
{
for (k in 1:kn)
{
# projection
Proj <- t(eigenvector[[1]][,1:k,drop=FALSE]) %*% datacent
Proj2 <- t(eigenvector[[1]][,1:k,drop=FALSE]) %*% datacent2
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n1+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# Prediction
pred2 <- (eigenvector[[1]][,1:k,drop=FALSE]) %*% rho %*% Proj2
pred <- list()
pred.max <- list()
for (i in 1:ny)
{
pred[[y[i]]] <- pred2[(nrowdata[i]+1):nrowdata[i+1],
-nbobs.test,drop=FALSE]
rownames(pred[[y[i]]]) <- rownames(data[[y[i]]])
colnames(pred[[y[i]]]) <-
(colnames(data2[[y[i]]])[c(FALSE,listobs.test)])[-nbobs.test]
}
# Calculation of errors
for (i in 1:ncrit)
{
pred.max[[cvcrit[i]]] <- apply((data2[[cvcrit[i]]])[,
colnames(pred[[cvcrit[i]]]),drop=FALSE],2,max)-
apply(pred[[cvcrit[i]]],2,max)
pred[[cvcrit[i]]] <- ((data2[[cvcrit[i]]])[,
colnames(pred[[cvcrit[i]]]),drop=FALSE]-
pred[[cvcrit[i]]])
}
output[k,2] <- mean(unlist(lapply(pred,f1)))
output[k,3] <- mean(unlist(lapply(pred,f2)))
output[k,4] <- mean(unlist(lapply(pred,f3)))
output[k,5] <- mean(unlist(lapply(pred.max,f4)))
output[k,6] <- mean(unlist(lapply(pred.max,f5)))
output[k,7] <- mean(unlist(lapply(pred.max,f6)))
output[k,1] <- k
}
} else {
kn2<-c(1,kn)
for (i in 1:length(kn))
output[,i]<-rep(rep(1:kn[i],rep(pk/prod(kn[1:i]),kn[i])),
prod(kn2[1:i]))
for (j in 1:pk)
{
kn <- output[j,1:lk]
# Determination of the projection matrix
Proj <- matrix(0,ncol=n1,nrow=sum(kn))
Proj2 <- matrix(0,ncol=nbobs.test,nrow=sum(kn))
kkn <- c(0,kn)
for (k in 1:r)
{
Proj[sum(kkn[1:k])+(1:kkn[k+1]),] <-
Projdata[[k]][1:kn[k],,drop=FALSE]
Proj2[sum(kkn[1:k])+(1:kkn[k+1]),] <-
Projdata2[[k]][1:kn[k],,drop=FALSE]
}
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n1+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# Prediction
pred <- list()
pred.max <- list()
for (i in 1:ny)
{
pred[[y[i]]] <- (eigenvector[[1]][,1:kn[i],drop=FALSE]) %*%
(rho %*% Proj2)[sum(kkn[1:i])+(1:kkn[i+1]),
-nbobs.test,drop=FALSE]
rownames(pred[[y[i]]]) <- rownames(data[[y[i]]])
colnames(pred[[y[i]]]) <-
(colnames(data2[[y[i]]])[c(FALSE,listobs.test)])[-nbobs.test]
}
# Calculation of errors
for (i in 1:ncrit)
{
pred.max[[cvcrit[i]]] <-
apply((data2[[cvcrit[i]]])[,colnames(pred[[cvcrit[i]]]),
drop=FALSE],2,max)-apply(pred[[cvcrit[i]]],2,max)
pred[[cvcrit[i]]] <-
((data2[[cvcrit[i]]])[,colnames(pred[[cvcrit[i]]]),
drop=FALSE]-pred[[cvcrit[i]]])
}
output[j,lk+1] <- mean(unlist(lapply(pred,f1)))
output[j,lk+2] <- mean(unlist(lapply(pred,f2)))
output[j,lk+3] <- mean(unlist(lapply(pred,f3)))
output[j,lk+4] <- mean(unlist(lapply(pred.max,f4)))
output[j,lk+5] <- mean(unlist(lapply(pred.max,f5)))
output[j,lk+6] <- mean(unlist(lapply(pred.max,f6)))
}
}
# result
dimnames(output) <- list(NULL,c(paste("k",1:lk,sep=""),
"L1","L2","Linf","L1max","L2max","Linfmax"))
moutput1<-min(output[,length(kn)+1])
moutput1<-output[output[,length(kn)+1]==moutput1,]
moutput2<-min(output[,length(kn)+2])
moutput2<-output[output[,length(kn)+2]==moutput2,]
moutput3<-min(output[,length(kn)+3])
moutput3<-output[output[,length(kn)+3]==moutput3,]
moutput4<-min(output[,length(kn)+4])
moutput4<-output[output[,length(kn)+4]==moutput4,]
moutput5<-min(output[,length(kn)+5])
moutput5<-output[output[,length(kn)+5]==moutput5,]
moutput6<-min(output[,length(kn)+6])
moutput6<-output[output[,length(kn)+6]==moutput6,]
invisible(list("cv"=output,
"minL1"=moutput1,
"minL2"=moutput2,
"minLinf"=moutput3,
"minL1max"=moutput4,
"minL2max"=moutput5,
"minLinfmax"=moutput6))
}
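# --- Illustrative cross-validation sketch (editor addition; hypothetical
# --- object names, reusing the 'toy' fdata object sketched above). far.cv()
# --- scores every subspace dimension from 1 up to the supplied kn on the last
# --- ncv observations; the row minimising the L2 criterion is a natural
# --- choice of kn before refitting with far().
# cv <- far.cv(data = toy, y = "var1", kn = 6, ncv = 12)
# cv$minL2                               # kn value(s) attaining the smallest L2 prediction error
# fit <- far(data = toy, y = "var1", kn = cv$minL2["k1"])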
| /far/R/far.R | no_license | ingted/R-Examples | R | false | false | 25,103 | r | # *****************************************************************************
# File : far.R
# ************************************************************
# Description :
# Functional Autoregressive functions and methods
# Version : 2.2
# Date : 2007-10-01
# ************************************************************
# Author : Julien Damon <julien.damon@gmail.com>
# License : LGPL
# URL: https://github.com/Looping027/far
# *****************************************************************************
# *****************************************************************************
# Title : far
# ************************************************************
# Description :
# Modelization of Vectorized Functional Processes
# Version : 2.1
# Date : 2005-01-10
# *****************************************************************************
far <- function(data, y, x, kn, center=TRUE, na.rm=TRUE, joined=FALSE)
{
if ( (!is.null(class(data)))
&& (class((data)) != "fdata")) # test the type of data
stop("data is not of class fdata")
call <- match.call()
# find dimensions
n <- ncol(data[[1]])
# find variables
if (missing(y))
{
if (missing(x))
{
x <- NULL
y <- names(data)
} else {
y <- x
x <- NULL
}
} else {
if (missing(x)) x <- NULL
}
# find dimensions and test
variables <- c(y,x)
nx <- length(x)
ny <- length(y)
r <- nx+ny
if (joined) # if joined estimation
{
if (missing(kn)) kn <- r
if (1 != length(kn)) {
stop("Give only one kn in joined estimation.")
}
} else {
if (missing(kn)) kn <- rep(1,r)
if (r != length(kn)) {
stop("Give a kn value for each variable. Dimensions are different.")
}
}
# adapt data to the model chosen
data.adapt <- list()
if (nx>0) n <- n-1
for (i in 1:length(y))
data.adapt[[y[i]]] <- (data[[y[i]]])[,1:n,drop=FALSE]
if (nx>0) for (i in 1:length(x))
data.adapt[[x[i]]] <- (data[[x[i]]])[,-1,drop=FALSE]
class(data.adapt) <- "fdata"
# Removing unavailable data if required
if (na.rm) {
listobs <- c(apply(!is.na(data.adapt),2,all))
listobs2 <- c(FALSE,listobs) * c(listobs,FALSE) == 1
} else {
listobs <- rep(TRUE,n)
listobs2 <- c(FALSE,rep(TRUE,n-1),FALSE)
}
nbobs <- sum(listobs == TRUE)
nbobs2 <- sum(listobs2 == TRUE)
# centering
if (center) {
f1 <- function(x,listobs) matrix(apply(x[,listobs],1,mean),ncol=1)
databar <- lapply(data.adapt,f1,listobs)
class(databar)<-"fdata"
data <- list()
for (i in 1:length(variables))
data[[variables[i]]] <-
sweep(data.adapt[[variables[i]]],1,databar[[variables[i]]],"-")
class(data) <- "fdata"
} else {
databar <- NULL
data<-data.adapt
}
# Beginning of the estimation
# --------------------------
if (joined)
{
eigenvector <- list()
eigenvalues <- list()
length(eigenvector) <- 1
length(eigenvalues) <- 1
# Calculation of the subspaces obtained from the covariance matrix
nrowdata <- c(0,cumsum(unlist(lapply(data,nrow))))
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <-
(data[[i]])[,listobs,drop=FALSE]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[1]] <- sdbase$vectors[, 1:kn,drop=FALSE]
eigenvalues[[1]] <- as.matrix(sdbase$values/nrowdata[r+1])
# Determination of the projection matrix
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=n)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <- data[[i]]
Proj <- t(eigenvector[[1]]) %*% datacent
} else {
eigenvector <- list()
eigenvalues <- list()
Projdata <- list()
length(eigenvector) <- r
length(eigenvalues) <- r
length(Projdata) <- r
# Calculation of the subspaces obtained from the covariance matrix
for (i in 1:r)
{
datacent <- (data[[i]])[,listobs]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[i]] <- sdbase$vectors[, 1:kn[i],drop=FALSE]
eigenvalues[[i]] <- as.matrix(sdbase$values/nrow(datacent))
Projdata[[i]] <- t(eigenvector[[i]]) %*% data[[i]]
}
# Determination of the projection matrix
Proj <- matrix(0,ncol=n,nrow=sum(kn))
kkn <- c(0,kn)
for (k in 1:r)
Proj[sum(kkn[1:k])+(1:kkn[k+1]),] <- Projdata[[k]]
}
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# result
output <- list(
call = call,
data = data,
databar = databar,
y = y,
x = x,
v = eigenvector,
values = eigenvalues,
rho = rho,
nbvar = r,
kn = kn,
joined = joined)
class(output) <- "far"
return(output)
}
# *****************************************************************************
# Title : print.far
# ************************************************************
# Description :
# print method for the 'far' model
# Version : 1.0
# Date : 2001-03-27
# *****************************************************************************
print.far<-function(x, ..., digits = max(3, getOption("digits") - 3),
na.print = "", file="", append=TRUE)
{
variables <- c(x$y,x$x)
cat("Functional Autoregressive Model\n",file=file,append=append)
cat("Call: ", deparse(x$call), "\n\n",file=file,append=append)
if (x$joined)
{
cat("Joined variable\n",file=file,append=append)
cat("Dimension of the subspace: ", format(x$kn, digits = digits),
"\n",file=file,append=append)
var.explained <- (x$values[[1]])^2
cat("Explained Variance: ", format(sum(var.explained[1:x$kn[1]])/
sum(var.explained)*100,
digits = digits), "%\n",file=file,append=append)
cat("Estimated first Eigen values of the Covariance: ",
format((x$values[[1]])[1:x$kn], digits = digits),
"\n\n",file=file,append=append)
} else {
# printed for each variable
for (i in 1:length(x$kn))
{
cat("Variable: ", variables[i], "\n",file=file,append=append)
cat("Dimension of the subspace: ", format(x$kn[[i]],
digits = digits), "\n",file=file,append=append)
var.explained <- (x$values[[i]])^2
cat("Explained Variance: ", format(sum(var.explained[1:x$kn[i]])/
sum(var.explained)*100,
digits = digits), "%\n",file=file,append=append)
cat("Estimated first Eigen values of the Covariance: ",
format((x$values[[i]])[1:x$kn[i]], digits = digits),
"\n\n",file=file,append=append)
}
}
cat("Estimated correlation Matrix in adequate subspace: \n",
file=file,append=append)
if (file=="")
print(round(x$rho,3))
else
for (i in 1:nrow(x$rho))
cat(format(x$rho[i,],digits=digits),"\n",file=file,append=append)
cat("\n",file=file,append=append)
invisible(x)
}
# *****************************************************************************
# Title : coef.far
# ************************************************************
# Description :
# coef method for the 'far' model
# Version : 1.1
# Date : 2003-06-11
# *****************************************************************************
coef.far<-function (object, ...)
{
return(object$rho)
}
# *****************************************************************************
# Title : plot.far
# ************************************************************
# Description :
# plot method for the 'far' model
# Version : 2.0
# Date : 2001-07-06
# *****************************************************************************
plot.far <- function(x,...)
{
xval <- rownames((x$data)[[1]])
kn <- x$kn
n <- length(x$kn)
names <- names(x$data)
if (x$joined)
{
matplot(x=1:nrow(x$v[[1]]),y=x$v[[1]],type='l',xlab="time",
ylab="",main=paste(names,collapse=", "),...)
range.plot <- (par()$usr[c(1,4)])
legend(x=range.plot[1],y=range.plot[2],
legend=paste("v",1:kn[1]),lty=1:kn[1],col=1:kn[1])
} else {
for (i in 1:n)
{
matplot(x=xval,y=x$v[[i]],type='l',xlab="time",
ylab="",main=paste(names[i]),...)
range.plot <- (par()$usr[c(1,4)])
legend(x=range.plot[1],y=range.plot[2],
legend=paste("v",1:kn[i]),lty=1:kn[i],col=1:kn[i])
}
}
invisible()
}
# *****************************************************************************
# Title : predict.far
# ************************************************************
# Description :
# Computation of prediction for the class model "far"
# Version : 2.0
# Date : 2001-07-09
# *****************************************************************************
predict.far<-function(object, ..., newdata = NULL, label, na.rm=TRUE,
positive=FALSE)
{
if ( (!is.null(class(object)))
&& (class((object)) != "far")) # test the type of data
stop("object is not of class far")
if ( (!is.null(class(newdata)))
&& (class((newdata)) != "fdata")) # test the type of data
stop("newdata is not of class fdata")
x <- object$x
y <- object$y
nx <- length(x)
ny <- length(y)
r <- nx+ny
n <- ncol(newdata[[object$y[1]]])
if (nx>0) # if there are auxiliary variables
{
label <- (colnames(newdata[[y[1]]]))[-1]
data <- list()
if (is.null(object$databar))
{
for (i in 1:ny)
data[[y[i]]] <- (newdata[[y[i]]])[,-n,drop=FALSE]
for (i in 1:nx)
data[[x[i]]] <- (newdata[[x[i]]])[,-1,drop=FALSE]
} else {
for (i in 1:ny)
data[[y[i]]] <- sweep((newdata[[y[i]]])[,-n,drop=FALSE],1,
object$databar[[y[i]]],"-")
for (i in 1:nx)
data[[x[i]]] <- sweep((newdata[[x[i]]])[,-1,drop=FALSE],1,
object$databar[[x[i]]],"-")
}
class(data) <- "fdata"
n <- (n-1)
} else {
if (missing(label))
label <- c(colnames(newdata[[y[1]]])[-1],paste(n+1))
else
label <- c(colnames(newdata[[y[1]]])[-1],label)
data <- list()
if (is.null(object$databar))
{
for (i in 1:ny) {
data[[y[i]]] <- newdata[[y[i]]]
}
} else {
for (i in 1:ny) {
data[[y[i]]] <- sweep((newdata[[y[i]]]),1,
object$databar[[y[i]]],"-")
}
}
class(data) <- "fdata"
}
kn <- object$kn
if (na.rm) {
listobs <- c(apply(!is.na(data),2,all))
} else {
listobs <- rep(TRUE,n)
}
nbobs <- sum(listobs==TRUE)
if (object$joined)
{
nrowdata <- c(0,cumsum(unlist(lapply(data,nrow))))
datacent <- matrix(0,ncol=nbobs,nrow=nrowdata[r+1])
for (k in (1:r)) {
datacent[(nrowdata[k]+1):nrowdata[k+1],] <-
(data[[k]])[,listobs,drop=FALSE]
}
datacent <- t(object$v[[1]]) %*% datacent
pred <- list()
length(pred) <- ny
pred2 <- object$v[[1]] %*% (object$rho %*% datacent)
for (i in (1:ny)) {
pred[[i]] <- pred2[(nrowdata[i]+1):nrowdata[i+1],,drop=FALSE]
}
} else {
datacent <- matrix(0,ncol=nbobs,nrow=sum(kn))
kkn <- c(0,kn)
for (k in (1:r)) {
datacent[sum(kkn[1:k])+(1:kkn[k+1]),] <-
t(object$v[[k]]) %*% ((data[[k]])[,listobs,drop=FALSE])
}
pred <- list()
length(pred) <- ny
for (i in (1:ny)) {
pred[[i]] <- object$v[[i]] %*% (object$rho %*%
datacent)[sum(kkn[1:i])+(1:kkn[i+1]),,drop=FALSE]
}
}
for (i in (1:ny))
{
if (!is.null(object$databar))
pred[[i]] <- sweep((pred[[i]]),1,object$databar[[i]],"+")
if (positive)
pred[[i]] <- (pred[[i]]+abs(pred[[i]]))/2
rownames(pred[[i]]) <- rownames(data[[i]])
colnames(pred[[i]]) <- label[listobs]
}
names(pred) <- object$y
class(pred) <- "fdata"
return(pred)
}
# *****************************************************************************
# Title : far.cv
# ************************************************************
# Description :
# Cross-validation for the Model of Vectorized Functional Processes
# Version : 1.2
# Date : 2007-10-01
# *****************************************************************************
far.cv <- function(data, y, x, kn, ncv, cvcrit, center=TRUE, na.rm=TRUE,
joined=FALSE)
{
if (class(data) != "fdata") # test the type of data
stop("data is not of class fdata")
call <- match.call()
# find dimensions
n <- ncol(data[[1]])
if (missing(ncv)) ncv <- round(n/5)
n1 <- (n-ncv)
# find variables
if (missing(y))
{
if (missing(x))
{
x <- NULL
y <- names(data)
} else {
y <- x
x <- NULL
}
} else {
if (missing(x)) x <- NULL
}
if (missing(cvcrit))
{
cvcrit <- y
}
# find dimensions and test
variables <- c(y,x)
nx <- length(x)
ny <- length(y)
ncrit <- length(cvcrit)
r <- nx+ny
dim1 <- unlist(lapply(data,nrow))
dim1 <- dim1[variables]
if (joined) # if joined estimation
{
if (missing(kn)) kn <- sum(dim1)
if (1 != length(kn)) stop("Give only one kn in joined estimation.")
} else {
if (missing(kn)) kn <- dim1
if (r != length(kn))
stop("Give a kn value for each variable. Dimensions are different.")
}
# adapt data to the model chosen
data.apprent <- list()
data.test <- list()
if (nx>0)
{
n1 <- n1-1
}
for (i in 1:length(y))
{
data.apprent[[y[i]]] <- (data[[y[i]]])[,1:n1,drop=FALSE]
data.test[[y[i]]] <- (data[[y[i]]])[,n1+(1:ncv),drop=FALSE]
}
if (nx>0) for (i in 1:length(x))
{
data.apprent[[x[i]]] <- (data[[x[i]]])[,1+(1:n1),drop=FALSE]
data.test[[x[i]]] <- (data[[x[i]]])[,n1+1+(1:ncv),drop=FALSE]
}
class(data.apprent) <- "fdata"
class(data.test) <- "fdata"
# Removing unavailable data if required
if (na.rm) {
listobs <- c(apply(!is.na(data.apprent),2,all))
listobs2 <- c(FALSE,listobs) * c(listobs,FALSE) == 1
listobs.test <- c(apply(!is.na(data.test),2,all))
} else {
listobs <- rep(TRUE,n1)
listobs2 <- c(FALSE,rep(TRUE,n1-1),FALSE)
listobs.test <- rep(TRUE,ncv)
}
nbobs <- sum(listobs == TRUE)
nbobs2 <- sum(listobs2 == TRUE)
nbobs.test <- sum(listobs.test == TRUE)
# centering
if (center) {
f0 <- function(x,listobs) matrix(apply(x[,listobs],1,mean),ncol=1)
databar <- lapply(data.apprent,f0,listobs)
class(databar)<-"fdata"
data <- list()
for (i in 1:length(variables))
data[[variables[i]]] <-
sweep(data.apprent[[variables[i]]],1,
databar[[variables[i]]],"-")
class(data) <- "fdata"
data2 <- list()
for (i in 1:length(variables))
data2[[variables[i]]] <-
sweep(data.test[[variables[i]]],1,
databar[[variables[i]]],"-")
class(data2) <- "fdata"
} else {
databar <- NULL
data<-data.apprent
data2<-data.test
}
# Beginning the estimation
# -----------------------
if (joined)
{
eigenvector <- list()
eigenvalues <- list()
length(eigenvector) <- 1
length(eigenvalues) <- 1
# Calculation of the subspaces obtained from the covariance matrix
nrowdata <- c(0,cumsum(dim1))
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs)
for (i in 1:r)
datacent[(nrowdata[i]+1):nrowdata[i+1],] <-
(data[[i]])[,listobs,drop=FALSE]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[1]] <- sdbase$vectors[, 1:kn,drop=FALSE]
eigenvalues[[1]] <- as.matrix(sdbase$values/nrowdata[r+1])
# Determination of the projection matrix
datacent <- matrix(0,nrow=nrowdata[r+1],ncol=n1)
datacent2 <- matrix(0,nrow=nrowdata[r+1],ncol=nbobs.test)
for (i in 1:r)
{
datacent[(nrowdata[i]+1):nrowdata[i+1],] <- data[[i]]
datacent2[(nrowdata[i]+1):nrowdata[i+1],] <-
data2[[i]][,listobs.test,drop=FALSE]
}
} else {
eigenvector <- list()
eigenvalues <- list()
Projdata <- list()
Projdata2 <- list()
length(eigenvector) <- r
length(eigenvalues) <- r
length(Projdata) <- r
length(Projdata2) <- r
# Calculation of the subspaces obtained from the covariance matrix
for (i in 1:r)
{
datacent <- (data[[i]])[,listobs]
sdbase <- eigen(datacent %*% t(datacent / nbobs))
eigenvector[[i]] <- sdbase$vectors[, 1:kn[i],drop=FALSE]
eigenvalues[[i]] <- as.matrix(sdbase$values/nrow(datacent))
Projdata[[i]] <- t(eigenvector[[i]]) %*% data[[i]]
Projdata2[[i]] <- t(eigenvector[[i]]) %*%
data2[[i]][,listobs.test,drop=FALSE]
}
}
# Beginning of the cross-validation
# --------------------------------
output <- matrix(0,ncol=length(kn)+6,nrow=prod(kn))
f1<-function(x) mean(apply(abs(x),2,mean),na.rm=TRUE)
f2<-function(x) mean(sqrt(apply(x^2,2,mean)),na.rm=TRUE)
f3<-function(x) mean(apply(abs(x),2,max),na.rm=TRUE)
f4<-function(x) mean(abs(x),na.rm=TRUE)
f5<-function(x) sqrt(mean(x^2,na.rm=TRUE))
f6<-function(x) max(abs(x),na.rm=TRUE)
pk <- prod(kn)
lk <- length(kn)
if (joined)
{
for (k in 1:kn)
{
# projection
Proj <- t(eigenvector[[1]][,1:k,drop=FALSE]) %*% datacent
Proj2 <- t(eigenvector[[1]][,1:k,drop=FALSE]) %*% datacent2
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n1+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# Prediction
pred2 <- (eigenvector[[1]][,1:k,drop=FALSE]) %*% rho %*% Proj2
pred <- list()
pred.max <- list()
for (i in 1:ny)
{
pred[[y[i]]] <- pred2[(nrowdata[i]+1):nrowdata[i+1],
-nbobs.test,drop=FALSE]
rownames(pred[[y[i]]]) <- rownames(data[[y[i]]])
colnames(pred[[y[i]]]) <-
(colnames(data2[[y[i]]])[c(FALSE,listobs.test)])[-nbobs.test]
}
# Calculation of errors
for (i in 1:ncrit)
{
pred.max[[cvcrit[i]]] <- apply((data2[[cvcrit[i]]])[,
colnames(pred[[cvcrit[i]]]),drop=FALSE],2,max)-
apply(pred[[cvcrit[i]]],2,max)
pred[[cvcrit[i]]] <- ((data2[[cvcrit[i]]])[,
colnames(pred[[cvcrit[i]]]),drop=FALSE]-
pred[[cvcrit[i]]])
}
output[k,2] <- mean(unlist(lapply(pred,f1)))
output[k,3] <- mean(unlist(lapply(pred,f2)))
output[k,4] <- mean(unlist(lapply(pred,f3)))
output[k,5] <- mean(unlist(lapply(pred.max,f4)))
output[k,6] <- mean(unlist(lapply(pred.max,f5)))
output[k,7] <- mean(unlist(lapply(pred.max,f6)))
output[k,1] <- k
}
} else {
kn2<-c(1,kn)
for (i in 1:length(kn))
output[,i]<-rep(rep(1:kn[i],rep(pk/prod(kn[1:i]),kn[i])),
prod(kn2[1:i]))
for (j in 1:pk)
{
kn <- output[j,1:lk]
# Determination of the projection matrix
Proj <- matrix(0,ncol=n1,nrow=sum(kn))
Proj2 <- matrix(0,ncol=nbobs.test,nrow=sum(kn))
kkn <- c(0,kn)
for (k in 1:r)
{
Proj[sum(kkn[1:k])+(1:kkn[k+1]),] <-
Projdata[[k]][1:kn[k],,drop=FALSE]
Proj2[sum(kkn[1:k])+(1:kkn[k+1]),] <-
Projdata2[[k]][1:kn[k],,drop=FALSE]
}
# Calculation of the correlation matrix rho
Delta <- Proj[,listobs2[-(n1+1)],drop=FALSE] %*%
t(Proj[,listobs2[-1],drop=FALSE])
InvG <- invgen(Proj[,listobs,drop=FALSE] %*%
t(Proj[,listobs,drop=FALSE]))
rho <- Delta %*% InvG * nbobs / nbobs2
# Prediction
pred <- list()
pred.max <- list()
for (i in 1:ny)
{
pred[[y[i]]] <- (eigenvector[[1]][,1:kn[i],drop=FALSE]) %*%
(rho %*% Proj2)[sum(kkn[1:i])+(1:kkn[i+1]),
-nbobs.test,drop=FALSE]
rownames(pred[[y[i]]]) <- rownames(data[[y[i]]])
colnames(pred[[y[i]]]) <-
(colnames(data2[[y[i]]])[c(FALSE,listobs.test)])[-nbobs.test]
}
# Calculation of errors
for (i in 1:ncrit)
{
pred.max[[cvcrit[i]]] <-
apply((data2[[cvcrit[i]]])[,colnames(pred[[cvcrit[i]]]),
drop=FALSE],2,max)-apply(pred[[cvcrit[i]]],2,max)
pred[[cvcrit[i]]] <-
((data2[[cvcrit[i]]])[,colnames(pred[[cvcrit[i]]]),
drop=FALSE]-pred[[cvcrit[i]]])
}
output[j,lk+1] <- mean(unlist(lapply(pred,f1)))
output[j,lk+2] <- mean(unlist(lapply(pred,f2)))
output[j,lk+3] <- mean(unlist(lapply(pred,f3)))
output[j,lk+4] <- mean(unlist(lapply(pred.max,f4)))
output[j,lk+5] <- mean(unlist(lapply(pred.max,f5)))
output[j,lk+6] <- mean(unlist(lapply(pred.max,f6)))
}
}
# result
dimnames(output) <- list(NULL,c(paste("k",1:lk,sep=""),
"L1","L2","Linf","L1max","L2max","Linfmax"))
moutput1<-min(output[,length(kn)+1])
moutput1<-output[output[,length(kn)+1]==moutput1,]
moutput2<-min(output[,length(kn)+2])
moutput2<-output[output[,length(kn)+2]==moutput2,]
moutput3<-min(output[,length(kn)+3])
moutput3<-output[output[,length(kn)+3]==moutput3,]
moutput4<-min(output[,length(kn)+4])
moutput4<-output[output[,length(kn)+4]==moutput4,]
moutput5<-min(output[,length(kn)+5])
moutput5<-output[output[,length(kn)+5]==moutput5,]
moutput6<-min(output[,length(kn)+6])
moutput6<-output[output[,length(kn)+6]==moutput6,]
invisible(list("cv"=output,
"minL1"=moutput1,
"minL2"=moutput2,
"minLinf"=moutput3,
"minL1max"=moutput4,
"minL2max"=moutput5,
"minLinfmax"=moutput6))
}
|
library(spatialLIBD)
modeling_results <- fetch_data(type = "modeling_results")
sig_genes <- sig_genes_extract(
n = 10,
modeling_results = modeling_results,
model_type = names(modeling_results)[2],
reverse = FALSE,
sce_layer = fetch_data(type = "sce_layer")
)
save(sig_genes, file = "/dcs04/lieber/lcolladotor/spatialDLPFC_LIBD4035/spatialDLPFC/processed-data/rdata/spe/sig_genes_manual_annotations.rda")
| /code/analysis/extract_layer_sig_genes_manual_annotations.R | no_license | LieberInstitute/spatialDLPFC | R | false | false | 426 | r | library(spatialLIBD)
modeling_results <- fetch_data(type = "modeling_results")
sig_genes <- sig_genes_extract(
n = 10,
modeling_results = modeling_results,
model_type = names(modeling_results)[2],
reverse = FALSE,
sce_layer = fetch_data(type = "sce_layer")
)
save(sig_genes, file = "/dcs04/lieber/lcolladotor/spatialDLPFC_LIBD4035/spatialDLPFC/processed-data/rdata/spe/sig_genes_manual_annotations.rda")
|
library(ggplot2)
library(dplyr)
wd <- getwd()
setwd("2")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
data <- filter(NEI, fips == "24510")
png(filename = "plot3.png", width = 480, height = 480, units = "px")
g <- ggplot(data, aes(year, Emissions, color = type))
print(g + geom_line(stat = "Summary", fun.y = "sum")
+ ylab(expression('Total PM2.5 Emissions'))
+ ggtitle("Total Emissions in Baltimore City from 1999 to 2008"))
dev.off()
setwd(wd) | /plot3.R | no_license | tomashaber/ExData_Plotting2 | R | false | false | 512 | r | library(ggplot2)
library(dplyr)
wd <- getwd()
setwd("2")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
data <- filter(NEI, fips == "24510")
png(filename = "plot3.png", width = 480, height = 480, units = "px")
g <- ggplot(data, aes(year, Emissions, color = type))
print(g + geom_line(stat = "Summary", fun.y = "sum")
+ ylab(expression('Total PM2.5 Emissions'))
+ ggtitle("Total Emissions in Baltimore City from 1999 to 2008"))
dev.off()
setwd(wd) |
###############################
#Run 100 random subsets (node subsets) and compare \hat{eta}
#number of nodes in the random subsets is not fixed
###############################
setwd('./')
source('./random_subsets.R')
source('../R/read.R')
source('../multi_samples/ms_dm_call.R')
source('./post_analysis.R')
#args = commandArgs(trailingOnly=TRUE)
#mode = args[1]
mode = "cancer"
neighbor5000<-readRDS('neighbor5000.rds')
net5000<- graph_from_adjacency_matrix(neighbor5000, mode="undirected")
if (mode=="cancer") {
glist<-as.character(read.table('../../rao2014/pathway/genes_cancer_pathway', sep="\n", header=F)$V1)
} else if (mode =="signal") {
glist<-as.character(read.table('../../rao2014/pathway/genes_signal_GO.txt', sep="\n", header=F)$V1)
}
#Because this HiC data doesn't contain the Y chromosome
#Need to filter this gene list
cancer = colnames(neighbor5000)[colnames(neighbor5000)%in%glist]
total_iter = 3000
B = 200
N = 1000 # number of random node subsets
strata<-make_strata(net5000)
rand_subsets=make_subset_nodes_statified(neighbor5000, strata,length(cancer), N)
bounds_a<-c(0,100)
bounds_t<-c(0,100)
inis_par<-c(1,0,1)
all_eta<-rep(NA, N)
#columns of all_eta_CI: estimate, lower CI, upper CI, lower bound (eta_min), upper bound (eta_max)
all_eta_CI<-matrix(NA, nrow=N, ncol=5)
#Updated 20180922
#Not saving all ret because ran out of memory
#all_ret<-list()
i = 1
while (i <=N) {
nb<-rand_subsets[[i]]
n = nrow(nb)
inis_w<-rnorm(n, 0, 1)
neighbor_int<-as.integer(nb)
nodes<-colnames(nb)
y<-data[data$Ensembl_ID%in%nodes,'count']
evalues<-eigen(nb)$value
eta_min = 1/min(evalues)
eta_max = 1/max(evalues)
vars = c(0.8,0.6,(eta_max-eta_min)/2,0.5)
bounds_e<-c(eta_min, eta_max)
ret<-dm_call_wrapper(total_iter, n, y, neighbor_int, vars, bounds_a, bounds_e, bounds_t, inis_par, inis_w)
jumps <-get_jump_frequency(ret, total_iter, length(cancer))
jumps_vec<-c(jumps$w, jumps$alpha, jumps$eta, jumps$tau2)
#all_ret[[i]]<-ret
if (sum(jumps_vec<0.7)==4 & sum(jumps_vec>0.2)==4) {
newret<-delete_burn_in(ret, B, nb)
eta_hat<-mean(newret$eta)
eta_ttest<-t.test(newret$eta)
eta_l<-round(eta_ttest$conf.int[1],5)
eta_u<-round(eta_ttest$conf.int[2],5)
cat("Estimated eta is", mean(newret$eta), "(", eta_l, ",", eta_u, ")\n")
all_eta[i]<-eta_hat
all_eta_CI[i,]<-c(eta_hat, eta_l, eta_u, eta_min, eta_max)
i = i+1
} else {
cat("Jump counts are invalid, results are disregarded\n")
}
rm("ret")
}
#############################
save.image(file=paste("./rand_nodes_",mode,"_workspace.RData",sep = ""))
saveRDS(all_eta_CI, file = paste("./rand_nodes_",mode,"_all_eta_CI",sep=""))
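#--- Hedged post-processing sketch (editor addition, not part of the original
#--- script). Column meaning follows the comments above (estimate, lower CI,
#--- upper CI, and the admissible bounds on eta); the file name matches the
#--- saveRDS() call above when mode == "cancer".
#res <- readRDS("./rand_nodes_cancer_all_eta_CI")
#colnames(res) <- c("eta_hat", "ci_lower", "ci_upper", "eta_min", "eta_max")
#summary(res[, "eta_hat"])   # spread of the point estimates across subsets
#hist(res[, "eta_hat"], main = "eta_hat over random node subsets")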
| /analysis/tuning_rand_nodes_cancer.R | no_license | ashleyzhou972/MRF | R | false | false | 2,638 | r | ###############################
#Run 100 random subsets (node subsets) and compare \hat{eta}
#number of nodes in the random subsets is not fixed
###############################
setwd('./')
source('./random_subsets.R')
source('../R/read.R')
source('../multi_samples/ms_dm_call.R')
source('./post_analysis.R')
#args = commandArgs(trailingOnly=TRUE)
#mode = args[1]
mode = "cancer"
neighbor5000<-readRDS('neighbor5000.rds')
net5000<- graph_from_adjacency_matrix(neighbor5000, mode="undirected")
if (mode=="cancer") {
glist<-as.character(read.table('../../rao2014/pathway/genes_cancer_pathway', sep="\n", header=F)$V1)
} else if (mode =="signal") {
glist<-as.character(read.table('../../rao2014/pathway/genes_signal_GO.txt', sep="\n", header=F)$V1)
}
#Because this HiC data doesn't contain the Y chromosome
#Need to filter this gene list
cancer = colnames(neighbor5000)[colnames(neighbor5000)%in%glist]
total_iter = 3000
B = 200
N = 1000 # number of random node subsets
strata<-make_strata(net5000)
rand_subsets=make_subset_nodes_statified(neighbor5000, strata,length(cancer), N)
bounds_a<-c(0,100)
bounds_t<-c(0,100)
inis_par<-c(1,0,1)
all_eta<-rep(NA, N)
#columns of all_eta_CI: estimate, lower CI, upper CI, lower bound (eta_min), upper bound (eta_max)
all_eta_CI<-matrix(NA, nrow=N, ncol=5)
#Updated 20180922
#Not saving all ret because ran out of memory
#all_ret<-list()
i = 1
while (i <=N) {
nb<-rand_subsets[[i]]
n = nrow(nb)
inis_w<-rnorm(n, 0, 1)
neighbor_int<-as.integer(nb)
nodes<-colnames(nb)
y<-data[data$Ensembl_ID%in%nodes,'count']
evalues<-eigen(nb)$value
eta_min = 1/min(evalues)
eta_max = 1/max(evalues)
vars = c(0.8,0.6,(eta_max-eta_min)/2,0.5)
bounds_e<-c(eta_min, eta_max)
ret<-dm_call_wrapper(total_iter, n, y, neighbor_int, vars, bounds_a, bounds_e, bounds_t, inis_par, inis_w)
jumps <-get_jump_frequency(ret, total_iter, length(cancer))
jumps_vec<-c(jumps$w, jumps$alpha, jumps$eta, jumps$tau2)
#all_ret[[i]]<-ret
if (sum(jumps_vec<0.7)==4 & sum(jumps_vec>0.2)==4) {
newret<-delete_burn_in(ret, B, nb)
eta_hat<-mean(newret$eta)
eta_ttest<-t.test(newret$eta)
eta_l<-round(eta_ttest$conf.int[1],5)
eta_u<-round(eta_ttest$conf.int[2],5)
cat("Estimated eta is", mean(newret$eta), "(", eta_l, ",", eta_u, ")\n")
all_eta[i]<-eta_hat
all_eta_CI[i,]<-c(eta_hat, eta_l, eta_u, eta_min, eta_max)
i = i+1
} else {
cat("Jump counts are invalid, results are disregarded\n")
}
rm("ret")
}
#############################
save.image(file=paste("./rand_nodes_",mode,"_workspace.RData",sep = ""))
saveRDS(all_eta_CI, file = paste("./rand_nodes_",mode,"_all_eta_CI",sep=""))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rprebinom1logpost.R
\name{rprebinom1logpost}
\alias{rprebinom1logpost}
\title{Calculate log-posterior in R}
\usage{
rprebinom1logpost(testPars, stanObj)
}
\arguments{
\item{testPars}{A named list of parameters to use}
\item{stanObj}{A StanNetRun object to supply the data}
}
\value{
The value of the log-posterior distribution at the test parameters and data
}
\description{
Computes the log-posterior for a non-centred random effect model with binomial likelihood using the logit link and default priors
}
| /man/rprebinom1logpost.Rd | no_license | augustinewigle/StanNet | R | false | true | 586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rprebinom1logpost.R
\name{rprebinom1logpost}
\alias{rprebinom1logpost}
\title{Calculate log-posterior in R}
\usage{
rprebinom1logpost(testPars, stanObj)
}
\arguments{
\item{testPars}{A named list of parameters to use}
\item{stanObj}{A StanNetRun object to supply the data}
}
\value{
The value of the log-posterior distribution at the test parameters and data
}
\description{
Computes the log-posterior for a non-centred random effect model with binomial likelihood using the logit link and default priors
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coin.R
\name{rare_coin}
\alias{rare_coin}
\title{Rare coin class}
\usage{
rare_coin(name, year, ...)
}
\arguments{
\item{name}{str, name of coin}
\item{year}{int, year coin was made}
\item{...}{parameters to feed into \code{coin}
subclass}
}
\value{
rare_coin object
}
\description{
Rare coin class
}
\examples{
rare_coin(name = "Lincoln penny", year = 1972)
}
| /man/rare_coin.Rd | permissive | benjaminleroy/coin | R | false | true | 442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coin.R
\name{rare_coin}
\alias{rare_coin}
\title{Rare coin class}
\usage{
rare_coin(name, year, ...)
}
\arguments{
\item{name}{str, name of coin}
\item{year}{int, year coin was made}
\item{...}{parameters to feed into \code{coin}
subclass}
}
\value{
rare_coin object
}
\description{
Rare coin class
}
\examples{
rare_coin(name = "Lincoln penny", year = 1972)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioRad.R
\name{[.vpts}
\alias{[.vpts}
\title{Subset `vpts`}
\usage{
\method{[}{vpts}(x, i)
}
\arguments{
\item{x}{object of class 'vpts'}
\item{i}{indices specifying elements to extract}
}
\description{
Extract by index from a vpts
}
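\examples{
# Hedged illustration added by the editor: `my_vpts` stands for any existing
# object of class 'vpts'; the first ten profiles would be selected with
\dontrun{
my_vpts[1:10]
}
}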
| /man/sub-.vpts.Rd | permissive | macheng94/bioRad | R | false | true | 313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioRad.R
\name{[.vpts}
\alias{[.vpts}
\title{Subset `vpts`}
\usage{
\method{[}{vpts}(x, i)
}
\arguments{
\item{x}{object of class 'vpts'}
\item{i}{indices specifying elements to extract}
}
\description{
Extract by index from a vpts
}
|
ggplot <- ggplot2::ggplot
aes <- ggplot2::aes
geom_point <- ggplot2::geom_point
geom_line <- ggplot2::geom_line
scale_fill_viridis_c <- ggplot2::scale_fill_viridis_c
labs <- ggplot2::labs
theme <- ggplot2::theme
scale_x_continuous <- ggplot2::scale_x_continuous
margin <- ggplot2::margin
guides <- ggplot2::guides
#' Plot of the expected covariance or correlation as a function of distance
#'
#' @param sigma_Z the standard error
#' @param the_radius radius of the window of the moving average (many are possibles ex : c(1, 2, 3))
#' @param the_directions ex : list(c(0,1), c(1,1))
#' @param xlabs the label of x axis
#' @param ylabs the label of y axis
#' @param x A string : "Distance" by default, or "Distance_km"
#' @param y A string : "Theoritical_covariance" by default, or "Theoritical_correlation"
#' @param rayon A string for the unit of radius : "Radius" by default, or "Radius_km"
#' @param the_scale how many km/pixel
#' @param max the maximum value for x axis (the minimum is always zero)
#' @param maxy_sup the maximum value for y axis
#' @param maxy_inf the minimum value for y axis
#' @param connect TRUE by default : to connect points for a given radius and direction
#' @param director_vector TRUE by default : the given direction is replaced by its director (unit) vector
#'
#' @return a plot of the expected covariance/correlation according to the distance
#' @export
#'
#' @examples
#' plot_expected_cov(1, c(1,2,3), list(c(0,1), c(1,1), c(1,2)))
plot_expected_cov <- function(sigma_Z, the_radius, the_directions, xlabs ="Distance between variables", ylabs ="Theoritical covariance", x = "Distance", y = "Theoritical_covariance", rayon = "Radius", the_scale = 1, max = "", maxy_sup ="", maxy_inf="", connect = TRUE, director_vector = TRUE){
df <- data_frame_expected_cov(sigma_Z, the_radius, the_directions, the_scale)
colnames(df) <- c("Distance", "Distance_km", "Theoritical_covariance", "Theoritical_correlation", "Direction", "Radius", "Radius_km")
################ COVARIANCE ################
df$Radius <- as.factor(df$Radius)
df$Radius_km <- as.factor(df$Radius_km)
if(y == "Theoritical_covariance"){
if(x== "Distance"){p <- ggplot(data = df, aes(x = Distance, y = Theoritical_covariance))}
if(x== "Distance_km"){p <- ggplot(data = df, aes(x = Distance_km, y = Theoritical_covariance))}
if (rayon == "Radius"){p <- p + geom_point(aes(col = Radius, shape = Direction))}
if (rayon == "Radius_km"){p <- p + geom_point(aes(col = Radius_km, shape = Direction))}
if(connect == TRUE){
if (rayon == "Radius"){p <- p + geom_line(aes(col = Radius, group = interaction(Direction, Radius)))}
if (rayon == "Radius_km"){p <- p + geom_line(aes(col = Radius_km, group = interaction(Direction, Radius_km)))}
}
#return(p)
}
###########CORRELATION###############
if(y == "Theoritical_correlation"){
if(x== "Distance"){p <- ggplot(data = df, aes(x = Distance, y = Theoritical_correlation))}
if(x== "Distance_km"){p <- ggplot(data = df, aes(x = Distance_km, y = Theoritical_correlation))}
if (rayon == "Radius"){p <- p + geom_point(aes(shape = Direction, col = factor(Radius)))}
if (rayon == "Radius_km"){p <- p + geom_point(aes(shape = Direction, col = factor(Radius_km)))}
if(connect == TRUE){
if (rayon == "Radius"){p <- p + geom_line(aes(col = factor(Radius)))}
if (rayon == "Radius_km"){p <- p + geom_line(aes(col = factor(Radius_km)))}
}
}
p <- p +
viridis::scale_color_viridis(discrete = TRUE, option = "B")+
theme(
legend.position = c(.95, .95),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6)
)
if (max != ""){p <- p + scale_x_continuous(breaks = scales::pretty_breaks()) # + xlim(min=0, max = max)
}
if (maxy_sup != ""){p <- p } #+ ylim(min = maxy_inf, max = maxy_sup) }
p <- p +
guides(col = ggplot2::guide_legend("Radius r \n of the \n moving average's \n window"))+
labs(x = xlabs, y = ylabs)
gg_ply <- plotly::ggplotly(p) #|>
# #plotly::layout(legend = list(title=list(text="Radius r \n of the \n moving average's \n window"))) |>
# plotly::layout(legend = list(orientation = "v", x = 0.6, y = 0.99))
gg_ply <- plotly::hide_legend(gg_ply)
return(gg_ply)
}
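#--- Hedged usage sketch (editor addition): a second call mirroring the roxygen
#--- example above, but plotting the theoretical correlation against distance
#--- in km, assuming a scale of 2 km per pixel.
# plot_expected_cov(sigma_Z = 1, the_radius = c(1, 2, 3),
#                   the_directions = list(c(0, 1), c(1, 1)),
#                   y = "Theoritical_correlation", x = "Distance_km",
#                   rayon = "Radius_km", the_scale = 2)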
| /R/plot_expected_cov.R | permissive | C-Juliette/randomfields | R | false | false | 4,231 | r | ggplot <- ggplot2::ggplot
aes <- ggplot2::aes
geom_point <- ggplot2::geom_point
geom_line <- ggplot2::geom_line
scale_fill_viridis_c <- ggplot2::scale_fill_viridis_c
labs <- ggplot2::labs
theme <- ggplot2::theme
scale_x_continuous <- ggplot2::scale_x_continuous
margin <- ggplot2::margin
guides <- ggplot2::guides
#' Plot of the expected covariance or correlation as a function of distance
#'
#' @param sigma_Z the standard error
#' @param the_radius radius of the window of the moving average (many are possibles ex : c(1, 2, 3))
#' @param the_directions ex : list(c(0,1), c(1,1))
#' @param xlabs the label of x axis
#' @param ylabs the label of y axis
#' @param x A string : "Distance" by default, or "Distance_km"
#' @param y A string : "Theoritical_covariance" by default, or "Theoritical_correlation"
#' @param rayon A string for the unit of radius : "Radius" by default, or "Radius_km"
#' @param the_scale how many km/pixel
#' @param max the maximum value for x axis (the minimum is always zero)
#' @param maxy_sup the maximum value for y axis
#' @param maxy_inf the minimum value for y axis
#' @param connect TRUE by default : to connect points for a given radius and direction
#' @param director_vector TRUE by default : the given direction is replaced by its director (unit) vector
#'
#' @return a plot of the expected covariance/correlation according to the distance
#' @export
#'
#' @examples
#' plot_expected_cov(1, c(1,2,3), list(c(0,1), c(1,1), c(1,2)))
plot_expected_cov <- function(sigma_Z, the_radius, the_directions, xlabs ="Distance between variables", ylabs ="Theoritical covariance", x = "Distance", y = "Theoritical_covariance", rayon = "Radius", the_scale = 1, max = "", maxy_sup ="", maxy_inf="", connect = TRUE, director_vector = TRUE){
df <- data_frame_expected_cov(sigma_Z, the_radius, the_directions, the_scale)
colnames(df) <- c("Distance", "Distance_km", "Theoritical_covariance", "Theoritical_correlation", "Direction", "Radius", "Radius_km")
################ COVARIANCE ################
df$Radius <- as.factor(df$Radius)
df$Radius_km <- as.factor(df$Radius_km)
if(y == "Theoritical_covariance"){
if(x== "Distance"){p <- ggplot(data = df, aes(x = Distance, y = Theoritical_covariance))}
if(x== "Distance_km"){p <- ggplot(data = df, aes(x = Distance_km, y = Theoritical_covariance))}
if (rayon == "Radius"){p <- p + geom_point(aes(col = Radius, shape = Direction))}
if (rayon == "Radius_km"){p <- p + geom_point(aes(col = Radius_km, shape = Direction))}
if(connect == TRUE){
if (rayon == "Radius"){p <- p + geom_line(aes(col = Radius, group = interaction(Direction, Radius)))}
if (rayon == "Radius_km"){p <- p + geom_line(aes(col = Radius_km, group = interaction(Direction, Radius_km)))}
}
#return(p)
}
###########CORRELATION###############
if(y == "Theoritical_correlation"){
if(x== "Distance"){p <- ggplot(data = df, aes(x = Distance, y = Theoritical_correlation))}
if(x== "Distance_km"){p <- ggplot(data = df, aes(x = Distance_km, y = Theoritical_correlation))}
if (rayon == "Radius"){p <- p + geom_point(aes(shape = Direction, col = factor(Radius)))}
if (rayon == "Radius_km"){p <- p + geom_point(aes(shape = Direction, col = factor(Radius_km)))}
if(connect == TRUE){
if (rayon == "Radius"){p <- p + geom_line(aes(col = factor(Radius)))}
if (rayon == "Radius_km"){p <- p + geom_line(aes(col = factor(Radius_km)))}
}
}
p <- p +
viridis::scale_color_viridis(discrete = TRUE, option = "B")+
theme(
legend.position = c(.95, .95),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6)
)
if (max != ""){p <- p + scale_x_continuous(breaks = scales::pretty_breaks()) # + xlim(min=0, max = max)
}
if (maxy_sup != ""){p <- p } #+ ylim(min = maxy_inf, max = maxy_sup) }
p <- p +
guides(col = ggplot2::guide_legend("Radius r \n of the \n moving average's \n window"))+
labs(x = xlabs, y = ylabs)
gg_ply <- plotly::ggplotly(p) #|>
# #plotly::layout(legend = list(title=list(text="Radius r \n of the \n moving average's \n window"))) |>
# plotly::layout(legend = list(orientation = "v", x = 0.6, y = 0.99))
gg_ply <- plotly::hide_legend(gg_ply)
return(gg_ply)
}
|
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
## Tests for copy() [implemented as nimCopy], values(), and values()<-
## These use some of the same internals (accessors), so they are in the same testing file.
## These tests use lists of nimbleFunctions, initialization code, and testing code.
## They include copying to and from models and/or modelValues, using default arguments, using logProb = [TRUE|FALSE], and using the same or different blocks of variables.
## Checks are made internally for uncompiled and compiled cases. Then uncompiled and compiled outcomes are compared to check that they behaved identically.
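## Editor's note (hedged illustration of the pattern exercised below): the test
## cases boil down to calls of the form
##   nimCopy(from = model, to = modelValuesObject, rowTo = 3, logProb = TRUE)
##   nimCopy(from = modelValuesObject, row = 3, to = model, logProb = TRUE)
## with optional 'nodes'/'nodesTo' arguments restricting and remapping the
## copied variable blocks; the nimbleFunction definitions that follow wrap
## exactly these calls.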
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
context('Testing of nimCopy and values')
#############
## Here is a model with deterministic and stochastic variables of dimensions 0-4, including a multivariate node
copyTestModelCode <- nimbleCode({
x0 ~ dnorm(0,1); d0 <- x0 + 10000
for(i in 1:4) {x1[i] ~ dnorm(0,1); d1[i] <- x1[i]+10000}
for(i in 1:4) for(j in 1:4) {x2[i,j] ~ dnorm(0,1); d2[i,j] <- x2[i,j]+10000}
for(i in 1:4) for(j in 1:4) for(k in 1:4) {x3[i,j,k] ~ dnorm(0, 1); d3[i,j,k] <- x3[i,j,k]+10000}
for(i in 1:4) for(j in 1:4) for(k in 1:4) for(l in 1:4) {x4[i,j,k,l] ~ dnorm(0, 1); d4[i,j,k,l] <- x4[i,j,k,l]+10000}
v1[1:4] ~ dmnorm(v1mu[1:4], v1sigma[1:4, 1:4]) ## not testing indexing here!
for(i in 1:5) w1[ 2:5, i] ~ dmnorm(v1mu[1:4], v1sigma[1:4, 1:4])
for(i in 1:4) foo[i] <- w1[i+1, 1]
})
copyTestConstants <- list()
copyTestData <- list(v1mu = rep(0,4), v1sigma = diag(4))
## A list of nimbleFunctions used for various cases
copyTestNFcodeList <- list(
## First 3 are ModelToMV
nfModelToMVall = quote({
nimbleFunction(
setup = function(from, to, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, to = to, rowTo = rowTo, logProb = logProb)
}
)
}),
nfModelToMVsomeSame = quote({
nimbleFunction(
setup = function(from, nodes, to, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, rowTo = rowTo, logProb = logProb)
}
)
}),
nfModelToMVsomeDiff = quote({
nimbleFunction(
setup = function(from, nodes, to, nodesTo, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, rowTo = rowTo, nodesTo = nodesTo, logProb = logProb)
}
)
}),
## Next 3 are MVToModel
nfMVToModelAll = quote({
nimbleFunction(
setup = function(from, to, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, to = to, row = row, logProb = logProb)
}
)
}),
nfMVToModelSomeSame = quote({
nimbleFunction(
setup = function(from, nodes, to, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, row = row, logProb = logProb)
}
)
}),
nfMVToModelSomeDiff = quote({
nimbleFunction(
setup = function(from, nodes, to, nodesTo, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, nodesTo = nodesTo, row = row, logProb = logProb)
}
)
})
)
## A list of inputs for runOneCopyTest
copyTestCaseList <- list(
## First 2 are Model2MV checking default of all nodes if nodes arg is not provided
Model2MVall_logProbTRUE = list(
label = 'Model2MVall_logProbTRUE', ## A label
nfName = 'nfModelToMVall', ## Name of a nimbleFunction from above list to define as nf
seed = round(runif(1, 1, 10000)), ## A seed generated once then the same for compiled and uncompiled cases
initCode = quote({simulate(m); calculate(m)}), ## Code to initiate the model and/or modelValues
compile = c(FALSE, TRUE), ## sequence of yes/no compile
compareRtoCpp = TRUE, ## Should uncompiled and compiled be compared? When TRUE, only works if compile == c(FALSE, TRUE), in that order
nfMcode = quote({nf(m, mv, logProb = TRUE)}), ## code for specializing the nf
testThatLines = quote({ ## code for testing
for(oneName in m$getVarNames(includeLogProb = TRUE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]],
info = 'Model2MVall_logProbTRUE'))
}
})
),
Model2MVall_logProbFALSE = list(
label = 'Model2MVall_logProbFALSE',
nfName = 'nfModelToMVall',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, logProb = FALSE)}),
testThatLines = quote({
for(oneName in m$getVarNames(includeLogProb = FALSE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]],
info = 'Model2MVall_logProbFALSE'))
}
lpNames <- m$getVarNames(includeLogProb = TRUE)
lpNames <- lpNames[grep('logProb_', lpNames)]
for(oneName in lpNames) {
test_that('single', expect_false(identical(m[[oneName]], mv[[oneName]][[3]])))
}
})
),
## Next 2 copy some blocks of some variables, using the same for from and to (nodes provided but nodesTo not provided)
Model2MVsomeSame_logProbFALSE = list( ## some = some blocks of nodes, ## same = same nodes from and to
label = 'Model2MVsomeSame_logProbFALSE',
nfName = 'nfModelToMVsomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = FALSE)}),
testThatLines = quote({
info <- 'Model2MVsomeSame_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
})
),
Model2MVsomeSame_logProbTRUE = list(
label = 'Model2MVsomeSame_logProbTRUE',
nfName = 'nfModelToMVsomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = TRUE)}),
testThatLines = quote({
info = 'Model2MVsomeSame_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[2:3], as.numeric(mv[['logProb_x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][2:3, 2:3]), as.numeric(mv[['logProb_x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info )) ## Note these last two use the collapsing of logProb vars for multivariate nodes
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 2:3]), as.numeric(mv[['logProb_w1']][[3]][2, 2:3]), info = info ))
})
),
## Next 2 copy some node blocks, with different nodes and nodesTo
Model2MVsomeDiff_logProbFALSE = list( ## diff = different nodesTo from nodes
label = 'Model2MVsomeDiff_logProbFALSE',
nfName = 'nfModelToMVsomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = FALSE)}),
testThatLines = quote({
info = 'Model2MVsomeDiff_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[1:2], as.numeric(mv[['x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[1:2], as.numeric(mv[['d1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][1:2, 1:2]), as.numeric(mv[['x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][1:2, 1:2]), as.numeric(mv[['d2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][1:2, 1:2, 1:2]), as.numeric(mv[['x3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][1:2, 1:2, 1:2]), as.numeric(mv[['d3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['x4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['d4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][1:2]), as.numeric(mv[['v1']][[3]][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][1:2, 1:2]), as.numeric(mv[['w1']][[3]][3:4, 3:4]), info = info ))
})
),
Model2MVsomeDiff_logProbTRUE = list( ## diff = different nodesTo from nodes
label = 'Model2MVsomeDiff_logProbTRUE',
nfName = 'nfModelToMVsomeDiff',
        seed = round(runif(1, 1, 10000)),
        initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = TRUE)}),
testThatLines = quote({
info = 'Model2MVsomeDiff_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[1:2], as.numeric(mv[['x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[1:2], as.numeric(mv[['d1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][1:2, 1:2]), as.numeric(mv[['x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][1:2, 1:2]), as.numeric(mv[['d2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][1:2, 1:2, 1:2]), as.numeric(mv[['x3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][1:2, 1:2, 1:2]), as.numeric(mv[['d3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['x4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['d4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][1:2]), as.numeric(mv[['v1']][[3]][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][1:2, 1:2]), as.numeric(mv[['w1']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[1:2], as.numeric(mv[['logProb_x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][1:2, 1:2]), as.numeric(mv[['logProb_x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][1:2, 1:2, 1:2]), as.numeric(mv[['logProb_x3']][[3]][3:4, 3:4, 3:4]), info = info ))
            test_that('single', expect_identical(as.numeric(m[['logProb_x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['logProb_x4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 1:2]), as.numeric(mv[['logProb_w1']][[3]][2, 3:4]), info = info )) ## curious case, logProbs go in [2, i]
})
)
)
## This list of cases is for copying from a modelValues to a model
copyTestCaseListMVtoModel <- list(
## First 2 are MV2Model with all nodes (default blank nodes arg)
MV2ModelAll_logProbTRUE = list(
label = 'MV2ModelAll_logProbTRUE',
nfName = 'nfMVToModelAll',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, logProb = TRUE)}),
testThatLines = quote({
for(oneName in mv$getVarNames(includeLogProb = TRUE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]], info = 'MV2ModelAll_logProbTRUE' ))
}
})
),
MV2ModelAll_logProbFALSE = list(
label = 'MV2ModelAll_logProbFALSE',
nfName = 'nfMVToModelAll',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, logProb = FALSE)}),
testThatLines = quote({
for(oneName in mv$getVarNames(includeLogProb = FALSE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]] ))
}
lpNames <- mv$getVarNames(includeLogProb = TRUE)
lpNames <- lpNames[grep('logProb_', lpNames)]
for(oneName in lpNames) {
test_that('single', expect_false(identical(m[[oneName]], mv[[oneName]][[3]])))
}
})
),
    ## Next 2 are MV2Model with nodes but not nodesTo
MV2ModelSomeSame_logProbFALSE = list( ## some = some blocks of nodes, ## same = same nodes from and to
label = 'MV2ModelSomeSame_logProbFALSE',
nfName = 'nfMVToModelSomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = FALSE)}),
testThatLines = quote({
info <- 'MV2ModelSomeSame_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
})
),
    MV2ModelSomeSame_logProbTRUE = list(
label = 'MV2ModelSomeSame_logProbTRUE',
nfName = 'nfMVToModelSomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = TRUE)}),
testThatLines = quote({
info <- 'MV2ModelSomeSame_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[2:3], as.numeric(mv[['logProb_x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][2:3, 2:3]), as.numeric(mv[['logProb_x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 2:3]), as.numeric(mv[['logProb_w1']][[3]][2, 2:3]), info = info ))
})
),
## Next 2 are MV2Model with nodes and nodesTo provided
MV2ModelSomeDiff_logProbFALSE = list( ## diff = different nodesTo from nodes
        label = 'MV2ModelSomeDiff_logProbFALSE',
nfName = 'nfMVToModelSomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = FALSE)}),
testThatLines = quote({
            info <- 'MV2ModelSomeDiff_logProbFALSE'
test_that('single', expect_identical(as.numeric(mv[['x0']][[3]]), as.numeric(m[['x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d0']][[3]]), as.numeric(m[['d0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x1']][[3]])[1:2], as.numeric(m[['x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['d1']][[3]])[1:2], as.numeric(m[['d1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['x2']][[3]][1:2, 1:2]), as.numeric(m[['x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d2']][[3]][1:2, 1:2]), as.numeric(m[['d2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['d3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['d4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['v1']][[3]][1:2]), as.numeric(m[['v1']][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['w1']][[3]][1:2, 1:2]), as.numeric(m[['w1']][3:4, 3:4]), info = info ))
})
),
MV2ModelSomeDiff_logProbTRUE = list( ## diff = different nodesTo from nodes
        label = 'MV2ModelSomeDiff_logProbTRUE',
nfName = 'nfMVToModelSomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = TRUE)}),
testThatLines = quote({
            info = 'MV2ModelSomeDiff_logProbTRUE'
test_that('single', expect_identical(as.numeric(mv[['x0']][[3]]), as.numeric(m[['x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d0']][[3]]), as.numeric(m[['d0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x1']][[3]])[1:2], as.numeric(m[['x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['d1']][[3]])[1:2], as.numeric(m[['d1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['x2']][[3]][1:2, 1:2]), as.numeric(m[['x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d2']][[3]][1:2, 1:2]), as.numeric(m[['d2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['d3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['d4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['v1']][[3]][1:2]), as.numeric(m[['v1']][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['w1']][[3]][1:2, 1:2]), as.numeric(m[['w1']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x0']][[3]]), as.numeric(m[['logProb_x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x1']][[3]])[1:2], as.numeric(m[['logProb_x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x2']][[3]][1:2, 1:2]), as.numeric(m[['logProb_x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['logProb_x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['logProb_x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_v1']][[3]][1]), as.numeric(m[['logProb_v1']][1]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_w1']][[3]][2, 1:2]), as.numeric(m[['logProb_w1']][2, 3:4]), info = info ))
})
)
)
## This is code that works for all cases to check that the states of the compiled and uncompiled models are identical
compareCompiledToUncompiled_code <- quote({
for(oneName in m$getVarNames(includeLogProb = TRUE)) {
info <- paste("compareCompiledToUncompiled",oneName)
test_that('single', expect_identical(as.numeric(origM[[oneName]]), as.numeric(m[[oneName]]), info = info ))
}
for(oneName in mv$getVarNames(includeLogProb = TRUE)) {
info <- paste("compareCompiledToUncompiled",oneName)
        test_that('single', expect_identical(as.numeric(origMV[[oneName]][[3]]), as.numeric(mv[[oneName]][[3]]), info = info ))
}
})
## Function to iterate through test cases
runCopyTests <- function(testCaseList = copyTestCaseList, testModelCode = copyTestModelCode, testNFcodeList = copyTestNFcodeList, dirName = NULL, verbose = nimbleOptions()$verbose) {
for(copyTestCase in testCaseList) {
runOneCopyTest(copyTestCase, testModelCode = testModelCode, testNFcodeList = testNFcodeList, dirName = dirName, verbose = verbose)
}
}
## Function to handle one test case
runOneCopyTest <- function(copyTestCase, testModelCode, testNFcodeList, dirName = NULL, verbose = nimbleOptions()$verbose) {
if(verbose) writeLines(paste0('Testing ', copyTestCase$label))
compileVec <- copyTestCase$compile
for(compile in compileVec) {
if(verbose) writeLines(paste0('COMPILE = ', compile))
m <- nimbleModel(testModelCode, constants = copyTestConstants, data = copyTestData)
mv <- modelValues(m, 3)
set.seed(copyTestCase$seed)
eval(copyTestCase$initCode)
nf <- eval(testNFcodeList[[ copyTestCase$nfName ]])
nfM <- eval(copyTestCase$nfMcode)
if(compile) {
cm <- compileNimble(m, nfM, dirName = dirName)
m <- cm$m
nfM <- cm$nfM
mv <- mv$CobjectInterface
}
runans <- try(nfM$run())
if(copyTestCase$compareRtoCpp & !compile) {
origM <- m
origMV <- mv
}
if(inherits(runans, 'try-error')) stop(paste('Error executing a copy test from case ', copyTestCase$label))
        eval(copyTestCase$testThatLines)
if(copyTestCase$compareRtoCpp & compile) {
eval(compareCompiledToUncompiled_code)
}
}
}
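## Illustrative sketch (not executed as part of the tests): the nimCopy() calls that the cases
## above exercise through nimbleFunctions can also be made directly in R. The object names
## below (mDemo, mvDemo) are local to this sketch only, and the if(FALSE) guard keeps it inert.
if(FALSE) {
    mDemo <- nimbleModel(copyTestModelCode, constants = copyTestConstants, data = copyTestData)
    mvDemo <- modelValues(mDemo, 3)
    simulate(mDemo); calculate(mDemo)
    ## copy every variable (including logProbs) from the model into row 3 of the modelValues
    nimCopy(from = mDemo, to = mvDemo, rowTo = 3, logProb = TRUE)
    ## copy one block back from row 3 of the modelValues into a different block of the model
    nimCopy(from = mvDemo, nodes = 'x1[1:2]', to = mDemo, nodesTo = 'x1[3:4]', row = 3)
}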
###############
## Master call:
runCopyTests(copyTestCaseList, dirName = getwd(), verbose = nimbleOptions()$verbose)
runCopyTests(copyTestCaseListMVtoModel, dirName = getwd(), verbose = nimbleOptions()$verbose)
###################
### Testing for values() and values()<-
### The general layout that follows is similar to above but is completely separate
## List of nimbleFunction definitions to use
copyTestNFcodeListValues <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function() {
P <- values(model, nodes)
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(P = double(1)) {
values(model, nodes) <<- P
})
})
)
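## Illustrative sketch (not executed as part of the tests): values() flattens the requested
## nodes into a single numeric vector, in the order the nodes are listed, and values()<-
## writes such a vector back into the model. Object names are local to this sketch only,
## and the if(FALSE) guard keeps it inert.
if(FALSE) {
    mDemo <- nimbleModel(copyTestModelCode, constants = copyTestConstants, data = copyTestData)
    simulate(mDemo)
    v <- values(mDemo, c('x0', 'x1[2:3]'))     ## length 3: x0, x1[2], x1[3]
    values(mDemo, c('x0', 'x1[2:3]')) <- v + 1 ## writes the shifted values back into the model
}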
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValues <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- nfM$run()
checkP <- numeric()
for(i in nodes) checkP <- c(checkP, m[[i]])
test_that('getValues', expect_identical(P, checkP, info = "getValues copyTestCaseListValues"))
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- numeric()
      for(i in nodes) P <- c(P, m[[i]]) ## builds P up to the right length
P[] <- rnorm(length(P))
nfM$run(P)
i <- 0;
for(oneName in nodes) {
modelP <- m[[oneName]]
checkP <- P[i + 1:length(modelP)]
i <- i + length(modelP)
test_that('setValues', expect_identical(as.numeric(modelP), as.numeric(checkP ), info = "setValues copyTestCaseListValues"))
}
})
)
)
## Iterate through the value tests
runValuesTests <- function(testCaseList = copyTestCaseListValues, testModelCode = copyTestModelCode, testNFcodeList = copyTestNFcodeListValues, testNFconstantsList = copyTestConstants, testNFdataList = copyTestData, dirName = NULL, verbose = nimbleOptions()$verbose) {
for(copyTestCase in testCaseList) {
runOneValuesTest(copyTestCase, testModelCode = testModelCode, testNFcodeList = testNFcodeList, testNFconstantsList = testNFconstantsList, testNFdataList = testNFdataList, dirName = dirName, verbose = verbose)
}
}
## run one value test case
runOneValuesTest <- function(copyTestCase, testModelCode, testNFcodeList, testNFconstantsList, testNFdataList, dirName = NULL, verbose = nimbleOptions()$verbose) {
if(verbose) writeLines(paste0('Testing ', copyTestCase$label))
compileVec <- copyTestCase$compile
for(compile in compileVec) {
if(verbose) writeLines(paste0('COMPILE = ', compile))
m <- nimbleModel(testModelCode, constants = testNFconstantsList, data = testNFdataList)
set.seed(copyTestCase$seed)
eval(copyTestCase$initCode)
nf <- eval(testNFcodeList[[ copyTestCase$nfName ]])
nfM <- eval(copyTestCase$nfMcode)
if(compile) {
cm <- compileNimble(m, nfM, dirName = dirName)
m <- cm$m
nfM <- cm$nfM
}
        eval(copyTestCase$testThatLines)
}
}
## Master call for values() and values()<- tests
runValuesTests(copyTestCaseListValues)
###################
### Testing for values() and values()<- with indexing of node vector inside values()
### The general layout that follows is similar to above but is completely separate
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValuesIndexed <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
for(i in seq_along(nodes)) {
P <- nfM$run(i)
test_that('getValues', expect_identical(P, as.numeric(m[[nodes[i]]]), info = "getValues copyTestCaseListValuesIndexed"))
}
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
for(i in seq_along(nodes)) {
oneName <- nodes[i]
P <- rnorm(length(m[[oneName]]))
nfM$run(P, i)
test_that('setValues', expect_identical(as.numeric(m[[oneName]]), P, info = "setValues copyTestCaseListValuesIndexed"))
}
})
)
)
## List of nimbleFunction definitions to use
copyTestNFcodeListValuesIndexed <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(index = double(0)) {
P <- values(model, nodes[index])
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(P = double(1), index = double(0)) {
values(model, nodes[index]) <<- P
})
})
)
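## Note: the only difference from copyTestNFcodeListValues above is that the node vector is
## indexed at run time inside values(), e.g. values(model, nodes[index]), so each run() call
## reads or writes just the single node block selected by 'index'.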
runValuesTests(copyTestCaseListValuesIndexed, testNFcodeList = copyTestNFcodeListValuesIndexed)
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValuesIndexedLoop <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- m$expandNodeNames('mu')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- nfM$run()
test_that('getValues', expect_identical(as.numeric(P), as.numeric(m[['mu']]), info = "getValues copyTestCaseListValuesIndexedLoop"))
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- m$expandNodeNames('mu')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- rnorm(length(m$expandNodeNames('mu')))
nfM$run(P)
test_that('setValues', expect_identical(as.numeric(P), as.numeric(m[['mu']]), info = "setValues copyTestCaseListValuesIndexedLoop"))
})
)
)
copyTestNFcodeListValuesIndexedLoop <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){
nn <- length(nodes)
},
run = function() {
P <- numeric(nn)
for(i in 1:nn)
P[i] <- values(model, nodes[i])[1]
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){
nn <- length(nodes)
},
run = function(P = double(1)) {
tmp <- numeric(1)
for(i in 1:nn) {
tmp[1] <- P[i]
values(model, nodes[i]) <<- tmp
}
})
})
)
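## The test model, constants, and data are redefined below as a simple model of scalar mu[i]
## nodes, so that each nodes[i] in the loop-based nimbleFunctions above corresponds to a
## single scalar value.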
copyTestModelCode <- nimbleCode({
for(i in 1:n) {
y[i] ~ dnorm(mu[i], 1)
mu[i] ~ dnorm(0, 1)
}
})
n <- 10
copyTestConstants <- list(n = n)
copyTestData <- list(y = rnorm(10))
runValuesTests(copyTestCaseListValuesIndexedLoop, testNFcodeList = copyTestNFcodeListValuesIndexedLoop)
options(warn = RwarnLevel)
nimbleOptions(verbose = nimbleVerboseSetting)
| /data/genthat_extracted_code/nimble/tests/test-copy.R | no_license | surayaaramli/typeRrh | R | false | false | 44,286 | r | source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
## Tests for copy() [implemented as nimCopy], values(), and values()<-
## These use some of the same internals (accessors), so they are in the same testing file.
## These tests use lists of nimbleFunctions, initialization code, and testing code.
## They include copying to and from models and/or modelValues, using default arguments, using logProb = [TRUE|FALSE], and using the same or different blocks of variables.
## Checks are made internally for uncompiled and compiled cases. Then uncompiled and compiled outcomes are compared to check that they behaved identically.
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
context('Testing of nimCopy and values')
#############
## Here is a model with deterministic and stochastic variables of dimensions 0-4, including a multivariate node
copyTestModelCode <- nimbleCode({
x0 ~ dnorm(0,1); d0 <- x0 + 10000
for(i in 1:4) {x1[i] ~ dnorm(0,1); d1[i] <- x1[i]+10000}
for(i in 1:4) for(j in 1:4) {x2[i,j] ~ dnorm(0,1); d2[i,j] <- x2[i,j]+10000}
for(i in 1:4) for(j in 1:4) for(k in 1:4) {x3[i,j,k] ~ dnorm(0, 1); d3[i,j,k] <- x3[i,j,k]+10000}
for(i in 1:4) for(j in 1:4) for(k in 1:4) for(l in 1:4) {x4[i,j,k,l] ~ dnorm(0, 1); d4[i,j,k,l] <- x4[i,j,k,l]+10000}
v1[1:4] ~ dmnorm(v1mu[1:4], v1sigma[1:4, 1:4]) ## not testing indexing here!
for(i in 1:5) w1[ 2:5, i] ~ dmnorm(v1mu[1:4], v1sigma[1:4, 1:4])
for(i in 1:4) foo[i] <- w1[i+1, 1]
})
copyTestConstants <- list()
copyTestData <- list(v1mu = rep(0,4), v1sigma = diag(4))
## A list of nimbleFunctions used for various cases
copyTestNFcodeList <- list(
## First 3 are ModelToMV
nfModelToMVall = quote({
nimbleFunction(
setup = function(from, to, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, to = to, rowTo = rowTo, logProb = logProb)
}
)
}),
nfModelToMVsomeSame = quote({
nimbleFunction(
setup = function(from, nodes, to, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, rowTo = rowTo, logProb = logProb)
}
)
}),
nfModelToMVsomeDiff = quote({
nimbleFunction(
setup = function(from, nodes, to, nodesTo, rowTo = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, rowTo = rowTo, nodesTo = nodesTo, logProb = logProb)
}
)
}),
## Next 3 are MVToModel
nfMVToModelAll = quote({
nimbleFunction(
setup = function(from, to, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, to = to, row = row, logProb = logProb)
}
)
}),
nfMVToModelSomeSame = quote({
nimbleFunction(
setup = function(from, nodes, to, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, row = row, logProb = logProb)
}
)
}),
nfMVToModelSomeDiff = quote({
nimbleFunction(
setup = function(from, nodes, to, nodesTo, row = 3, logProb = TRUE) {},
run = function() {
nimCopy(from = from, nodes = nodes, to = to, nodesTo = nodesTo, row = row, logProb = logProb)
}
)
})
)
## A list of inputs for runOneCopyTest
copyTestCaseList <- list(
## First 2 are Model2MV checking default of all nodes if nodes arg is not provided
Model2MVall_logProbTRUE = list(
label = 'Model2MVall_logProbTRUE', ## A label
nfName = 'nfModelToMVall', ## Name of a nimbleFunction from above list to define as nf
seed = round(runif(1, 1, 10000)), ## A seed generated once then the same for compiled and uncompiled cases
initCode = quote({simulate(m); calculate(m)}), ## Code to initiate the model and/or modelValues
compile = c(FALSE, TRUE), ## sequence of yes/no compile
compareRtoCpp = TRUE, ## Should uncompiled and compiled be compared? When TRUE, only works if compile == c(FALSE, TRUE), in that order
nfMcode = quote({nf(m, mv, logProb = TRUE)}), ## code for specializing the nf
testThatLines = quote({ ## code for testing
for(oneName in m$getVarNames(includeLogProb = TRUE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]],
info = 'Model2MVall_logProbTRUE'))
}
})
),
Model2MVall_logProbFALSE = list(
label = 'Model2MVall_logProbFALSE',
nfName = 'nfModelToMVall',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, logProb = FALSE)}),
testThatLines = quote({
for(oneName in m$getVarNames(includeLogProb = FALSE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]],
info = 'Model2MVall_logProbFALSE'))
}
lpNames <- m$getVarNames(includeLogProb = TRUE)
lpNames <- lpNames[grep('logProb_', lpNames)]
for(oneName in lpNames) {
test_that('single', expect_false(identical(m[[oneName]], mv[[oneName]][[3]])))
}
})
),
## Next 2 copy some blocks of some variables, using the same for from and to (nodes provided but nodesTo not provided)
Model2MVsomeSame_logProbFALSE = list( ## some = some blocks of nodes, ## same = same nodes from and to
label = 'Model2MVsomeSame_logProbFALSE',
nfName = 'nfModelToMVsomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = FALSE)}),
testThatLines = quote({
info <- 'Model2MVsomeSame_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
})
),
Model2MVsomeSame_logProbTRUE = list(
label = 'Model2MVsomeSame_logProbTRUE',
nfName = 'nfModelToMVsomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = TRUE)}),
testThatLines = quote({
info = 'Model2MVsomeSame_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[2:3], as.numeric(mv[['logProb_x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][2:3, 2:3]), as.numeric(mv[['logProb_x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info )) ## Note these last two use the collapsing of logProb vars for multivariate nodes
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 2:3]), as.numeric(mv[['logProb_w1']][[3]][2, 2:3]), info = info ))
})
),
## Next 2 copy some node blocks, with different nodes and nodesTo
Model2MVsomeDiff_logProbFALSE = list( ## diff = different nodesTo from nodes
label = 'Model2MVsomeDiff_logProbFALSE',
nfName = 'nfModelToMVsomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = FALSE)}),
testThatLines = quote({
info = 'Model2MVsomeDiff_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[1:2], as.numeric(mv[['x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[1:2], as.numeric(mv[['d1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][1:2, 1:2]), as.numeric(mv[['x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][1:2, 1:2]), as.numeric(mv[['d2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][1:2, 1:2, 1:2]), as.numeric(mv[['x3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][1:2, 1:2, 1:2]), as.numeric(mv[['d3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['x4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['d4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][1:2]), as.numeric(mv[['v1']][[3]][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][1:2, 1:2]), as.numeric(mv[['w1']][[3]][3:4, 3:4]), info = info ))
})
),
Model2MVsomeDiff_logProbTRUE = list( ## diff = different nodesTo from nodes
label = 'Model2MVsomeDiff_logProbTRUE',
nfName = 'nfModelToMVsomeDiff',
seed = round(runif(1, 1, 10000)),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(m, mv, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = TRUE)}),
testThatLines = quote({
info = 'Model2MVsomeDiff_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[1:2], as.numeric(mv[['x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[1:2], as.numeric(mv[['d1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][1:2, 1:2]), as.numeric(mv[['x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][1:2, 1:2]), as.numeric(mv[['d2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][1:2, 1:2, 1:2]), as.numeric(mv[['x3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][1:2, 1:2, 1:2]), as.numeric(mv[['d3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['x4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['d4']][[3]][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][1:2]), as.numeric(mv[['v1']][[3]][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][1:2, 1:2]), as.numeric(mv[['w1']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[1:2], as.numeric(mv[['logProb_x1']][[3]])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][1:2, 1:2]), as.numeric(mv[['logProb_x2']][[3]][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][1:2, 1:2, 1:2]), as.numeric(mv[['logProb_x3']][[3]][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x4']][1:2, 1:2, 1:2, 1:2]), as.numeric(mv[['logProb_x4']][[3]][3:4, 3:4, 3:4, 3:4]) ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 1:2]), as.numeric(mv[['logProb_w1']][[3]][2, 3:4]), info = info )) ## curious case, logProbs go in [2, i]
})
)
)
## This list of cases is for copying from a modelValues to a model
copyTestCaseListMVtoModel <- list(
## First 2 are MV2Model with all nodes (default blank nodes arg)
MV2ModelAll_logProbTRUE = list(
label = 'MV2ModelAll_logProbTRUE',
nfName = 'nfMVToModelAll',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, logProb = TRUE)}),
testThatLines = quote({
for(oneName in mv$getVarNames(includeLogProb = TRUE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]], info = 'MV2ModelAll_logProbTRUE' ))
}
})
),
MV2ModelAll_logProbFALSE = list(
label = 'MV2ModelAll_logProbFALSE',
nfName = 'nfMVToModelAll',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, logProb = FALSE)}),
testThatLines = quote({
for(oneName in mv$getVarNames(includeLogProb = FALSE)) {
test_that('single', expect_identical(m[[oneName]], mv[[oneName]][[3]] ))
}
lpNames <- mv$getVarNames(includeLogProb = TRUE)
lpNames <- lpNames[grep('logProb_', lpNames)]
for(oneName in lpNames) {
test_that('single', expect_false(identical(m[[oneName]], mv[[oneName]][[3]])))
}
})
),
## Next 2 are MV2Model with nodes but not notesTo
MV2ModelSomeSame_logProbFALSE = list( ## some = some blocks of nodes, ## same = same nodes from and to
label = 'MV2ModelSomeSame_logProbFALSE',
nfName = 'nfMVToModelSomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = FALSE)}),
testThatLines = quote({
info <- 'MV2ModelSomeSame_logProbFALSE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
})
),
MV2ModelsomeSame_logProbTRUE = list(
label = 'MV2ModelSomeSame_logProbTRUE',
nfName = 'nfMVToModelSomeSame',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]'), logProb = TRUE)}),
testThatLines = quote({
info <- 'MV2ModelSomeSame_logProbTRUE'
test_that('single', expect_identical(as.numeric(m[['x0']]), as.numeric(mv[['x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d0']]), as.numeric(mv[['d0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x1']])[2:3], as.numeric(mv[['x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['d1']])[2:3], as.numeric(mv[['d1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['x2']][2:3, 2:3]), as.numeric(mv[['x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d2']][2:3, 2:3]), as.numeric(mv[['d2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x3']][2:3, 2:3, 2:3]), as.numeric(mv[['x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d3']][2:3, 2:3, 2:3]), as.numeric(mv[['d3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['d4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['d4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['v1']][2:3]), as.numeric(mv[['v1']][[3]][2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['w1']][2:3, 2:3]), as.numeric(mv[['w1']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x0']]), as.numeric(mv[['logProb_x0']][[3]]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x1']])[2:3], as.numeric(mv[['logProb_x1']][[3]])[2:3], info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x2']][2:3, 2:3]), as.numeric(mv[['logProb_x2']][[3]][2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x3']][2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x3']][[3]][2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_x4']][2:3, 2:3, 2:3, 2:3]), as.numeric(mv[['logProb_x4']][[3]][2:3, 2:3, 2:3, 2:3]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_v1']][1]), as.numeric(mv[['logProb_v1']][[3]][1]), info = info ))
test_that('single', expect_identical(as.numeric(m[['logProb_w1']][2, 2:3]), as.numeric(mv[['logProb_w1']][[3]][2, 2:3]), info = info ))
})
),
## Next 2 are MV2Model with nodes and nodesTo provided
MV2ModelSomeDiff_logProbFALSE = list( ## diff = different nodesTo from nodes
label = 'MV2ModelsomeDiff_logProbFALSE',
nfName = 'nfMVToModelSomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = FALSE)}),
testThatLines = quote({
info <- 'MV2ModelsomeDiff_logProbFALSE'
test_that('single', expect_identical(as.numeric(mv[['x0']][[3]]), as.numeric(m[['x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d0']][[3]]), as.numeric(m[['d0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x1']][[3]])[1:2], as.numeric(m[['x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['d1']][[3]])[1:2], as.numeric(m[['d1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['x2']][[3]][1:2, 1:2]), as.numeric(m[['x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d2']][[3]][1:2, 1:2]), as.numeric(m[['d2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['d3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['d4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['v1']][[3]][1:2]), as.numeric(m[['v1']][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['w1']][[3]][1:2, 1:2]), as.numeric(m[['w1']][3:4, 3:4]), info = info ))
})
),
MV2ModelSomeDiff_logProbTRUE = list( ## diff = different nodesTo from nodes
label = 'MV2ModelsomeDiff_logProbTRUE',
nfName = 'nfMVToModelSomeDiff',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m); nimCopy(from = m, to = mv, rowTo = 3, logProb = TRUE); simulate(m); calculate(m)}),
compile = c(FALSE, TRUE),
compareRtoCpp = TRUE,
nfMcode = quote({nf(mv, m, nodes = c('x0','d0','x1[1:2]','d1[1:2]','x2[1:2,1:2]','d2[1:2, 1:2]',
'x3[1:2,1:2,1:2]','d3[1:2,1:2,1:2]', 'x4[1:2,1:2,1:2,1:2]','d4[1:2,1:2,1:2,1:2]',
'v1[1:2]', 'w1[1:2,1:2]'),
nodesTo = c('x0','d0','x1[3:4]','d1[3:4]','x2[3:4,3:4]','d2[3:4, 3:4]',
'x3[3:4,3:4,3:4]','d3[3:4,3:4,3:4]', 'x4[3:4,3:4,3:4,3:4]','d4[3:4,3:4,3:4,3:4]',
'v1[3:4]', 'w1[3:4,3:4]'),
logProb = TRUE)}),
testThatLines = quote({
info = 'MV2ModelsomeDiff_logProbTRUE'
test_that('single', expect_identical(as.numeric(mv[['x0']][[3]]), as.numeric(m[['x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d0']][[3]]), as.numeric(m[['d0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x1']][[3]])[1:2], as.numeric(m[['x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['d1']][[3]])[1:2], as.numeric(m[['d1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['x2']][[3]][1:2, 1:2]), as.numeric(m[['x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d2']][[3]][1:2, 1:2]), as.numeric(m[['d2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['d3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['d4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['d4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['v1']][[3]][1:2]), as.numeric(m[['v1']][3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['w1']][[3]][1:2, 1:2]), as.numeric(m[['w1']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x0']][[3]]), as.numeric(m[['logProb_x0']]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x1']][[3]])[1:2], as.numeric(m[['logProb_x1']])[3:4], info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x2']][[3]][1:2, 1:2]), as.numeric(m[['logProb_x2']][3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x3']][[3]][1:2, 1:2, 1:2]), as.numeric(m[['logProb_x3']][3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_x4']][[3]][1:2, 1:2, 1:2, 1:2]), as.numeric(m[['logProb_x4']][3:4, 3:4, 3:4, 3:4]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_v1']][[3]][1]), as.numeric(m[['logProb_v1']][1]), info = info ))
test_that('single', expect_identical(as.numeric(mv[['logProb_w1']][[3]][2, 1:2]), as.numeric(m[['logProb_w1']][2, 3:4]), info = info ))
})
)
)
## This is code that works for all cases to check that the state of compiled and uncompiled models are identical
compareCompiledToUncompiled_code <- quote({
for(oneName in m$getVarNames(includeLogProb = TRUE)) {
info <- paste("compareCompiledToUncompiled",oneName)
test_that('single', expect_identical(as.numeric(origM[[oneName]]), as.numeric(m[[oneName]]), info = info ))
}
for(oneName in mv$getVarNames(includeLogProb = TRUE)) {
info <- paste("compareCompiledToUncompiled",oneName)
test_that('single', expect_identical(as.numeric(origMV[[oneName]][[3]]), as.numeric(mv[[oneName]][[3]], info = info )))
}
})
## Function to iterate through test cases
runCopyTests <- function(testCaseList = copyTestCaseList, testModelCode = copyTestModelCode, testNFcodeList = copyTestNFcodeList, dirName = NULL, verbose = nimbleOptions()$verbose) {
for(copyTestCase in testCaseList) {
runOneCopyTest(copyTestCase, testModelCode = testModelCode, testNFcodeList = testNFcodeList, dirName = dirName, verbose = verbose)
}
}
## Function to handle one test case
runOneCopyTest <- function(copyTestCase, testModelCode, testNFcodeList, dirName = NULL, verbose = nimbleOptions()$verbose) {
if(verbose) writeLines(paste0('Testing ', copyTestCase$label))
compileVec <- copyTestCase$compile
for(compile in compileVec) {
if(verbose) writeLines(paste0('COMPILE = ', compile))
m <- nimbleModel(testModelCode, constants = copyTestConstants, data = copyTestData)
mv <- modelValues(m, 3)
set.seed(copyTestCase$seed)
eval(copyTestCase$initCode)
nf <- eval(testNFcodeList[[ copyTestCase$nfName ]])
nfM <- eval(copyTestCase$nfMcode)
if(compile) {
cm <- compileNimble(m, nfM, dirName = dirName)
m <- cm$m
nfM <- cm$nfM
mv <- mv$CobjectInterface
}
runans <- try(nfM$run())
if(copyTestCase$compareRtoCpp & !compile) {
origM <- m
origMV <- mv
}
if(inherits(runans, 'try-error')) stop(paste('Error executing a copy test from case ', copyTestCase$label))
eval(copyTestCase$testThatLine)
if(copyTestCase$compareRtoCpp & compile) {
eval(compareCompiledToUncompiled_code)
}
}
}
###############
## Master call:
runCopyTests(copyTestCaseList, dirName = getwd(), verbose = nimbleOptions()$verbose)
runCopyTests(copyTestCaseListMVtoModel, dirName = getwd(), verbose = nimbleOptions()$verbose)
###################
### Testing for values() and values()<-
### The general layout that follows is similar to above but is completely separate
## List of nimbleFunction definitions to use
copyTestNFcodeListValues <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function() {
P <- values(model, nodes)
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(P = double(1)) {
values(model, nodes) <<- P
})
})
)
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValues <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- nfM$run()
checkP <- numeric()
for(i in nodes) checkP <- c(checkP, m[[i]])
test_that('getValues', expect_identical(P, checkP, info = "getValues copyTestCaseListValues"))
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- numeric()
for(i in nodes) P <- c(P, m[[i]]) ## gets checkP up to the right length
P[] <- rnorm(length(P))
nfM$run(P)
i <- 0;
for(oneName in nodes) {
modelP <- m[[oneName]]
checkP <- P[i + 1:length(modelP)]
i <- i + length(modelP)
test_that('setValues', expect_identical(as.numeric(modelP), as.numeric(checkP ), info = "setValues copyTestCaseListValues"))
}
})
)
)
## Iterate through the value tests
runValuesTests <- function(testCaseList = copyTestCaseListValues, testModelCode = copyTestModelCode, testNFcodeList = copyTestNFcodeListValues, testNFconstantsList = copyTestConstants, testNFdataList = copyTestData, dirName = NULL, verbose = nimbleOptions()$verbose) {
for(copyTestCase in testCaseList) {
runOneValuesTest(copyTestCase, testModelCode = testModelCode, testNFcodeList = testNFcodeList, testNFconstantsList = testNFconstantsList, testNFdataList = testNFdataList, dirName = dirName, verbose = verbose)
}
}
## run one value test case
runOneValuesTest <- function(copyTestCase, testModelCode, testNFcodeList, testNFconstantsList, testNFdataList, dirName = NULL, verbose = nimbleOptions()$verbose) {
if(verbose) writeLines(paste0('Testing ', copyTestCase$label))
compileVec <- copyTestCase$compile
for(compile in compileVec) {
if(verbose) writeLines(paste0('COMPILE = ', compile))
m <- nimbleModel(testModelCode, constants = testNFconstantsList, data = testNFdataList)
set.seed(copyTestCase$seed)
eval(copyTestCase$initCode)
nf <- eval(testNFcodeList[[ copyTestCase$nfName ]])
nfM <- eval(copyTestCase$nfMcode)
if(compile) {
cm <- compileNimble(m, nfM, dirName = dirName)
m <- cm$m
nfM <- cm$nfM
}
eval(copyTestCase$testThatLine)
}
}
## Master call for values() and values()<- tests
runValuesTests(copyTestCaseListValues)
###################
### Testing for values() and values()<- with indexing of node vector inside values()
### The general layout that follows is similar to above but is completely separate
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValuesIndexed <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
for(i in seq_along(nodes)) {
P <- nfM$run(i)
test_that('getValues', expect_identical(P, as.numeric(m[[nodes[i]]]), info = "getValues copyTestCaseListValuesIndexed"))
}
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- c('x0','d0','x1[2:3]','d1[2:3]','x2[2:3,2:3]','d2[2:3, 2:3]',
'x3[2:3,2:3,2:3]','d3[2:3,2:3,2:3]', 'x4[2:3,2:3,2:3,2:3]','d4[2:3,2:3,2:3,2:3]',
'v1[2:3]', 'w1[2:3,2:3]')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
for(i in seq_along(nodes)) {
oneName <- nodes[i]
P <- rnorm(length(m[[oneName]]))
nfM$run(P, i)
test_that('setValues', expect_identical(as.numeric(m[[oneName]]), P, info = "setValues copyTestCaseListValuesIndexed"))
}
})
)
)
## List of nimbleFunction definitions to use
copyTestNFcodeListValuesIndexed <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(index = double(0)) {
P <- values(model, nodes[index])
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){},
run = function(P = double(1), index = double(0)) {
values(model, nodes[index]) <<- P
})
})
)
runValuesTests(copyTestCaseListValuesIndexed, testNFcodeList = copyTestNFcodeListValuesIndexed)
## List of comparison cases, similar to above but without comparing compiled to uncompiled (not applicable)
copyTestCaseListValuesIndexedLoop <- list(
getValues = list(
label = 'getValues',
nfName = 'nfGetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- m$expandNodeNames('mu')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- nfM$run()
test_that('getValues', expect_identical(as.numeric(P), as.numeric(m[['mu']]), info = "getValues copyTestCaseListValuesIndexedLoop"))
})
),
setValues = list(
label = 'setValues',
nfName = 'nfSetValues',
seed = round(runif(1, 1, 10000)),
initCode = quote({simulate(m); calculate(m);
nodes <- m$expandNodeNames('mu')}),
compile = c(FALSE, TRUE),
nfMcode = quote({nf(m, nodes = nodes)}),
testThatLines = quote({
P <- rnorm(length(m$expandNodeNames('mu')))
nfM$run(P)
test_that('setValues', expect_identical(as.numeric(P), as.numeric(m[['mu']]), info = "setValues copyTestCaseListValuesIndexedLoop"))
})
)
)
copyTestNFcodeListValuesIndexedLoop <- list(
nfGetValues = quote({
nimbleFunction(
setup = function(model, nodes){
nn <- length(nodes)
},
run = function() {
P <- numeric(nn)
for(i in 1:nn)
P[i] <- values(model, nodes[i])[1]
return(P)
returnType(double(1))
})
}),
nfSetValues = quote({
nimbleFunction(
setup = function(model, nodes){
nn <- length(nodes)
},
run = function(P = double(1)) {
tmp <- numeric(1)
for(i in 1:nn) {
tmp[1] <- P[i]
values(model, nodes[i]) <<- tmp
}
})
})
)
copyTestModelCode <- nimbleCode({
for(i in 1:n) {
y[i] ~ dnorm(mu[i], 1)
mu[i] ~ dnorm(0, 1)
}
})
n <- 10
copyTestConstants <- list(n = n)
copyTestData <- list(y = rnorm(10))
runValuesTests(copyTestCaseListValuesIndexedLoop, testNFcodeList = copyTestNFcodeListValuesIndexedLoop)
options(warn = RwarnLevel)
nimbleOptions(verbose = nimbleVerboseSetting)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/themes.R
\name{theme_vader}
\alias{theme_vader}
\title{Apply Sith Lord Darth Vader}
\usage{
theme_vader(x, fontsize = 11)
}
\arguments{
\item{x}{a flextable object}
\item{fontsize}{font size in pixel}
}
\description{
Apply Sith Lord Darth Vader theme to a flextable
}
\examples{
ftab <- flextable(iris)
ftab <- theme_vader(ftab)
}
\seealso{
Other flextable theme:
\code{\link{my_theme_booktabs}()},
\code{\link{theme_alafoli}()},
\code{\link{theme_booktabs}()},
\code{\link{theme_box}()},
\code{\link{theme_tron_legacy}()},
\code{\link{theme_tron}()},
\code{\link{theme_vanilla}()},
\code{\link{theme_zebra}()}
}
\concept{flextable theme}
| /man/theme_vader.Rd | no_license | NewGraphEnvironment/flextable | R | false | true | 719 | rd |
cp.a=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='A')&(RIGHT_AA=='P')) )
cp.f=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='F')&(RIGHT_AA=='P')) )
cp.l=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='L')&(RIGHT_AA=='P')) )
cp.s=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='S')&(RIGHT_AA=='P')) )
cp.y=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='Y')&(RIGHT_AA=='P')) )
cp.c=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='C')&(RIGHT_AA=='P')) )
cp.w=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='W')&(RIGHT_AA=='P')) )
cp.p=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='P')&(RIGHT_AA=='P')) )
cp.h=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='H')&(RIGHT_AA=='P')) )
cp.q=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='Q')&(RIGHT_AA=='P')) )
cp.r=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='R')&(RIGHT_AA=='P')) )
cp.i=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='I')&(RIGHT_AA=='P')) )
cp.m=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='M')&(RIGHT_AA=='P')) )
cp.t=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='T')&(RIGHT_AA=='P')) )
cp.n=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='N')&(RIGHT_AA=='P')) )
cp.k=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='K')&(RIGHT_AA=='P')) )
cp.v=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='V')&(RIGHT_AA=='P')) )
cp.d=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='D')&(RIGHT_AA=='P')) )
cp.e=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='E')&(RIGHT_AA=='P')) )
cp.g=cpquery(aic.fit, ION_TYPE=='B_ION', ((LEFT_AA=='G')&(RIGHT_AA=='P')) ) | /MSP Files/yeast/cpquery.all.R | no_license | jdkoola/MSPFileParser | R | false | false | 1,519 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tTest.R
\name{computeEsMinSafeT}
\alias{computeEsMinSafeT}
\title{Helper function: Computes the minimal clinically relevant standardised mean difference for the safe t-test
given nPlan and beta.}
\usage{
computeEsMinSafeT(
nPlan,
alpha = 0.05,
beta = 0.2,
alternative = c("twoSided", "greater", "less"),
testType = c("oneSample", "paired", "twoSample"),
lowN = 3,
highN = 1e+06,
ratio = 1
)
}
\arguments{
\item{nPlan}{vector of max length 2 representing the planned sample sizes.}
\item{alpha}{numeric in (0, 1) that specifies the tolerable type I error control --independent of n-- that the
designed test has to adhere to. Note that it also defines the rejection rule e10 > 1/alpha.}
\item{beta}{numeric in (0, 1) that specifies the tolerable type II error control necessary to calculate both
the sample sizes and deltaS, which defines the test. Note that 1-beta defines the power.}
\item{alternative}{a character string specifying the alternative hypothesis; must be one of "twoSided" (default),
"greater", or "less".}
\item{testType}{either one of "oneSample", "paired", "twoSample".}
\item{lowN}{integer minimal sample size of the (first) sample when computing the power due to
optional stopping. Default lowN is set to 3.}
\item{highN}{integer maximal sample size of the (first) sample when computing the power due to
optional stopping. Default highN is set to 1e6.}
\item{ratio}{numeric > 0 representing the randomisation ratio of condition 2 over condition 1. If testType
is not equal to "twoSample", or if nPlan is of length(1) then ratio=1.}
}
\value{
a list which contains at least nPlan and phiS, the parameter that defines the safe test.
}
\description{
Helper function: Computes the minimal clinically relevant standardised mean difference for the safe t-test
given nPlan and beta.
}
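\examples{
# Illustrative call only (not taken from the package's own examples); the
# arguments simply follow the \usage signature documented above.
\dontrun{
computeEsMinSafeT(nPlan = c(100, 100), alpha = 0.05, beta = 0.2,
                  alternative = "twoSided", testType = "twoSample")
}
}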
| /man/computeEsMinSafeT.Rd | no_license | AlexanderLyNL/safestats | R | false | true | 1,880 | rd |
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373456767L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609874151-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r |
.CV.nnet = function(Input, Target, size=c(2,4,6, 8), decay=c(0.001, 0.01, 0.05, 0.1), maxit=200, nbCV=5, W=NULL){
# require(pROC, quietly=T)
Eval = data.frame(matrix(0, ncol=3, nrow=16, dimnames=list(NULL, c("Size", "Decay", "AUC"))))
Eval[,1] = rep(size,4)
Eval[,2] = rep(decay, each=4)
for(i in 1:nbCV){
set.seed(555)
Samp = SampleMat2(Target, 0.5)
if(is.null(W)){
Eval[,3] = Eval[,3] + apply(Eval[,1:2], 1, Samp, Target, Input, FUN=function(x, Samp, Target, Input){
nn = nnet(eval(parse(text = paste("Target[Samp$calibration]",
paste(.scopeExpSyst(Input[1:10, ,drop=FALSE], "GBM"), collapse = "")))),data=Input[Samp$calibration, ,drop=FALSE],
size = x[1], decay = x[2], maxit = maxit, trace = FALSE)
AUC = as.numeric(pROC::auc(pROC::roc(Target[Samp$evaluation], predict(nn, Input[Samp$evaluation,,drop=FALSE]))))
return(AUC)
})
} else{
Eval[,3] = Eval[,3] + apply(Eval[,1:2], 1, Samp, Target, Input, W, FUN=function(x, Samp, Target, Input, W){
nn = nnet(eval(parse(text = paste("Target[Samp$calibration]",
paste(.scopeExpSyst(Input[1:10, ,drop=FALSE], "GBM"), collapse = "")))),data=Input[Samp$calibration, ,drop=FALSE],
weights=W[Samp$calibration], size = x[1], decay = x[2], maxit = maxit, trace = FALSE)
AUC = as.numeric(pROC::auc(pROC::roc(Target[Samp$evaluation], as.numeric(predict(nn, Input[Samp$evaluation,,drop=FALSE])))))
return(AUC)
})
}
}
Eval[,3] = Eval[,3]/nbCV
z =which.max(Eval[,3])
return(Eval[z, 1:2])
}
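## Hypothetical call (object names are illustrative, not from biomod2 docs):
## best <- .CV.nnet(Input = explanatory_df, Target = presence_absence_vector)
## 'best' is a one-row data.frame holding the Size/Decay pair with the highest
## mean cross-validated AUC over the nbCV repetitions.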
| /R/CVnnet.R | no_license | MirzaCengic/biomod2 | R | false | false | 1,663 | r |
set.seed(1234)
Tmx_annual <- lapply(1:100, function(rep){
# three seed stages, 1 reproductive
# lower adult survival then reproduction is higher
s <- runif(1,0,0.8) # adult survival
f <- -1.25 * ((s + 4.6)^2) + 36.45 + rnorm(1, mean = 0, sd = 0.1) # add some variance around the line
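# (over s in [0, 0.8] this quadratic runs from roughly 10 down to 0, so higher
# adult survival is traded off against lower fecundity)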
t_ij <- matrix(0, nrow = 4, ncol = 4)
t_ij[4,1:3] <- rlnorm(1, meanlog = s, sdlog = 0.5)
t_ij[2,1] <- t_ij[3,2] <- runif(1, min = 0, max = s)
if(any(colSums(t_ij)>=1)){
for(i in which(colSums(t_ij)>=1)){
t_ij[,i] <- t_ij[,i]/ (sum(t_ij[,i]) + runif(1,0.01,0.1)) # make there be some death, 80-99% survival
}
}
t_ij[1,4] <- f
t_ij
})
Tmx_iteroslow <- lapply(1:100, function(Mx){
s <- runif(1,0,0.8) # adult survival
f <- -1.25 * ((s + 4.6)^2) + 36.45 + rnorm(1, mean = 0, sd = 0.1) # add some variance around the line
t_ij <- matrix(0, nrow = 4, ncol = 4)
t_ij[2,1] <- runif(1, min = 0.01, max = 0.99)
t_ij[3,2] <- runif(1, min = 0.01, max = 0.99)
t_ij[4,3] <- runif(1, min = 0.01, max = 0.99)
# stasis or retrogression
t_ij[2,2:3] <- t_ij[3,3:4] <- t_ij[4,4] <- s
if(any(colSums(t_ij)>=1)){
for(i in which(colSums(t_ij)>=1)){
t_ij[,i] <- t_ij[,i]/(sum(t_ij[,i]))
}
}
t_ij[1,4] <- f
t_ij
})
Tmx_semelslow <- lapply(1:100, function(Mx){
s <- runif(1,0,0.8) # adult survival
f <- -1.25 * ((s + 4.6)^2) + 36.45 + rnorm(1, mean = 0, sd = 0.1) # add some variance around the line
t_ij <- matrix(0, nrow = 4, ncol = 4)
t_ij[2,1] <- runif(1, min = 0.01, max = 0.99)
t_ij[3,2] <- runif(1, min = 0.01, max = 0.99)
t_ij[4,3] <- runif(1, min = 0.01, max = 0.99)
# stasis or retrogression
t_ij[2,2:3] <- t_ij[3,3:4] <- s
if(any(colSums(t_ij)>=1)){
for(i in which(colSums(t_ij)>=1)){
t_ij[,i] <- t_ij[,i]/(sum(t_ij[,i]))
}
}
t_ij[1,4] <- f
t_ij
})
Tmx_iterofast <- lapply(1:100, function(Mx){
s <- runif(1,0,0.8) # adult survival
f <- -1.25 * ((s + 4.6)^2) + 36.45 + rnorm(1, mean = 0, sd = 0.1) # add some variance around the line
t_ij <- matrix(0, nrow = 4, ncol = 4)
t_ij[2:4,1:3] <- runif(9, min = 0.01, max = 0.99)
t_ij[2,3] <- 0
t_ij[2,2] <- t_ij[3,3] <-t_ij[4,4] <- s
if(any(colSums(t_ij)>=1)){
for(i in which(colSums(t_ij)>=1)){
t_ij[,i] <- t_ij[,i]/(sum(t_ij[,i]))
}
}
t_ij[1,4] <- f
t_ij
})
Tmx_semelfast <- lapply(1:100, function(Mx){
s <- runif(1,0,0.8) # adult survival
f <- -1.25 * ((s + 4.6)^2) + 36.45 + rnorm(1, mean = 0, sd = 0.1) # add some variance around the line
t_ij <- matrix(0, nrow = 4, ncol = 4)
t_ij[2:4,1:3] <- runif(9, min = 0.01, max = 0.99)
t_ij[2,3] <- 0
t_ij[2,2] <- t_ij[3,3] <- s
if(any(colSums(t_ij)>=1)){
for(i in which(colSums(t_ij)>=1)){
t_ij[,i] <- t_ij[,i]/(sum(t_ij[,i]))
}
}
t_ij[1,4] <- f
t_ij
}) | /R/VirtualSpeciesTypes.R | no_license | DenverBotanicGardens/Seed_Harvest_Modeling | R | false | false | 2,811 | r |
testlist <- list(type = 943208504L, z = 7.11750304968487e-38)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609889171-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 115 | r |
library(shiny)
calculateBMI1 <- function(weight1, height1) (weight1 / (height1 * height1)) * 703
calculateBMI2 <- function(weight2, height2) (weight2 / (height2 * height2))
# Define server logic for slider examples
shinyServer (function(input, output) {
#print user input height (in)
output$inputHeightin <- renderPrint({input$height1})
#print user input weight (lbs)
output$inputWeightlbs <- renderPrint({input$weight1})
#calculate BMI
output$resultBMI1 <- renderPrint({calculateBMI1(input$weight1, input$height1)})
#print user input height (m)
output$inputHeightm <- renderPrint({input$height2})
#print user input weight (kg)
output$inputWeightkg<- renderPrint({input$weight2})
#calculate BMI
output$resultBMI2 <- renderPrint({calculateBMI2(input$weight2, input$height2)})
})
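# A matching ui.R is assumed but not included in this file; a minimal sketch
# (widget ids follow the input$/output$ names used above) could be:
# library(shiny)
# shinyUI(fluidPage(
#   sliderInput("height1", "Height (in)", min = 36, max = 84, value = 66),
#   sliderInput("weight1", "Weight (lbs)", min = 50, max = 400, value = 150),
#   sliderInput("height2", "Height (m)", min = 1, max = 2.2, value = 1.7, step = 0.01),
#   sliderInput("weight2", "Weight (kg)", min = 20, max = 200, value = 70),
#   verbatimTextOutput("resultBMI1"),
#   verbatimTextOutput("resultBMI2")
# ))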
| /server.R | no_license | kchuying/Developing-Data-Products | R | false | false | 850 | r |
testlist <- list(scale = 0, shape = 9.88875053165086e-316)
result <- do.call(bama:::rand_igamma,testlist)
str(result) | /bama/inst/testfiles/rand_igamma/AFL_rand_igamma/rand_igamma_valgrind_files/1615926249-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 117 | r |
# FIXME: not used anywhere?
matchDataFrameSubset = function(df, ss, factors.as.chars = TRUE) {
checkArg(df, c("list", "data.frame"))
checkArg(ss, c("list", "data.frame"))
if (!isProperlyNamed(df))
stop("'df' is not proberbly named")
if (!isProperlyNamed(ss))
stop("'ss' is not proberbly named")
if (any(names(ss) %nin% names(df)))
stop("Names of 'ss' not found in 'df'")
if (is.list(df))
df = as.data.frame(df, stringsAsFactors = FALSE)
if (is.list(ss))
ss = as.data.frame(ss, stringsAsFactors = FALSE)
df = subset(df, select = names(ss))
if (factors.as.chars) {
df = convertDataFrameCols(df, factors.as.char = TRUE)
ss = convertDataFrameCols(ss, factors.as.char = TRUE)
}
conv = function(x) rawToChar(serialize(x, connection = NULL, ascii = TRUE))
match(rowSapply(ss, conv, use.names = FALSE), rowSapply(df, conv, use.names = FALSE))
}
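## Hypothetical usage sketch (kept as comments; the objects below are made up,
## not from BBmisc documentation). For each row of 'ss' the function returns the
## index of the matching row in 'df', comparing only the columns present in 'ss':
# df = data.frame(a = 1:4, b = letters[1:4], stringsAsFactors = FALSE)
# ss = data.frame(a = c(3, 1), b = c("c", "a"), stringsAsFactors = FALSE)
# matchDataFrameSubset(df, ss) # expected: c(3, 1)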
| /BBmisc/R/matchDataFrameSubset.R | no_license | ingted/R-Examples | R | false | false | 892 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sensor_processing.R
\name{process_hexoskin}
\alias{process_hexoskin}
\title{Process hexoskin data}
\usage{
process_hexoskin(filepath, filename, fileinfo, metainfilename)
}
\arguments{
\item{dbname}{the database.}
\item{host}{database host, usually 'localhost'}
}
\value{
Nothing
}
\description{
This function uses the utilities provided by the makers of hexoskin to convert
the binary files in the ZIP to CSVs. Then we combine the individual pieces into
one file
}
\examples{
xyz
}
\seealso{
Other postgresql functions: \code{\link{add_tables_db}},
\code{\link{agg_unit_ok}}, \code{\link{aggregate_data}},
\code{\link{already_uploaded}},
\code{\link{backup_database}},
\code{\link{column_exists}}, \code{\link{column_types}},
\code{\link{create_database}}, \code{\link{delete_data}},
\code{\link{get_column_names}},
\code{\link{get_connection}},
\code{\link{get_filenames_forSubject}},
\code{\link{get_filenames}}, \code{\link{get_row_count}},
\code{\link{get_sensor_data}},
\code{\link{get_subjectid}},
\code{\link{kill_pg_connections}},
\code{\link{list_tables}},
\code{\link{restore_database}},
\code{\link{table_exists}},
\code{\link{upload_postgres}},
\code{\link{valid_connection}}
}
| /man/process_hexoskin.Rd | no_license | zevross-spatial/rpackage-sensorDataImport | R | false | true | 1,303 | rd |
library(dplyr)
library(stringr)
library(tidyr)
flavors = sort(c("c4.large", "m4.large", "r3.large", "c3.large", "m3.medium"))
f_str = str_sub(flavors, 1, 2)
df_analysis = read.table(file = "../data/hp/scaling-analysis-SM-SF.dat", header = T)
df1 = df_analysis %>% select(jobId, flavor, metric_base, len, ecu_viol, mem_viol, tviol, cost_total) %>% dplyr::group_by(jobId, flavor, metric_base) %>% ungroup() %>% distinct()
dfp = df1 %>% group_by(flavor, metric_base) %>% mutate(n = sum(len)) %>% summarise(ECU = sum(ecu_viol) / nth(n, 1), Memory = sum(mem_viol) / nth(n, 1))
dfp = dfp %>% gather("metric", "value", 3:4)
dfp$metric_base = as.character(dfp$metric_base)
dfp$metric_base[dfp$metric_base == "ecu"] = "ECU"
dfp$metric_base[dfp$metric_base == "mem"] = "Memória"
write.table(dfp, file = "../data/hp/3a_colateral_violations-hp-data.dat", row.names = F)
| /analysis/multiple_types/hp/hp-3a-SM-SF-violation-analysis.R | permissive | fabiomorais/ASaaS | R | false | false | 877 | r |
%% OK
\name{denCVBwSelC}
\alias{denCVBwSelC}
\title{ CV bandwidth selector for density }
\description{
Computes Cross Validation bandwidth selector for the
Parzen--Rosenblatt density estimator...
}
\usage{
denCVBwSelC(x, kernel = gaussK, weig = rep(1, length(x)),
interval = .lokestOptInt)
}
\arguments{
\item{x}{ vector with data points. }
\item{kernel}{ Kernel used to perform the estimation, see \code{\link{Kernels}}. }
\item{weig}{ Vector of weights for observations. }
\item{interval}{ A range of values where to look for the bandwidth
parameter. }
}
\details{
The selector is implemented using its definition.
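As a sketch, assuming the usual least-squares cross-validation criterion for the
Parzen--Rosenblatt estimator \eqn{\hat{f}_h}, the selected bandwidth minimises
\deqn{CV(h) = \int \hat{f}_h(x)^2 \, dx - \frac{2}{n}\sum_{i=1}^{n} \hat{f}_{h,-i}(x_i),}{CV(h) = int f_h(x)^2 dx - (2/n) sum_i f_(h,-i)(x_i),}
where \eqn{\hat{f}_{h,-i}} denotes the estimator computed with the \eqn{i}-th observation left out.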
}
\value{
A numeric value with the bandwidth.
}
\references{
Fan, J. and Gijbels, I.
\emph{ Local polynomial modelling and its applications\/}.
Chapman & Hall, London (1996).
Wand, M.~P. and Jones, M.~C.
\emph{ Kernel smoothing\/}.
Chapman and Hall Ltd., London (1995).
}
\author{ Jorge Luis Ojeda Cabrera.
}
\seealso{ \code{\link{bw.nrd0}}, \code{\link[KernSmooth]{dpik}}.
}
\examples{
stdy <- function(size=100,rVar=rnorm,dVar=dnorm,kernel=gaussK,x=NULL)
{
if( is.null(x) ) x <- rVar(size)
Tc <- system.time( dbwc <- denCVBwSelC(x,kernel) )[3]
ucvT <- system.time( ucvBw <- bw.ucv(x,lower=0.00001,upper=2.0) )[3]
nrdT <- system.time( nrdBw <- bw.nrd(x) )[3]
{
xeval <- seq( min(x)+dbwc , max(x)-dbwc ,length=50)
hist(x,probability=TRUE)
lines(xeval,trueDen <- dVar(xeval),col="red")
lines(density(x),col="cyan")
xevalDenc <- PRDenEstC(x,xeval,dbwc,kernel)
dencMSE <- mean( (trueDen-xevalDenc)^2 )
xevalucvDen <- PRDenEstC(x,xeval,ucvBw,kernel)
ucvMSE <- mean( (trueDen-xevalucvDen)^2 )
xevalDenNrd <- PRDenEstC(x,xeval,nrdBw,kernel)
nrdMSE <- mean( (trueDen-xevalDenNrd)^2 )
lines(xevalDenc,col="green")
lines(xevalucvDen,col="blue")
lines(xevalDenNrd,col="grey")
}
return( cbind( bwVal=c(evalC=dbwc,ucvBw=ucvBw,nrdBw=nrdBw),
mse=c(dencMSE,ucvMSE,nrdMSE),
time=c(Tc,ucvT,nrdT) ) )
}
stdy(100,kernel=gaussK)
stdy(100,rVar=rexp,dVar=dexp,kernel=gaussK)
## check stdy with other kernel, distributions
}
\keyword{ nonparametric }
\keyword{ smooth }
| /man/denCVBwSelC.Rd | no_license | cran/locpol | R | false | false | 2,128 | rd |
#' @title bin_distribution
#' @description determines probability distribution of successes a given # of trials and probability
#' @param trials representing # of trials (numeric value)
#' @param prob representing probability (numeric value)
#' @return a list of classes \code{"bindis"} and \code{"data.frame"}
#' @export
#' @examples
#' bin_distribution(5,0.5)
#' bin_distribution(1,1)
bin_distribution <- function(trials,prob){
succ <- c(1:(trials+1))
pro <- c(1:(trials+1))
for (i in 0:trials){
succ[i+1] <- i
pro[i+1] <- bin_probability(i,trials,prob)
}
dist <- data.frame(
successes = succ,
probability = pro
)
class(dist) <- c("bindis","data.frame")
return(dist)
}
#' @title plot.bindis
#' @description Plots a bar graph of a binomial distribution
#' @param dist an object of class "bindis"
#' @return a bar plot
#' @export
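#' @examples
#' # Illustrative only: relies on bin_distribution() defined above in this file
#' # and on ggplot2 being available.
#' dis <- bin_distribution(trials = 5, prob = 0.5)
#' plot(dis)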
plot.bindis <- function(dist){
if (any(class(dist) == "bindis")) {
ggplot(data = dist, aes(x = successes, y = probability)) +
geom_bar(stat = "identity")
}
}
| /binomial/R/bin_dis_and_related.R | no_license | stat133-sp19/hw-stat133-LevanaZhangUCB | R | false | false | 1,064 | r |
#' get_attributes
#'
#' get_attributes
#' @param x an "attributeList" element from an emld object
#' @param eml The full eml document, needed only if <references> outside of attributes must be resolved.
#' @return a data frame whose rows are the attributes (names of each column in the data file)
#' and whose columns describe metadata about those attributes. By default separate tables
#' are given for each type
#' @details EML metadata can use "references" elements which allow one attribute to use metadata
#' declared elsewhere in the document. This function will automatically resolve these references
#' and thus infer the correct metadata.
#' @export
#' @importFrom dplyr bind_rows
#' @examples
#' f <- system.file("tests", emld::eml_version(),
#' "eml-datasetWithAttributelevelMethods.xml", package = "emld")
#' eml <- read_eml(f)
#' get_attributes(eml$dataset$dataTable$attributeList)
get_attributes <- function(x, eml = NULL) {
attributeList <- x
## check to make sure input appears to be an attributeList
if (!("attribute" %in% names(attributeList)) & is.null(attributeList$references)) {
stop(
call. = FALSE,
"Input does not appear to be an attributeList."
)
}
## if the attributeList is referenced, get reference
if (!is.null(attributeList$references)) {
if (is.null(eml)) {
warning(
"The attributeList entered is referenced somewhere else in the eml. ",
"No eml was entered to find the attributes. ",
"Please enter the eml to get better results."
)
eml <- x
}
all_attributeLists <- eml_get(eml, "attributeList")
for (attList in all_attributeLists) {
if (attList$id == attributeList$references) {
attributeList <- attList
break
}
}
}
## get attributes
attributes <- lapply(attributeList$attribute, function(x) {
## get full attribute list
atts <- unlist(x, recursive = TRUE, use.names = TRUE)
measurementScale <- names(x$measurementScale)
domain <- names(x$measurementScale[[measurementScale]])
if (length(domain) == 1) {
## domain == "nonNumericDomain"
domain <- names(x$measurementScale[[measurementScale]][[domain]])
}
domain <- domain[grepl("Domain", domain)]
if (measurementScale == "dateTime" & is.null(domain)){
domain <- "dateTimeDomain"
}
atts <- c(atts, measurementScale = measurementScale, domain = domain)
## separate factors
atts <- atts[!grepl("enumeratedDomain", names(atts))]
## separate methods
atts <- atts[!grepl("methods", names(atts))]
## Alter names to be consistent with other tools
names(atts) <- gsub("missingValueCode.code",
"missingValueCode",
names(atts),
fixed = TRUE)
names(atts) <- gsub("standardUnit|customUnit",
"unit",
names(atts))
## Alter names of annotation label fields for accessibility
names(atts) <- gsub("annotation.valueURI.label",
"valueLabel",
names(atts),
fixed = TRUE)
names(atts) <- gsub("annotation.propertyURI.label",
"propertyLabel",
names(atts),
fixed = TRUE)
names(atts) <- gsub("annotation.propertyURI.propertyURI",
"propertyURI",
names(atts),
fixed = TRUE)
names(atts) <- gsub("annotation.valueURI.valueURI",
"valueURI",
names(atts),
fixed = TRUE)
names(atts) <- gsub(".+\\.+",
"",
names(atts))
atts <- as.data.frame(t(atts), stringsAsFactors = FALSE)
})
attributes <- dplyr::bind_rows(attributes)
## remove non_fields in attributes
non_fields <- c("enforced",
"exclusive",
"order",
"references",
"scope",
"system",
"typeSystem")
attributes <- attributes[, !(names(attributes) %in% non_fields)]
## get factors
factors <- lapply(attributeList$attribute, function(x) {
## get factors
factors <- eml_get(x, "enumeratedDomain")
## linearize factors
factors <- lapply(factors$codeDefinition, function(x) {
as.data.frame(x, stringsAsFactors = FALSE)
})
factors <- do.call(rbind, factors)
if (!is.null(factors)) {
factors$attributeName <- x$attributeName
}
return(factors)
})
factors <- dplyr::bind_rows(factors)
if (nrow(factors) > 0) {
factors <- factors[!is.na(factors$code), ]
} else {
factors <- NULL
}
# FIXME: add support for methods
out <- list(
attributes = attributes,
factors = factors
)
return(out)
} | /R/get_attributes.R | permissive | ropensci/EML | R | false | false | 4,975 | r |
wd <- getwd()
setwd(file.path('..', 'common'))
source('utils.r')
source('prediction_goodness.r')
source('plotting.r')
source('preprocess.r')
setwd(wd)
packages <- c(
'RPostgreSQL', 'ggplot2', 'reshape',
'caTools', 'glmnet', 'car',
'leaps')
import(packages)
Sys.setenv(LANG = 'en')
save_best_subset <- function (res_formula, df, method, nvmax, file_path_no_ext) {
fit <- regsubsets(res_formula, data = df, nvmax = nvmax, nbest = 3, method = method, really.big = T)
for (scale in c('adjr2')) {
plot_path <- file.path(paste(file_path_no_ext, '_', scale, '.png', sep = ''))
png(filename = plot_path, width = 1366, height = 1366, pointsize = 25)
plot(fit, scale = scale)
dev.off()
print(paste('Saved plot under: ', plot_path))
}
summ <- summary(fit)
idx <- which.max(summ$adjr2)
best_vars <- colnames(summ$which)[summ$which[idx,]]
# Skip the intercept
best_vars <- best_vars[-1]
info_path <- plot_path <- paste(file_path_no_ext, 'summary.txt', sep = '_')
best_formula <- as.formula(
paste(
as.list(res_formula)[[2]],
'~',
paste(best_vars, collapse = '+')
))
print(best_formula)
fit <- lm(best_formula, data = df)
capture.output(summary(fit), file = info_path, append = FALSE)
info <- paste(
paste('Best adj R2: ', max(summ$adjr2)),
paste("Best found var subset: c('", paste(best_vars, collapse = "','"), "')", sep = ''),
sep = '\n'
)
cat(info)
cat(info, file = info_path, append = TRUE)
}
main <- function () {
obs <- load_observations('complete_observations')
obs <- na.omit(obs)
vars <- colnames(obs)
vars <- vars[!(vars %in% c('id', 'station_id', 'pm10'))]
obs <- obs[, vars]
base_res_var <- 'pm2_5'
aggr_vars <- c('pm2_5', 'wind_speed', 'pressure', 'humidity',
'temperature', 'precip_rate', 'wind_dir_ew', 'wind_dir_ns')
# For calculating aggregated values
past_lags <- c(23)
future_lag <- 24
max_vars <- 15
seasons <- c('winter', 'spring', 'summer', 'autumn')
var_dir <- file.path(getwd(), 'best_subset', base_res_var)
mkdir(var_dir)
lag_results <- lapply(past_lags, function (past_lag) {
windows <- divide_into_windows(obs, past_lag, future_lag,
future_vars = c(base_res_var, 'timestamp'),
excluded_vars = c())
windows <- add_aggregated(windows, past_lag, vars = aggr_vars)
windows <- skip_past(windows)
lapply(seq(1, 4), function (season) {
season_dir <- file.path(var_dir, seasons[[season]])
mkdir(season_dir)
seasonal_windows <- windows[windows$season == season, ]
# Actual response variable has the 'future_' prefix
res_var <- paste('future', base_res_var, sep = '_')
explanatory_vars <- colnames(seasonal_windows)
explanatory_vars <- explanatory_vars[explanatory_vars != res_var]
res_formula <- as.formula(paste(res_var, '~',
paste(explanatory_vars, collapse = '+'), sep = ' '))
res_formula <- skip_colinear_variables(res_formula, seasonal_windows)
file_path <- file.path(season_dir, paste('best_subset_lag', past_lag, 'top', max_vars, sep = '_'))
save_best_subset(res_formula, seasonal_windows, 'exhaustive', max_vars, file_path)
})
})
}
main()
| /source/analysis/.backup/best-reg-subsets.r | no_license | damiankus/masters-thesis | R | false | false | 3,373 | r |
library(data.table) # fread function
library(zoo) # approximating NAs
library(tidyr) # unite function, replacing NAs
library(dtplyr) # converting dplyr to data.table
library(dplyr) # data wrangling
library(lubridate) # handling dates
library(purrr) # handling nested data
library(glmnet) # ridge regression
library(caret) # XGBoost model, varImp function
library(ggplot2) # plotting
library(gridExtra) # plotting multiple plots together
library(tibble) # add_column function
library(multidplyr) # parallel dplyr
library(parallel) # find out amount of cores
# Reading & transforming the data ----
# Select specific items
items_to_be_plotted <- c(1047679,
819932,
364606)
# Oil price data
# Aggregate oil price to monthly and approximate NAs
oil_df <- fread("oil.csv") %>%
as_tibble() %>%
mutate(year = year(date),
month = month(date))%>%
group_by(year, month) %>%
# Select the first oil price of each month
summarise(oilprice = first(dcoilwtico)) %>%
na.approx() %>%
as_tibble() %>%
mutate(month = as.character(month))
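# oil_df now holds one row per year/month with columns year, month and oilprice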
# Sales data
sales_data <- fread("train.csv")
# Select interval and aggregate to monthly
sales_data <- sales_data %>%
lazy_dt() %>%
# Selected items only
filter(item_nbr %in% items_to_be_plotted) %>%
filter(date >= as.Date(last(date)) - years(4)) %>%
mutate(date = as.Date(date),
year = year(date),
month = month(date) %>% as.character(),
# Replace missing promotions with zero
promo = replace_na(onpromotion, 0),
store_nbr = as.character(store_nbr)) %>%
group_by(year, month, item_nbr, store_nbr) %>%
summarise(sales = sum(unit_sales),
promo = mean(promo)) %>%
# Make a date column with the first day of the months
mutate(year_month = as.Date(paste0(year, "-", month, "-01"))) %>%
as_tibble()
# Use ~67/33 training/test split
split_date <- last(sales_data$year_month) - years(1)
# Combine the sales data with the oil price data
full_data <- sales_data %>%
left_join(oil_df)
# Make lagged sales variables
full_data <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(store_nbr, item_nbr) %>%
mutate(sales_lag12 = lag(sales, 12),
sales_lag1 = lag(sales, 1)) %>%
na.omit()
# Splitting into training and test sets ----
# Make training set with dates and actual sales
to_model <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(item_nbr) %>%
filter(year_month <= split_date) %>%
summarise(year_month_train = list(year_month),
sales_train = list(sales))
# Make test set with dates and actual sales
to_model <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(item_nbr) %>%
filter(year_month > split_date) %>%
summarise(year_month_test = list(year_month),
sales_test = list(sales)) %>%
inner_join(to_model, .)
# Split into training and test sets by date
train <- full_data %>%
filter(year_month <= split_date)
test <- full_data %>%
filter(year_month > split_date)
# Do model matrices for training data (dummy variables etc.)
model_data <- train %>%
group_by(item_nbr) %>%
do(training = safely(model.matrix)(sales ~
year +
factor(month, levels = 1:12) +
promo +
oilprice +
sales_lag12 +
sales_lag1,
data = .)$result[, -1]) %>%
inner_join(to_model, .)
# Do model matrices for testing data (dummy variables etc.)
model_data <- test %>%
group_by(item_nbr) %>%
do(test = safely(model.matrix)(sales ~
year +
factor(month, levels = 1:12) +
promo +
oilprice +
sales_lag12 +
sales_lag1,
data = .)$result[, -1]) %>%
inner_join(model_data, .)
# Modeling ----
# Run cross-validation to extract the lambdas for the ridge models
model_data <- model_data %>%
group_by(item_nbr) %>%
do(cv = safely(cv.glmnet)(pluck(.$training, 1),
pluck(.$sales_train, 1),
alpha = 0)$result) %>%
inner_join(model_data, .)
# Obtain just the lambdas
model_data <- model_data %>%
group_by(item_nbr) %>%
add_column(lambda = as.numeric(as.character(lapply(.$cv, `[[`, 9)))) %>%
# Replace possible null lambdas with NAs
mutate(lambda = modify_if(lambda, is.null, ~ NA)) %>%
select(-cv)
# Make the models ----
# Linear
linear_models <- model_data %>%
group_by(item_nbr) %>%
do(model = safely(lm)(sales ~ .,
data = cbind(sales = pluck(.$sales_train, 1),
pluck(.$training, 1)) %>%
as_tibble())$result)
# Ridge
ridge_models <- model_data %>%
group_by(item_nbr) %>%
do(model = safely(glmnet)(pluck(.$training, 1),
pluck(.$sales_train, 1),
lambda = pluck(.$lambda, 1),
alpha = 0)$result)
# XGboost with grid search
# Create cluster for parallel processing
cluster <- new_cluster(detectCores())
cluster %>%
cluster_library("purrr") %>%
cluster_library("caret")
time <- Sys.time() # ~15 min
xgb_models <- model_data %>%
partition(cluster) %>%
group_by(item_nbr) %>%
do(model = safely(train)(y = pluck(.$sales_train, 1),
x = pluck(.$training, 1),
method = "xgbTree",
metric = "RMSE",
trControl = trainControl(method = "repeatedcv"),
tuneGrid = expand.grid(
# number of trees, higher if size of data is high
nrounds = c(5, 10, 15, 20),
# smaller value prevents overfitting, 0-inf
max_depth = c(6, 10, 15, 25),
# smaller value prevents overfitting, 0-inf
eta = c(0.01, 0.05, 0.1, 0.2, 0.5),
# higher value = more conservative, 0-inf
gamma = c(0, 5),
# 0-1
colsample_bytree = c(0.1, 0.3, 0.5, 0.8, 1),
# higher value = more conservative, 0-inf
min_child_weight = 1,
# smaller value prevents overfitting, 0-1,
subsample = c(0.5, 1)),
allowParallel = TRUE)$result) %>%
collect()
(time <- Sys.time() - time)
# Model evaluation ----
# Function to make predictions and calculate accuracy measures for both sets
make_predictions <- function(model_df, lambda = FALSE){
# Make predictions using the models (training set)
# Handle ridge models separately as they have different arguments
if(lambda){
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_train = safely(predict)(pluck(.$model, 1),
s = .$lambda,
newx = pluck(.$training, 1)
)$result) %>%
inner_join(model_data, ., by = "item_nbr")
# Make predictions using the models (test set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_test = safely(predict)(pluck(.$model, 1),
s = .$lambda,
newx = pluck(.$test, 1))$result) %>%
inner_join(result_data, ., by = "item_nbr")
# Handle linear and XGBoost models
} else {
# Make predictions using the models (training set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_train = safely(predict)(pluck(.$model, 1),
newdata = pluck(.$training, 1) %>%
as_tibble()
)$result) %>%
inner_join(model_data, ., by = "item_nbr")
# Make predictions using the models (test set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_test = safely(predict)(pluck(.$model, 1),
newdata = pluck(.$test, 1) %>%
as_tibble())$result) %>%
inner_join(result_data, ., by = "item_nbr")
}
# Evaluation ----
# Calculate r-squareds (training set)
result_data <- result_data %>%
group_by(item_nbr) %>%
do(rsq_train = safely(cor)(pluck(.$sales_train, 1),
pluck(.$predictions_train, 1),
use = "pairwise.complete.obs")$result ^ 2) %>%
inner_join(result_data, ., by = "item_nbr")
# Calculate r-squareds (test set)
result_data <- result_data %>%
group_by(item_nbr) %>%
do(rsq_test = safely(cor)(pluck(.$sales_test, 1),
pluck(.$predictions_test, 1),
use = "pairwise.complete.obs")$result ^ 2) %>%
inner_join(result_data, ., by = "item_nbr")
# Print mean R-squareds
print(paste("Mean training set R^2:",
result_data$rsq_train %>% unlist() %>% mean() %>% substr(1, 5)))
print(paste("Mean test set R^2:",
result_data$rsq_test %>% unlist() %>% mean() %>% substr(1, 5)))
return(result_data)
}
# Make predictions for each model
pred_Linear <- make_predictions(linear_models)
pred_Ridge <- make_predictions(ridge_models, lambda = TRUE)
pred_XGB <- make_predictions(xgb_models)
# Function for plotting variable importances
importance_plot <- list()
make_importance_plots <- function(model_df, data_df, lambda = FALSE){
# Loop through all models
for(i in 1:nrow(model_df)){
# Calculate variable importances
variable_importance <- varImp(model_df$model[[i]],
lambda = data_df$lambda,
scale = TRUE)
# XGBoost variable importances are handled differently
if(class(variable_importance) == "varImp.train"){
variable_importance <- variable_importance$importance
}
# Convert type while keeping names and arrange
variable_importance <- variable_importance %>%
mutate(Variable = row.names(.),
Importance = as.numeric(Overall)) %>%
select(-Overall) %>%
arrange(-Importance) %>%
mutate(Variable = gsub("factor(month, levels = 1:12)",
"",
.$Variable,
fixed = TRUE) %>%
reorder(Importance))
# Produce plots into a list
importance_plot[[i]] <- variable_importance %>%
ggplot(aes(x = Variable,
y = Importance)) +
geom_col() +
coord_flip() +
ggtitle(paste0("Product ", model_df$item_nbr[[i]], ", ",
"test R^2 ", data_df$rsq_test[[i]] %>% substr(1, 5))) +
theme_light()
}
return(importance_plot)
}
# Collect variable importance plots to a list
plots <- c(make_importance_plots(linear_models, pred_Linear),
make_importance_plots(ridge_models, pred_Ridge, lambda = TRUE),
make_importance_plots(xgb_models, pred_XGB))
# Plot variable importances together
do.call(grid.arrange, list(
arrangeGrob(grobs = plots[1:3], top = "Linear models"),
arrangeGrob(grobs = plots[4:6], top = "Ridge models"),
arrangeGrob(grobs = plots[7:9], top = "XGBoost models"),
ncol = 3,
top = "Standardized variable importances"))
# Unnest and aggregate actuals and predictions into a plottable format
unnest_predictions <- function(data_df){
# Training set
unnested <- data_df %>%
# Carefully remove lambda column if it exists
select(-matches("lambda")) %>%
unnest(Date = year_month_train,
Actual = sales_train,
Prediction = predictions_train) %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_train,
Actual = sales_train,
Prediction = predictions_train)) %>%
group_by(item_nbr, Date) %>%
summarise(Actual = sum(Actual),
Prediction = sum(Prediction)) %>%
# Set negative predictions to zero
mutate(Prediction = ifelse(Prediction < 0, 0, Prediction))
# Test set
unnested <- unnested %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_test,
Actual = sales_test,
Prediction = predictions_test) %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_test,
Actual = sales_test,
Prediction = predictions_test)) %>%
group_by(item_nbr, Date) %>%
summarise(Actual = sum(Actual),
Prediction = sum(Prediction))) %>%
# Set negative predictions to zero
mutate(Prediction = ifelse(Prediction < 0, 0, Prediction)) %>%
# Filter last month since full data of that month is not available
filter(Date != last(Date))
return(unnested)
}
# Get the model names from the current environment
model_names <- ls()[startsWith(ls(), "pred_")]
# Loop for plotting actuals vs predictions
prediction_plot <- list()
for(i in 1:length(model_names)){
data_df <- model_names[i]
prediction_plot[[i]] <- unnest_predictions(get(data_df)) %>%
ggplot(aes(x = Date)) +
geom_line(aes(y = Actual), size = 1) +
geom_line(aes(y = Prediction), color = "#00BFC4", size = 1) +
geom_vline(xintercept = split_date, color = "red", alpha = 0.5, size = 1) +
# Disable scientific notation for sales
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
    # Plot each product horizontally
facet_grid(rows = vars(item_nbr)) +
# Get titles from current environment variable names
ggtitle(strsplit(ls()[startsWith(ls(), "pred_")][i], "_")[[1]][2]) +
ylab("Sales (pcs)") +
theme_light()
}
# Plot actuals vs predictions for each model and product together
do.call(grid.arrange, list(grobs = prediction_plot,
ncol = 3,
top = paste0("Predictions (blue) vs actuals ",
"for different models and products, ",
"red line separates training and test sets"))) | /Top3ProductsMonthlyForecast.R | no_license | KaroRonty/ProductDemandForecast | R | false | false | 15,126 | r | library(data.table) # fread function
library(zoo) # approximating NAs
library(tidyr) # unite function, replacing NAs
library(dtplyr) # converting dplyr to data.table
library(dplyr) # data wrangling
library(lubridate) # handling dates
library(purrr) # handling nested data
library(glmnet) # ridge regression
library(caret) # XGBoost model, varImp function
library(ggplot2) # plotting
library(gridExtra) # plotting multiple plots together
library(tibble) # add_column function
library(multidplyr) # parallel dplyr
library(parallel) # find out the number of cores
# Reading & transforming the data ----
# Select specific items
items_to_be_plotted <- c(1047679,
819932,
364606)
# Oil price data
# Aggregate oil price to monthly and approximate NAs
oil_df <- fread("oil.csv") %>%
as_tibble() %>%
mutate(year = year(date),
month = month(date))%>%
group_by(year, month) %>%
# Select the first oil price of each month
summarise(oilprice = first(dcoilwtico)) %>%
na.approx() %>%
as_tibble() %>%
mutate(month = as.character(month))
# Sales data
sales_data <- fread("train.csv")
# Select interval and aggregate to monthly
sales_data <- sales_data %>%
lazy_dt() %>%
# Selected items only
filter(item_nbr %in% items_to_be_plotted) %>%
filter(date >= as.Date(last(date)) - years(4)) %>%
mutate(date = as.Date(date),
year = year(date),
month = month(date) %>% as.character(),
# Replace missing promotions with zero
promo = replace_na(onpromotion, 0),
store_nbr = as.character(store_nbr)) %>%
group_by(year, month, item_nbr, store_nbr) %>%
summarise(sales = sum(unit_sales),
promo = mean(promo)) %>%
# Make a date column with the first day of the months
mutate(year_month = as.Date(paste0(year, "-", month, "-01"))) %>%
as_tibble()
# Use ~67/33 training/test split
split_date <- last(sales_data$year_month) - years(1)
# Combine the sales data with the oil price data
full_data <- sales_data %>%
left_join(oil_df)
# Make lagged sales variables
full_data <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(store_nbr, item_nbr) %>%
mutate(sales_lag12 = lag(sales, 12),
sales_lag1 = lag(sales, 1)) %>%
na.omit()
# Splitting into training and test sets ----
# Make training set with dates and actual sales
to_model <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(item_nbr) %>%
filter(year_month <= split_date) %>%
summarise(year_month_train = list(year_month),
sales_train = list(sales))
# Make test set with dates and actual sales
to_model <- full_data %>%
arrange(item_nbr, store_nbr, year, as.numeric(month)) %>%
group_by(item_nbr) %>%
filter(year_month > split_date) %>%
summarise(year_month_test = list(year_month),
sales_test = list(sales)) %>%
inner_join(to_model, .)
# Split into training and test sets by date
train <- full_data %>%
filter(year_month <= split_date)
test <- full_data %>%
filter(year_month > split_date)
# Do model matrices for training data (dummy variables etc.)
model_data <- train %>%
group_by(item_nbr) %>%
do(training = safely(model.matrix)(sales ~
year +
factor(month, levels = 1:12) +
promo +
oilprice +
sales_lag12 +
sales_lag1,
data = .)$result[, -1]) %>%
inner_join(to_model, .)
# Do model matrices for testing data (dummy variables etc.)
model_data <- test %>%
group_by(item_nbr) %>%
do(test = safely(model.matrix)(sales ~
year +
factor(month, levels = 1:12) +
promo +
oilprice +
sales_lag12 +
sales_lag1,
data = .)$result[, -1]) %>%
inner_join(model_data, .)
# Modeling ----
# Run cross-validation to extract the lambdas for the ridge models
model_data <- model_data %>%
group_by(item_nbr) %>%
do(cv = safely(cv.glmnet)(pluck(.$training, 1),
pluck(.$sales_train, 1),
alpha = 0)$result) %>%
inner_join(model_data, .)
# Obtain just the lambdas
model_data <- model_data %>%
group_by(item_nbr) %>%
add_column(lambda = as.numeric(as.character(lapply(.$cv, `[[`, 9)))) %>%
# Replace possible null lambdas with NAs
mutate(lambda = modify_if(lambda, is.null, ~ NA)) %>%
select(-cv)
# Make the models ----
# Linear
linear_models <- model_data %>%
group_by(item_nbr) %>%
do(model = safely(lm)(sales ~ .,
data = cbind(sales = pluck(.$sales_train, 1),
pluck(.$training, 1)) %>%
as_tibble())$result)
# Ridge
ridge_models <- model_data %>%
group_by(item_nbr) %>%
do(model = safely(glmnet)(pluck(.$training, 1),
pluck(.$sales_train, 1),
lambda = pluck(.$lambda, 1),
alpha = 0)$result)
# XGboost with grid search
# Create cluster for parallel processing
cluster <- new_cluster(detectCores())
cluster %>%
cluster_library("purrr") %>%
cluster_library("caret")
time <- Sys.time() # ~15 min
xgb_models <- model_data %>%
partition(cluster) %>%
group_by(item_nbr) %>%
do(model = safely(train)(y = pluck(.$sales_train, 1),
x = pluck(.$training, 1),
method = "xgbTree",
metric = "RMSE",
trControl = trainControl(method = "repeatedcv"),
tuneGrid = expand.grid(
# number of trees, higher if size of data is high
nrounds = c(5, 10, 15, 20),
# smaller value prevents overfitting, 0-inf
max_depth = c(6, 10, 15, 25),
# smaller value prevents overfitting, 0-inf
eta = c(0.01, 0.05, 0.1, 0.2, 0.5),
# higher value = more conservative, 0-inf
gamma = c(0, 5),
# 0-1
colsample_bytree = c(0.1, 0.3, 0.5, 0.8, 1),
# higher value = more conservative, 0-inf
min_child_weight = 1,
# smaller value prevents overfitting, 0-1,
subsample = c(0.5, 1)),
allowParallel = TRUE)$result) %>%
collect()
(time <- Sys.time() - time)
# Model evaluation ----
# Function to make predictions and calculate accuracy measures for both sets
make_predictions <- function(model_df, lambda = FALSE){
# Make predictions using the models (training set)
# Handle ridge models separately as they have different arguments
if(lambda){
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_train = safely(predict)(pluck(.$model, 1),
s = .$lambda,
newx = pluck(.$training, 1)
)$result) %>%
inner_join(model_data, ., by = "item_nbr")
# Make predictions using the models (test set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_test = safely(predict)(pluck(.$model, 1),
s = .$lambda,
newx = pluck(.$test, 1))$result) %>%
inner_join(result_data, ., by = "item_nbr")
# Handle linear and XGBoost models
} else {
# Make predictions using the models (training set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_train = safely(predict)(pluck(.$model, 1),
newdata = pluck(.$training, 1) %>%
as_tibble()
)$result) %>%
inner_join(model_data, ., by = "item_nbr")
# Make predictions using the models (test set)
result_data <- model_df %>%
inner_join(model_data, by = "item_nbr") %>%
group_by(item_nbr) %>%
do(predictions_test = safely(predict)(pluck(.$model, 1),
newdata = pluck(.$test, 1) %>%
as_tibble())$result) %>%
inner_join(result_data, ., by = "item_nbr")
}
# Evaluation ----
# Calculate r-squareds (training set)
result_data <- result_data %>%
group_by(item_nbr) %>%
do(rsq_train = safely(cor)(pluck(.$sales_train, 1),
pluck(.$predictions_train, 1),
use = "pairwise.complete.obs")$result ^ 2) %>%
inner_join(result_data, ., by = "item_nbr")
# Calculate r-squareds (test set)
result_data <- result_data %>%
group_by(item_nbr) %>%
do(rsq_test = safely(cor)(pluck(.$sales_test, 1),
pluck(.$predictions_test, 1),
use = "pairwise.complete.obs")$result ^ 2) %>%
inner_join(result_data, ., by = "item_nbr")
# Print mean R-squareds
print(paste("Mean training set R^2:",
result_data$rsq_train %>% unlist() %>% mean() %>% substr(1, 5)))
print(paste("Mean test set R^2:",
result_data$rsq_test %>% unlist() %>% mean() %>% substr(1, 5)))
return(result_data)
}
# Make predictions for each model
pred_Linear <- make_predictions(linear_models)
pred_Ridge <- make_predictions(ridge_models, lambda = TRUE)
pred_XGB <- make_predictions(xgb_models)
# Function for plotting variable importances
importance_plot <- list()
make_importance_plots <- function(model_df, data_df, lambda = FALSE){
# Loop through all models
for(i in 1:nrow(model_df)){
# Calculate variable importances
variable_importance <- varImp(model_df$model[[i]],
lambda = data_df$lambda,
scale = TRUE)
# XGBoost variable importances are handled differently
if(class(variable_importance) == "varImp.train"){
variable_importance <- variable_importance$importance
}
# Convert type while keeping names and arrange
variable_importance <- variable_importance %>%
mutate(Variable = row.names(.),
Importance = as.numeric(Overall)) %>%
select(-Overall) %>%
arrange(-Importance) %>%
mutate(Variable = gsub("factor(month, levels = 1:12)",
"",
.$Variable,
fixed = TRUE) %>%
reorder(Importance))
# Produce plots into a list
importance_plot[[i]] <- variable_importance %>%
ggplot(aes(x = Variable,
y = Importance)) +
geom_col() +
coord_flip() +
ggtitle(paste0("Product ", model_df$item_nbr[[i]], ", ",
"test R^2 ", data_df$rsq_test[[i]] %>% substr(1, 5))) +
theme_light()
}
return(importance_plot)
}
# Collect variable importance plots to a list
plots <- c(make_importance_plots(linear_models, pred_Linear),
make_importance_plots(ridge_models, pred_Ridge, lambda = TRUE),
make_importance_plots(xgb_models, pred_XGB))
# Plot variable importances together
do.call(grid.arrange, list(
arrangeGrob(grobs = plots[1:3], top = "Linear models"),
arrangeGrob(grobs = plots[4:6], top = "Ridge models"),
arrangeGrob(grobs = plots[7:9], top = "XGBoost models"),
ncol = 3,
top = "Standardized variable importances"))
# Unnest and aggregate actuals and predictions into a plottable format
unnest_predictions <- function(data_df){
# Training set
unnested <- data_df %>%
# Carefully remove lambda column if it exists
select(-matches("lambda")) %>%
unnest(Date = year_month_train,
Actual = sales_train,
Prediction = predictions_train) %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_train,
Actual = sales_train,
Prediction = predictions_train)) %>%
group_by(item_nbr, Date) %>%
summarise(Actual = sum(Actual),
Prediction = sum(Prediction)) %>%
# Set negative predictions to zero
mutate(Prediction = ifelse(Prediction < 0, 0, Prediction))
# Test set
unnested <- unnested %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_test,
Actual = sales_test,
Prediction = predictions_test) %>%
rbind(data_df %>%
select(-matches("lambda")) %>%
unnest(Date = year_month_test,
Actual = sales_test,
Prediction = predictions_test)) %>%
group_by(item_nbr, Date) %>%
summarise(Actual = sum(Actual),
Prediction = sum(Prediction))) %>%
# Set negative predictions to zero
mutate(Prediction = ifelse(Prediction < 0, 0, Prediction)) %>%
# Filter last month since full data of that month is not available
filter(Date != last(Date))
return(unnested)
}
# Get the model names from the current environment
model_names <- ls()[startsWith(ls(), "pred_")]
# Loop for plotting actuals vs predictions
prediction_plot <- list()
for(i in 1:length(model_names)){
data_df <- model_names[i]
prediction_plot[[i]] <- unnest_predictions(get(data_df)) %>%
ggplot(aes(x = Date)) +
geom_line(aes(y = Actual), size = 1) +
geom_line(aes(y = Prediction), color = "#00BFC4", size = 1) +
geom_vline(xintercept = split_date, color = "red", alpha = 0.5, size = 1) +
# Disable scientific notation for sales
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
    # Plot each product horizontally
facet_grid(rows = vars(item_nbr)) +
# Get titles from current environment variable names
ggtitle(strsplit(ls()[startsWith(ls(), "pred_")][i], "_")[[1]][2]) +
ylab("Sales (pcs)") +
theme_light()
}
# Plot actuals vs predictions for each model and product together
do.call(grid.arrange, list(grobs = prediction_plot,
ncol = 3,
top = paste0("Predictions (blue) vs actuals ",
"for different models and products, ",
"red line separates training and test sets"))) |
#' Classical confidence intervals on stratigraphic ranges
#'
#' Based on the classical confidence-interval approach of Strauss and Sadler
#' for stratigraphic ranges.
#'
#' @param fad scalar; first appearance datum
#' @param lad scalar; last appearance datum
#' @param h scalar; number of samples
#' @param ci scalar; confidence level
#' @return scalar; the estimated extension of the observed stratigraphic range
#'   implied by the chosen confidence level
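#'
#' @examples
#' # Illustrative call with hypothetical values (not from any real section):
#' # an observed range of 10 units, 12 sampled horizons, 95% confidence level
#' classic(fad = 30, lad = 20, h = 12, ci = 0.95)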
classic <- function(fad, lad, h, ci = 0.5) {
r <- abs(fad - lad)
ep <- -1 / (h - 1)
brack <- ((1 - ci) ^ ep) - 1
out <- r * brack
out
}
| /R/classic_confit.r | no_license | psmits/survivor | R | false | false | 448 | r | #' Classical confidence intervals on stratigraphic ranges
#'
#' Based on the classical confidence-interval approach of Strauss and Sadler
#' for stratigraphic ranges.
#'
#' @param fad scalar; first appearance datum
#' @param lad scalar; last appearance datum
#' @param h scalar; number of samples
#' @param ci scalar; confidence level
#' @return scalar; the estimated extension of the observed stratigraphic range
#'   implied by the chosen confidence level
classic <- function(fad, lad, h, ci = 0.5) {
r <- abs(fad - lad)
ep <- -1 / (h - 1)
brack <- ((1 - ci) ^ ep) - 1
out <- r * brack
out
}
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
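# Elastic net with alpha = 0.04 (very close to ridge, alpha = 0), 10-fold CV, MSE loss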
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.04,family="gaussian",standardize=FALSE)
sink('./skin_018.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/skin/skin_018.R | no_license | esbgkannan/QSMART | R | false | false | 346 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.04,family="gaussian",standardize=FALSE)
sink('./skin_018.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(9.97941197291525e-316, 6.43875055358486e+276, 9.53818252170339e+295, 1.22810566415393e+146, 1.54305433646039e-121, 1.09508336506649e+307, 4.12396126296296e-221, 2.86697304171682e-72, 5.03115083823409e+175, 1.05137142854286e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615826990-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 483 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(9.97941197291525e-316, 6.43875055358486e+276, 9.53818252170339e+295, 1.22810566415393e+146, 1.54305433646039e-121, 1.09508336506649e+307, 4.12396126296296e-221, 2.86697304171682e-72, 5.03115083823409e+175, 1.05137142854286e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
# Author: Matthew Deitz
# Editor: Xiuxia Du
# Started in June 2015
#Run this program in the same directory as the HMDB XML database file
if (!is.element("XML", installed.packages()[,1])) {
install.packages("XML")
}
library(XML)
#data frame of the results
metaboliteList<-data.frame(Accession=character(0),
Name=character(0),
Chemical_Formula=character(0),
Avg_Molecular_Weight=character(0),
Monisotopic_weight=character(0))
#for the xml file in your current working directory
for (infile in dir(getwd(),pattern="*.xml")){
data<-xmlParse(infile)
#parse the file to a list
xml_data<-xmlToList(data)
metaboliteList<-rbind(metaboliteList,
data.frame(Accession=toString(xml_data[["accession"]]),
Name=toString(xml_data[["name"]]),
Chemical_Formula=toString(xml_data[["chemical_formula"]]),
Avg_Molecular_Weight=toString(xml_data[["average_molecular_weight"]]),
Monisotopic_weight=toString(xml_data[["monisotopic_moleculate_weight"]])))
}
write.csv(metaboliteList,"HMDBMetaboliteList.csv")
| /HMDBXMLParse.R | no_license | zhengfj1994/BigDataMetabolomics_Human | R | false | false | 1,285 | r | # Author: Matthew Deitz
# Editor: Xiuxia Du
# Started in June 2015
#Run this program in the same directory as the HMDB XML database file
if (!is.element("XML", installed.packages()[,1])) {
install.packages("XML")
}
library(XML)
#data frame of the results
metaboliteList<-data.frame(Accession=character(0),
Name=character(0),
Chemical_Formula=character(0),
Avg_Molecular_Weight=character(0),
Monisotopic_weight=character(0))
#for the xml file in your current working directory
for (infile in dir(getwd(),pattern="*.xml")){
data<-xmlParse(infile)
#parse the file to a list
xml_data<-xmlToList(data)
metaboliteList<-rbind(metaboliteList,
data.frame(Accession=toString(xml_data[["accession"]]),
Name=toString(xml_data[["name"]]),
Chemical_Formula=toString(xml_data[["chemical_formula"]]),
Avg_Molecular_Weight=toString(xml_data[["average_molecular_weight"]]),
Monisotopic_weight=toString(xml_data[["monisotopic_moleculate_weight"]])))
}
write.csv(metaboliteList,"HMDBMetaboliteList.csv")
|
context("test-eval")
df1 <- data.frame(y = c(0,0,0,1,1,0,0,0,1,1,1,0),unit = rep(c(0,1),each=6))
f1 <- funnelModel(y~1|unit,data=df1)
test_that("error if too few folds", {
expect_s3_class(evalCasemixAdj(f1,folds = 2),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 3),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 4),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 5),"data.frame")
expect_error(evalCasemixAdj(f1,folds = 6))
})
df2 <- data.frame(y = rbinom(n=1e5,size=1,prob=0.5),unit = rep(c(0,1),each=1e5/2))
f2 <- funnelModel(y~1|unit,data=df2)
nfolds <- 2
res2 <- evalCasemixAdj(f2,folds = 2)
test_that("Check evalCasemixAdj returns values indicating noise", {
expect_equal(res2$brier[nfolds+1],0.25,tolerance = 0.05)
expect_equal(res2$accuracy[nfolds+1],0.5,tolerance = 0.05)
expect_equal(res2$auc_roc[nfolds+1],0.5,tolerance = 0.05)
expect_equal(res2$no_info_rate[nfolds+1],0.5,tolerance = 0.05)
})
| /tests/testthat/test-eval.R | permissive | oizin/funnelplot | R | false | false | 953 | r | context("test-eval")
df1 <- data.frame(y = c(0,0,0,1,1,0,0,0,1,1,1,0),unit = rep(c(0,1),each=6))
f1 <- funnelModel(y~1|unit,data=df1)
test_that("error if too few folds", {
expect_s3_class(evalCasemixAdj(f1,folds = 2),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 3),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 4),"data.frame")
expect_s3_class(evalCasemixAdj(f1,folds = 5),"data.frame")
expect_error(evalCasemixAdj(f1,folds = 6))
})
df2 <- data.frame(y = rbinom(n=1e5,size=1,prob=0.5),unit = rep(c(0,1),each=1e5/2))
f2 <- funnelModel(y~1|unit,data=df2)
nfolds <- 2
res2 <- evalCasemixAdj(f2,folds = 2)
test_that("Check evalCasemixAdj returns values indicating noise", {
expect_equal(res2$brier[nfolds+1],0.25,tolerance = 0.05)
expect_equal(res2$accuracy[nfolds+1],0.5,tolerance = 0.05)
expect_equal(res2$auc_roc[nfolds+1],0.5,tolerance = 0.05)
expect_equal(res2$no_info_rate[nfolds+1],0.5,tolerance = 0.05)
})
|
#! This file was automatically produced by the testextra package.
#! Changes will be overwritten.
context('tests extracted from file `add_class.R`')
#line 30 "R/add_class.R"
test_that('add_class', {#@testing
expect_is(add_class(1, 'test'), 'test')
val <- add_class(add_class(1, 'class1'), 'class2')
expect_is(val, 'class2')
expect_is(val, 'class1')
expect_is_not(val, 'class3')
})
#line 42 "R/add_class.R"
test_that('set_class', {#@testing
expect_is(set_class(1, 'test'), 'test')
val <- set_class(set_class(1, 'class1'), 'class2')
expect_is(val, 'class2')
expect_is_not(val, 'class1')
expect_is_not(val, 'class3')
})
#line 54 "R/add_class.R"
test_that('add_comment', {#@testing
val <- add_comment(list(), "a test comment")
expect_equal(comment(val), "a test comment")
val <- add_comment(val, "another comment")
expect_equal(comment(val), c("a test comment", "another comment"))
})
#line 65 "R/add_class.R"
test_that('set_comment', {#@testing
val <- set_comment(list(), "a test comment")
expect_equal(comment(val), "a test comment")
val <- set_comment(val, "another comment")
expect_equal(comment(val), "another comment")
})
#line 91 "R/add_class.R"
test_that('carry_forward', {#@testing
x <- dontrepeat(c('a','a', 'b', 'b', 'b'), '.')
y <- carry_forward(factor(c('c', 'd', 'd')), x)
expect_identical(attributes(x), attributes(y))
z <- carry_forward(factor(c(1L, 2L, 2L)), x)
expect_identical(attributes(x), attributes(z))
})
| /tests/testthat/test-add_class.R | no_license | cran/cursory | R | false | false | 1,570 | r | #! This file was automatically produced by the testextra package.
#! Changes will be overwritten.
context('tests extracted from file `add_class.R`')
#line 30 "R/add_class.R"
test_that('add_class', {#@testing
expect_is(add_class(1, 'test'), 'test')
val <- add_class(add_class(1, 'class1'), 'class2')
expect_is(val, 'class2')
expect_is(val, 'class1')
expect_is_not(val, 'class3')
})
#line 42 "R/add_class.R"
test_that('set_class', {#@testing
expect_is(set_class(1, 'test'), 'test')
val <- set_class(set_class(1, 'class1'), 'class2')
expect_is(val, 'class2')
expect_is_not(val, 'class1')
expect_is_not(val, 'class3')
})
#line 54 "R/add_class.R"
test_that('add_comment', {#@testing
val <- add_comment(list(), "a test comment")
expect_equal(comment(val), "a test comment")
val <- add_comment(val, "another comment")
expect_equal(comment(val), c("a test comment", "another comment"))
})
#line 65 "R/add_class.R"
test_that('set_comment', {#@testing
val <- set_comment(list(), "a test comment")
expect_equal(comment(val), "a test comment")
val <- set_comment(val, "another comment")
expect_equal(comment(val), "another comment")
})
#line 91 "R/add_class.R"
test_that('carry_forward', {#@testing
x <- dontrepeat(c('a','a', 'b', 'b', 'b'), '.')
y <- carry_forward(factor(c('c', 'd', 'd')), x)
expect_identical(attributes(x), attributes(y))
z <- carry_forward(factor(c(1L, 2L, 2L)), x)
expect_identical(attributes(x), attributes(z))
})
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 31312
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31312
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipdiam/ken.flash^12.C-d3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 10687
c no.of clauses 31312
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 31312
c
c QBFLIB/Biere/tipdiam/ken.flash^12.C-d3.qdimacs 10687 31312 E1 [] 0 104 10583 31312 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/tipdiam/ken.flash^12.C-d3/ken.flash^12.C-d3.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 633 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 31312
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31312
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipdiam/ken.flash^12.C-d3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 10687
c no.of clauses 31312
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 31312
c
c QBFLIB/Biere/tipdiam/ken.flash^12.C-d3.qdimacs 10687 31312 E1 [] 0 104 10583 31312 NONE
|
library(rvest)
library(tidyverse)
h <- read_html("https://en.wikipedia.org/wiki/Current_members_of_the_United_States_House_of_Representatives")
reps <- h %>%
html_nodes("#mw-content-text > div > table:nth-child(18)") %>%
html_table()
# html_table() returns a list of tables; keep the first one and drop unwanted columns
reps <- reps[[1]][, c(1:2, 4:9)] %>% as_tibble()
reps
| /Webscraping_Politacial.R | no_license | Arpitchaurasia13/RStudio_Iris-data | R | false | false | 301 | r | library(rvest)
library(tidyverse)
h <- read_html("https://en.wikipedia.org/wiki/Current_members_of_the_United_States_House_of_Representatives")
reps <- h %>%
html_nodes("#mw-content-text > div > table:nth-child(18)") %>%
html_table()
# html_table() returns a list of tables; keep the first one and drop unwanted columns
reps <- reps[[1]][, c(1:2, 4:9)] %>% as_tibble()
reps
|
#1
r <- c(5,5,2,1,4)
#a
rshape <- sum(r)+2.6
rscale <- 1.4/(length(r)*1.4+1)
curve(dgamma(x,shape=rshape,scale=rscale),frame=0,to=12, col='blue', lwd=4, main='Exam 2 Problem 1a', ylim=c(0,.6))
curve(dgamma(x,shape=2.6,scale=1.4),add=T, col='red', lwd=4)
legend(8,.4, c('posterior', 'prior'), lwd=c(4,4), col=c('blue','red'))
#b
#E(X) of prior=
1.4 * 2.6
#c
#E(X) of posterior=
rshape * rscale
#d
qgamma(c(.005,.995), shape=rshape, scale= rscale)
# So upper end= 5.75172
#2
updatemu <- function(data, sigma2, mumu, sigma2mu){
xbar <- mean(data)
n <- length(data)
prec <- 1/sigma2
precmu <- 1/sigma2mu
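  # Conjugate normal update: the posterior mean is the precision-weighted average of the
  # sample mean and the prior mean; the posterior variance is the inverse of the total precision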
meanpost <- (n*prec * xbar + precmu*mumu)/(n*prec+precmu)
varpost <- 1/(n*prec +precmu)
return(c(meanpost,varpost))
}
#a
g <- c(82.6, 80.4, 77.2, 79.5, 74.4, 80.8, 82.8, 80.1, 84.4, 74.7, 78.6, 79.4)
gpost <- updatemu(g,9,76,100)
curve(dnorm(x,gpost[1], sqrt(gpost[2])), lwd=3, col='blue', from=50, to=100, main='Exam 2 Problem 2a') #posterior
curve(dnorm(x,76,10), col='red', add=T, lwd=3)
legend(90,.3, c('posterior', 'prior'), lwd=c(4,4), col=c('blue','red'))
#b
# 95% credible interval for mu from the normal posterior
qnorm(c(.025, .975), mean = gpost[1], sd = sqrt(gpost[2]))
#c
#No, I would not reject the null hypothesis that mu = 80 at 5% significance because 80 lies inside the 95% probability interval.
#d
updatesig2<- function(data, mu, prshape, prscale){
n <- length(data)
poshape <- n/2 + prshape
ssx <- sum((data-mu)^2)
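  # Inverse-gamma update on the scale parameterization: poscale = 1/(1/prscale + ssx/2),
  # i.e. the prior rate gains half the sum of squared deviations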
poscale <- (2 * prscale)/(prscale * ssx + 2)
out <- c(poshape,poscale)
return(out)
}
igpdf <- function(x,sh,sc){
(1/(gamma(sh)*sc^sh))*x^(-sh-1)*exp(-1/(x*sc))
}
g2post <- updatesig2(g,77,3.1,.02)
curve(igpdf(x, g2post[1], g2post[2]), col='blue', from=0, to=80, lwd=3, main='Exam 2 Problem 1d') # posterior for variance
curve(igpdf(x,3.1, .02), add=T, col='red', lwd=3)
legend(60,.04, legend=c('posterior', 'prior'), col=c('blue','red'),lty=c(1,1),lwd=c(6,6))
#e
q <- 1/rgamma(10000, g2post[1], scale=g2post[2])
quantile(q, c(.05, .95))
#f
gibbs <- function(data, loops, mumu, sig2mu, psh, psc){
out <- matrix(0,loops,2)
out[1,1] <- mean(data)
out[1,2] <- var(data)
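  # Gibbs sampling: alternately draw mu | sigma2 (normal full conditional)
  # and sigma2 | mu (inverse-gamma full conditional)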
for (i in 2:loops){
parmmu <- updatemu(data, out[i-1,2], mumu, sig2mu)
out[i,1] <- rnorm(1,parmmu[1], sqrt(parmmu[2]))
parms2 <- updatesig2(data, out[i,1], psh, psc)
out[i,2] <- 1/rgamma(1,shape=parms2[1], scale=parms2[2])}
return(out)}
g3post <- gibbs(g, 10000, 80, 200, 2.1, .005)
plot(density(g3post[,2]), lwd=3, col='blue', main='Exam 2 Problem 2f')
#g
plot(density(g3post[,1]), lwd=3, xlim=c(30,130), col='blue', main='Exam 2 Problem 2g')
gmprior <- rnorm(10000, 80, sqrt(200))
lines(density(gmprior), lwd=3, col='red')
legend(100,.15, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#h
quantile(g3post[,1], c(.025,.975))
#lower end is 76.16505
#i
priorgmu <-rnorm(10000, 80, sqrt(200))
priorgsig<-1/rgamma(10000,shape=2.1,scale=0.02)
priorpred<-rnorm(10000,priorgmu,sqrt(priorgsig))
g3post<- gibbs(g, 10000, 80, 200, 2.1, .005)
postpred<-rnorm(100000,g3post[,1],sqrt(g3post[,2]))
hist(g,breaks = 5, xlim = c(40, 120),freq=FALSE, main='Exam 2 Problem 2i')
lines(density(priorpred), lwd=3, col='blue')
lines(density(postpred), lwd=3, col='red')
legend(90,.09, c("Prior Predictive","Posterior Predictive"), lwd=c(3,3), col=c("blue","red"))
#3
#a
t <- c(83.6, 81.1, 78.2, 78.1, 84.2, 85.1, 76.1, 83.7, 87.4, 80.5, 81.1, 84.5)
gb1post<- gibbs(g, 100000, 81, 160, 2.01, .008)
gb2post<- gibbs(t, 100000, 81, 160, 2.01, .008)
plot(density(gb1post[,2]), lwd=3, col='blue', main='Exam 2 Problem 3a')
lines(density(gb2post[,2]), lwd=3, col='red')
legend(70,.04, c("Bridgewater Posterior var"," Taylormade Posterior var"), lwd=c(3,3), col=c("blue","red"))
#b
plot(density(gb1post[,1]), lwd=3, col='blue',xlim=c(70,100), main='Exam 2 Problem 3b')
lines(density(gb2post[,1]), lwd=3, col='red')
legend(86,.2, c("Bridgewater Posterior mean","Taylormade Posterior mean"), lwd=c(3,3), col=c("blue","red"))
#c
diff <- gb1post[,1] - gb2post[,1]
plot(density(diff), lwd=3, col='blue', main='Exam 2 Problem 3c')
#d
diff <- gb1post[,1] - gb2post[,1]
quantile(diff, c(.025,.975))
#upper is 1.882012
#e
#No, I would accept because 0 is in the interval.
#4
#a
1/(1+9)
#b
curve(dbeta(x,12,139), col='blue', lwd=3, from=-.01, to=1, main='Exam 2 Problem 4b')
curve(dbeta(x,1,9), lwd=3, add=T, col='red')
legend(.6,10, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#c
12/(139+12)
#d
curve(dbeta(x,22,158), col='blue', lwd=3, from=-.01, to=1, main='Exam 2 Problem 4d')
curve(dbeta(x,1,9), lwd=3, add=T, col='red')
legend(.6,10, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#e
#E(X)=
22/(158+22)
#f
dairy <- rbeta(10000,12,139)
sheep <- rbeta(10000,22,158)
d <- dairy - sheep
quantile(d,c(.025,.975))
#No, i would not reject the null hypothesis. 0 is in the prob. interval
#5
#a
((1/6)*(1))/(((1/6)*1)+((1/3)*(3/5))+((1/2)*(1/5)))
#b
((1/6)*(1)*(1))/(((1/6)*(1)*(1))+((2/6)*(3/5)*(2/4))+((3/6)*(1/5)*(0)))
| /TEST 2.R | no_license | Jgressel/examples | R | false | false | 4,929 | r | #1
r <- c(5,5,2,1,4)
#a
rshape <- sum(r)+2.6
rscale <- 1.4/(length(r)*1.4+1)
curve(dgamma(x,shape=rshape,scale=rscale),frame=0,to=12, col='blue', lwd=4, main='Exam 2 Problem 1a', ylim=c(0,.6))
curve(dgamma(x,shape=2.6,scale=1.4),add=T, col='red', lwd=4)
legend(8,.4, c('posterior', 'prior'), lwd=c(4,4), col=c('blue','red'))
#b
#E(X) of prior=
1.4 * 2.6
#c
#E(X) of posterior=
rshape * rscale
#d
qgamma(c(.005,.995), shape=rshape, scale= rscale)
# So upper end= 5.75172
#2
updatemu <- function(data, sigma2, mumu, sigma2mu){
xbar <- mean(data)
n <- length(data)
prec <- 1/sigma2
precmu <- 1/sigma2mu
meanpost <- (n*prec * xbar + precmu*mumu)/(n*prec+precmu)
varpost <- 1/(n*prec +precmu)
return(c(meanpost,varpost))
}
#a
g <- c(82.6, 80.4, 77.2, 79.5, 74.4, 80.8, 82.8, 80.1, 84.4, 74.7, 78.6, 79.4)
gpost <- updatemu(g,9,76,100)
curve(dnorm(x,gpost[1], sqrt(gpost[2])), lwd=3, col='blue', from=50, to=100, main='Exam 2 Problem 2a') #posterior
curve(dnorm(x,76,10), col='red', add=T, lwd=3)
legend(90,.3, c('posterior', 'prior'), lwd=c(4,4), col=c('blue','red'))
#b
# 95% credible interval for mu from the normal posterior
qnorm(c(.025, .975), mean = gpost[1], sd = sqrt(gpost[2]))
#c
#No, I would not reject the null hypothesis that mu = 80 at 5% significance because 80 lies inside the 95% probability interval.
#d
updatesig2<- function(data, mu, prshape, prscale){
n <- length(data)
poshape <- n/2 + prshape
ssx <- sum((data-mu)^2)
poscale <- (2 * prscale)/(prscale * ssx + 2)
out <- c(poshape,poscale)
return(out)
}
igpdf <- function(x,sh,sc){
(1/(gamma(sh)*sc^sh))*x^(-sh-1)*exp(-1/(x*sc))
}
g2post <- updatesig2(g,77,3.1,.02)
curve(igpdf(x, g2post[1], g2post[2]), col='blue', from=0, to=80, lwd=3, main='Exam 2 Problem 1d') # posterior for variance
curve(igpdf(x,3.1, .02), add=T, col='red', lwd=3)
legend(60,.04, legend=c('posterior', 'prior'), col=c('blue','red'),lty=c(1,1),lwd=c(6,6))
#e
q <- 1/rgamma(10000, g2post[1], scale=g2post[2])
quantile(q, c(.05, .95))
#f
gibbs <- function(data, loops, mumu, sig2mu, psh, psc){
out <- matrix(0,loops,2)
out[1,1] <- mean(data)
out[1,2] <- var(data)
for (i in 2:loops){
parmmu <- updatemu(data, out[i-1,2], mumu, sig2mu)
out[i,1] <- rnorm(1,parmmu[1], sqrt(parmmu[2]))
parms2 <- updatesig2(data, out[i,1], psh, psc)
out[i,2] <- 1/rgamma(1,shape=parms2[1], scale=parms2[2])}
return(out)}
g3post <- gibbs(g, 10000, 80, 200, 2.1, .005)
plot(density(g3post[,2]), lwd=3, col='blue', main='Exam 2 Problem 2f')
#g
plot(density(g3post[,1]), lwd=3, xlim=c(30,130), col='blue', main='Exam 2 Problem 2g')
gmprior <- rnorm(10000, 80, sqrt(200))
lines(density(gmprior), lwd=3, col='red')
legend(100,.15, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#h
quantile(g3post[,1], c(.025,.975))
#lower end is 76.16505
#i
priorgmu <-rnorm(10000, 80, sqrt(200))
priorgsig<-1/rgamma(10000,shape=2.1,scale=0.02)
priorpred<-rnorm(10000,priorgmu,sqrt(priorgsig))
g3post<- gibbs(g, 10000, 80, 200, 2.1, .005)
postpred<-rnorm(100000,g3post[,1],sqrt(g3post[,2]))
hist(g,breaks = 5, xlim = c(40, 120),freq=FALSE, main='Exam 2 Problem 2i')
lines(density(priorpred), lwd=3, col='blue')
lines(density(postpred), lwd=3, col='red')
legend(90,.09, c("Prior Predictive","Posterior Predictive"), lwd=c(3,3), col=c("blue","red"))
#3
#a
t <- c(83.6, 81.1, 78.2, 78.1, 84.2, 85.1, 76.1, 83.7, 87.4, 80.5, 81.1, 84.5)
gb1post<- gibbs(g, 100000, 81, 160, 2.01, .008)
gb2post<- gibbs(t, 100000, 81, 160, 2.01, .008)
plot(density(gb1post[,2]), lwd=3, col='blue', main='Exam 2 Problem 3a')
lines(density(gb2post[,2]), lwd=3, col='red')
legend(70,.04, c("Bridgewater Posterior var"," Taylormade Posterior var"), lwd=c(3,3), col=c("blue","red"))
#b
plot(density(gb1post[,1]), lwd=3, col='blue',xlim=c(70,100), main='Exam 2 Problem 3b')
lines(density(gb2post[,1]), lwd=3, col='red')
legend(86,.2, c("Bridgewater Posterior mean","Taylormade Posterior mean"), lwd=c(3,3), col=c("blue","red"))
#c
diff <- gb1post[,1] - gb2post[,1]
plot(density(diff), lwd=3, col='blue', main='Exam 2 Problem 3c')
#d
diff <- gb1post[,1] - gb2post[,1]
quantile(diff, c(.025,.975))
#upper is 1.882012
#e
#No, I would accept because 0 is in the interval.
#4
#a
1/(1+9)
#b
curve(dbeta(x,12,139), col='blue', lwd=3, from=-.01, to=1, main='Exam 2 Problem 4b')
curve(dbeta(x,1,9), lwd=3, add=T, col='red')
legend(.6,10, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#c
12/(139+12)
#d
curve(dbeta(x,22,158), col='blue', lwd=3, from=-.01, to=1, main='Exam 2 Problem 4d')
curve(dbeta(x,1,9), lwd=3, add=T, col='red')
legend(.6,10, c("Posterior","Prior"), lwd=c(3,3), col=c("blue","red"))
#e
#E(X)=
22/(158+22)
#f
dairy <- rbeta(10000,12,139)
sheep <- rbeta(10000,22,158)
d <- dairy - sheep
quantile(d,c(.025,.975))
#No, i would not reject the null hypothesis. 0 is in the prob. interval
#5
#a
((1/6)*(1))/(((1/6)*1)+((1/3)*(3/5))+((1/2)*(1/5)))
#b
((1/6)*(1)*(1))/(((1/6)*(1)*(1))+((2/6)*(3/5)*(2/4))+((3/6)*(1/5)*(0)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energyGap_selection.R
\name{energyGap_selection}
\alias{energyGap_selection}
\title{Recommend levels with possible biological meaning}
\usage{
energyGap_selection(MarkovObject = NULL, m = 3)
}
\arguments{
\item{MarkovObject}{The output of the function, \code{MarkovHC}.}
\item{m}{An integer value. A (local) peak is defined as
a point such that m points on either side of it have a lower or equal value to it.}
}
\value{
This function plots the energy gap along the increasing levels;
red points indicate recommended levels with possible biological meaning,
and a red circle highlights the level that may have an optimal cluster number.
A list consisting of two components is returned: p is a vector containing the
recommended levels with possible biological meaning, and recommend_level is the
level that may have an optimal cluster number.
}
\description{
Function \code{energyGap_selection} recommends levels
with possible biological meaning.
}
\author{
Zhenyi Wang wangzy17@mails.tsinghua.edu.cn
}
| /MarkovHC/man/energyGap_selection.Rd | no_license | ZhenyiWangTHU/MarkovHC | R | false | true | 1,078 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energyGap_selection.R
\name{energyGap_selection}
\alias{energyGap_selection}
\title{Recommend levels with possible biological meaning}
\usage{
energyGap_selection(MarkovObject = NULL, m = 3)
}
\arguments{
\item{MarkovObject}{The output of the function, \code{MarkovHC}.}
\item{m}{An integer value. A (local) peak is defined as
a point such that m points on either side of it have a lower or equal value to it.}
}
\value{
This function plots the energy gap along the increasing levels;
red points indicate recommended levels with possible biological meaning,
and a red circle highlights the level that may have an optimal cluster number.
A list consisting of two components is returned: p is a vector containing the
recommended levels with possible biological meaning, and recommend_level is the
level that may have an optimal cluster number.
}
\description{
Function \code{energyGap_selection} recommends levels
with possible biological meaning.
}
\author{
Zhenyi Wang wangzy17@mails.tsinghua.edu.cn
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaa.R, R/modify.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{\%>\%}
\alias{filter}
\alias{slice}
\alias{mutate}
\alias{mutate_at}
\alias{mutate_all}
\alias{activate}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{magrittr}{\code{\link[magrittr]{\%>\%}}}
\item{tidygraph}{\code{\link[tidygraph]{filter}}, \code{\link[tidygraph]{slice}}, \code{\link[tidygraph]{mutate}}, \code{\link[tidygraph]{mutate_at}}, \code{\link[tidygraph]{mutate_all}}, \code{\link[tidygraph]{activate}}}
}}
| /particles/man/reexports.Rd | permissive | akhikolla/InformationHouse | R | false | true | 728 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaa.R, R/modify.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{\%>\%}
\alias{filter}
\alias{slice}
\alias{mutate}
\alias{mutate_at}
\alias{mutate_all}
\alias{activate}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{magrittr}{\code{\link[magrittr]{\%>\%}}}
\item{tidygraph}{\code{\link[tidygraph]{filter}}, \code{\link[tidygraph]{slice}}, \code{\link[tidygraph]{mutate}}, \code{\link[tidygraph]{mutate_at}}, \code{\link[tidygraph]{mutate_all}}, \code{\link[tidygraph]{activate}}}
}}
|
findFit <- function(Out,start)
{
### Extract fit statistics:
IndStart <- start
# Find end:
IndEnd <- IndStart
repeat
{
IndEnd <- IndEnd + 1
if (!(grepl("\\s*",Out[IndEnd]) | grepl("=",Out[IndEnd]))) break
}
modTxt <- Out[IndStart:IndEnd]
modTxt <- modTxt[grepl("=",modTxt)]
modTxt <- strsplit(modTxt,split="=")
modTxt <- lapply(modTxt,gsub,pattern="^\\s*",replacement="")
modTxt <- lapply(modTxt,gsub,pattern="\\s*$",replacement="")
  modTxt <- lapply(modTxt,function(x)c(x[1],paste(x[-1],collapse="=")))
  return(data.frame(Statistic=sapply(modTxt,"[",1),Value=sapply(modTxt,"[",2)))
} | /R/findFit.R | no_license | cran/lisrelToR | R | false | false | 639 | r | findFit <- function(Out,start)
{
### Extract fit statistics:
IndStart <- start
# Find end:
IndEnd <- IndStart
repeat
{
IndEnd <- IndEnd + 1
if (!(grepl("\\s*",Out[IndEnd]) | grepl("=",Out[IndEnd]))) break
}
modTxt <- Out[IndStart:IndEnd]
modTxt <- modTxt[grepl("=",modTxt)]
modTxt <- strsplit(modTxt,split="=")
modTxt <- lapply(modTxt,gsub,pattern="^\\s*",replacement="")
modTxt <- lapply(modTxt,gsub,pattern="\\s*$",replacement="")
  modTxt <- lapply(modTxt,function(x)c(x[1],paste(x[-1],collapse="=")))
  return(data.frame(Statistic=sapply(modTxt,"[",1),Value=sapply(modTxt,"[",2)))
} |
library(tidyverse)
library(tuneR)
library(seewave)
library(fftw)
library(kernlab)
library(e1071)
library(caret)
setwd("/home/p2p/Documents/Datasets/FMA/")
files_path_list <- list.files("fma_small_separated/") %>% map(~ list.files(paste0("fma_small_separated/", ., "/"), full.names = T))
names(files_path_list) <- list.files("fma_small_separated/")
songs_list <- files_path_list[names(files_path_list) %in% c("Rock", "Hip-Hop")] %>% map(~ head(., 500)) %>% map(~ map(., ~ readWave(.)))
songs_list %>% map(~ map_int(., ~ NROW(.@left)) %>% min)
song_length <- 639450
print(song_length/22050)
alignSongLength <- function(wav, sample_length){
wav@left <- wav@left[1:sample_length]
return(wav)
}
songs_list <- songs_list %>% map(~ map(., ~ alignSongLength(., song_length)))
songs_list %>% map(~ map_int(., ~ NROW(.@left)) %>% summary())
songs_list <- songs_list %>% map(~ map(., ~ downsample(., 11025)))
melfcc_list <- songs_list %>% map(~ map(., ~ melfcc(., wintime = 0.04496124 * 2, hoptime = 0.01124031 * 2, numcep = 64)))
melfcc_tbl_list <- melfcc_list %>% map(~ map_dfr(., ~ tibble(t(c(.)))))
melfcc_tbl_list <- melfcc_tbl_list %>% imap(~ mutate(.x, genre = names(melfcc_tbl_list[.y])))
# svm ---------------------------------------------------------------------
samplenum_list <- melfcc_tbl_list %>% map(~ nrow(.))
train_size <- 0.7
train_sampleids_list <- map(samplenum_list, ~ sample(., . * train_size))
training <- map2_dfr(melfcc_tbl_list, train_sampleids_list, ~ .x[.y, ])
predicting <- map2_dfr(melfcc_tbl_list, train_sampleids_list, ~ .x[-.y, ])
training <- training[complete.cases(training), ]
predicting <- predicting[complete.cases(predicting), ]
training$genre <- training$genre %>% as.factor()
predicting$genre <- predicting$genre %>% as.factor()
svm_model <- train(genre ~ ., data = training, method="svmRadial", tuneLength=3, preProcess=c("center", "scale"), trControl = trainControl(method = "cv"))
svm_predict <- predict(svm_model, predicting)
confusionMatrix(data = svm_predict, predicting$genre)
| /utils/fma_svm.R | permissive | matsumototo180/gtzan.keras | R | false | false | 2,037 | r | library(tidyverse)
library(tuneR)
library(seewave)
library(fftw)
library(kernlab)
library(e1071)
library(caret)
setwd("/home/p2p/Documents/Datasets/FMA/")
files_path_list <- list.files("fma_small_separated/") %>% map(~ list.files(paste0("fma_small_separated/", ., "/"), full.names = T))
names(files_path_list) <- list.files("fma_small_separated/")
songs_list <- files_path_list[names(files_path_list) %in% c("Rock", "Hip-Hop")] %>% map(~ head(., 500)) %>% map(~ map(., ~ readWave(.)))
songs_list %>% map(~ map_int(., ~ NROW(.@left)) %>% min)
song_length <- 639450
print(song_length/22050)
alignSongLength <- function(wav, sample_length){
wav@left <- wav@left[1:sample_length]
return(wav)
}
songs_list <- songs_list %>% map(~ map(., ~ alignSongLength(., song_length)))
songs_list %>% map(~ map_int(., ~ NROW(.@left)) %>% summary())
songs_list <- songs_list %>% map(~ map(., ~ downsample(., 11025)))
melfcc_list <- songs_list %>% map(~ map(., ~ melfcc(., wintime = 0.04496124 * 2, hoptime = 0.01124031 * 2, numcep = 64)))
melfcc_tbl_list <- melfcc_list %>% map(~ map_dfr(., ~ tibble(t(c(.)))))
melfcc_tbl_list <- melfcc_tbl_list %>% imap(~ mutate(.x, genre = names(melfcc_tbl_list[.y])))
# svm ---------------------------------------------------------------------
samplenum_list <- melfcc_tbl_list %>% map(~ nrow(.))
train_size <- 0.7
train_sampleids_list <- map(samplenum_list, ~ sample(., . * train_size))
training <- map2_dfr(melfcc_tbl_list, train_sampleids_list, ~ .x[.y, ])
predicting <- map2_dfr(melfcc_tbl_list, train_sampleids_list, ~ .x[-.y, ])
training <- training[complete.cases(training), ]
predicting <- predicting[complete.cases(predicting), ]
training$genre <- training$genre %>% as.factor()
predicting$genre <- predicting$genre %>% as.factor()
svm_model <- train(genre ~ ., data = training, method="svmRadial", tuneLength=3, preProcess=c("center", "scale"), trControl = trainControl(method = "cv"))
svm_predict <- predict(svm_model, predicting)
confusionMatrix(data = svm_predict, predicting$genre)
|
###############################
# Script to calculate sensitivity and exposure from the simulations
# Will Vieira
# July 30, 2019
##############################
##############################
# Steps:
# get data (all simulations)
# For the sensitivity figure
# Organize stateOccup for each time and repetition in an array
# calculate mean and CI transient dynamic for the whole latitude
# For the exposure figure
# Calculate the mean and CI euclidean distance between last and first state proportion
# Save calculated data as RDS
##############################
print('Running simulation analysis for figure 4')
# load simulations from the server
# system(paste("fish -c", shQuote("mammpull STMproject/simResults/output ms_STM-managed/sim-results")))
# getting data
print('[1/4] Getting simulation data')
cellSize = 0.3
RCP = 4.5 # TODO think if I could also use the no CC simulation
managPractice <- 0:4
managInt <- c(0.0025, 0.01, 0.0025, 0.0025)
reps = 1:15
steps = 200
states <- c('B', 'T', 'M', 'R')
# load environment scaling parameters
load('num-results/sysdata.rda')
load('sim-results/data/landInfo.rda')
mainFolder = 'sim-results/output/'
count = 1
for(cc in RCP) {
for(mg in managPractice) {
folderName = paste0('RCP_', cc, '_mg_', mg)
for(rp in reps) {
fileName = paste0('RCP_', cc, '_mg_', mg, '_rep_', rp, '.RDS')
assign(sub('\\.RDS$', '', fileName), readRDS(paste0(mainFolder, folderName, '/', fileName)))
cat(' loading ouput files ', round((count/(length(RCP) * length(managPractice) * length(reps))) * 100, 0), '%\r')
count <- count + 1
}
}
}
# clean up
rm(list = c('folderName', 'fileName', 'mainFolder'))
#
# Get env1 unscaled
env1unscaled <- get('RCP_4.5_mg_0_rep_1')[['env1']] * vars.sd['annual_mean_temp'] + vars.means['annual_mean_temp']
#
# Organize stateOccup for each time and repetition in an array
print('[2/4] Organize state occupancy in lists')
# Create length(managPractice) lists containing 4 (states) arrays with dimension steps x landRow x reps
count = 1
for(mg in managPractice)
{
arB = arT = arM = arR = array(0, dim = c(nCol, steps + 1, length(reps)))
for(rp in reps)
{
sim <- get(paste0('RCP_', RCP, '_mg_', mg, '_rep_', rp))[['stateOccup']]
# get a matrix for each state containing temporal variation for each
# col of the landcape
for(state in states)
{
assign(paste0('mt', state), sapply(sim, function(x) x[state, ]))
}
arB[,,rp] <- mtB
arT[,,rp] <- mtT
arM[,,rp] <- mtM
arR[,,rp] <- mtR
cat(' creating output lists ', round(count/(length(managPractice) * length(reps)) * 100, 0), '%\r')
count <- count + 1
}
assign(paste0('list_mg', mg), list(arB = arB, arT = arT, arM = arM, arR = arR))
}
# clean up
rm(list = c(paste0('ar', states), paste0('mt', states), 'sim'))
#
# calculate mean and CI transient dynamic for the whole latitude
print('[3/4] Calculate mean and CI transient dynamic for each landscape latitude')
count = 1
for(mg in managPractice)
{
listMg <- get(paste0('list_mg', mg))
dims <- dim(listMg[[1]])
# get array result with mean and CI
for(state in states)
{
# dataframes for mean and CI
mMean = mCI = matrix(0, nrow = dims[1], ncol = dims[2])
# mean
for(i in reps) mMean <- mMean + listMg[[paste0('ar', state)]][,, i]
mMean <- mMean/length(reps)
# CI
## first get sd()
for(i in 1:dims[1]) {
for(j in 1:dims[2]) {
mCI[i, j] <- sd(listMg[[paste0('ar', state)]][i, j, ])
}
}
mCI <- mCI * 1.96/sqrt(length(reps))
# create array with mean and CI for the state
assign(paste0('ar', state), simplify2array(list(mMean, mCI)))
cat(' creating summary lists ', round(count/(length(managPractice) * length(states)) * 100, 0), '%\r')
count <- count + 1
}
# create list for each managPractice
assign(paste0('summaryList_mg', mg), list(arB = arB, arT = arT, arM = arM, arR = arR))
}
# clean up
rm(list = c('listMg', 'mMean', 'mCI', paste0('ar', states)))
# Sum the mean and CI of all states
print('[3/4] Calculate mean and CI transient dynamic for each landscape latitude')
for(mg in managPractice)
{
cat('Management practice ', mg + 1, ' of ', length(managPractice), '\n')
sim <- get(paste0('summaryList_mg', mg))
datM = datCI = matrix(0, nrow = dim(sim[[1]])[1], ncol = dim(sim[[1]])[2] - 1)
spar = 0.8
for(i in 1:dim(sim[[1]])[1])
{
datM[i, ] <- abs(diff(smooth.spline(sim[['arB']][,,1][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arT']][,,1][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arM']][,,1][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arR']][,,1][i, ], spar = spar)$y))
datCI[i, ] <- abs(diff(smooth.spline(sim[['arB']][,,2][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arT']][,,2][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arM']][,,2][i, ], spar = spar)$y)) +
abs(diff(smooth.spline(sim[['arR']][,,2][i, ], spar = spar)$y))
cat(' calculating temporal difference ', round(i/dim(sim[[1]])[1] * 100, 0), '% \r')
}
cat(' calculating temporal difference ', round(i/dim(sim[[1]])[1] * 100, 0), '% \n')
assign(paste0('tempDiff_mg', mg), simplify2array(list(datM, datCI)))
}
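# Note: each tempDiff_mg* array stores, for every landscape column (matrix row),
# the per-time-step absolute change of the four smoothed state proportions,
# summed over states: slice [,,1] holds the means, slice [,,2] the CIs.
# Larger values indicate a more sensitive latitude.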
#
# Calculate the mean and CI euclidean distance between last and first state proportion
print('[4/4] Calculate Exposure for each landscape latitude')
# Confidence interval function
ci = function(x) 1.96 * sd(x)/sqrt(length(x))
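# Quick sanity check with illustrative values (not part of the analysis):
# ci(c(0.2, 0.25, 0.3)) is 1.96 * 0.05 / sqrt(3), i.e. about 0.057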
# Create length(managPractice) lists containing a data frame with Dist values
# (nrow) for each rep (ncol)
count = 1
for(mg in managPractice)
{
df <- data.frame(matrix(rep(NA, dims[1] * length(reps)), nrow = dims[1]))
for(rp in reps)
{
sim <- get(paste0('RCP_', RCP, '_mg_', mg, '_rep_', rp))[['stateOccup']]
    # calculate euclidean dist between first [1] and last [steps + 1] state proportion
for(latitude in 1:dims[1])
df[latitude, rp] <- dist(rbind(sim[[1]][, latitude], sim[[(steps + 1)]][, latitude]))
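    # Note: dist(rbind(a, b)) with two rows returns the single (Euclidean, the
    # default) distance between the two state-proportion vectors, e.g.
    # dist(rbind(c(1, 0, 0, 0), c(0, 1, 0, 0))) equals sqrt(2).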
cat(' creating output data frames ', round(count/(length(managPractice) * length(reps)) * 100, 0), '%\r')
count <- count + 1
}
  # Calculate mean and CI (save in a data frame with mean and CI columns)
dfSummary <- data.frame(distMean = apply(df, 1, mean))
dfSummary$distCI <- apply(df, 1, ci)
assign(paste0('eDist_mg', mg), dfSummary)
}
#
# Save calculated data as RDS
obj <- c('env1unscaled', paste0('tempDiff_mg', managPractice), paste0('eDist_mg', managPractice))
save(list = obj, file = 'sim-results/data/sim_summary_fig4.rda')
#
| /sim-results/run_analysis_fig4.R | permissive | willvieira/ms_STM-managed | R | false | false | 6,977 | r |
|
#produce content displayed through ui
#---------------------------------------
# Load packages (only matters for publishing on shiny.io)
library(tidyverse)
library(DT)
library(shinyjs)
#-----------------------------------
# Basic Structure
server <- function(input, output, session){
#------------------------------------
load(".RData")
#------------------------------------
# Get current time
output$currentTime <- renderText({
# Refresh every second
invalidateLater(1000, session)
# Output server time
format(Sys.time())
})
#-----------------------------------
#Tracker Outputs
#Load the progress tracker output
output$plotProgress <- renderPlot({
tbPriorities %>%
ggplot(aes(fill = Progress, x = Count, y = "")) +
geom_bar(position = position_fill(reverse = TRUE),
stat = "identity",
width = 1) +
coord_flip() +
theme(
legend.position = "none",
# legend.justification = "right",
plot.title = element_text(size=18, face="bold", margin=margin(c(0,0,8,0))),
# legend.margin=margin(c(0,0,-4,0)),
# legend.title=element_text(size=16),
# legend.text=element_text(size=16),
panel.spacing = margin(c(0,0,0,0)),
panel.border = element_rect(colour = "black", fill=NA, size=0.5),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x = element_blank(),
panel.background = element_blank()
) +
scale_x_discrete(expand = c(0, 0)) +
scale_y_discrete(expand = c(0, 0)) +
scale_fill_manual(values=ggpalette1, drop = FALSE, name="Progress")
}, height = "auto")
# Text for timeline vis description
output$timelineText <- renderText({
# Refresh every minute
invalidateLater(60000, session)
# Return text explaining when the 100 Days of Action began, and if
# they are still happening, the current date and number. If they are
# no longer happening, return the date they ended.
ifelse(which(tbDays$Date == Sys.Date())<=101,
paste0("Mayor Scott took office on 12/8/20. Today is day ",
which(tbDays$Date == Sys.Date()-1),
"."
),
paste0("The 100 Days of Action began when Mayor Scott took office ",
"on 12/8/20. The last day was 3/18/21."
)
)
})
output$progressLegend <- renderUI({
div(class="legend",
div(style="background:#199eb4"), p("Complete"),
div(style=paste0("background:", bc_gold, ";")), p("In progress"),
div(style="background:whiteSmoke;"), p("Not yet started")
)
})
#load the timeline tracker output
output$plotTimeline <- renderPlot({
# Refresh every minute
invalidateLater(60000, session)
tbDays %>%
filter(`Mayoral First 100 Days Number` < 101) %>%
mutate(Status = factor(sapply(.$Date,function(x) {ifelse(x > Sys.Date(), "Remaining", ifelse(x == Sys.Date(), "Current","Past"))}),
levels=c("Past", "Current","Remaining")),
Total = 1) %>%
ggplot(aes(fill = Status, x = Total, y = "")) +
geom_bar(position = position_fill(reverse = TRUE),
stat = "identity",
width = 1) +
coord_flip() +
theme(
legend.position = "none",
panel.border = element_rect(colour = "black", fill=NA, size=0.5),
panel.spacing = margin(c(0,0,0,0)),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x = element_blank(),
panel.background = element_blank()
) +
scale_x_discrete(expand = c(0, 0)) +
scale_y_discrete(expand = c(0, 0)) +
scale_fill_manual(values=ggpalette2, drop=FALSE, name = "Status")
}, height = "auto")
  legendItemSpacing <- "padding-right:4px;"
output$timelineLegend <- renderUI({
div(class="legend",
div(style="background:DimGrey"), p("Past"),
div(style=paste0("background:", bc_gold, ";")), p("Current"),
div(style="background:whiteSmoke;"), p("Remaining")
)
})
# Table with progress on all priority areas and actions
output$tbPriorities = DT::renderDataTable({
DT::datatable(
cbind(tbCommittees, 'Expand' = '▼'),
options = list(
dom = 'ft',
searching = F,
pageLength = 10,
columnDefs = list(
list(width = '300px', targets = c(3)),
list(width = '18px', targets = c(1)),
list(width = '172px', targets = c(4)),
list(visible = FALSE, targets = c(0, 2, 4, 5)),
list(orderable = FALSE, targets = "_all"),
list(className = 'details-control', targets = c(7)),
list(className = 'dt-center', targets = c(0,4))
),
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'font-size': '18px'});",
"}")
),
colnames = c("", "", "", "Priority Area", "", "", "Progress", ""),
callback = JS("
table.column(6).nodes().to$().css({cursor: 'pointer'});
var format = function(d) {
if (d[5] == null) {
return '<p>There is no additional data to display here.</p>';
} else {
var result = '<table class=\"priorities-hierarchy-2\" style=\"font-size:16px;padding:0.5em;margin-left:32px;width:calc(100% - 24px);\">';
result += '<tr><th>Action</th><th>Status</th><th>Parties Responsible</th></tr>';
for (var i in d[5]){
result += '<tr >';
for (var j in d[5][i]) {
if (j == 0) {
result += '<td style=\"width:300px;\">' + d[5][i][j] + '</td>';
} else if (j == 1) {
result += '<td style=\"width:120px;\">' + d[5][i][j] + '</td>';
} else {
result += '<td>' + d[5][i][j] + '</td>';
}
}
result += '</tr>';
}
result += '</table>';
return result;
}
};
table.on('click', 'td.details-control', function() {
var td = $(this), row = table.row(td.closest('tr'));
if (row.child.isShown()) {
row.child.hide();
td.html('▼');
} else {
row.child(format(row.data())).show();
td.html('▲');
}
});"
)
,escape = F
) %>%
formatStyle(
names(tbCommittees),
target = 'row',
backgroundColor = 'white', fontSize = '18px') %>%
formatStyle('Priority Area', fontSize = '18px', fontWeight = 'bold') %>%
formatStyle('Progress', fontSize = '18px', fontWeight = 300) %>%
formatStyle('Expand', fontSize = '18px', color="grey", fontWeight = 300)
})
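  # Note on the table above: the per-action detail rows travel in a hidden
  # column, and the JS callback uses DataTables' row().child() API, so clicking
  # the 'Expand' cell toggles a nested HTML table built client-side by format().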
#------------------------------------
# Download button for data on all actions
output$downloadActions <- downloadHandler(
filename = function() {
paste(Sys.Date(), "-mayor-scott-transition-tracker-actions.csv", sep="")
},
content = function(file) {
write.csv(tbPriorities %>%
rename(Status = Progress,
`Priority Area` = Committee) %>%
select(`Action #`, Action, Status, `Priority Area`,`Parties Responsible`),
file,
row.names=FALSE)
}
)
#-------------------------------------
# Updates table outputs
output$updateContent <- renderUI({
div(class="Updates",
HTML(toString(updateText))
)
})
}
| /server.R | permissive | city-of-baltimore/transition-agenda | R | false | false | 8,133 | r |
|
library(revemetrics)
library(tidyverse)
library(igraph)
library(networkDynamic)
require(ndtv)
require(ggthemes)
require(RColorBrewer)
# Mapping Alliance Cohesion Patterns?
"
select *
from edvald_research.umd.warptologs_meta
where allianceID = toAllID and allianceID = 99006109
" %>% QueryDB() -> A1
#save(A1,file="~/ETD/selection/code/examples/alliance_99006109.Rdata")
head(A1)
A1 %>% group_by(corporationID) %>% tally()
# Interaction Trend
A1 %>% group_by(eventDate) %>% tally() %>% arrange(eventDate) %>%
ggplot(data=.,aes(x=as.Date(eventDate),y=n)) +
geom_line(lwd=1,color="orange")+ theme_hc()
# Interaction Trend (by corp)
A1 %>% group_by(eventDate,corporationID) %>% tally() %>% arrange(eventDate) %>%
ggplot(data=.,aes(x=as.Date(eventDate),y=n)) +
geom_line(lwd=1)+ theme_hc() + facet_wrap(facets=~corporationID,ncol=4)
# Interaction Trends Within/Between Corporations
A1 %>% mutate(coord=ifelse(corporationID!=toCorpID,"between","within")) %>%
group_by(eventDate,coord) %>% tally() %>% arrange(eventDate) %>%
ggplot(data=.,aes(x=as.Date(eventDate),y=n,color=coord)) +
geom_line(lwd=.7)+ theme_hc() + facet_wrap(facets=~coord,nrow=2) + geom_point()
# The STATIC Network ----------------------------------------------
# Allocating Color Scheme
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
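# col_vector concatenates every qualitative RColorBrewer palette (74 colours in
# total); the first 32 are taken below, one per corporation.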
chCp = rbind(A1 %>% select(characterID,corporationID) %>%
distinct() %>% as.data.frame(.),
A1 %>% select(characterID=toCharID,corporationID=toCorpID) %>%
distinct() %>% as.data.frame(.)) %>% distinct
cmap = data.frame(corporationID=unique(chCp$corporationID),
color=col_vector[1:32],stringsAsFactors = F) %>%
merge(chCp,.,by="corporationID")
# By individuals clustered within corporations
em = A1 %>% filter(eventDate=='2017-02-04') %>%
distinct %>%
select(characterID,toCharID) %>% as.matrix();colnames(em) = NULL
actors = unique(c(em[,1],em[,2]))
net <- graph_from_data_frame(em,actors,directed=F)
V(net)$color = cmap$color[match(actors,cmap$characterID)]
V(net)$corporationID = cmap$corporationID[match(actors,cmap$characterID)]
plot.igraph(net,vertex.color=V(net)$color,vertex.frame.color="white",
edge.color="grey",edge.arrow.size=0.3,edge.width=2,vertex.size=5,
vertex.label="",main="")
legend('topright',legend=unique(V(net)$corporationID),col=unique(V(net)$color),pch=15,cex=.7,box.col="white",title="Corp. Names")
# By corporation
em = A1 %>% filter(eventDate=='2016-10-04') %>%
select(corporationID,toCorpID) %>%
filter(corporationID!=toCorpID) %>%
as.matrix();colnames(em) = NULL
actors = unique(c(em[,1],em[,2]))
net <- graph_from_data_frame(em,actors,directed=F)
#net = simplify(net)
V(net)$color = cmap$color[match(actors,cmap$corporationID)]
V(net)$corporationID = cmap$corporationID[match(actors,cmap$corporationID)]
plot.igraph(net,vertex.color=V(net)$color,vertex.frame.color="white",
edge.color="grey",edge.arrow.size=0.3,edge.width=2,vertex.size=5,
vertex.label="",main="")
legend('topright',legend=unique(V(net)$corporationID),
col=unique(V(net)$color),pch=15,cex=.7,
box.col="white",title="Corp. Names")
# Need to think about fleets. Who is in them and how that works under times of stress.
render.d3movie(network(net),output.mode = 'htmlWidget')
| /alliances/examine_alliance_99006109.R | no_license | edunford/eve_selection | R | false | false | 3,570 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_elo.R
\name{print.epp_results}
\alias{print.epp_results}
\title{Printing EPP results}
\usage{
\method{print}{epp_results}(x, ...)
}
\arguments{
\item{x}{epp_results. The result of a function \code{\link{calculate_epp}}.}
\item{...}{other parameters}
}
\value{
No return value, prints the structure of the object
}
\description{
Printing EPP results
}
| /man/print.epp_results.Rd | no_license | ModelOriented/EloML | R | false | true | 440 | rd |
|
# MODEL DESCRIPTION
#***********************************************************************
### var G[N_max,N_max,T], P[N_max,N_max,T];
var loglam[T-1], Na[T], G[(N_max+1),T], P[(N_max+1),T]; # time-varying vectors are enough in our case
# Model
model{
# THE COVARIATES ####
# Define the priors for the logistic regression parameters
alpha1 ~ dnorm(0,0.01)
alphaa ~ dnorm(0,0.01)
alphar ~ dnorm(0,0.01)
alphal ~ dnorm(0,0.01)
beta1 ~ dnorm(0,0.01)
betaa ~ dnorm(0,0.01)
betar ~ dnorm(0,0.01)
betal ~ dnorm(0,0.01)
# Define the observation error prior
sigy <- 1/tauy
tauy ~ dgamma(0.001,0.001)
# Define the logistic regression equations
# for(t in 1:(T-1)){
for(t in 1:T){
logit(phi1[t]) <- alpha1 + beta1*f[t] # corresponds to the year 1963
logit(phia[t]) <- alphaa + betaa*f[t]
# log(rho[t]) <- alphar + betar*t # We assume here that t=1
log(rho[t]) <- alphar + betar*stdT[t] # We assume here that t=1
# logit(lambda[t]) <- alphal + betal*(t+1)
logit(lambda[t]) <- alphal + betal*stdT[t]
}
# THE STATE SPACE MODEL ####
  # 0-1 trick for the states process, the observation process can stay the same
# Define r[t]
# for (t in 3:(T-1)){
# r[t-2] <- (Na[t+1]+N1[t+1])/(Na[t]+N1[t])
# }
# Define the initial population priors
for(t in 1:2){
# # N1[t] ~ dnorm(200,0.000001)
# # Na[t] ~ dnorm(1000,0.000001) --> 1000+-1000
# N1[t] ~ dpois(200)
# Na[t] ~ dbin(0.5,2000) # --> 1000+-500
Na[t] ~ dbin(0.5,200) # --> 100+-50
}
#####
# Zero trick for loglik of HMM ####
# an observation x[i] contributes a likelihood L[i]
# the "zeros trick": a Poisson(phi) observation of zero has likelihood exp(-phi),
# if the observed data is a set of 0's, and phi[i] is set to - log(L[i]),
# we will obtain the correct likelihood contribution for x[i] i.e. L[i]
# defining:
# spy[i] ~ pdf(y[i],params)/C # scaled probability of y[i] with pdf the required formula
# 1 ~ dben(spy[i])
# together yield the same thing as
# y[i] ~ pdf(params)
# which essentially is the value of the pdf when y[i] has its particular value and when the params have their particular randomly generated MCMC values
#####
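  # A minimal generic sketch of the zeros trick (illustrative, not part of this
  # model): if loglik.x is the log-likelihood of a datum x under the desired
  # non-standard density, then supplying zero = 0 as data and writing
  #   zero ~ dpois(phi.x)
  #   phi.x <- -loglik.x + C
  # contributes exp(loglik.x - C) to the joint density, i.e. the correct
  # likelihood up to the constant C (C only keeps the Poisson mean positive).
  # This is exactly the construction used for loglik[t] below.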
for (t in 3:T){
# Use a discrete Uniform prior so the only influence on the posterior distr is the Upper limit
Na[t] ~ dcat(Na_prior[]) # Na_prior = rep(1/(Up+1), Up+1); entered as data
}
C <- 1000000
# loglam[1] <- (log(Na[t-1]) + log(rho[t-1]) + log(phi1[t-1]))
# loglam[2] <- (log(Na[2-1]) + log(rho[2-1]) + log(phi1[2-1]))
for(t in 3:T){
zeros[t] ~ dpois(phi[t])
phi[t] <- -loglik[t] + C
# lam[t] <- Na[t-1]*rho[t-1]*phi1[t-1]
loglam[t-1] <- (log(Na[t-1-1]) + log(rho[t-1-1]) + log(phi1[t-1-1]))
for (i in 0:(N_max-1)){ # from 0!!!
G[i+1,t] <- exp(-exp(loglam[t-1]) + i*loglam[t-1] - logfact(i))
# G[i+1,t] <- exp(-exp(loglam[t-1]) + i*loglam[t-1] - logfact_m[(i+1)])
# P[i+1,t] <- ifelse((i + Na[t-1] - Na[t])>0,
# exp(Na[t]*log(phia[t-1]) + (i + Na[t-1] - Na[t])*log(1-phia[t-1]) + logfact_m[(i + Na[t-1]+1)] - logfact_m[(abs(i + Na[t-1] - Na[t])+1)] - logfact_m[(Na[t]+1)]),
# 0)
P[i+1,t] <- ifelse((i + Na[t-1] - Na[t])>0,
exp(Na[t]*log(phia[t-1]) + (i + Na[t-1] - Na[t])*log(1-phia[t-1]) + logfact(i + Na[t-1]) - logfact(abs(i + Na[t-1] - Na[t])) - logfact(Na[t])),
0)
}
G[(N_max+1),t] <- max(0,1- sum(G[1:(N_max),t]))
P[(N_max+1),t] <- ifelse((N_max + Na[t-1] - Na[t])>0,
exp(Na[t]*log(phia[t-1]) + (N_max + Na[t-1] - Na[t])*log(1-phia[t-1]) + logfact(N_max + Na[t-1]) - logfact(abs(N_max + Na[t-1] - Na[t])) - logfact(Na[t])),
0)
# P[(N_max+1),t] <- ifelse((N_max + Na[t-1] - Na[t])>0,
# exp(Na[t]*log(phia[t-1]) + (N_max + Na[t-1] - Na[t])*log(1-phia[t-1]) + logfact_m[(N_max + Na[t-1]+1)] - logfact_m[(abs(N_max + Na[t-1] - Na[t])+1)] - logfact_m[(Na[t]+1)]),
# 0)
#ifelse function is evaluated during sampling and not at compile time
# LOL. The ifelse function works just like the one in R.
# Note that it is not a control flow statement:
# if you have ifelse(x,a,b) then both a and b need to be evaluated.
# So for example, you can't have y < - ifelse(x==0, 0, log(x)) because log(0) is not finite.
      # Different: Also, note that an if/else statement in the BUGS language can only be allowed if the predicate is fixed at compile time
    loglik[t] <- log(sum(G[,t] * P[,t])) # element-wise multiplication is enough here
}
# Define the observation process for the census/index data
for(t in 3:T){
y[t] ~ dnorm(Na[t],tauy)
}
# THE RECOVERY MODEL ####
# Calculate the no. of birds released each year
# for(t in 1:T1){
# rel[t] <- sum(m[t,])
# }
# Define the recovery likelihood
for(t in 1:T1){
m[t, 1:(T2+1)] ~ dmulti(p[t,], rel[t])
}
# Calculate the cell probabilities for the recovery table
for(t1 in 1 : (T1-1)){
# Calculate the diagonal
p[t1, t1] <- lambda[t1]*(1-phi1[t1])
# Calculate value one above the diagonal
p[t1, t1+1] <- lambda[t1+1]* phi1[t1]*(1-phia[t1+1])
# Calculate remaining terms above diagonal
for(t2 in (t1+2):T2){
for(t in (t1+1):(t2-1)){
lphi[t1, t2, t] <- log(phia[t])
}
# Probabilities in table
p[t1,t2] <- lambda[t2]*phi1[t1]*(1-phia[t2])*exp(sum(lphi[t1,t2,(t1+1):(t2-1)]))
}
for(t2 in 1:(t1-1)){
# Zero probabilities in lower triangle of table
p[t1, t2] <- 0
}
# Probability of an animal never being seen again
p[t1, T2+1] <- 1 - sum(p[t1,1:T2])
}
# Final row
p[T1,T1] <- lambda[T1]*(1-phi1[T1])
for(t in 1:(T1-1)){
p[T1,t] <- 0
}
p[T1,T1+1] <- 1 - p[T1,T1]
}
| /BKM/BKM_Bugs_HMM.R | no_license | Pegpeet/SCDA | R | false | false | 6,134 | r |
|
#' @encoding UTF-8
#' @title Add transparency
#'
#' @description Alpha function to add transparency in graphic objects
#'
#' @param color Any color or vector of colors
#' @param alpha Level for alpha, default is \code{0.5}
#'
#' @keywords Graphs
#'
#' @author Daniel Marcelino, \email{dmarcelino@@live.com}
#'
#' @examples
#' # setup data
#' x <- seq(0, 50, 1)
#' supply <- x * -2 + 100
#' demand <- x * 2
#' # Point size and transparency
#' plot(supply, demand, pch = 19, cex = 3, col = fade("red", 0.5))
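#' # A vector of colours with a single alpha also works (additional illustration):
#' plot(supply, demand, pch = 19, cex = 3, col = fade(c("red", "blue"), 0.3))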
#'
#'@export
#'
#' @importFrom grDevices col2rgb rgb
#'
`fade`<- function (color, alpha = .5)
{
if(missing(color))
stop("vector of colors missing")
col <- col2rgb(color, TRUE)/255
if (length(color) != length(alpha)) {
if (length(color) > 1 && length(alpha) > 1) {
stop("Only one color and alpha can be vectorised!")
}
if (length(color) > 1) {
alpha <- rep(alpha, length.out = length(color))
}
else if (length(alpha) > 1) {
col <- col[, rep(1, length(alpha)), drop = FALSE]
}
}
alpha[is.na(alpha)] <- col[4, ][is.na(alpha)]
alpha_col <- rgb(col[1, ], col[2, ], col[3, ], alpha)
alpha_col[is.na(color)] <- NA
alpha_col
}
NULL
| /SciencesPo/R/fade.R | no_license | ingted/R-Examples | R | false | false | 1,192 | r |
|
function(input, output, session) {
hosptab <- reactiveValues()
hosptab$df <- hosptable
##------------------ UPLOADING FILE ------------------------------
# output$contents <- renderTable({
# req(input$file1)
# df <- read.csv(input$file1$datapath,
# header = input$header,
# sep = input$sep,
# quote = input$quote)
#if(input$disp == "head") {
# return(head(df))
#}
#else {
# return(df)
#}
#})
#####------------------ DOwnloading
# Reactive value for selected dataset ----
#datasetInput <- reactive({
# switch(input$dataset,
  #            "US superzip data" = cars)
#})
# Table of selected dataset ----
#output$table <- renderTable({
# datasetInput()
#})
# Downloadable csv of selected dataset ----
#output$downloadData <- downloadHandler(
# filename = function() {
# paste(input$dataset, ".csv", sep = "")
#},
#content = function(file) {
# write.csv(datasetInput(), file, row.names = FALSE)
#}
#)
df_r <- reactiveValues(new_data = datf)
clicked_markers <- reactiveValues(clickedMarker = NULL)
observeEvent(input$map_click, {
if (input$rb == "Add a marker") {
click <- input$map_click
click_lat <- click$lat
click_long <- click$lng
clicked_markers$clickedMarker <- c(clicked_markers$clickedMarker, 1)
id <- length(clicked_markers$clickedMarker)
hosptab$df <- rbind(rep(NA,ncol(datf)),hosptab$df)
# hosptab$df$id[1] <- as.numeric(input$id)
hosptab$df$lat[1] <- click_lat
hosptab$df$lon[1] <- click_long
hosptab$df$exist[1] <- as.character(input$exist)
write.csv(x=hosptab$df,file="data/vernewatractores.csv")
#newatracttors<- read.csv("vernewatractores.csv")
}
})
removemark<- observe({
# clicked_markers$clickedMarker <- c(clicked_markers$clickedMarker, 1)
#id <- length(clicked_markers$clickedMarker)
if (input$rb == "Remove a marker") {
showModal(modalDialog(
title = "Important message",
"You want to remove the attractor",
easyClose = TRUE
))
click <- input$map_click
click_lat <- click$lat
click_long <- click$lng
isolate(temp<- hosptab$df[-which(
#hosptab$df$id == input$id &
hosptab$df$lat == click_lat &
hosptab$df$lon == click_long &
hosptab$df$exist == input$exist
),])
print(temp)
write.csv(x=hosptab$df,file="data/vernewatractores.csv") #1
ifelse(test = nrow(temp) == 0,
             yes = print("found nothing to remove"),
no = hosptab$df <- temp
)
}
})
savechanges <- observe ({
if (input$saved){
savedchang<- read.csv("data/vernewatractores.csv")
View(savedchang)
      showNotification("Changes saved.")
}
})
## update
updateDistances <- observe ({
source("functionsnew.R")
if (input$upd){
leafletProxy("map", session) %>%
addCircleMarkers(data = valuesnew,
radius = 2,
color = ~pal(distance))
}
write.csv(x=valuesnew, file="data/distanciasnuevas.csv")
})
output$tabledata <- renderDataTable({hosptab$df}) #,rownames= TRUE
output$map <- renderLeaflet({
leaflet(data=hosptab$df)%>%
addTiles(layerId = "tiles")%>%
addMarkers(icon = ~isolate(iconify(exist)), layer=~id)%>%
addCircleMarkers(data = values,
radius = 2,
color = ~pal(distance))%>%
# addMarkers(icon = ~isolate(iconify(exist))) %>%
# Markers hospitals with popups
#addMarkers(data=destiny,lng=~longitude, lat=~latitude, label = ~name,icon=~markerhosp,
# popup=~paste("<h6 style='color:red'>#",code,"</h6>","<strong>Hospital:</strong>",name))%>%
# Map view
setView(lng = 115.25942542764824, lat =-8.645581443331796, zoom = 11) #
})
observeEvent(input$providerName, {
leafletProxy("map", session) %>%
addProviderTiles(input$providerName, layerId = "tiles")
})
# Icon marker hospital
#markerhosp <- makeIcon(
# iconUrl = "http://cdn2.iconfinder.com/data/icons/location-map-simplicity/512/hospital-512.png",
#iconWidth = 1, iconHeight = 1
#)
# Create a color palette that indicates proximity (near distances)
pal <- colorNumeric(palette = rev(c("#556270", "#4ECDC4", "#C7F464", "#FF6B6B", "#C44D58")),
domain = values$distance)
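  # colorNumeric() builds a continuous distance-to-colour mapping over the
  # reversed palette above, so the circle markers shade from red tones (short
  # distances) to blue-grey (long distances).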
# Events: Add and Clear markers hospitals
v <- reactiveValues(msg = "")
observeEvent(input$map_zoom, {
v$msg <- paste("Zoom changed to", input$map_zoom)
})
observeEvent(input$map_bounds, {
v$msg <- paste("Bounds changed to", paste(input$map_bounds, collapse = ", "))
})
output$message <- renderText(v$msg)
# End: add and clear markers
}
| /ST_Radio_buttons_Button_Update_and_Save_changes/server.R | no_license | alemontiels/ST | R | false | false | 5,228 | r |
|