content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(NaN, NaN, NaN, 1.27319747457422e-312, 5.2625495605761e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612735985-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 348 | r | testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(NaN, NaN, NaN, 1.27319747457422e-312, 5.2625495605761e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
\name{plot.solist}
\alias{plot.solist}
\title{Plot a List of Spatial Objects}
\description{
Plots a list of two-dimensional spatial objects.
}
\usage{
\method{plot}{solist}(x, \dots, main, arrange=TRUE,
nrows=NULL, ncols=NULL, main.panel=NULL,
mar.panel=c(2,1,1,2), hsep=0, vsep=0,
panel.begin=NULL, panel.end=NULL, panel.args=NULL,
panel.begin.args=NULL, panel.end.args=NULL, panel.vpad = 0.2,
plotcommand="plot",
adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL,
adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE)
}
\arguments{
\item{x}{
An object of the class \code{"solist"},
essentially a list of two-dimensional spatial datasets.
}
\item{\dots}{
Arguments passed to \code{\link{plot}} when generating each
plot panel.
}
\item{main}{
Overall heading for the plot.
}
\item{arrange}{
Logical flag indicating whether to plot the objects
side-by-side on a single page (\code{arrange=TRUE})
or plot them individually in a succession of frames
(\code{arrange=FALSE}).
}
\item{nrows,ncols}{
Optional. The number of rows/columns in the plot layout
(assuming \code{arrange=TRUE}).
You can specify either or both of these numbers.
}
\item{main.panel}{
Optional. A character string, or a vector of character strings,
or a vector of expressions, giving the headings for each
plot panel.
}
\item{mar.panel}{
Size of the margins outside each plot panel.
A numeric vector of length 4 giving the bottom, left, top,
and right margins in that order. (Alternatively the vector
may have length 1 or 2 and will be replicated to length 4).
See the section on \emph{Spacing between plots}.
}
\item{hsep,vsep}{
Additional horizontal and vertical separation between plot panels,
expressed in the same units as \code{mar.panel}.
}
\item{panel.begin,panel.end}{
Optional. Functions that will be executed before and after each panel is
plotted. See Details.
}
\item{panel.args}{
Optional. Function that determines different plot arguments
for different panels. See Details.
}
\item{panel.begin.args}{
Optional. List of additional arguments for \code{panel.begin}
when it is a function.
}
\item{panel.end.args}{
Optional. List of additional arguments for \code{panel.end}
when it is a function.
}
\item{panel.vpad}{
Amount of extra vertical space that should be allowed for the
title of each panel, if a title will be displayed.
Expressed as a fraction of the height of the panel.
Applies only when \code{equal.scales=FALSE} (the default).
}
\item{plotcommand}{
Optional.
Character string containing the name of the command that should be
executed to plot each panel.
}
\item{adorn.left,adorn.right,adorn.top,adorn.bottom}{
Optional. Functions (with no arguments) that will be executed to
generate additional plots at the margins (left, right, top and/or
bottom, respectively) of the array of plots.
}
\item{adorn.size}{
Relative width (as a fraction of the other panels' widths)
of the margin plots.
}
\item{equal.scales}{
Logical value indicating whether the components
should be plotted at (approximately) the same physical scale.
}
\item{halign,valign}{
Logical values indicating whether panels in a column
should be aligned to the same \eqn{x} coordinate system
(\code{halign=TRUE}) and whether panels in a row should
be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}).
These are applicable only if \code{equal.scales=TRUE}.
}
}
\value{
Null.
}
\details{
This is the \code{plot} method for the class \code{"solist"}.
An object of class \code{"solist"} represents a
list of two-dimensional spatial datasets.
This is the \code{plot} method for such objects.
In the \pkg{spatstat} package, various functions produce
an object of class \code{"solist"}.
These objects can be plotted in a nice arrangement
using \code{plot.solist}. See the Examples.
The argument \code{panel.args} determines extra graphics parameters
for each panel. It should be a function that will be called
as \code{panel.args(i)} where \code{i} is the panel number.
Its return value should be a list of graphics parameters that can
be passed to the relevant \code{plot} method. These parameters
override any parameters specified in the \code{\dots} arguments.
The arguments \code{panel.begin} and \code{panel.end}
determine graphics that will be plotted before and after
each panel is plotted. They may be objects
of some class that can be plotted
with the generic \code{plot} command. Alternatively they
may be functions that will be
called as \code{panel.begin(i, y, main=main.panel[i])}
and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel
number and \code{y = x[[i]]}.
If all entries of \code{x} are pixel images,
the function \code{\link{image.listof}} is called to control
the plotting. The arguments \code{equal.ribbon} and \code{col}
can be used to determine the colour map or maps applied.
If \code{equal.scales=FALSE} (the default), then the
plot panels will have equal height on the plot device
(unless there is only one column of panels, in which case
they will have equal width on the plot device). This means that the
objects are plotted at different physical scales, by default.
If \code{equal.scales=TRUE}, then the dimensions of the
plot panels on the plot device will be proportional
to the spatial dimensions of the
corresponding components of \code{x}. This means that the
objects will be plotted at \emph{approximately} equal physical scales.
If these objects have very different spatial sizes,
the plot command could fail (when it tries
to plot the smaller objects at a tiny scale), with an error
message that the figure margins are too large.
The objects will be plotted at \emph{exactly} equal
physical scales, and \emph{exactly} aligned on the device,
under the following conditions:
\itemize{
\item
every component of \code{x} is a spatial object
whose position can be shifted by \code{\link{shift}};
\item
\code{panel.begin} and \code{panel.end} are either
\code{NULL} or they are spatial objects
whose position can be shifted by \code{\link{shift}};
\item
\code{adorn.left},
\code{adorn.right},
\code{adorn.top} and
\code{adorn.bottom} are all \code{NULL}.
}
Another special case is when every component of \code{x} is an
object of class \code{"fv"} representing a function.
If \code{equal.scales=TRUE} then all these functions will
be plotted with the same axis scales
(i.e. with the same \code{xlim} and the same \code{ylim}).
}
\section{Spacing between plots}{
The spacing between individual plots is controlled by the parameters
\code{mar.panel}, \code{hsep} and \code{vsep}.
If \code{equal.scales=FALSE}, the plot panels are
logically separate plots. The margins for each panel are
determined by the argument \code{mar.panel} which becomes
the graphics parameter \code{mar}
described in the help file for \code{\link{par}}.
One unit of \code{mar} corresponds to one line of text in the margin.
If \code{hsep} or \code{vsep} are present, \code{mar.panel}
is augmented by \code{c(vsep, hsep, vsep, hsep)/2}.
If \code{equal.scales=TRUE}, all the plot panels are drawn
in the same coordinate system which represents a physical scale.
The unit of measurement for \code{mar.panel[1,3]}
is one-sixth of the greatest height of any object plotted in the same row
of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the
greatest width of any object plotted in the same column of panels.
If \code{hsep} or \code{vsep} are present,
they are interpreted in the same units as \code{mar.panel[2]}
and \code{mar.panel[1]} respectively.
}
\seealso{
\code{\link{plot.anylist}},
\code{\link{contour.listof}},
\code{\link{image.listof}},
\code{\link{density.splitppp}}
}
\section{Error messages}{
If the error message \sQuote{Figure margins too large}
occurs, this generally means that one of the
objects had a much smaller physical scale than the others.
Ensure that \code{equal.scales=FALSE}
and increase the values of \code{mar.panel}.
}
\examples{
# Intensity estimate of multitype point pattern
plot(D <- density(split(amacrine)))
plot(D, main="", equal.ribbon=TRUE,
panel.end=function(i,y,...){contour(y, ...)})
}
\author{\adrian
\rolf
and \ege
}
\keyword{spatial}
\keyword{hplot}
| /man/plot.solist.Rd | no_license | rubak/spatstat | R | false | false | 8,674 | rd | \name{plot.solist}
\alias{plot.solist}
\title{Plot a List of Spatial Objects}
\description{
Plots a list of two-dimensional spatial objects.
}
\usage{
\method{plot}{solist}(x, \dots, main, arrange=TRUE,
nrows=NULL, ncols=NULL, main.panel=NULL,
mar.panel=c(2,1,1,2), hsep=0, vsep=0,
panel.begin=NULL, panel.end=NULL, panel.args=NULL,
panel.begin.args=NULL, panel.end.args=NULL, panel.vpad = 0.2,
plotcommand="plot",
adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL,
adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE)
}
\arguments{
\item{x}{
An object of the class \code{"solist"},
essentially a list of two-dimensional spatial datasets.
}
\item{\dots}{
Arguments passed to \code{\link{plot}} when generating each
plot panel.
}
\item{main}{
Overall heading for the plot.
}
\item{arrange}{
Logical flag indicating whether to plot the objects
side-by-side on a single page (\code{arrange=TRUE})
or plot them individually in a succession of frames
(\code{arrange=FALSE}).
}
\item{nrows,ncols}{
Optional. The number of rows/columns in the plot layout
(assuming \code{arrange=TRUE}).
You can specify either or both of these numbers.
}
\item{main.panel}{
Optional. A character string, or a vector of character strings,
or a vector of expressions, giving the headings for each
plot panel.
}
\item{mar.panel}{
Size of the margins outside each plot panel.
A numeric vector of length 4 giving the bottom, left, top,
and right margins in that order. (Alternatively the vector
may have length 1 or 2 and will be replicated to length 4).
See the section on \emph{Spacing between plots}.
}
\item{hsep,vsep}{
Additional horizontal and vertical separation between plot panels,
expressed in the same units as \code{mar.panel}.
}
\item{panel.begin,panel.end}{
Optional. Functions that will be executed before and after each panel is
plotted. See Details.
}
\item{panel.args}{
Optional. Function that determines different plot arguments
for different panels. See Details.
}
\item{panel.begin.args}{
Optional. List of additional arguments for \code{panel.begin}
when it is a function.
}
\item{panel.end.args}{
Optional. List of additional arguments for \code{panel.end}
when it is a function.
}
\item{panel.vpad}{
Amount of extra vertical space that should be allowed for the
title of each panel, if a title will be displayed.
Expressed as a fraction of the height of the panel.
Applies only when \code{equal.scales=FALSE} (the default).
}
\item{plotcommand}{
Optional.
Character string containing the name of the command that should be
executed to plot each panel.
}
\item{adorn.left,adorn.right,adorn.top,adorn.bottom}{
Optional. Functions (with no arguments) that will be executed to
generate additional plots at the margins (left, right, top and/or
bottom, respectively) of the array of plots.
}
\item{adorn.size}{
Relative width (as a fraction of the other panels' widths)
of the margin plots.
}
\item{equal.scales}{
Logical value indicating whether the components
should be plotted at (approximately) the same physical scale.
}
\item{halign,valign}{
Logical values indicating whether panels in a column
should be aligned to the same \eqn{x} coordinate system
(\code{halign=TRUE}) and whether panels in a row should
be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}).
These are applicable only if \code{equal.scales=TRUE}.
}
}
\value{
Null.
}
\details{
This is the \code{plot} method for the class \code{"solist"}.
An object of class \code{"solist"} represents a
list of two-dimensional spatial datasets.
This is the \code{plot} method for such objects.
In the \pkg{spatstat} package, various functions produce
an object of class \code{"solist"}.
These objects can be plotted in a nice arrangement
using \code{plot.solist}. See the Examples.
The argument \code{panel.args} determines extra graphics parameters
for each panel. It should be a function that will be called
as \code{panel.args(i)} where \code{i} is the panel number.
Its return value should be a list of graphics parameters that can
be passed to the relevant \code{plot} method. These parameters
override any parameters specified in the \code{\dots} arguments.
The arguments \code{panel.begin} and \code{panel.end}
determine graphics that will be plotted before and after
each panel is plotted. They may be objects
of some class that can be plotted
with the generic \code{plot} command. Alternatively they
may be functions that will be
called as \code{panel.begin(i, y, main=main.panel[i])}
and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel
number and \code{y = x[[i]]}.
If all entries of \code{x} are pixel images,
the function \code{\link{image.listof}} is called to control
the plotting. The arguments \code{equal.ribbon} and \code{col}
can be used to determine the colour map or maps applied.
If \code{equal.scales=FALSE} (the default), then the
plot panels will have equal height on the plot device
(unless there is only one column of panels, in which case
they will have equal width on the plot device). This means that the
objects are plotted at different physical scales, by default.
If \code{equal.scales=TRUE}, then the dimensions of the
plot panels on the plot device will be proportional
to the spatial dimensions of the
corresponding components of \code{x}. This means that the
objects will be plotted at \emph{approximately} equal physical scales.
If these objects have very different spatial sizes,
the plot command could fail (when it tries
to plot the smaller objects at a tiny scale), with an error
message that the figure margins are too large.
The objects will be plotted at \emph{exactly} equal
physical scales, and \emph{exactly} aligned on the device,
under the following conditions:
\itemize{
\item
every component of \code{x} is a spatial object
whose position can be shifted by \code{\link{shift}};
\item
\code{panel.begin} and \code{panel.end} are either
\code{NULL} or they are spatial objects
whose position can be shifted by \code{\link{shift}};
\item
\code{adorn.left},
\code{adorn.right},
\code{adorn.top} and
\code{adorn.bottom} are all \code{NULL}.
}
Another special case is when every component of \code{x} is an
object of class \code{"fv"} representing a function.
If \code{equal.scales=TRUE} then all these functions will
be plotted with the same axis scales
(i.e. with the same \code{xlim} and the same \code{ylim}).
}
\section{Spacing between plots}{
The spacing between individual plots is controlled by the parameters
\code{mar.panel}, \code{hsep} and \code{vsep}.
If \code{equal.scales=FALSE}, the plot panels are
logically separate plots. The margins for each panel are
determined by the argument \code{mar.panel} which becomes
the graphics parameter \code{mar}
described in the help file for \code{\link{par}}.
One unit of \code{mar} corresponds to one line of text in the margin.
If \code{hsep} or \code{vsep} are present, \code{mar.panel}
is augmented by \code{c(vsep, hsep, vsep, hsep)/2}.
If \code{equal.scales=TRUE}, all the plot panels are drawn
in the same coordinate system which represents a physical scale.
The unit of measurement for \code{mar.panel[1,3]}
is one-sixth of the greatest height of any object plotted in the same row
of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the
greatest width of any object plotted in the same column of panels.
If \code{hsep} or \code{vsep} are present,
they are interpreted in the same units as \code{mar.panel[2]}
and \code{mar.panel[1]} respectively.
}
\seealso{
\code{\link{plot.anylist}},
\code{\link{contour.listof}},
\code{\link{image.listof}},
\code{\link{density.splitppp}}
}
\section{Error messages}{
If the error message \sQuote{Figure margins too large}
occurs, this generally means that one of the
objects had a much smaller physical scale than the others.
Ensure that \code{equal.scales=FALSE}
and increase the values of \code{mar.panel}.
}
\examples{
# Intensity estimate of multitype point pattern
plot(D <- density(split(amacrine)))
plot(D, main="", equal.ribbon=TRUE,
panel.end=function(i,y,...){contour(y, ...)})
}
\author{\adrian
\rolf
and \ege
}
\keyword{spatial}
\keyword{hplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{rmfi_rev_rainbow}
\alias{rmfi_rev_rainbow}
\title{Reversed rainbow color palette}
\usage{
rmfi_rev_rainbow(...)
}
\description{
Reversed rainbow color palette
}
\keyword{internal}
| /man/rmfi_rev_rainbow.Rd | no_license | rogiersbart/RMODFLOW | R | false | true | 276 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{rmfi_rev_rainbow}
\alias{rmfi_rev_rainbow}
\title{Reversed rainbow color palette}
\usage{
rmfi_rev_rainbow(...)
}
\description{
Reversed rainbow color palette
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnnotateChIA.R
\name{associate_centralities}
\alias{associate_centralities}
\title{Associates centrality scores and boolean centrality markers to regions.}
\usage{
associate_centralities(chia.obj, which.measures = c("Degree", "Betweenness",
"Eigenvector", "Closeness"), weight.attr = NULL)
}
\arguments{
\item{chia.obj}{ChIA-PET data, as returned by \code{\link{annotate_chia}}.}
\item{which.measures}{A vector containing the names of the measures to be used
to assess centrality. Those can be "Degree", "Betweenness" and "Eigenvector".}
\item{weight.attr}{The name of the edge attribute to be used as a weight in
centrality calculations.}
}
\value{
The annotated chia.obj.
}
\description{
Associates centrality scores and boolean centrality markers to regions.
}
| /man/associate_centralities.Rd | no_license | ehenrion/ChIAnalysis | R | false | true | 848 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnnotateChIA.R
\name{associate_centralities}
\alias{associate_centralities}
\title{Associates centrality scores and boolean centrality markers to regions.}
\usage{
associate_centralities(chia.obj, which.measures = c("Degree", "Betweenness",
"Eigenvector", "Closeness"), weight.attr = NULL)
}
\arguments{
\item{chia.obj}{ChIA-PET data, as returned by \code{\link{annotate_chia}}.}
\item{which.measures}{A vector containing the names of the measures to be used
to assess centrality. Those can be "Degree", "Betweenness" and "Eigenvector".}
\item{weight.attr}{The name of the edge attribute to be used as a weight in
centrality calculations.}
}
\value{
The annotated chia.obj.
}
\description{
Associates centrality scores and boolean centrality markers to regions.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pl_est_com.R
\name{pl_est_com}
\alias{pl_est_com}
\title{Fits the stochastic block model using maximum pseudolikelihood estimation}
\usage{
pl_est_com(X, K = NULL, max.iter = 1000, tol = 1e-08, parallel = FALSE)
}
\arguments{
\item{X}{n x n adjacency matrix}
\item{K}{number of communities; by default, chosen using the method of Le and Levina (2015)}
\item{max.iter}{maximum number of iterations for the EM algorithm}
\item{tol}{the EM algorithm stops when the relative tolerance is less than this value}
\item{parallel}{An optional argument allowing for parallel computing using the
doParallel package}
}
\value{
A list containing the following components:
\item{eta}{Estimate of eta, a K x K matrix defined in Amini et. al. (2013)}
\item{pi}{Estimate of the community membership probabilities}
\item{ploglik}{The maximum of the pseudolikelihood function}
\item{logphi}{n x K matrix, where (i, k)th entry contains the log p.m.f. of a multinomial
random variable with probability vector eta_k (the kth row of eta), evaluated at b_i,
which is the ith row of the block compression matrix defined in Amini et. al. (2013)}
\item{responsibilities}{n x K matrix containing the responsibilities/soft cluster
memberships for the n nodes}
\item{class}{A vector containing n (hard) cluster memberships for the n nodes}
\item{converged}{whether the algorithm converged to the desired tolerance}
}
\description{
Fits the stochastic block model using maximum
pseudolikelihood estimation, as proposed by Amini et. al. (2013).
This function implements the conditional pseudolikelihood algorithm
from Amini et al. (2013).
}
\examples{
# 50 draws from a stochastic block model for two network data views
# where the communities are dependent
n <- 50
Pi <- diag(c(0.5, 0.5))
theta1 <- rbind(c(0.5, 0.1), c(0.1, 0.5))
theta2 <- cbind(c(0.1, 0.5), c(0.5, 0.1))
dat <- mv_sbm_gen(n, Pi, theta1, theta2)
# Fit SBM to view 1
results <- pl_est_com(X=dat$data$view1, K = 2)
table(results$class, dat$communities$view1)
}
\references{
Amini, A. A., Chen, A., Bickel, P. J., & Levina, E. (2013).
Pseudo-likelihood methods for community detection in large sparse networks.
The Annals of Statistics, 41(4), 2097-2122.
Le, C. M., & Levina, E. (2015). Estimating the number of communities
in networks by spectral methods. arXiv preprint arXiv:1507.00827.
}
| /man/pl_est_com.Rd | no_license | cran/multiviewtest | R | false | true | 2,421 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pl_est_com.R
\name{pl_est_com}
\alias{pl_est_com}
\title{Fits the stochastic block model using maximum pseudolikelihood estimation}
\usage{
pl_est_com(X, K = NULL, max.iter = 1000, tol = 1e-08, parallel = FALSE)
}
\arguments{
\item{X}{n x n adjacency matrix}
\item{K}{number of communities; by default, chosen using the method of Le and Levina (2015)}
\item{max.iter}{maximum number of iterations for the EM algorithm}
\item{tol}{the EM algorithm stops when the relative tolerance is less than this value}
\item{parallel}{An optional argument allowing for parallel computing using the
doParallel package}
}
\value{
A list containing the following components:
\item{eta}{Estimate of eta, a K x K matrix defined in Amini et. al. (2013)}
\item{pi}{Estimate of the community membership probabilities}
\item{ploglik}{The maximum of the pseudolikelihood function}
\item{logphi}{n x K matrix, where (i, k)th entry contains the log p.m.f. of a multinomial
random variable with probability vector eta_k (the kth row of eta), evaluated at b_i,
which is the ith row of the block compression matrix defined in Amini et. al. (2013)}
\item{responsibilities}{n x K matrix containing the responsibilities/soft cluster
memberships for the n nodes}
\item{class}{A vector containing n (hard) cluster memberships for the n nodes}
\item{converged}{whether the algorithm converged to the desired tolerance}
}
\description{
Fits the stochastic block model using maximum
pseudolikelihood estimation, as proposed by Amini et. al. (2013).
This function implements the conditional pseudolikelihood algorithm
from Amini et al. (2013).
}
\examples{
# 50 draws from a stochastic block model for two network data views
# where the communities are dependent
n <- 50
Pi <- diag(c(0.5, 0.5))
theta1 <- rbind(c(0.5, 0.1), c(0.1, 0.5))
theta2 <- cbind(c(0.1, 0.5), c(0.5, 0.1))
dat <- mv_sbm_gen(n, Pi, theta1, theta2)
# Fit SBM to view 1
results <- pl_est_com(X=dat$data$view1, K = 2)
table(results$class, dat$communities$view1)
}
\references{
Amini, A. A., Chen, A., Bickel, P. J., & Levina, E. (2013).
Pseudo-likelihood methods for community detection in large sparse networks.
The Annals of Statistics, 41(4), 2097-2122.
Le, C. M., & Levina, E. (2015). Estimating the number of communities
in networks by spectral methods. arXiv preprint arXiv:1507.00827.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readInputs.R
\name{h5ReadClusterDesc}
\alias{h5ReadClusterDesc}
\title{Import cluster description}
\usage{
h5ReadClusterDesc(opts)
}
\arguments{
\item{opts}{\code{list} of simulation parameters returned by the function \link[antaresRead]{setSimulationPath}.}
}
\description{
This function imports the characteristics of each cluster from an h5 file see also \link[antaresRead]{readClusterDesc}.
Be aware that clusters descriptions are read in the input files so they may have changed since a simulation has been run.
}
| /man/h5ReadClusterDesc.Rd | no_license | rte-antares-rpackage/antaresHDF5 | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readInputs.R
\name{h5ReadClusterDesc}
\alias{h5ReadClusterDesc}
\title{Import cluster description}
\usage{
h5ReadClusterDesc(opts)
}
\arguments{
\item{opts}{\code{list} of simulation parameters returned by the function \link[antaresRead]{setSimulationPath}.}
}
\description{
This function imports the characteristics of each cluster from an h5 file see also \link[antaresRead]{readClusterDesc}.
Be aware that clusters descriptions are read in the input files so they may have changed since a simulation has been run.
}
|
/MacOSX10.2.8.sdk/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Headers/ApplicationServices.r | no_license | alexey-lysiuk/macos-sdk | R | false | false | 753 | r | ||
#' quantiles
#'
#' The function quantile produces quantiles corresponding to the given
#' probabilities. The smallest observation corresponds to a probability of 0 and the largest to a probability of 1.
#' Current implementation doesn't use the \code{type} parameter of \code{\link{quantile}}. For large \code{ff} vectors the
#' difference between the types is (very) small. If \code{x} has been \code{\link{ffordered}}, quantile is fast, otherwise it is $n log(n)$.
#' @method quantile ff
#' @param x \code{ff} vector
#' @param probs numeric vector of probabilities with values in [0,1].
#' @param na.rm logical; if true, any NA and NaN's are removed from x before the quantiles are computed.
#' @param names logical; if true, the result has a names attribute. Set to FALSE for speedup with many probs.
#' @param ... currently not used
#' @export
#' @importFrom stats quantile
quantile.ff <- function(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, ...){
  N <- length(x)
  # Percentage labels for the result, or NULL when names = FALSE.
  # (Original had the `else NULL` split onto its own line as a stray
  # statement; rejoined here.)
  nms <- if (names) paste(100 * probs, "%", sep = "") else NULL
  # 1-based positions of the requested order statistics in the sorted data.
  qnt <- 1L + as.integer(probs * (N - 1))
  # ffordered(x) returns the permutation that sorts x, so only the required
  # order statistics are pulled into RAM instead of sorting the whole vector.
  # NOTE(review): na.rm is accepted for signature compatibility but is not
  # used by this implementation -- confirm intended NA handling.
  idx <- ffordered(x)
  ql <- x[idx[qnt]]
  names(ql) <- nms
  ql
}
# x <- ff(1000000:1)
# #x <- addffIndex(x)
#
# quantile(x)
| /pkg/R/quantile_ff.R | no_license | arturochian/ffbase | R | false | false | 1,245 | r | #' quantiles
#'
#' The function quantile produces quantiles corresponding to the given
#' probabilities. The smallest observation corresponds to a probability of 0 and the largest to a probability of 1.
#' Current implementation doesn't use the \code{type} parameter of \code{\link{quantile}}. For large \code{ff} vectors the
#' difference between the types is (very) small. If \code{x} has been \code{\link{ffordered}}, quantile is fast, otherwise it is $n log(n)$.
#' @method quantile ff
#' @param x \code{ff} vector
#' @param probs numeric vector of probabilities with values in [0,1].
#' @param na.rm logical; if true, any NA and NaN's are removed from x before the quantiles are computed.
#' @param names logical; if true, the result has a names attribute. Set to FALSE for speedup with many probs.
#' @param ... currently not used
#' @export
#' @importFrom stats quantile
quantile.ff <- function(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, ...){
  N <- length(x)
  # Percentage labels for the result, or NULL when names = FALSE.
  # (Original had the `else NULL` split onto its own line as a stray
  # statement; rejoined here.)
  nms <- if (names) paste(100 * probs, "%", sep = "") else NULL
  # 1-based positions of the requested order statistics in the sorted data.
  qnt <- 1L + as.integer(probs * (N - 1))
  # ffordered(x) returns the permutation that sorts x, so only the required
  # order statistics are pulled into RAM instead of sorting the whole vector.
  # NOTE(review): na.rm is accepted for signature compatibility but is not
  # used by this implementation -- confirm intended NA handling.
  idx <- ffordered(x)
  ql <- x[idx[qnt]]
  names(ql) <- nms
  ql
}
# x <- ff(1000000:1)
# #x <- addffIndex(x)
#
# quantile(x)
|
# Take single layer of data and combine it with panel information to split
# data into different panels. Adds in extra data for missing facetting
# levels and for margins.
#
# @params data a data frame
# Match each row of `data` to a facetting panel, duplicating rows for facet
# variables missing from this layer and adding margin rows when requested.
locate_grid <- function(data, panels, rows = NULL, cols = NULL, margins = FALSE) {
  rows <- as.quoted(rows)
  cols <- as.quoted(cols)
  vars <- c(names(rows), names(cols))
  # Compute facetting values and add margins
  data <- add_margins(data, list(names(rows), names(cols)), margins)
  # Workaround for bug in reshape
  data <- unique(data)
  facet_vals <- quoted_df(data, c(rows, cols))
  # (review) Removed a dead `values <- compact(llply(...))` computation that
  # was never referenced afterwards.
  # If any facetting variables are missing, add them in by
  # duplicating the data
  missing_facets <- setdiff(vars, names(facet_vals))
  if (length(missing_facets) > 0) {
    to_add <- unique(panels[missing_facets])
    # seq_len() avoids the 1:0 == c(1, 0) trap when a zero-row input would
    # otherwise fabricate a bogus row.
    data_rep <- rep.int(seq_len(nrow(data)), nrow(to_add))
    facet_rep <- rep(seq_len(nrow(to_add)), each = nrow(data))
    data <- unrowname(data[data_rep, , drop = FALSE])
    facet_vals <- unrowname(cbind(
      facet_vals[data_rep, , drop = FALSE],
      to_add[facet_rep, , drop = FALSE]))
  }
  # Add PANEL variable
  if (nrow(facet_vals) == 0) {
    # Special case of no facetting
    data$PANEL <- 1
  } else {
    facet_vals[] <- lapply(facet_vals[], as.factor)
    keys <- join.keys(facet_vals, panels, by = vars)
    data$PANEL <- panels$PANEL[match(keys$x, keys$y)]
  }
  arrange(data, PANEL)
}
# Match each row of `data` to a wrapped facet panel, duplicating rows for
# facet variables missing from this layer.
locate_wrap <- function(data, panels, vars) {
  vars <- as.quoted(vars)
  facet_vals <- quoted_df(data, vars)
  facet_vals[] <- lapply(facet_vals[], as.factor)
  # Facet variables absent from this layer: replicate every row once per
  # combination found in `panels` so the layer appears in each panel.
  missing_facets <- setdiff(names(vars), names(facet_vals))
  if (length(missing_facets) > 0) {
    to_add <- unique(panels[missing_facets])
    # seq_len() instead of 1:nrow(): on a zero-row data frame 1:0 yields
    # c(1, 0) and would fabricate a bogus row; seq_len(0) is empty.
    data_rep <- rep.int(seq_len(nrow(data)), nrow(to_add))
    facet_rep <- rep(seq_len(nrow(to_add)), each = nrow(data))
    data <- unrowname(data[data_rep, , drop = FALSE])
    facet_vals <- unrowname(cbind(
      facet_vals[data_rep, , drop = FALSE],
      to_add[facet_rep, , drop = FALSE]))
  }
  # Match each row's facet values to its panel id.
  keys <- join.keys(facet_vals, panels, by = names(vars))
  data$PANEL <- panels$PANEL[match(keys$x, keys$y)]
  data[order(data$PANEL), ]
}
| /R/facet-locate.r | no_license | wligtenberg/ggplot2 | R | false | false | 2,255 | r | # Take single layer of data and combine it with panel information to split
# data into different panels. Adds in extra data for missing facetting
# levels and for margins.
#
# @params data a data frame
# Match each row of `data` to a facetting panel, duplicating rows for facet
# variables missing from this layer and adding margin rows when requested.
locate_grid <- function(data, panels, rows = NULL, cols = NULL, margins = FALSE) {
rows <- as.quoted(rows)
cols <- as.quoted(cols)
vars <- c(names(rows), names(cols))
# Compute facetting values and add margins
data <- add_margins(data, list(names(rows), names(cols)), margins)
# Workaround for bug in reshape
data <- unique(data)
facet_vals <- quoted_df(data, c(rows, cols))
# NOTE(review): `values` is computed here but never used below -- dead code.
values <- compact(llply(data, quoted_df, vars = c(rows, cols)))
# If any facetting variables are missing, add them in by
# duplicating the data
missing_facets <- setdiff(vars, names(facet_vals))
if (length(missing_facets) > 0) {
to_add <- unique(panels[missing_facets])
data_rep <- rep.int(1:nrow(data), nrow(to_add))
facet_rep <- rep(1:nrow(to_add), each = nrow(data))
data <- unrowname(data[data_rep, , drop = FALSE])
facet_vals <- unrowname(cbind(
facet_vals[data_rep, , drop = FALSE],
to_add[facet_rep, , drop = FALSE]))
}
# Add PANEL variable
if (nrow(facet_vals) == 0) {
# Special case of no facetting
data$PANEL <- 1
} else {
facet_vals[] <- lapply(facet_vals[], as.factor)
keys <- join.keys(facet_vals, panels, by = vars)
data$PANEL <- panels$PANEL[match(keys$x, keys$y)]
}
arrange(data, PANEL)
}
# Match each row of `data` to a wrapped facet panel, duplicating rows for
# facet variables missing from this layer.
locate_wrap <- function(data, panels, vars) {
vars <- as.quoted(vars)
# Evaluate the facetting variables against this layer's data.
facet_vals <- quoted_df(data, vars)
facet_vals[] <- lapply(facet_vals[], as.factor)
# Facet variables absent from this layer: replicate every row once per
# combination found in `panels` so the layer appears in each panel.
missing_facets <- setdiff(names(vars), names(facet_vals))
if (length(missing_facets) > 0) {
to_add <- unique(panels[missing_facets])
data_rep <- rep.int(1:nrow(data), nrow(to_add))
facet_rep <- rep(1:nrow(to_add), each = nrow(data))
data <- unrowname(data[data_rep, , drop = FALSE])
facet_vals <- unrowname(cbind(
facet_vals[data_rep, , drop = FALSE],
to_add[facet_rep, , drop = FALSE]))
}
# Match each row's facet values to its panel id.
keys <- join.keys(facet_vals, panels, by = names(vars))
data$PANEL <- panels$PANEL[match(keys$x, keys$y)]
data[order(data$PANEL), ]
}
|
#' files.list
#'
#' List files available via the DoubleClick Reporting API
#'
#' @title List files available via the DoubleClick Reporting API
#'
#' @param profileId The DFA profile ID - use userprofiles.list() to find yours
#' @param results The number of files to return. Defaults to 10, can be increased to make multiple API requests and get more files.
#' @param scope The scope that defines which results are returned, default is 'MINE'. Acceptable values are:
#' "ALL": All files in account.
#' "MINE": My files. (default)
#' "SHARED_WITH_ME": Files shared with me.
#' @param sortField The field by which to sort the list.
#' Acceptable values are:
#' "ID": Sort by file ID.
#' "LAST_MODIFIED_TIME": Sort by 'lastmodifiedAt' field. (default)
#' @param sortOrder Order of sorted results, default is 'DESCENDING', 'ASCENDING' is also valid.
#' @param fields Subset of fields to include in the report
#'
#' @importFrom httr GET stop_for_status content config
#' @importFrom jsonlite fromJSON
#' @importFrom plyr rbind.fill
#'
#' @return File list (data table)
#'
#' @examples
#' \dontrun{
#' files.list(1234567,results=100)
#'
#' }
#'
#' @export
files.list <- function(profileId, results=10, scope='', sortField='', sortOrder='', fields=''){
# build query string
# NOTE(review): maxResults is only assigned when results >= 10; a call with
# results < 10 leaves it undefined and the paste0() below errors with
# "object 'maxResults' not found". `maxResults <- min(results, 10)` looks
# intended.
if(results>=10) {
maxResults <- 10
}
q.string <- paste0("maxResults=",maxResults)
# Append optional query parameters only when supplied (nchar(x) is 0, hence
# falsy, for the '' defaults).
if(nchar(scope)) {
q.string <- paste0(q.string,"&scope=",scope)
}
if(nchar(sortField)) {
q.string <- paste0(q.string,"&sortField=",sortField)
}
if(nchar(sortOrder)) {
q.string <- paste0(q.string,"&sortOrder=",sortOrder)
}
if(nchar(fields)) {
q.string <- paste0(q.string,"&fields=",fields)
}
report <- data.frame()
workingResultNum <- 0
pageToken <- ""
hasNextPage <- TRUE
# Page through the API until `results` rows are collected or no further
# page token is returned.
while((workingResultNum<results)&&hasNextPage==TRUE) {
if(nchar(pageToken)) {
req.q.string <- paste0(q.string,"&pageToken=",pageToken)
} else {
req.q.string <- q.string
}
req.url <- paste0("https://www.googleapis.com/dfareporting/v2.2/userprofiles/",profileId,"/files")
response <- api.request(req.url,querystring=req.q.string)
response <- fromJSON(response)
# Flatten the data frame for binding
response$items$startDate <- response$items$dateRange$startDate
response$items$endDate <- response$items$dateRange$endDate
response$items$dateRange <- NULL
response$items$browserUrl <- response$items$urls$browserUrl
response$items$apiUrl <- response$items$urls$apiUrl
response$items$urls <- NULL
if(nrow(report)>0) {
report <- rbind.fill(report,data.frame(response$items))
} else {
report <- data.frame(response$items)
}
# NOTE(review): nextPageToken is absent (NULL) on the final page, so
# nchar(pageToken) below returns integer(0) and the if() fails with
# "argument is of length zero". Normalising NULL to "" would fix this.
pageToken <- response$nextPageToken
workingResultNum <- nrow(report)
if(nchar(pageToken)){
hasNextPage <- TRUE
} else {
hasNextPage <- FALSE
}
}
return(report)
} | /R/files.list.R | permissive | heck1/RDoubleClick | R | false | false | 2,904 | r | #' files.list
#'
#' List files available via the DoubleClick Reporting API
#'
#' @title List files available via the DoubleClick Reporting API
#'
#' @param profileId The DFA profile ID - use userprofiles.list() to find yours
#' @param results The number of files to return. Defaults to 10, can be increased to make multiple API requests and get more files.
#' @param scope The scope that defines which results are returned, default is 'MINE'. Acceptable values are:
#' "ALL": All files in account.
#' "MINE": My files. (default)
#' "SHARED_WITH_ME": Files shared with me.
#' @param sortField The field by which to sort the list.
#' Acceptable values are:
#' "ID": Sort by file ID.
#' "LAST_MODIFIED_TIME": Sort by 'lastmodifiedAt' field. (default)
#' @param sortOrder Order of sorted results, default is 'DESCENDING', 'ASCENDING' is also valid.
#' @param fields Subset of fields to include in the report
#'
#' @importFrom httr GET stop_for_status content config
#' @importFrom jsonlite fromJSON
#' @importFrom plyr rbind.fill
#'
#' @return File list (data table)
#'
#' @examples
#' \dontrun{
#' files.list(1234567,results=100)
#'
#' }
#'
#' @export
files.list <- function(profileId, results=10, scope='', sortField='', sortOrder='', fields=''){
  # Page size for each request: the API caps a page at 10 results, and we
  # never ask for more than the caller wants.
  # (Fix: previously maxResults was only assigned when results >= 10, so a
  # call with results < 10 failed with "object 'maxResults' not found".)
  maxResults <- min(results, 10)
  # build query string, appending optional parameters only when supplied
  q.string <- paste0("maxResults=",maxResults)
  if(nchar(scope)) {
    q.string <- paste0(q.string,"&scope=",scope)
  }
  if(nchar(sortField)) {
    q.string <- paste0(q.string,"&sortField=",sortField)
  }
  if(nchar(sortOrder)) {
    q.string <- paste0(q.string,"&sortOrder=",sortOrder)
  }
  if(nchar(fields)) {
    q.string <- paste0(q.string,"&fields=",fields)
  }
  report <- data.frame()
  workingResultNum <- 0
  pageToken <- ""
  hasNextPage <- TRUE
  # Page through the API until `results` rows are collected or the API stops
  # returning a nextPageToken.
  while((workingResultNum<results)&&hasNextPage==TRUE) {
    if(nchar(pageToken)) {
      req.q.string <- paste0(q.string,"&pageToken=",pageToken)
    } else {
      req.q.string <- q.string
    }
    req.url <- paste0("https://www.googleapis.com/dfareporting/v2.2/userprofiles/",profileId,"/files")
    response <- api.request(req.url,querystring=req.q.string)
    response <- fromJSON(response)
    # Flatten the nested dateRange/urls data frames for binding
    response$items$startDate <- response$items$dateRange$startDate
    response$items$endDate <- response$items$dateRange$endDate
    response$items$dateRange <- NULL
    response$items$browserUrl <- response$items$urls$browserUrl
    response$items$apiUrl <- response$items$urls$apiUrl
    response$items$urls <- NULL
    if(nrow(report)>0) {
      report <- rbind.fill(report,data.frame(response$items))
    } else {
      report <- data.frame(response$items)
    }
    # Fix: nextPageToken is absent (NULL) on the final page; nchar(NULL) is
    # integer(0) and made the original `if (nchar(pageToken))` fail with
    # "argument is of length zero". Normalise to "" instead.
    pageToken <- response$nextPageToken
    if(is.null(pageToken)) {
      pageToken <- ""
    }
    workingResultNum <- nrow(report)
    hasNextPage <- nchar(pageToken) > 0
  }
  return(report)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JJF.R
\name{bri.gpr}
\alias{bri.gpr}
\title{Gaussian Process Regression in 1D}
\usage{
bri.gpr(x, y, pcprior, nbasis = 25, degree = 2, alpha = 2, xout = x,
sigma0 = sd(y), rho0 = 0.25 * (max(x) - min(x)))
}
\arguments{
\item{x}{the predictor vector}
\item{y}{the response vector}
\item{pcprior}{limits for the penalised complexity prior (optional). If specified it should be a vector
of the form c(r,s) where P(range < r) = 0.05 and P(SD(y) > s) = 0.05}
\item{nbasis}{- number of basis functions for the spline (default is 25)}
\item{degree}{- degree for splines (default is 2) - allowable possibilities are 0, 1 or 2.}
\item{alpha}{- controls shape of the GP kernel (default is 2) - 0 < alpha <=2 is possible}
\item{xout}{- grid on which posterior will be calculated (default is x)}
\item{sigma0}{- prior mean for the signal SD (default is SD(y))}
\item{rho0}{- prior mean for the range}
}
\value{
list consisting of xout, the posterior mean, the lower 95% credibility band,
the upper 95% credibility band and the INLA object containing the fit
}
\description{
Gaussian Process Regression in 1D
}
\author{
Julian Faraway, \email{jjf23@bath.ac.uk}
}
| /man/bri.gpr.Rd | no_license | nemochina2008/brinla | R | false | true | 1,238 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JJF.R
\name{bri.gpr}
\alias{bri.gpr}
\title{Gaussian Process Regression in 1D}
\usage{
bri.gpr(x, y, pcprior, nbasis = 25, degree = 2, alpha = 2, xout = x,
sigma0 = sd(y), rho0 = 0.25 * (max(x) - min(x)))
}
\arguments{
\item{x}{the predictor vector}
\item{y}{the response vector}
\item{pcprior}{limits for the penalised complexity prior (optional). If specified it should be a vector
of the form c(r,s) where P(range < r) = 0.05 and P(SD(y) > s) = 0.05}
\item{nbasis}{- number of basis functions for the spline (default is 25)}
\item{degree}{- degree for splines (default is 2) - allowable possibilities are 0, 1 or 2.}
\item{alpha}{- controls shape of the GP kernel (default is 2) - 0 < alpha <=2 is possible}
\item{xout}{- grid on which posterior will be calculated (default is x)}
\item{sigma0}{- prior mean for the signal SD (default is SD(y))}
\item{rho0}{- prior mean for the range}
}
\value{
list consisting of xout, the posterior mean, the lower 95% credibility band,
the upper 95% credibility band and the INLA object containing the fit
}
\description{
Gaussian Process Regression in 1D
}
\author{
Julian Faraway, \email{jjf23@bath.ac.uk}
}
|
setOldClass("ModelFrame")
setOldClass("recipe")
#' Resampling Classes and Methods
#'
#' @name MLControl-class
#' @rdname MLControl-class
#'
#' @slot summary function to compute model performance metrics.
#' @slot cutoff threshold above which probabilities are classified as success
#' for factor outcomes and which expected values are rounded for integer
#' outcomes.
#' @slot cutoff_index function to calculate a desired sensitivity-specificity
#' tradeoff.
#' @slot surv_times numeric vector of follow-up times at which to predict
#' survival events.
#' @slot na.rm logical indicating whether to remove observed or predicted
#' responses that are \code{NA} when calculating model metrics.
#' @slot seed integer to set the seed at the start of resampling.
#'
setClass("MLControl",
slots = c(summary = "function", cutoff = "numeric", cutoff_index = "function",
surv_times = "numeric", na.rm = "logical", seed = "numeric"),
contains = "VIRTUAL"
)
#' The base MLControl constructor initializes a set of parameters that are common
#' to all resampling methods.
#'
#' @rdname MLControl-class
#' @aliases initialize,MLControl-method
#'
#' @param .Object class object being initialized.
#' @param summary function to compute model performance metrics.
#' @param cutoff threshold above which probabilities are classified as success
#' for factor outcomes and which expected values are rounded for integer
#' outcomes.
#' @param cutoff_index function to calculate a desired sensitivity-specificity
#' tradeoff.
#' @param surv_times numeric vector of follow-up times at which to predict
#' survival events.
#' @param na.rm logical indicating whether to remove observed or predicted
#' responses that are \code{NA} when calculating model metrics.
#' @param seed integer to set the seed at the start of resampling. This is set
#' to a random integer by default (NULL).
#' @param ... arguments to be passed to or from other methods.
#'
#' @return MLControl class object.
#'
#' @seealso \code{\link{resample}}, \code{\link{modelmetrics}}
#'
setMethod("initialize", "MLControl",
function(.Object, summary = modelmetrics, cutoff = 0.5,
cutoff_index = function(sens, spec) sens + spec,
surv_times = numeric(), na.rm = TRUE, seed = NULL, ...) {
if (is.null(seed)) seed <- sample.int(.Machine$integer.max, 1)
callNextMethod(.Object, summary = summary, cutoff = cutoff,
cutoff_index = cutoff_index, surv_times = surv_times,
na.rm = na.rm, seed = seed, ...)
}
)
#' \code{BootControl} constructs an MLControl object for simple bootstrap
#' resampling in which models are fit with bootstrap resampled training sets and
#' used to predict the full data set.
#'
#' @name BootControl
#' @rdname MLControl-class
#'
#' @param samples number of bootstrap samples.
#'
#' @examples
#' ## 100 bootstrap samples
#' BootControl(samples = 100)
#'
BootControl <- function(samples = 25, ...) {
new("BootMLControl", samples = samples, ...)
}
setClass("BootMLControl",
slots = c(samples = "numeric"),
contains = "MLControl"
)
#' \code{CVControl} constructs an MLControl object for repeated K-fold
#' cross-validation. In this procedure, the full data set is repeatedly
#' partitioned into K-folds. Within a partitioning, prediction is performed on each
#' of the K folds with models fit on all remaining folds.
#'
#' @name CVControl
#' @rdname MLControl-class
#'
#' @param folds number of cross-validation folds (K).
#' @param repeats number of repeats of the K-fold partitioning.
#'
#' @examples
#' ## 5 repeats of 10-fold cross-validation
#' CVControl(folds = 10, repeats = 5)
#'
CVControl <- function(folds = 10, repeats = 1, ...) {
new("CVMLControl", folds = folds, repeats = repeats, ...)
}
setClass("CVMLControl",
slots = c(folds = "numeric", repeats = "numeric"),
contains = "MLControl"
)
#' \code{OOBControl} constructs an MLControl object for out-of-bootstrap
#' resampling in which models are fit with bootstrap resampled training sets and
#' used to predict the unsampled cases.
#'
#' @name OOBControl
#' @rdname MLControl-class
#'
#' @examples
#' ## 100 out-of-bootstrap samples
#' OOBControl(samples = 100)
#'
OOBControl <- function(samples = 25, ...) {
new("OOBMLControl", samples = samples, ...)
}
setClass("OOBMLControl",
slots = c(samples = "numeric"),
contains = "MLControl"
)
#' \code{SplitControl} constructs an MLControl object for splitting data into a
#' separate training and test set.
#'
#' @param prop proportion of cases to include in the training set
#' (\code{0 < prop < 1}).
#'
#' @name SplitControl
#' @rdname MLControl-class
#'
#' @examples
#' SplitControl(prop = 2/3)
#'
SplitControl <- function(prop = 2/3, ...) {
new("SplitMLControl", prop = prop, ...)
}
setClass("SplitMLControl",
slots = c(prop = "numeric"),
contains = "MLControl"
)
#' \code{TrainControl} constructs an MLControl object for training and
#' performance evaluation to be performed on the same training set.
#'
#' @name TrainControl
#' @rdname MLControl-class
#'
#' @examples
#' TrainControl()
#'
TrainControl <- function(...) {
new("TrainMLControl", ...)
}
setClass("TrainMLControl",
contains = "MLControl"
)
MLFitBits <- setClass("MLFitBits",
slots = c(packages = "character",
predict = "function",
varimp = "function",
x = "ANY",
y = "ANY")
)
#' MLModel Class Constructor
#'
#' @param name character string name for the instantiated MLModel object.
#' @param packages character vector of packages required by the object.
#' @param types character vector of response variable types on which the model
#' can be fit.
#' @param params list of user-specified model parameters.
#' @param nvars function to return the number of predictor variables for a
#' given model frame.
#' @param fit model fitting function.
#' @param predict model prediction function.
#' @param varimp variable importance function.
#'
MLModel <- function(name = "MLModel", packages = character(0),
types = character(0), params = list(),
nvars = function(data) NULL,
fit = function(formula, data, weights, ...)
stop("no fit function"),
predict = function(object, newdata, times, ...)
stop("no predict function"),
varimp = function(object, ...)
stop("no varimp function")) {
new("MLModel",
name = name,
packages = packages,
types = types,
params = params,
nvars = nvars,
fit = fit,
fitbits = MLFitBits(packages = packages,
predict = predict,
varimp = varimp))
}
setClass("MLModel",
slots = c(name = "character",
packages = "character",
types = "character",
params = "list",
nvars = "function",
fit = "function",
fitbits = "MLFitBits")
)
# Virtual parent for fitted-model wrappers; carries the MLFitBits used for
# prediction and variable importance after fitting.
setClass("MLModelFit",
  slots = c(fitbits = "MLFitBits"),
  contains = "VIRTUAL"
)
# Fix: the original wrote `contain =`, which only worked through R's partial
# matching of argument names; spell out `contains =` as in the class below.
setClass("SVMModelFit", contains = c("MLModelFit", "ksvm"))
setClass("CForestModelFit", contains = c("MLModelFit", "RandomForest"))
#' Resamples Class Constructor
#'
#' Create an object of resampled performance metrics from one or more models.
#'
#' @param response data frame of resampled observed and predicted responses.
#' @param control MLControl object used to generate the resample output.
#' @param ... named or unnamed resample output from one or more models.
#'
#' @details Argument \code{control} need only be specified if the supplied
#' output is not a Resamples object. Output being combined from more than one
#' model must have been generated with the same resampling object and
#' performance metrics.
#'
#' @return Resamples class object.
#'
#' @seealso \code{\link{resample}}, \code{\link{plot}}, \code{\link{summary}}
#'
#' @examples
#' ## Factor response example
#'
#' fo <- factor(Species) ~ .
#' control <- CVControl()
#'
#' gbmperf1 <- resample(fo, iris, GBMModel(n.trees = 25), control)
#' gbmperf2 <- resample(fo, iris, GBMModel(n.trees = 50), control)
#' gbmperf3 <- resample(fo, iris, GBMModel(n.trees = 100), control)
#'
#' perf <- Resamples(GBM1 = gbmperf1, GBM2 = gbmperf2, GBM3 = gbmperf3)
#' summary(perf)
#' plot(perf)
#'
Resamples <- function(..., control, response = data.frame()) {
new("Resamples", ..., control = control, response = response)
}
setClass("Resamples",
slots = c(control = "MLControl", response = "data.frame"),
contains = "array"
)
setMethod("initialize", "Resamples",
function(.Object, ..., control, response = data.frame()) {
args <- list(...)
if (length(args) == 0) stop("no values given")
.Data <- args[[1]]
if (length(args) == 1) {
if (is(.Data, "Resamples")) {
response <- .Data@response
control <- .Data@control
}
} else {
if (!all(sapply(args, function(x) is(x, "Resamples") && is.matrix(x)))) {
stop("values to combine must be 2 dimensional Resamples objects")
}
control <- .Data@control
is_equal_control <- function(x) isTRUE(all.equal(x@control, control))
if (!all(sapply(args, is_equal_control))) {
stop("resamples have different control structures")
}
if (!all(sapply(args, colnames) == colnames(.Data))) {
stop("resamples contain different metrics")
}
modelnames <- names(args)
if (is.null(modelnames)) modelnames <- paste0("Model", seq(args))
names(args) <- NULL
args$along <- 3
args$new.names <- list(NULL, NULL, modelnames)
.Data <- do.call(abind, args)
}
callNextMethod(.Object, .Data, control = control, response = response)
}
)
MLModelTune <- setClass("MLModelTune",
slots = c(grid = "data.frame", resamples = "Resamples", selected = "numeric"),
contains = "MLModel"
)
ResamplesDiff <- setClass("ResamplesDiff",
slots = c("modelnames" = "character"),
contains = "Resamples"
)
setMethod("initialize", "ResamplesDiff",
function(.Object, ..., modelnames) {
.Object <- callNextMethod(.Object, ...)
.Object@modelnames <- modelnames
.Object
}
)
ResamplesHTest <- setClass("ResamplesHTest",
slots = c("adjust" = "character"),
contains = "array"
)
# Variable-importance table: a data.frame ordered by total importance.
VarImp <- setClass("VarImp", contains = "data.frame")
setMethod("initialize", "VarImp",
# .Data: data frame of importance values (one column per measure).
# scale: if TRUE, rescale importances linearly onto the 0-100 range.
function(.Object, .Data, scale = FALSE, ...) {
# Order rows by total importance, largest first.
idx <- order(rowSums(.Data), decreasing = TRUE)
# Multiplying by the logical zeroes out the "(Intercept)" position; a 0
# index is silently dropped when subsetting, which removes that row.
idx <- idx * (rownames(.Data)[idx] != "(Intercept)")
.Data <- .Data[idx, , drop = FALSE]
if (scale) .Data <- 100 * (.Data - min(.Data)) / diff(range(.Data))
callNextMethod(.Object, .Data, ...)
}
)
# A non-empty VarImp must carry row names (the variable labels).
setValidity("VarImp", function(object) {
!(nrow(object) && is.null(rownames(object)))
})
| /R/AllClasses.R | no_license | guhjy/MachineShop | R | false | false | 10,860 | r | setOldClass("ModelFrame")
setOldClass("recipe")
#' Resampling Classes and Methods
#'
#' @name MLControl-class
#' @rdname MLControl-class
#'
#' @slot summary function to compute model performance metrics.
#' @slot cutoff threshold above which probabilities are classified as success
#' for factor outcomes and which expected values are rounded for integer
#' outcomes.
#' @slot cutoff_index function to calculate a desired sensitivity-specificity
#' tradeoff.
#' @slot surv_times numeric vector of follow-up times at which to predict
#' survival events.
#' @slot na.rm logical indicating whether to remove observed or predicted
#' responses that are \code{NA} when calculating model metrics.
#' @slot seed integer to set the seed at the start of resampling.
#'
setClass("MLControl",
slots = c(summary = "function", cutoff = "numeric", cutoff_index = "function",
surv_times = "numeric", na.rm = "logical", seed = "numeric"),
contains = "VIRTUAL"
)
#' The base MLControl constructor initializes a set of parameters that are common
#' to all resampling methods.
#'
#' @rdname MLControl-class
#' @aliases initialize,MLControl-method
#'
#' @param .Object class object being initialized.
#' @param summary function to compute model performance metrics.
#' @param cutoff threshold above which probabilities are classified as success
#' for factor outcomes and which expected values are rounded for integer
#' outcomes.
#' @param cutoff_index function to calculate a desired sensitivity-specificity
#' tradeoff.
#' @param surv_times numeric vector of follow-up times at which to predict
#' survival events.
#' @param na.rm logical indicating whether to remove observed or predicted
#' responses that are \code{NA} when calculating model metrics.
#' @param seed integer to set the seed at the start of resampling. This is set
#' to a random integer by default (NULL).
#' @param ... arguments to be passed to or from other methods.
#'
#' @return MLControl class object.
#'
#' @seealso \code{\link{resample}}, \code{\link{modelmetrics}}
#'
setMethod("initialize", "MLControl",
function(.Object, summary = modelmetrics, cutoff = 0.5,
cutoff_index = function(sens, spec) sens + spec,
surv_times = numeric(), na.rm = TRUE, seed = NULL, ...) {
if (is.null(seed)) seed <- sample.int(.Machine$integer.max, 1)
callNextMethod(.Object, summary = summary, cutoff = cutoff,
cutoff_index = cutoff_index, surv_times = surv_times,
na.rm = na.rm, seed = seed, ...)
}
)
#' \code{BootControl} constructs an MLControl object for simple bootstrap
#' resampling in which models are fit with bootstrap resampled training sets and
#' used to predict the full data set.
#'
#' @name BootControl
#' @rdname MLControl-class
#'
#' @param samples number of bootstrap samples.
#'
#' @examples
#' ## 100 bootstrap samples
#' BootControl(samples = 100)
#'
BootControl <- function(samples = 25, ...) {
new("BootMLControl", samples = samples, ...)
}
setClass("BootMLControl",
slots = c(samples = "numeric"),
contains = "MLControl"
)
#' \code{CVControl} constructs an MLControl object for repeated K-fold
#' cross-validation. In this procedure, the full data set is repeatedly
#' partitioned into K-folds. Within a partitioning, prediction is performed on each
#' of the K folds with models fit on all remaining folds.
#'
#' @name CVControl
#' @rdname MLControl-class
#'
#' @param folds number of cross-validation folds (K).
#' @param repeats number of repeats of the K-fold partitioning.
#'
#' @examples
#' ## 5 repeats of 10-fold cross-validation
#' CVControl(folds = 10, repeats = 5)
#'
CVControl <- function(folds = 10, repeats = 1, ...) {
new("CVMLControl", folds = folds, repeats = repeats, ...)
}
setClass("CVMLControl",
slots = c(folds = "numeric", repeats = "numeric"),
contains = "MLControl"
)
#' \code{OOBControl} constructs an MLControl object for out-of-bootstrap
#' resampling in which models are fit with bootstrap resampled training sets and
#' used to predict the unsampled cases.
#'
#' @name OOBControl
#' @rdname MLControl-class
#'
#' @examples
#' ## 100 out-of-bootstrap samples
#' OOBControl(samples = 100)
#'
OOBControl <- function(samples = 25, ...) {
new("OOBMLControl", samples = samples, ...)
}
setClass("OOBMLControl",
slots = c(samples = "numeric"),
contains = "MLControl"
)
#' \code{SplitControl} constructs an MLControl object for splitting data into a
#' separate training and test set.
#'
#' @param prop proportion of cases to include in the training set
#' (\code{0 < prop < 1}).
#'
#' @name SplitControl
#' @rdname MLControl-class
#'
#' @examples
#' SplitControl(prop = 2/3)
#'
SplitControl <- function(prop = 2/3, ...) {
new("SplitMLControl", prop = prop, ...)
}
setClass("SplitMLControl",
slots = c(prop = "numeric"),
contains = "MLControl"
)
#' \code{TrainControl} constructs an MLControl object for training and
#' performance evaluation to be performed on the same training set.
#'
#' @name TrainControl
#' @rdname MLControl-class
#'
#' @examples
#' TrainControl()
#'
TrainControl <- function(...) {
new("TrainMLControl", ...)
}
setClass("TrainMLControl",
contains = "MLControl"
)
MLFitBits <- setClass("MLFitBits",
slots = c(packages = "character",
predict = "function",
varimp = "function",
x = "ANY",
y = "ANY")
)
#' MLModel Class Constructor
#'
#' @param name character string name for the instantiated MLModel object.
#' @param packages character vector of packages required by the object.
#' @param types character vector of response variable types on which the model
#' can be fit.
#' @param params list of user-specified model parameters.
#' @param nvars function to return the number of predictor variables for a
#' given model frame.
#' @param fit model fitting function.
#' @param predict model prediction function.
#' @param varimp variable importance function.
#'
MLModel <- function(name = "MLModel", packages = character(0),
types = character(0), params = list(),
nvars = function(data) NULL,
fit = function(formula, data, weights, ...)
stop("no fit function"),
predict = function(object, newdata, times, ...)
stop("no predict function"),
varimp = function(object, ...)
stop("no varimp function")) {
new("MLModel",
name = name,
packages = packages,
types = types,
params = params,
nvars = nvars,
fit = fit,
fitbits = MLFitBits(packages = packages,
predict = predict,
varimp = varimp))
}
setClass("MLModel",
slots = c(name = "character",
packages = "character",
types = "character",
params = "list",
nvars = "function",
fit = "function",
fitbits = "MLFitBits")
)
# Virtual parent for fitted-model wrappers; carries the MLFitBits used for
# prediction and variable importance after fitting.
setClass("MLModelFit",
  slots = c(fitbits = "MLFitBits"),
  contains = "VIRTUAL"
)
# Fix: the original wrote `contain =`, which only worked through R's partial
# matching of argument names; spell out `contains =` as in the class below.
setClass("SVMModelFit", contains = c("MLModelFit", "ksvm"))
setClass("CForestModelFit", contains = c("MLModelFit", "RandomForest"))
#' Resamples Class Constructor
#'
#' Create an object of resampled performance metrics from one or more models.
#'
#' @param response data frame of resampled observed and predicted responses.
#' @param control MLControl object used to generate the resample output.
#' @param ... named or unnamed resample output from one or more models.
#'
#' @details Argument \code{control} need only be specified if the supplied
#' output is not a Resamples object. Output being combined from more than one
#' model must have been generated with the same resampling object and
#' performance metrics.
#'
#' @return Resamples class object.
#'
#' @seealso \code{\link{resample}}, \code{\link{plot}}, \code{\link{summary}}
#'
#' @examples
#' ## Factor response example
#'
#' fo <- factor(Species) ~ .
#' control <- CVControl()
#'
#' gbmperf1 <- resample(fo, iris, GBMModel(n.trees = 25), control)
#' gbmperf2 <- resample(fo, iris, GBMModel(n.trees = 50), control)
#' gbmperf3 <- resample(fo, iris, GBMModel(n.trees = 100), control)
#'
#' perf <- Resamples(GBM1 = gbmperf1, GBM2 = gbmperf2, GBM3 = gbmperf3)
#' summary(perf)
#' plot(perf)
#'
Resamples <- function(..., control, response = data.frame()) {
new("Resamples", ..., control = control, response = response)
}
setClass("Resamples",
slots = c(control = "MLControl", response = "data.frame"),
contains = "array"
)
setMethod("initialize", "Resamples",
function(.Object, ..., control, response = data.frame()) {
args <- list(...)
if (length(args) == 0) stop("no values given")
.Data <- args[[1]]
if (length(args) == 1) {
if (is(.Data, "Resamples")) {
response <- .Data@response
control <- .Data@control
}
} else {
if (!all(sapply(args, function(x) is(x, "Resamples") && is.matrix(x)))) {
stop("values to combine must be 2 dimensional Resamples objects")
}
control <- .Data@control
is_equal_control <- function(x) isTRUE(all.equal(x@control, control))
if (!all(sapply(args, is_equal_control))) {
stop("resamples have different control structures")
}
if (!all(sapply(args, colnames) == colnames(.Data))) {
stop("resamples contain different metrics")
}
modelnames <- names(args)
if (is.null(modelnames)) modelnames <- paste0("Model", seq(args))
names(args) <- NULL
args$along <- 3
args$new.names <- list(NULL, NULL, modelnames)
.Data <- do.call(abind, args)
}
callNextMethod(.Object, .Data, control = control, response = response)
}
)
MLModelTune <- setClass("MLModelTune",
slots = c(grid = "data.frame", resamples = "Resamples", selected = "numeric"),
contains = "MLModel"
)
ResamplesDiff <- setClass("ResamplesDiff",
slots = c("modelnames" = "character"),
contains = "Resamples"
)
setMethod("initialize", "ResamplesDiff",
function(.Object, ..., modelnames) {
.Object <- callNextMethod(.Object, ...)
.Object@modelnames <- modelnames
.Object
}
)
ResamplesHTest <- setClass("ResamplesHTest",
slots = c("adjust" = "character"),
contains = "array"
)
VarImp <- setClass("VarImp", contains = "data.frame")
setMethod("initialize", "VarImp",
function(.Object, .Data, scale = FALSE, ...) {
idx <- order(rowSums(.Data), decreasing = TRUE)
idx <- idx * (rownames(.Data)[idx] != "(Intercept)")
.Data <- .Data[idx, , drop = FALSE]
if (scale) .Data <- 100 * (.Data - min(.Data)) / diff(range(.Data))
callNextMethod(.Object, .Data, ...)
}
)
setValidity("VarImp", function(object) {
!(nrow(object) && is.null(rownames(object)))
})
|
# Exercise 1: practice with basic R syntax
# Create a variable `hometown` that stores the city in which you were born
hometown <- "Palo Alto"
# Assign your name to the variable `my_name`
my_name <- "Ryan Lee"
# Assign your height (in inches) to a variable `my_height`
# (fix: stored as a number rather than the string "66", so it can be used
# in arithmetic)
my_height <- 66
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 2
# Create a variable `puppy_price`, which is how much you think a puppy costs
puppy_price <- 20
# Create a variable `total_cost` that has the total cost of all of your puppies
total_cost <- puppies * puppy_price
# Create a boolean variable `too_expensive`, set to TRUE if the cost is greater
# than $1,000
too_expensive <- total_cost > 1000
# Create a variable `max_puppies`, which is the number of puppies you can
# afford for $1,000
max_puppies <- 1000 / puppy_price | /chapter-05-exercises/exercise-1/exercise.R | permissive | CocoaCommander/book-exercises | R | false | false | 856 | r | # Exercise 1: practice with basic R syntax
# Create a variable `hometown` that stores the city in which you were born
hometown <- "Palo Alto"
# Assign your name to the variable `my_name`
my_name <- "Ryan Lee"
# Assign your height (in inches) to a variable `my_height`
# NOTE(review): stored as a character string, so arithmetic on `my_height`
# would fail; a numeric value (66) is probably intended -- confirm.
my_height <- "66"
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 2
# Create a variable `puppy_price`, which is how much you think a puppy costs
puppy_price <- 20
# Create a variable `total_cost` that has the total cost of all of your puppies
total_cost <- puppies * puppy_price
# Create a boolean variable `too_expensive`, set to TRUE if the cost is greater
# than $1,000
too_expensive <- total_cost > 1000
# Create a variable `max_puppies`, which is the number of puppies you can
# afford for $1,000
max_puppies <- 1000 / puppy_price |
library(rstanarm)
# On CI, report the dplyr version and install hrbrthemes from the author's
# repository (it is not used below in this visible section).
if (Sys.getenv("CI") == "true") {
print(packageVersion("dplyr"))
install.packages("hrbrthemes", repos = "https://cinc.rud.is")
}
library(hrbrthemes)
# Build monthly counts per crime, keeping only crimes that average more than
# 20 incidents per date observed in `cuadrantes`.
df <- cuadrantes %>%
group_by(crime) %>%
mutate(total_count = sum(count)) %>%
ungroup %>%
filter(total_count / length(unique(cuadrantes$date)) > 20) %>%
group_by(date, crime) %>%
summarise(n = sum(count)) %>%
mutate(month = month(date)) %>%
# Numeric time axis (days since epoch) for the smooth terms.
mutate(time = as.numeric(as.Date(date))) %>%
# 30-day-standardised log counts; not used by the model fit below.
mutate(logn = log1p(n / days_in_month(as.Date(date)) * 30))
# Exposure offset: month length in average-month (365/12 day) units.
duration <- days_in_month(as.Date(df$date)) / (365/12)
# Add COVID pandemic variable
df$covid <- if_else(df$date > "2020-03-01", TRUE, FALSE)
# Poisson GAMM: one smooth time trend per crime, month-length offset, a
# COVID level shift, and a random intercept per crime.
m1 <- stan_gamm4(n ~ s(time, by = crime) + offset(log(duration)) + covid, # + s(month, bs = "cc", k = 12), #,
family = poisson,
random = ~(1 | crime),
data = df,
chains = 4,
iter = 2000,
adapt_delta = .99,
cores = 4,
seed = 12345)
save(m1, file = "clean-data/m1_crimes.RData")
#load( "clean-data/m1_crimes.RData")
#plot_nonlinear(m1)
#pp_check(m1)
#pp_check(m1, plotfun = "ppc_ecdf_overlay")
# Monthly grid spanning the observed date range.
dates <- seq(as.Date(min(df$date)), as.Date(max(df$date)), by = "month")
ndates <- length(dates)
# First-derivative trend classification per crime.
# For each crime, numerically differentiate the model's linear predictor
# with respect to time (finite difference with step `eps`) across posterior
# draws, then classify the trend in the latest month via a 90% credible
# interval on that derivative:
#   both quantiles < 0 -> "negative"; both > 0 -> "positive"; else NA.
# Returns one row per crime: crime, trend, fd (posterior median of the
# derivative in the last month). Relies on `m1`, `df`, `ndates` from above.
trends <- do.call(rbind, lapply(as.character(unique(df$crime)), function(crime_name) {
  # Columns of the linear-predictor matrix for this crime's smooth term
  # or the covid effect.
  inc <- grep(str_c(crime_name, "|covid"),
              colnames(predict(m1$jam, type = "lpmatrix")))
  # Linear-predictor matrix evaluated at time + eps (offset neutralized).
  eps <- 1e-3
  newDFeps <- df
  newDFeps$time <- df$time + eps
  newDFeps$duration <- log(1)
  X1 <- predict(m1$jam, newDFeps, type = 'lpmatrix')[, c(1, inc)]
  rows <- which(df$crime == crime_name)
  # Posterior draws of the linear predictor at t and at t + eps.
  sims_o <- as.matrix(m1)[, c(1, inc)] %*% t(as.matrix(m1$x[rows, c(1, inc)]))
  sims_n <- as.matrix(m1)[, c(1, inc)] %*% t(X1[rows, ])
  # Finite-difference approximation of the derivative (draws x months).
  d1 <- (sims_n - sims_o) / eps
  # 90% credible interval and median of the derivative in the last month.
  qt <- quantile(d1[, ndates], c(.1, .9))
  med <- median(d1[, ndates])
  # `&&` (scalar) is the idiomatic operator inside `if`.
  if (qt[1] < 0 && qt[2] < 0) {
    trend <- "negative"
  } else if (qt[1] > 0 && qt[2] > 0) {
    trend <- "positive"
  } else {
    trend <- NA
  }
  data.frame(crime = crime_name, trend = trend, fd = med)
}))
# Posterior draws of each crime's expected linear predictor over time.
# Produces one row per (draw, month, crime); `rate` is on the log scale.
sims <- do.call(rbind, lapply(as.character(unique(df$crime)), function(x) {
crime_name <- x
print(x)
# columns of the linear-predictor matrix for this crime's smooth / covid
inc <- grep(str_c(crime_name, "|covid"),
colnames(predict(m1$jam, type = "lpmatrix")))
X0 <- as.matrix(m1$x)[which(df$crime == crime_name), c(1, inc)]
# draws x months matrix of the fixed-effect linear predictor
sims <- as.matrix(m1)[, c(1, inc)] %*% t(X0) %>% as.data.frame()
# NOTE(review): greps colnames(m1$x) for this crime's random-intercept
# column -- confirm the index also matches as.matrix(m1)'s column order.
binc <- grep(paste0("b\\[\\(Intercept\\) crime:",
str_replace_all(crime_name," ", "_"),
"\\]$"),
colnames(m1$x))
b = as.matrix(m1)[, binc, drop = FALSE]
# add the crime-specific random intercept to every month's draws
sims <- apply(sims, 2, function(x) {x + b}) %>% as.data.frame()
sims$sim <- 1:nrow(sims)
# long format: one row per (draw, month)
sims <- gather(data.frame(sims), "time", "rate", -sim) %>%
mutate(time = as.numeric(str_replace(time, "X", ""))) %>%
arrange(sim, time)
sims$date <- dates
sims$crime <- crime_name
sims$count <- exp(sims$rate)
return(sims)
}))
df$date <- as.Date(df$date)
# attach the trend classification and the observed counts for plotting
sims <- left_join(sims, trends, by = "crime")
sims <- left_join(sims, df[, c("crime", "date", "n")], by = c("crime", "date"))
# order facets by the median first difference (descending)
sims <- sims %>%
mutate(fd = as.numeric(fd)) %>%
arrange(desc(fd)) %>%
mutate(crime = factor(crime, levels = unique(crime)))
# Plot posterior trend draws per crime: thin lines are posterior draws of
# the expected count (back-transformed from the log scale), points are the
# observed monthly counts; line color encodes last-month trend.
p <- ggplot(sims, aes(x = date, y = expm1(rate), group = sim)) +
  geom_line(alpha = 0.1, aes(color = trend), size = .05) +
  geom_point(aes(date, n),
             fill = "#f8766d",
             color = "black",
             shape = 21,
             size = 1.1) +
  scale_color_manual("tendencia\núltimo mes",
                     values = c("positive" = "#e41a1c",
                                "negative" = "#1f78b4"),
                     labels = c("al alza", "a la baja", "no significativa"),
                     breaks = c("positive", "negative", NA),
                     na.value = "#cab2d6") +
  expand_limits(y = 0) +
  xlab("fecha") +
  ylab("número de crímenes") +
  labs(title = "Tendencias de crímenes en CDMX y 1000 simulaciones del posterior de un modelo aditivo multinivel, por crimen",
       subtitle = "El color de cada crimen corresponde a la tendencia del último mes (primera derivada, intervalo de credibilidad del 90%).",
       caption = "Fuente: PGJ-CDMX Carpetas de Investigación") +
  theme_ft_rc(base_family = "Arial Narrow", strip_text_size = 10) +
  guides(color = guide_legend(override.aes = list(size = 2, alpha = 1))) +
  facet_wrap(~crime, scale = "free_y", ncol = 4)
# Pass `plot = p` explicitly: `p` is never printed, so ggsave()'s default
# of last_plot() would fail (or save the wrong plot) in a non-interactive run.
ggsave("graphs/trends.png", plot = p, height = 14, width = 14, dpi = 100)
| /src/multilevel_gam.R | no_license | diegovalle/hoyodecrimen.clean | R | false | false | 5,014 | r | library(rstanarm)
# On CI only: report the dplyr version and install hrbrthemes from the
# maintainer's repository (it is not on the default CRAN mirror).
if (Sys.getenv("CI") == "true") {
print(packageVersion("dplyr"))
install.packages("hrbrthemes", repos = "https://cinc.rud.is")
}
library(hrbrthemes)
# Build per-crime monthly counts.
# NOTE(review): assumes `cuadrantes` (with columns crime, date, count) and
# the dplyr/lubridate functions are already loaded upstream -- confirm.
df <- cuadrantes %>%
group_by(crime) %>%
mutate(total_count = sum(count)) %>%
ungroup %>%
# keep only crimes averaging more than 20 reports per period
filter(total_count / length(unique(cuadrantes$date)) > 20) %>%
group_by(date, crime) %>%
summarise(n = sum(count)) %>%
mutate(month = month(date)) %>%
# numeric time index used by the smooth terms
mutate(time = as.numeric(as.Date(date))) %>%
# log1p of the count standardized to a 30-day month
mutate(logn = log1p(n / days_in_month(as.Date(date)) * 30))
# Exposure offset: each month's length relative to an average month.
duration <- days_in_month(as.Date(df$date)) / (365/12)
# Add COVID pandemic variable
df$covid <- if_else(df$date > "2020-03-01", TRUE, FALSE)
# Bayesian multilevel GAM: crime-specific smooth time trends, a
# month-length offset, a COVID indicator, and a random intercept per crime.
m1 <- stan_gamm4(n ~ s(time, by = crime) + offset(log(duration)) + covid, # + s(month, bs = "cc", k = 12), #,
family = poisson,
random = ~(1 | crime),
data = df,
chains = 4,
iter = 2000,
adapt_delta = .99,
cores = 4,
seed = 12345)
# Cache the fitted model; sampling is expensive.
save(m1, file = "clean-data/m1_crimes.RData")
#load( "clean-data/m1_crimes.RData")
#plot_nonlinear(m1)
#pp_check(m1)
#pp_check(m1, plotfun = "ppc_ecdf_overlay")
# Monthly date grid spanning the observed period (one entry per modeled month).
dates <- seq(as.Date(min(df$date)), as.Date(max(df$date)), by = "month")
ndates <- length(dates)
# First-derivative trend classification per crime.
# For each crime, numerically differentiate the model's linear predictor
# with respect to time (finite difference with step `eps`) across posterior
# draws, then classify the trend in the latest month via a 90% credible
# interval on that derivative:
#   both quantiles < 0 -> "negative"; both > 0 -> "positive"; else NA.
# Returns one row per crime: crime, trend, fd (posterior median of the
# derivative in the last month). Relies on `m1`, `df`, `ndates` from above.
trends <- do.call(rbind, lapply(as.character(unique(df$crime)), function(crime_name) {
  # Columns of the linear-predictor matrix for this crime's smooth term
  # or the covid effect.
  inc <- grep(str_c(crime_name, "|covid"),
              colnames(predict(m1$jam, type = "lpmatrix")))
  # Linear-predictor matrix evaluated at time + eps (offset neutralized).
  eps <- 1e-3
  newDFeps <- df
  newDFeps$time <- df$time + eps
  newDFeps$duration <- log(1)
  X1 <- predict(m1$jam, newDFeps, type = 'lpmatrix')[, c(1, inc)]
  rows <- which(df$crime == crime_name)
  # Posterior draws of the linear predictor at t and at t + eps.
  sims_o <- as.matrix(m1)[, c(1, inc)] %*% t(as.matrix(m1$x[rows, c(1, inc)]))
  sims_n <- as.matrix(m1)[, c(1, inc)] %*% t(X1[rows, ])
  # Finite-difference approximation of the derivative (draws x months).
  d1 <- (sims_n - sims_o) / eps
  # 90% credible interval and median of the derivative in the last month.
  qt <- quantile(d1[, ndates], c(.1, .9))
  med <- median(d1[, ndates])
  # `&&` (scalar) is the idiomatic operator inside `if`.
  if (qt[1] < 0 && qt[2] < 0) {
    trend <- "negative"
  } else if (qt[1] > 0 && qt[2] > 0) {
    trend <- "positive"
  } else {
    trend <- NA
  }
  data.frame(crime = crime_name, trend = trend, fd = med)
}))
# Posterior draws of each crime's expected linear predictor over time.
# Produces one row per (draw, month, crime); `rate` is on the log scale.
sims <- do.call(rbind, lapply(as.character(unique(df$crime)), function(x) {
crime_name <- x
print(x)
# columns of the linear-predictor matrix for this crime's smooth / covid
inc <- grep(str_c(crime_name, "|covid"),
colnames(predict(m1$jam, type = "lpmatrix")))
X0 <- as.matrix(m1$x)[which(df$crime == crime_name), c(1, inc)]
# draws x months matrix of the fixed-effect linear predictor
sims <- as.matrix(m1)[, c(1, inc)] %*% t(X0) %>% as.data.frame()
# NOTE(review): greps colnames(m1$x) for this crime's random-intercept
# column -- confirm the index also matches as.matrix(m1)'s column order.
binc <- grep(paste0("b\\[\\(Intercept\\) crime:",
str_replace_all(crime_name," ", "_"),
"\\]$"),
colnames(m1$x))
b = as.matrix(m1)[, binc, drop = FALSE]
# add the crime-specific random intercept to every month's draws
sims <- apply(sims, 2, function(x) {x + b}) %>% as.data.frame()
sims$sim <- 1:nrow(sims)
# long format: one row per (draw, month)
sims <- gather(data.frame(sims), "time", "rate", -sim) %>%
mutate(time = as.numeric(str_replace(time, "X", ""))) %>%
arrange(sim, time)
sims$date <- dates
sims$crime <- crime_name
sims$count <- exp(sims$rate)
return(sims)
}))
df$date <- as.Date(df$date)
# attach the trend classification and the observed counts for plotting
sims <- left_join(sims, trends, by = "crime")
sims <- left_join(sims, df[, c("crime", "date", "n")], by = c("crime", "date"))
# order facets by the median first difference (descending)
sims <- sims %>%
mutate(fd = as.numeric(fd)) %>%
arrange(desc(fd)) %>%
mutate(crime = factor(crime, levels = unique(crime)))
# Plot posterior trend draws per crime: thin lines are posterior draws of
# the expected count (back-transformed from the log scale), points are the
# observed monthly counts; line color encodes last-month trend.
p <- ggplot(sims, aes(x = date, y = expm1(rate), group = sim)) +
  geom_line(alpha = 0.1, aes(color = trend), size = .05) +
  geom_point(aes(date, n),
             fill = "#f8766d",
             color = "black",
             shape = 21,
             size = 1.1) +
  scale_color_manual("tendencia\núltimo mes",
                     values = c("positive" = "#e41a1c",
                                "negative" = "#1f78b4"),
                     labels = c("al alza", "a la baja", "no significativa"),
                     breaks = c("positive", "negative", NA),
                     na.value = "#cab2d6") +
  expand_limits(y = 0) +
  xlab("fecha") +
  ylab("número de crímenes") +
  labs(title = "Tendencias de crímenes en CDMX y 1000 simulaciones del posterior de un modelo aditivo multinivel, por crimen",
       subtitle = "El color de cada crimen corresponde a la tendencia del último mes (primera derivada, intervalo de credibilidad del 90%).",
       caption = "Fuente: PGJ-CDMX Carpetas de Investigación") +
  theme_ft_rc(base_family = "Arial Narrow", strip_text_size = 10) +
  guides(color = guide_legend(override.aes = list(size = 2, alpha = 1))) +
  facet_wrap(~crime, scale = "free_y", ncol = 4)
# Pass `plot = p` explicitly: `p` is never printed, so ggsave()'s default
# of last_plot() would fail (or save the wrong plot) in a non-interactive run.
ggsave("graphs/trends.png", plot = p, height = 14, width = 14, dpi = 100)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appregistry_service.R
\name{appregistry}
\alias{appregistry}
\title{AWS Service Catalog App Registry}
\usage{
appregistry(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon Web Services Service Catalog AppRegistry enables organizations to
understand the application context of their Amazon Web Services
resources. AppRegistry provides a repository of your applications, their
resources, and the application metadata that you use within your
enterprise.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- appregistry(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=appregistry_associate_attribute_group]{associate_attribute_group} \tab Associates an attribute group with an application to augment the application's metadata with the group's attributes\cr
\link[=appregistry_associate_resource]{associate_resource} \tab Associates a resource with an application\cr
\link[=appregistry_create_application]{create_application} \tab Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions\cr
\link[=appregistry_create_attribute_group]{create_attribute_group} \tab Creates a new attribute group as a container for user-defined attributes\cr
\link[=appregistry_delete_application]{delete_application} \tab Deletes an application that is specified either by its application ID, name, or ARN\cr
\link[=appregistry_delete_attribute_group]{delete_attribute_group} \tab Deletes an attribute group, specified either by its attribute group ID, name, or ARN\cr
\link[=appregistry_disassociate_attribute_group]{disassociate_attribute_group} \tab Disassociates an attribute group from an application to remove the extra attributes contained in the attribute group from the application's metadata\cr
\link[=appregistry_disassociate_resource]{disassociate_resource} \tab Disassociates a resource from application\cr
\link[=appregistry_get_application]{get_application} \tab Retrieves metadata information about one of your applications\cr
\link[=appregistry_get_associated_resource]{get_associated_resource} \tab Gets the resource associated with the application\cr
\link[=appregistry_get_attribute_group]{get_attribute_group} \tab Retrieves an attribute group by its ARN, ID, or name\cr
\link[=appregistry_get_configuration]{get_configuration} \tab Retrieves a TagKey configuration from an account\cr
\link[=appregistry_list_applications]{list_applications} \tab Retrieves a list of all of your applications\cr
\link[=appregistry_list_associated_attribute_groups]{list_associated_attribute_groups} \tab Lists all attribute groups that are associated with specified application\cr
\link[=appregistry_list_associated_resources]{list_associated_resources} \tab Lists all of the resources that are associated with the specified application\cr
\link[=appregistry_list_attribute_groups]{list_attribute_groups} \tab Lists all attribute groups which you have access to\cr
\link[=appregistry_list_attribute_groups_for_application]{list_attribute_groups_for_application} \tab Lists the details of all attribute groups associated with a specific application\cr
\link[=appregistry_list_tags_for_resource]{list_tags_for_resource} \tab Lists all of the tags on the resource\cr
\link[=appregistry_put_configuration]{put_configuration} \tab Associates a TagKey configuration to an account\cr
\link[=appregistry_sync_resource]{sync_resource} \tab Syncs the resource with current AppRegistry records\cr
\link[=appregistry_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified resource\cr
\link[=appregistry_untag_resource]{untag_resource} \tab Removes tags from a resource\cr
\link[=appregistry_update_application]{update_application} \tab Updates an existing application with new attributes\cr
\link[=appregistry_update_attribute_group]{update_attribute_group} \tab Updates an existing attribute group with new details
}
}
\examples{
\dontrun{
svc <- appregistry()
svc$associate_attribute_group(
Foo = 123
)
}
}
| /cran/paws.management/man/appregistry.Rd | permissive | paws-r/paws | R | false | true | 6,957 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appregistry_service.R
\name{appregistry}
\alias{appregistry}
\title{AWS Service Catalog App Registry}
\usage{
appregistry(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon Web Services Service Catalog AppRegistry enables organizations to
understand the application context of their Amazon Web Services
resources. AppRegistry provides a repository of your applications, their
resources, and the application metadata that you use within your
enterprise.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- appregistry(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=appregistry_associate_attribute_group]{associate_attribute_group} \tab Associates an attribute group with an application to augment the application's metadata with the group's attributes\cr
\link[=appregistry_associate_resource]{associate_resource} \tab Associates a resource with an application\cr
\link[=appregistry_create_application]{create_application} \tab Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions\cr
\link[=appregistry_create_attribute_group]{create_attribute_group} \tab Creates a new attribute group as a container for user-defined attributes\cr
\link[=appregistry_delete_application]{delete_application} \tab Deletes an application that is specified either by its application ID, name, or ARN\cr
\link[=appregistry_delete_attribute_group]{delete_attribute_group} \tab Deletes an attribute group, specified either by its attribute group ID, name, or ARN\cr
\link[=appregistry_disassociate_attribute_group]{disassociate_attribute_group} \tab Disassociates an attribute group from an application to remove the extra attributes contained in the attribute group from the application's metadata\cr
\link[=appregistry_disassociate_resource]{disassociate_resource} \tab Disassociates a resource from application\cr
\link[=appregistry_get_application]{get_application} \tab Retrieves metadata information about one of your applications\cr
\link[=appregistry_get_associated_resource]{get_associated_resource} \tab Gets the resource associated with the application\cr
\link[=appregistry_get_attribute_group]{get_attribute_group} \tab Retrieves an attribute group by its ARN, ID, or name\cr
\link[=appregistry_get_configuration]{get_configuration} \tab Retrieves a TagKey configuration from an account\cr
\link[=appregistry_list_applications]{list_applications} \tab Retrieves a list of all of your applications\cr
\link[=appregistry_list_associated_attribute_groups]{list_associated_attribute_groups} \tab Lists all attribute groups that are associated with specified application\cr
\link[=appregistry_list_associated_resources]{list_associated_resources} \tab Lists all of the resources that are associated with the specified application\cr
\link[=appregistry_list_attribute_groups]{list_attribute_groups} \tab Lists all attribute groups which you have access to\cr
\link[=appregistry_list_attribute_groups_for_application]{list_attribute_groups_for_application} \tab Lists the details of all attribute groups associated with a specific application\cr
\link[=appregistry_list_tags_for_resource]{list_tags_for_resource} \tab Lists all of the tags on the resource\cr
\link[=appregistry_put_configuration]{put_configuration} \tab Associates a TagKey configuration to an account\cr
\link[=appregistry_sync_resource]{sync_resource} \tab Syncs the resource with current AppRegistry records\cr
\link[=appregistry_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified resource\cr
\link[=appregistry_untag_resource]{untag_resource} \tab Removes tags from a resource\cr
\link[=appregistry_update_application]{update_application} \tab Updates an existing application with new attributes\cr
\link[=appregistry_update_attribute_group]{update_attribute_group} \tab Updates an existing attribute group with new details
}
}
\examples{
\dontrun{
svc <- appregistry()
svc$associate_attribute_group(
Foo = 123
)
}
}
|
#######################
### Dynamic network analysis (experience-sampling time series)
# by Björn Siepe & Kai J. Nehler
## Load data
data <- read.csv(url("https://osf.io/g6ya4/download"))
names(data) # variable names
data$time <- as.POSIXct(data$time, tz = "Europe/Amsterdam") # convert the timestamp variable
data$time[1:8]
## Model
### Detrending
# Illustrate detrending for a single variable first.
lm_tired <- lm(tired ~ time, data = data)
summary(lm_tired)
data[!is.na(data["tired"]),"tired"] <- residuals(lm_tired) # overwrite the observed (non-missing) values with the residuals
rel_vars <- c("relaxed","sad","nervous","concentration","tired","rumination","bodily.discomfort")
# Detrend every variable whose linear time trend is significant.
for (v in 1:length(rel_vars)){
# regress the respective variable on time
lm_form <- as.formula(paste0(rel_vars[v], "~ time"))
# fit the linear model
lm_res <- summary(lm(lm_form, data = data))
# if the time trend is significant, detrend using the residuals
# [,4] selects the column of p-values
# [2] selects the p-value of the regression weight for the date
if(lm_res$coefficients[,4][2] < 0.05){
print(paste0("Detrende Variable: ", rel_vars[v]))
data[!is.na(data[rel_vars[v]]),rel_vars[v]] <- residuals(lm_res)
}
}
### Model estimation
data$date <- as.Date(data$time, tz = "Europe/Amsterdam") # reduce timestamps to days
data$beep <- rep(1:5, 14) # create the beep numbering (5 beeps per day, 14 days)
# loop over the unique days
for (i in unique(data$date)){
# put all measurements of one day into a separate data set
set <- data[data$date == i,]
# determine the ordering of the time variable within this data set
# and write the corresponding rank back into the original data set
data$beep[data$date==i] <- order(set$time)
}
library(qgraph)
library(bootnet)
# Estimate a graphical VAR: temporal and contemporaneous networks.
res <- estimateNetwork(data = data,
default = "graphicalVAR", # estimation method / package used
vars = rel_vars, # variable names
dayvar = "date", # day variable
beepvar = "beep", # beep (notification) variable
tuning = 0, # EBIC tuning parameter
nLambda = 25) # number of LASSO tuning parameters tested
res$graph # component networks
Layout <- averageLayout(res$graph$temporal, res$graph$contemporaneous) # compute a shared average layout
plot(res, graph = "temporal", layout = Layout, title = "Temporal")
plot(res, graph = "contemporaneous", layout = Layout, title = "Contemporaneous")
| /content/post/KliPPs_MSc5a_R_Files/11_netzwerkanalyse-laengsschnitt_RCode.R | no_license | martscht/projekte | R | false | false | 2,463 | r | #######################
### Dynamic network analysis (experience-sampling time series)
# by Björn Siepe & Kai J. Nehler
## Load data
data <- read.csv(url("https://osf.io/g6ya4/download"))
names(data) # variable names
data$time <- as.POSIXct(data$time, tz = "Europe/Amsterdam") # convert the timestamp variable
data$time[1:8]
## Model
### Detrending
# Illustrate detrending for a single variable first.
lm_tired <- lm(tired ~ time, data = data)
summary(lm_tired)
data[!is.na(data["tired"]),"tired"] <- residuals(lm_tired) # overwrite the observed (non-missing) values with the residuals
rel_vars <- c("relaxed","sad","nervous","concentration","tired","rumination","bodily.discomfort")
# Detrend every variable whose linear time trend is significant.
for (v in 1:length(rel_vars)){
# regress the respective variable on time
lm_form <- as.formula(paste0(rel_vars[v], "~ time"))
# fit the linear model
lm_res <- summary(lm(lm_form, data = data))
# if the time trend is significant, detrend using the residuals
# [,4] selects the column of p-values
# [2] selects the p-value of the regression weight for the date
if(lm_res$coefficients[,4][2] < 0.05){
print(paste0("Detrende Variable: ", rel_vars[v]))
data[!is.na(data[rel_vars[v]]),rel_vars[v]] <- residuals(lm_res)
}
}
### Model estimation
data$date <- as.Date(data$time, tz = "Europe/Amsterdam") # reduce timestamps to days
data$beep <- rep(1:5, 14) # create the beep numbering (5 beeps per day, 14 days)
# loop over the unique days
for (i in unique(data$date)){
# put all measurements of one day into a separate data set
set <- data[data$date == i,]
# determine the ordering of the time variable within this data set
# and write the corresponding rank back into the original data set
data$beep[data$date==i] <- order(set$time)
}
library(qgraph)
library(bootnet)
# Estimate a graphical VAR: temporal and contemporaneous networks.
res <- estimateNetwork(data = data,
default = "graphicalVAR", # estimation method / package used
vars = rel_vars, # variable names
dayvar = "date", # day variable
beepvar = "beep", # beep (notification) variable
tuning = 0, # EBIC tuning parameter
nLambda = 25) # number of LASSO tuning parameters tested
res$graph # component networks
Layout <- averageLayout(res$graph$temporal, res$graph$contemporaneous) # compute a shared average layout
plot(res, graph = "temporal", layout = Layout, title = "Temporal")
plot(res, graph = "contemporaneous", layout = Layout, title = "Contemporaneous")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/nplcm-fit-NoReg-BrSandSS-Nest.R
\name{nplcm_fit_NoReg_BrSandSS_Nest}
\alias{nplcm_fit_NoReg_BrSandSS_Nest}
\title{Fit nested partially-latent class model (low-level)}
\usage{
nplcm_fit_NoReg_BrSandSS_Nest(data_nplcm, model_options, mcmc_options)
}
\arguments{
\item{data_nplcm}{\itemize{
\item \code{Mobs} A list of measurements. The elements of the list
should include \code{MBS}, \code{MSS}, and \code{MGS}. If any of the component
is not available, please specify it as, e.g., \code{MGS=NA} or \code{MGS=NULL}
(effectively deleting \code{MGS} from \code{Mobs}).
\itemize{
\item \code{MBS} a data frame of bronze-standard (BrS) measurements.
Rows are subjects, columns are pathogens.
They have imperfect sensitivity/specificity (e.g. nasalpharyngeal PCR).
\item \code{MSS} a data frame of silver-standard (SS) measurements.
Rows are subjects, columns are pathogens measured in specimen (e.g. blood culture).
These measurements have perfect specificity but imperfect sensitivity.
\item \code{MGS} a data frame of gold-standard (GS) measurements.
Rows are subject, columns are pathogen measurements.
These measurements have perfect sensitivity and specificity.
}
\item \code{Y} Vector of disease status: 1 for case, 0 for control.
\item \code{X} Covariate matrix for regression modeling. It contains raw covariate
data, not design matrix for regression models.
}}
\item{model_options}{A list of model options.
\itemize{
\item \code{M_use} List of measurements to be used in the model;
\item \code{k_subclass}The number of nested subclasses. 1 for conditional independence,
>1 for conditional dependence;
\item \code{TPR_prior} Description of priors for the measurements
(e.g., informative vs non-informative).
Its length should be the same with \code{M_use};
\item \code{Eti_prior} Description of etiology prior
(e.g., \code{overall_uniform} - all hyperparameters are 1; or \code{0_1} - all hyperparameters
are 0.1);
\item \code{pathogen_BrS_list} The vector of pathogen names with BrS measure;
\item \code{cause_list} The vector of causes that are either singleton or
combinations of items in \code{pathogen_BrS_list}; 'NoA' can also be included
at the end, meaning 'None-of-Above';
\item \code{X_reg_FPR} formula for false positive rates (FPR) regressions; see
\code{\link{formula}}. You can use \code{\link{dm_Rdate_FPR}} to specify part
of the design matrix for R format enrollment date; it will produce thin-plate
regression spline basis for every date (if \code{effect="random"} and \code{num_knots_FPR} is
specified to a positive integer, e.g., 10.). If \code{effect="fixed"}, \code{\link{dm_Rdate_FPR}}
will just specify a design matrix with appropriately standardized dates. Specify \code{~0} if no
regression is intended.
\item \code{X_reg_Eti} formula for etiology regressions. You can use
\code{\link{dm_Rdate_Eti}} to specify the design matrix for R format enrollment date;
it will produce natural cubic splines for every date. Specify \code{~0} if no
regression is intended.
\item \code{pathogen_BrS_cat} The two-column data frame that has category of pathogens: virus (V), bacteria (B)
and fungi (F);
\item \code{pathogen_SSonly_list} The vector of pathogens with only
SS measure;
\item \code{pathogen_SSonly_cat} The category of pathogens with only SS measure.
}}
\item{mcmc_options}{A list of Markov chain Monte Carlo (MCMC) options.
\itemize{
\item \code{debugstatus} Logical - whether to pause WinBUGS after it finishes
model fitting;
\item \code{n.chains} Number of MCMC chains;
\item \code{n.burnin} Number of burn-in samples;
\item \code{n.thin} To keep every other \code{n.thin} samples after burn-in period;
\item \code{individual.pred} whether to perform individual prediction;
\item \code{ppd} whether to perform posterior predictive (ppd) checking;
\item \code{result.folder} Path to folder storing the results;
\item \code{bugsmodel.dir} Path to WinBUGS model files;
\item \code{winbugs.dir} Path to where WinBUGS 1.4 is installed.
}}
}
\value{
WinBUGS fit results.
}
\description{
Features:
\itemize{
\item no regression;
\item bronze- (BrS)and silver-standard (SS) measurements;
\item conditional dependence;
\item all pathogens have BrS measurements.
}
}
| /man/nplcm_fit_NoReg_BrSandSS_Nest.Rd | permissive | zhenkewu/nplcm | R | false | false | 4,372 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/nplcm-fit-NoReg-BrSandSS-Nest.R
\name{nplcm_fit_NoReg_BrSandSS_Nest}
\alias{nplcm_fit_NoReg_BrSandSS_Nest}
\title{Fit nested partially-latent class model (low-level)}
\usage{
nplcm_fit_NoReg_BrSandSS_Nest(data_nplcm, model_options, mcmc_options)
}
\arguments{
\item{data_nplcm}{\itemize{
\item \code{Mobs} A list of measurements. The elements of the list
should include \code{MBS}, \code{MSS}, and \code{MGS}. If any of the component
is not available, please specify it as, e.g., \code{MGS=NA} or \code{MGS=NULL}
(effectively deleting \code{MGS} from \code{Mobs}).
\itemize{
\item \code{MBS} a data frame of bronze-standard (BrS) measurements.
Rows are subjects, columns are pathogens.
They have imperfect sensitivity/specificity (e.g. nasalpharyngeal PCR).
\item \code{MSS} a data frame of silver-standard (SS) measurements.
Rows are subjects, columns are pathogens measured in specimen (e.g. blood culture).
These measurements have perfect specificity but imperfect sensitivity.
\item \code{MGS} a data frame of gold-standard (GS) measurements.
Rows are subject, columns are pathogen measurements.
These measurements have perfect sensitivity and specificity.
}
\item \code{Y} Vector of disease status: 1 for case, 0 for control.
\item \code{X} Covariate matrix for regression modeling. It contains raw covariate
data, not design matrix for regression models.
}}
\item{model_options}{A list of model options.
\itemize{
\item \code{M_use} List of measurements to be used in the model;
\item \code{k_subclass}The number of nested subclasses. 1 for conditional independence,
>1 for conditional dependence;
\item \code{TPR_prior} Description of priors for the measurements
(e.g., informative vs non-informative).
Its length should be the same with \code{M_use};
\item \code{Eti_prior} Description of etiology prior
(e.g., \code{overall_uniform} - all hyperparameters are 1; or \code{0_1} - all hyperparameters
are 0.1);
\item \code{pathogen_BrS_list} The vector of pathogen names with BrS measure;
\item \code{cause_list} The vector of causes that are either singleton or
combinations of items in \code{pathogen_BrS_list}; 'NoA' can also be included
at the end, meaning 'None-of-Above';
\item \code{X_reg_FPR} formula for false positive rates (FPR) regressions; see
\code{\link{formula}}. You can use \code{\link{dm_Rdate_FPR}} to specify part
of the design matrix for R format enrollment date; it will produce thin-plate
regression spline basis for every date (if \code{effect="random"} and \code{num_knots_FPR} is
specified to a positive integer, e.g., 10.). If \code{effect="fixed"}, \code{\link{dm_Rdate_FPR}}
will just specify a design matrix with appropriately standardized dates. Specify \code{~0} if no
regression is intended.
\item \code{X_reg_Eti} formula for etiology regressions. You can use
\code{\link{dm_Rdate_Eti}} to specify the design matrix for R format enrollment date;
it will produce natural cubic splines for every date. Specify \code{~0} if no
regression is intended.
\item \code{pathogen_BrS_cat} The two-column data frame that has category of pathogens: virus (V), bacteria (B)
and fungi (F);
\item \code{pathogen_SSonly_list} The vector of pathogens with only
SS measure;
\item \code{pathogen_SSonly_cat} The category of pathogens with only SS measure.
}}
\item{mcmc_options}{A list of Markov chain Monte Carlo (MCMC) options.
\itemize{
\item \code{debugstatus} Logical - whether to pause WinBUGS after it finishes
model fitting;
\item \code{n.chains} Number of MCMC chains;
\item \code{n.burnin} Number of burn-in samples;
\item \code{n.thin} To keep every other \code{n.thin} samples after burn-in period;
\item \code{individual.pred} whether to perform individual prediction;
\item \code{ppd} whether to perform posterior predictive (ppd) checking;
\item \code{result.folder} Path to folder storing the results;
\item \code{bugsmodel.dir} Path to WinBUGS model files;
\item \code{winbugs.dir} Path to where WinBUGS 1.4 is installed.
}}
}
\value{
WinBUGS fit results.
}
\description{
Features:
\itemize{
\item no regression;
\item bronze- (BrS) and silver-standard (SS) measurements;
\item conditional dependence;
\item all pathogens have BrS measurements.
}
}
|
# Exploratory analysis of leaked exam marks ("pr201_marks_leaked.csv"):
# plots the distribution of the final mark `k3_final`, prints summary
# statistics, and builds a 95% t-based confidence interval for its mean,
# cross-checked against t.test().
library(rio)
library(tidyverse)
library(skimr)
getwd()
setwd("~/documents/")  # NOTE(review): hard-coded working directory; adjust per machine
getwd()
m <- import("pr201_marks_leaked.csv", encoding="UTF-8")
glimpse(m)
qplot(data = m, x = k3_final) +
  labs(title = "оценки за КР-3",
       x = "Оценка за кр",
       y = "Количество бриллиантов") # histogram of the final mark
skim(m)
view(m)
mu_hat <- mean(m$k3_final)           # sample mean (point estimate of the expectation)
glimpse(mu_hat)
mu_hat
sigma_hat <- sd(m$k3_final)          # sample standard deviation
sigma_hat
n <- nrow(m)
n
se_mu_hat <- sigma_hat / sqrt(n)     # standard error of the mean
se_mu_hat
t_crit <- qt(0.975, df = n - 1)      # Student-t quantile for a two-sided 95% CI
t_crit
left_ci <- mu_hat - t_crit * se_mu_hat
right_ci <- mu_hat + t_crit * se_mu_hat
left_ci
right_ci
t.test(m$k3_final)                   # cross-check the hand-computed interval
| /DZ.R | no_license | DaveYerzh/catch_the_dragon | R | false | false | 841 | r | library(rio)
library(tidyverse)
library(skimr)
getwd()
setwd("~/documents/")  # NOTE(review): hard-coded working directory; adjust per machine
getwd()
m = import("pr201_marks_leaked.csv", encoding="UTF-8")
glimpse(m)
qplot(data = m, x = k3_final) +
  labs(title = "оценки за КР-3",
       x = "Оценка за кр",
       y = "Количество бриллиантов") # histogram of the final mark
skim(m)
view(m)
mu_hat = mean(m$k3_final) # sample mean (point estimate of the expectation)
glimpse(mu_hat)
mu_hat
sigma_hat = sd(m$k3_final) # sample standard deviation
sigma_hat
n = nrow(m)
n
se_mu_hat = sigma_hat / sqrt(n) # standard error of the mean
se_mu_hat
t_crit = qt(0.975, df = n - 1) # Student-t quantile for a two-sided 95% CI
t_crit
left_ci = mu_hat - t_crit * se_mu_hat
right_ci = mu_hat + t_crit * se_mu_hat
left_ci
right_ci
t.test(m$k3_final)
|
\name{qqthin}
\alias{qqthin}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{a version of qqplot() that thins out points that overplot}
\description{
QQ-plots with large numbers of points typically generate graphics files
that are unhelpfully large. This function handles the problem by
removing points that are, for all practical purposes, redundant
}
\usage{
qqthin(x, y, ends = c(0.01, 0.99), eps = 0.001, xlab = deparse(substitute(x)),
adj.xlab = NULL, ylab = deparse(substitute(y)), show.line = TRUE,
print.thinning.details=TRUE, centerline = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ordered values of \code{x} will be plotted on the x-axis}
\item{y}{ordered values of \code{y} will be plotted on the y-axis}
\item{ends}{outside these cumulative proportions of numbers of points,
all points will be included in the graph}
\item{eps}{controls the extent of overplotting}
\item{xlab}{label for x-axis}
\item{adj.xlab}{positioning of x-label}
\item{ylab}{label for y-axis}
\item{show.line}{logical; show the line y=x?}
\item{print.thinning.details}{logical; print number of points after thinning?}
\item{centerline}{logical; draw a line through the part of the graph where
some points have been omitted?}
\item{\dots}{additional graphics parameters}
}
\value{
Gives a qqplot. The number of points retained is returned invisibly.
}
\references{ ~put references to the literature/web site here ~ }
\author{John Maindonald}
\examples{
mat <- matrix(rnorm(1000), ncol=20)
cl <- factor(rep(1:3, c(7,9,4)))
Fstats <- aovFbyrow(x = mat, cl = cl)
qqthin(qf(ppoints(length(Fstats)), 2, 17), Fstats, eps=0.01)
## The function is currently defined as
function(x, y, ends=c(.01,.99), eps=0.001,
xlab = deparse(substitute(x)), adj.xlab=NULL,
ylab = deparse(substitute(y)), show.line=TRUE,
print.thinning.details=TRUE,
centerline=TRUE, ...){
## qqthin() is a substitute for qqplot(), that thins
## out plotted points from the region where they are
## dense. Apart from the overlaid curve that shows
## the region where points have been thinned, it may
## be hard to distinguish the result of qqthin()
## from that of qqplot()
xlab <- xlab
ylab <- ylab
x <- sort(x)
y <- sort(y)
dx<-diff(x)
epsdist <- sqrt(diff(range(x))^2+diff(range(y))^2)*eps
dx<-0.5*(c(dx[1],dx)+c(dx,dx[length(dx)]))
dy<-diff(y)
dy<-0.5*(c(dy[1],dy)+c(dy,dy[length(dy)]))
dpoints <- epsdist/sqrt(dx^2+dy^2)
## dpoints is a local measure of the number of points
## per unit distance along the diagonal, with the unit
## set to approximately eps*(length of diagonal)
dig<-floor(dpoints)+1
## dig is, roughly, the number of points per unit distance.
## We wish to retain one point per unit distance. For this
## retain points where cdig rounds to an integer. For such
## points, cdig has increased by approx 1, relative to the
## previous point that is retained.
cdig<-round(cumsum(1/dig))
subs<-match(unique(cdig), cdig)
if(is.null(adj.xlab))
plot(x[subs], y[subs], xlab=xlab, ylab=ylab)
else {
plot(x[subs], y[subs], xlab="", ylab=ylab)
mtext(side=1, xlab, adj=adj.xlab, line=par()$mgp[1])
}
if(any(diff(subs)>1)){
n1 <- min(subs[c(diff(subs),0)>1])
n2 <- max(subs[c(0,diff(subs))>1])
ns1 <- match(n1, subs)
ns2 <- match(n2, subs)
if(print.thinning.details)
print(paste("Graph retains", length(subs), "points."))
if(centerline)
lines(smooth.spline(x[subs[ns1:ns2]], y[subs[ns1:ns2]]),
col="grey", lwd=2)
}
if(show.line)abline(0, 1, col="red")
invisible(length(subs))
}
}
\keyword{hplot}% at least one, from doc/KEYWORDS
| /man/qqthin.Rd | no_license | cran/hddplot | R | false | false | 3,835 | rd | \name{qqthin}
\alias{qqthin}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{a version of qqplot() that thins out points that overplot}
\description{
QQ-plots with large numbers of points typically generate graphics files
that are unhelpfully large. This function handles the problem by
removing points that are, for all practical purposes, redundant
}
\usage{
qqthin(x, y, ends = c(0.01, 0.99), eps = 0.001, xlab = deparse(substitute(x)),
adj.xlab = NULL, ylab = deparse(substitute(y)), show.line = TRUE,
print.thinning.details=TRUE, centerline = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ordered values of \code{x} will be plotted on the x-axis}
\item{y}{ordered values of \code{y} will be plotted on the y-axis}
\item{ends}{outside these cumulative proportions of numbers of points,
all points will be included in the graph}
\item{eps}{controls the extent of overplotting}
\item{xlab}{label for x-axis}
\item{adj.xlab}{positioning of x-label}
\item{ylab}{label for y-axis}
\item{show.line}{logical; show the line y=x?}
\item{print.thinning.details}{logical; print number of points after thinning?}
\item{centerline}{logical; draw a line through the part of the graph where
some points have been omitted?}
\item{\dots}{additional graphics parameters}
}
\value{
Gives a qqplot. The number of points retained is returned invisibly.
}
\references{ ~put references to the literature/web site here ~ }
\author{John Maindonald}
\examples{
mat <- matrix(rnorm(1000), ncol=20)
cl <- factor(rep(1:3, c(7,9,4)))
Fstats <- aovFbyrow(x = mat, cl = cl)
qqthin(qf(ppoints(length(Fstats)), 2, 17), Fstats, eps=0.01)
## The function is currently defined as
function(x, y, ends=c(.01,.99), eps=0.001,
xlab = deparse(substitute(x)), adj.xlab=NULL,
ylab = deparse(substitute(y)), show.line=TRUE,
print.thinning.details=TRUE,
centerline=TRUE, ...){
## qqthin() is a substitute for qqplot(), that thins
## out plotted points from the region where they are
## dense. Apart from the overlaid curve that shows
## the region where points have been thinned, it may
## be hard to distinguish the result of qqthin()
## from that of qqplot()
xlab <- xlab
ylab <- ylab
x <- sort(x)
y <- sort(y)
dx<-diff(x)
epsdist <- sqrt(diff(range(x))^2+diff(range(y))^2)*eps
dx<-0.5*(c(dx[1],dx)+c(dx,dx[length(dx)]))
dy<-diff(y)
dy<-0.5*(c(dy[1],dy)+c(dy,dy[length(dy)]))
dpoints <- epsdist/sqrt(dx^2+dy^2)
## dpoints is a local measure of the number of points
## per unit distance along the diagonal, with the unit
## set to approximately eps*(length of diagonal)
dig<-floor(dpoints)+1
## dig is, roughly, the number of points per unit distance.
## We wish to retain one point per unit distance. For this
## retain points where cdig rounds to an integer. For such
## points, cdig has increased by approx 1, relative to the
## previous point that is retained.
cdig<-round(cumsum(1/dig))
subs<-match(unique(cdig), cdig)
if(is.null(adj.xlab))
plot(x[subs], y[subs], xlab=xlab, ylab=ylab)
else {
plot(x[subs], y[subs], xlab="", ylab=ylab)
mtext(side=1, xlab, adj=adj.xlab, line=par()$mgp[1])
}
if(any(diff(subs)>1)){
n1 <- min(subs[c(diff(subs),0)>1])
n2 <- max(subs[c(0,diff(subs))>1])
ns1 <- match(n1, subs)
ns2 <- match(n2, subs)
if(print.thinning.details)
print(paste("Graph retains", length(subs), "points."))
if(centerline)
lines(smooth.spline(x[subs[ns1:ns2]], y[subs[ns1:ns2]]),
col="grey", lwd=2)
}
if(show.line)abline(0, 1, col="red")
invisible(length(subs))
}
}
\keyword{hplot}% at least one, from doc/KEYWORDS
|
# Render a `dust` object as a markdown table.
#
# Args:
#   x           a dust object (head/body/foot/interfoot parts, caption, longtable).
#   ...         currently unused.
#   asis        if TRUE, wrap the result in knitr::asis_output() so it renders
#               directly inside an R Markdown document.
#   interactive overrides interactive-session detection; when NULL, falls back
#               to interactive().
# Returns: a character string of markdown (possibly classed by asis_output).
print_dust_markdown <- function(x, ..., asis=TRUE,
                                interactive = getOption("pixie_interactive"))
{
  if (is.null(interactive)) interactive <- interactive()
  #* Scalar condition: use `&&`, not the vectorized `&`.
  if (!is.null(x$caption) && x$caption_number) increment_pixie_count()
  caption_number_prefix <-
    if (x$caption_number) sprintf("Table %s: ", get_pixie_count())
    else ""
  #* Determine the number of divisions
  #* It looks more complicated than it is, but the gist of it is
  #* total number of divisions: ceiling(total_rows / longtable_rows)
  #* The Divisions data frame is just a lookup of which rows belong to
  #* which division.
  if (!is.numeric(x$longtable) && x$longtable) longtable_rows <- 25L
  else if (!is.numeric(x$longtable) && !x$longtable) longtable_rows <- as.integer(max(x$body$row))
  else longtable_rows <- as.integer(x$longtable)
  Divisions <- data.frame(div_num = rep(1:ceiling(max(x$body$row) / longtable_rows),
                                        each = longtable_rows)[1:max(x$body$row)],
                          row_num = 1:max(x$body$row))
  total_div <- max(Divisions$div_num)
  #* If the table is not being run interactively (ie, in an rmarkdown script)
  #* detect the type of output. The spacing between tables is output-specific
  if (!interactive){
    output_type <- knitr::opts_knit$get('rmarkdown.pandoc.to')
    linebreak <- if (is.null(output_type)) " "
    else if (output_type == "html") "<br>"
    else if (output_type == "latex") "\\ \\linebreak"
    else " "
  }
  else linebreak <- " "
  #* Format the table divisions
  head <- part_prep_markdown(x$head)
  body <- part_prep_markdown(x$body)
  foot <- if (!is.null(x$foot)) part_prep_markdown(x$foot) else NULL
  interfoot <- if (!is.null(x$interfoot)) part_prep_markdown(x$interfoot) else NULL
  names(body) <- names(head) <- head[1, ]
  if (!is.null(foot)) names(foot) <- names(head)
  if (!is.null(interfoot)) names(interfoot) <- names(head)
  #* Header rows past the first become a bolded "subheader" block repeated
  #* at the top of every division.
  subhead <- head[-1, ]
  subhead <- lapply(subhead, function(v) paste0("**", v, "**")) %>%
    as.data.frame(stringsAsFactors=FALSE)
  numeric_classes <- c("numeric", "double", "int")
  #* Determine the alignments.  Alignments in 'knitr::kable' are assigned
  #* by the first letter of the HTML alignment.  If no alignment is
  #* assigned, a default is chosen based on the variable type.  Numerics
  #* are aligned right, characters are aligned left.
  alignments <- x$head[x$head$row == 1, ]
  alignments <- alignments[c("row", "col", "halign", "col_class")]
  alignments$halign <- ifelse(alignments$halign == "",
                              ifelse(alignments$col_class %in% numeric_classes,
                                     "r",
                                     "l"),
                              substr(alignments$halign, 1, 1))
  #* Build each division's markdown and concatenate.
  tbl_code <- ""
  for (i in seq_len(total_div)){
    tbl <- .rbind_internal(if (nrow(head) > 1) subhead else NULL,
                           body[Divisions$row_num[Divisions$div_num == i], ],
                           if (i == total_div) foot else interfoot)
    tbl_code <- paste0(tbl_code,
                       paste(c("", "",
                               knitr::kable(tbl,
                                            format = "markdown",
                                            align = substr(alignments$halign, 1, 1)),
                               "\n", linebreak, "\n", linebreak, "\n"),
                             collapse = "\n"))
  }
  #* Prepend the caption exactly once.  (Previously this happened inside the
  #* loop above, which duplicated the caption for every division of a
  #* longtable.)
  if (!is.null(x$caption))
    tbl_code <- paste0(caption_number_prefix, x$caption, "\n", tbl_code)
  if (asis) knitr::asis_output(tbl_code)
  else tbl_code
}
#**** Helper functions
#* Prepare one table "part" (head/body/foot/interfoot) for markdown output:
#* applies cell-level rounding, literal replacement, bold/italic markup,
#* span blanking, and NA substitution, then reshapes to a wide data frame.
part_prep_markdown <- function(part)
{
  numeric_classes <- c("double", "numeric")
  part <- perform_function(part)
  #* Perform any rounding; numeric cells with no explicit `round` setting
  #* fall back to getOption("digits")
  logic <- part$round == "" & part$col_class %in% numeric_classes
  part$round[logic] <- getOption("digits")
  logic <- part$col_class %in% numeric_classes
  if (any(logic))
    part$value[logic] <-
    as.character(roundSafe(part$value[logic], as.numeric(part$round[logic])))
  #* Replacement: literal replacement values override the computed cell value
  logic <- !is.na(part[["replace"]])
  part[["value"]][logic] <- part[["replace"]][logic]
  #* Bold text (markdown `**`)
  logic <- part$bold
  part$value[logic] <-
    with(part, paste0("**", value[logic], "**"))
  #* Italic text (markdown `_`)
  logic <- part$italic
  part$value[logic] <-
    with(part, paste0("_", value[logic], "_"))
  #* Cells absorbed by a row/column span render as empty strings
  part$value[part$rowspan == 0] <- ""
  part$value[part$colspan == 0] <- ""
  #* Set NA (missing) values to na_string, where an na_string was supplied
  logic <- is.na(part$value) & !is.na(part$na_string)
  part$value[logic] <-
    part$na_string[logic]
  #* Spread to wide format for printing
  .make_dataframe_wide(part)
} | /R/print_dust_markdown.R | no_license | cran/pixiedust | R | false | false | 5,030 | r | print_dust_markdown <- function(x, ..., asis=TRUE,
interactive = getOption("pixie_interactive"))
{
if (is.null(interactive)) interactive <- interactive()
if (!is.null(x$caption) & x$caption_number) increment_pixie_count()
caption_number_prefix <-
if (x$caption_number) sprintf("Table %s: ", get_pixie_count())
else ""
#* Determine the number of divisions
#* It looks more complicated than it is, but the gist of it is
#* total number of divisions: ceiling(total_rows / longtable_rows)
#* The insane looking data frame is just to make a reference of what rows
#* go in what division.
if (!is.numeric(x$longtable) & x$longtable) longtable_rows <- 25L
else if (!is.numeric(x$longtable) & !x$longtable) longtable_rows <- as.integer(max(x$body$row))
else longtable_rows <- as.integer(x$longtable)
Divisions <- data.frame(div_num = rep(1:ceiling(max(x$body$row) / longtable_rows),
each = longtable_rows)[1:max(x$body$row)],
row_num = 1:max(x$body$row))
total_div <- max(Divisions$div_num)
#* If the table is not being run interactively (ie, in an rmarkdown script)
#* detect the type of output. The spacing between tables is output-specific
if (!interactive){
output_type <- knitr::opts_knit$get('rmarkdown.pandoc.to')
linebreak <- if (is.null(output_type)) " "
else if (output_type == "html") "<br>"
else if (output_type == "latex") "\\ \\linebreak"
else " "
}
else linebreak <- " "
#* Format the table divisions
head <- part_prep_markdown(x$head)
body <- part_prep_markdown(x$body)
foot <- if (!is.null(x$foot)) part_prep_markdown(x$foot) else NULL
interfoot <- if (!is.null(x$interfoot)) part_prep_markdown(x$interfoot) else NULL
names(body) <- names(head) <- head[1, ]
if (!is.null(foot)) names(foot) <- names(head)
if (!is.null(interfoot)) names(interfoot) <- names(head)
subhead <- head[-1, ]
subhead <- lapply(subhead, function(v) paste0("**", v, "**")) %>%
as.data.frame(stringsAsFactors=FALSE)
numeric_classes <- c("numeric", "double", "int")
#* Determine the alignments. Alignments in 'knitr::kable' are assigned
#* by the first letter of the HTML alignment. If no alignment is
#* assigned, a default is chosen based on the variable type. Numerics
#* are aligned right, characters are aligned left.
alignments <- x$head[x$head$row == 1, ]
alignments <- alignments[c("row", "col", "halign", "col_class")]
alignments$halign <- ifelse(alignments$halign == "",
ifelse(alignments$col_class %in% numeric_classes,
"r",
"l"),
substr(alignments$halign, 1, 1))
#* Run a for loop to generate all the code.
#* Not the most efficient way to do this, probably, but
#* it's easy to read and understand.
tbl_code <- ""
for (i in 1:total_div){
tbl <- .rbind_internal(if (nrow(head) > 1) subhead else NULL,
body[Divisions$row_num[Divisions$div_num == i], ],
if (i == total_div) foot else interfoot)
tbl_code <- paste0(tbl_code,
paste(c("", "",
knitr::kable(tbl,
format = "markdown",
align = substr(alignments$halign, 1, 1)),
"\n", linebreak, "\n", linebreak, "\n"),
collapse = "\n"))
if (!is.null(x$caption))
tbl_code <- paste0(caption_number_prefix, x$caption, "\n", tbl_code)
}
if (asis) knitr::asis_output(tbl_code)
else tbl_code
}
#**** Helper functions
#* Prepare one table "part" (head/body/foot/interfoot) for markdown output:
#* rounding, literal replacement, bold/italic markup, span blanking, and
#* NA substitution, followed by a reshape to wide format.
part_prep_markdown <- function(part)
{
  num_types <- c("double", "numeric")
  part <- perform_function(part)
  #* Rounding: numeric cells with no explicit digits use getOption("digits")
  needs_default <- part$round == "" & part$col_class %in% num_types
  part$round[needs_default] <- getOption("digits")
  is_num <- part$col_class %in% num_types
  if (any(is_num)) {
    part$value[is_num] <-
      as.character(roundSafe(part$value[is_num], as.numeric(part$round[is_num])))
  }
  #* Literal replacement values take precedence over computed cell values
  has_replacement <- !is.na(part[["replace"]])
  part[["value"]][has_replacement] <- part[["replace"]][has_replacement]
  #* Markdown emphasis: wrap bold cells in ** and italic cells in _
  bold_mask <- part$bold
  part$value[bold_mask] <- paste0("**", part$value[bold_mask], "**")
  italic_mask <- part$italic
  part$value[italic_mask] <- paste0("_", part$value[italic_mask], "_")
  #* Cells swallowed by a row or column span become empty strings
  part$value[part$rowspan == 0] <- ""
  part$value[part$colspan == 0] <- ""
  #* Substitute na_string for missing cell values where one was supplied
  na_mask <- is.na(part$value) & !is.na(part$na_string)
  part$value[na_mask] <- part$na_string[na_mask]
  #* Reshape back to wide format for printing
  .make_dataframe_wide(part)
}
library(dplyr)
library(tidyr)
library(data.table)
#Download and unzip data set
if(!file.exists("./SamsungGalaxS.zip")){
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, "SamsungGalaxyS.zip")
unzip("SamsungGalaxyS.zip", exdir="SamsungGalaxyS") #creade data directory
}
#Read test and training data
x_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/subject_train.txt")
#Read activity names and feature names
activity <- read.table("./SamsungGalaxyS/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./SamsungGalaxyS/UCI HAR Dataset/features.txt")
#Combine test and traing data and add descriptive names
combine_data_subject <- rbind(subject_train, subject_test) #combine subject data
colnames(combine_data_subject) <- "SubjectNumber"
combine_data_x <- rbind(x_train, x_test) #combine feature data
colnames(combine_data_x) <- features$V2 #add feature names
combine_data_y <- rbind(y_train, y_test) #combine activity data
combine_data_y_activity <- merge(combine_data_y, activity, by="V1", sort=FALSE) #add activity names to data
colnames(combine_data_y_activity) <- c("ActivityNumber", "ACTIVITY")
combine_data <- cbind(combine_data_subject, combine_data_y_activity, combine_data_x) #combine all data
#Extract mean and standard deviation for each measurement
mean_std <- grep("*mean*|*std*", features[,2]) #determine column numbers with mean or std
extracted_data_prep <- combine_data[, 4:564]
extracted_data <- extracted_data_prep[,mean_std] #extract mean and standard deviation measurements
#Clean names
names(extracted_data) <- gsub("Acc", "Acelerometer", names(extracted_data))
names(extracted_data) <- gsub("Gyro", "Gyroscope", names(extracted_data))
names(extracted_data) <- gsub("BodyBody", "Body", names(extracted_data))
names(extracted_data) <- gsub("Mag", "Magnitude", names(extracted_data))
names(extracted_data) <- gsub("^t", "Time", names(extracted_data))
names(extracted_data) <- gsub("^f", "Frequency", names(extracted_data))
names(extracted_data) <- gsub("tBody", "TimeBody", names(extracted_data))
names(extracted_data) <- gsub("-mean()", "Mean", names(extracted_data))
names(extracted_data) <- gsub("-std()", "StandardDeviation", names(extracted_data))
names(extracted_data) <- gsub("-freq()", "Frequencey", names(extracted_data))
names(extracted_data) <- gsub("angle", "Angle", names(extracted_data))
names(extracted_data) <- gsub("gravity", "Gravity", names(extracted_data))
names(extracted_data) <- gsub("()", "", names(extracted_data), fixed=TRUE)
#Create tidy data set with mean of each activity and each subject
tidy_data <- rbind(extracted_data, means=colMeans(extracted_data))
write.table(tidy_data, file="TidyRunAnalysis.txt", row.name=FALSE)
| /run_analysis.R | no_license | yvettejanecek/Getting-and-Cleaning-Data | R | false | false | 3,168 | r | library(dplyr)
library(tidyr)
library(data.table)
#Download and unzip data set
if(!file.exists("./SamsungGalaxS.zip")){
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, "SamsungGalaxyS.zip")
unzip("SamsungGalaxyS.zip", exdir="SamsungGalaxyS") #creade data directory
}
#Read test and training data
x_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./SamsungGalaxyS/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./SamsungGalaxyS/UCI HAR Dataset/train/subject_train.txt")
#Read activity names and feature names
# Load the activity labels and feature names shipped with the UCI HAR data set.
activity <- read.table("./SamsungGalaxyS/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./SamsungGalaxyS/UCI HAR Dataset/features.txt")
# Combine test and training data and add descriptive names.
# (subject_train/subject_test, x_train/x_test and y_train/y_test are assumed
# to have been read earlier in this script -- TODO confirm.)
combine_data_subject <- rbind(subject_train, subject_test) # combine subject data
colnames(combine_data_subject) <- "SubjectNumber"
combine_data_x <- rbind(x_train, x_test) # combine feature data
colnames(combine_data_x) <- features$V2 # add feature names
combine_data_y <- rbind(y_train, y_test) # combine activity data
# BUG FIX: merge(..., sort = FALSE) does NOT preserve the original row order,
# so the activity labels could be misaligned with the subject and feature
# rows when cbind()-ed below. Use an order-preserving match() lookup instead.
combine_data_y_activity <- data.frame(
  ActivityNumber = combine_data_y$V1,
  ACTIVITY = activity$V2[match(combine_data_y$V1, activity$V1)]
)
combine_data <- cbind(combine_data_subject, combine_data_y_activity, combine_data_x) # combine all data
# Extract mean and standard deviation for each measurement.
# BUG FIX: "*mean*|*std*" was a glob-style pattern, not a regular expression;
# a plain alternation matches the intended feature names.
mean_std <- grep("mean|std", features[,2]) # column numbers with mean or std
extracted_data_prep <- combine_data[, 4:564] # the 561 feature columns
extracted_data <- extracted_data_prep[,mean_std] # extract mean and standard deviation measurements
# Clean names (FIX: "Acelerometer" and "Frequencey" were misspelled)
names(extracted_data) <- gsub("Acc", "Accelerometer", names(extracted_data))
names(extracted_data) <- gsub("Gyro", "Gyroscope", names(extracted_data))
names(extracted_data) <- gsub("BodyBody", "Body", names(extracted_data))
names(extracted_data) <- gsub("Mag", "Magnitude", names(extracted_data))
names(extracted_data) <- gsub("^t", "Time", names(extracted_data))
names(extracted_data) <- gsub("^f", "Frequency", names(extracted_data))
names(extracted_data) <- gsub("tBody", "TimeBody", names(extracted_data))
names(extracted_data) <- gsub("-mean()", "Mean", names(extracted_data))
names(extracted_data) <- gsub("-std()", "StandardDeviation", names(extracted_data))
names(extracted_data) <- gsub("-freq()", "Frequency", names(extracted_data))
names(extracted_data) <- gsub("angle", "Angle", names(extracted_data))
names(extracted_data) <- gsub("gravity", "Gravity", names(extracted_data))
names(extracted_data) <- gsub("()", "", names(extracted_data), fixed=TRUE)
# Create the tidy data set: the average of each measurement for each
# (subject, activity) pair.
# BUG FIX: the previous rbind(extracted_data, colMeans(...)) only appended a
# single overall-mean row, which does not match the stated goal of a mean
# per activity and per subject.
tidy_data <- aggregate(
  extracted_data,
  by = list(SubjectNumber = combine_data$SubjectNumber,
            ACTIVITY = combine_data$ACTIVITY),
  FUN = mean
)
write.table(tidy_data, file="TidyRunAnalysis.txt", row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LocalSearchFSFunctions.R
\name{GRASPFS}
\alias{GRASPFS}
\title{A GRASP algorithm for the permutative flowshop}
\usage{
GRASPFS(M, rcl = 4, iter = 100, op = "swap", opt = "HC", ...)
}
\arguments{
\item{M}{A matrix object storing the time to process task j (column) in machine i (row).}
\item{rcl}{the size of the restricted candidate list.}
\item{iter}{number of iterations.}
\item{op}{The neighbourhood operator for the local search heuristic. Presently are implemented \code{swap} (the default) and \code{insertion} neighbourhoods.}
\item{opt}{the local search algorithm to implement. Presently are supported \code{HC} (hill climbing with \link{HCFS}), \code{SA} (simulated annealing with \link{SAFS}) and \code{TS} (tabu search with \link{TSFS}).}
}
\value{
sol The obtained solution.
obj The makespan of the obtained solution.
}
\description{
A greedy randomized adaptive search procedure (GRASP) for the permutative flowshop. At each iteration, it generates a solution based on the Palmer heuristic. Instead of picking the best element, it picks an element randomly from a restricted candidate list, consisting of the first \code{rcl} elements of the Palmer ordering. Then, this solution is improved with a local search heuristic.
}
\examples{
set.seed(2020)
instance <- matrix(sample(10:90, 100, replace=TRUE), 5, 20)
GRASPFS(M=instance)
}
| /man/GRASPFS.Rd | no_license | jmsallan/combheuristics | R | false | true | 1,435 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LocalSearchFSFunctions.R
\name{GRASPFS}
\alias{GRASPFS}
\title{A GRASP algorithm for the permutative flowshop}
\usage{
GRASPFS(M, rcl = 4, iter = 100, op = "swap", opt = "HC", ...)
}
\arguments{
\item{M}{A matrix object storing the time to process task j (column) in machine i (row).}
\item{rcl}{the size of the restricted candidate list.}
\item{iter}{number of iterations.}
\item{op}{The neighbourhood operator for the local search heuristic. Presently are implemented \code{swap} (the default) and \code{insertion} neighbourhoods.}
\item{opt}{the local search algorithm to implement. Presently are supported \code{HC} (hill climbing with \link{HCFS}), \code{SA} (simulated annealing with \link{SAFS}) and \code{TS} (tabu search with \link{TSFS}).}
}
\value{
sol The obtained solution.
obj The makespan of the obtained solution.
}
\description{
A greedy randomized adaptive search procedure (GRASP) for the permutative flowshop. At each iteration, it generates a solution based on the Palmer heuristic. Instead of picking the best element, it picks an element randomly from a restricted candidate list, consisting of the first \code{rcl} elements of the Palmer ordering. Then, this solution is improved with a local search heuristic.
}
\examples{
set.seed(2020)
instance <- matrix(sample(10:90, 100, replace=TRUE), 5, 20)
GRASPFS(M=instance)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rClr-exported.R
\name{clrGetStaticFields}
\alias{clrGetStaticFields}
\title{Gets the static fields for a type}
\usage{
clrGetStaticFields(objOrType, contains = "")
}
\arguments{
\item{objOrType}{a CLR object, or type name, possibly namespace and assembly qualified type name, e.g. 'My.Namespace.MyClass,MyAssemblyName'.}
\item{contains}{a string that the property names returned must contain}
}
\description{
Gets the static fields for a type
}
| /man/clrGetStaticFields.Rd | no_license | dbremner/rclr | R | false | false | 533 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rClr-exported.R
\name{clrGetStaticFields}
\alias{clrGetStaticFields}
\title{Gets the static fields for a type}
\usage{
clrGetStaticFields(objOrType, contains = "")
}
\arguments{
\item{objOrType}{a CLR object, or type name, possibly namespace and assembly qualified type name, e.g. 'My.Namespace.MyClass,MyAssemblyName'.}
\item{contains}{a string that the property names returned must contain}
}
\description{
Gets the static fields for a type
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_mediation.R
\name{fit_mediation}
\alias{fit_mediation}
\alias{print.fit_mediation}
\alias{summary.reg_fit_mediation}
\alias{summary.cov_fit_mediation}
\title{(Robustly) fit a mediation model}
\usage{
fit_mediation(data, x, y, m, covariates = NULL,
method = c("regression", "covariance"), robust = TRUE,
median = FALSE, control, ...)
}
\arguments{
\item{data}{a data frame containing the variables.}
\item{x}{a character string, an integer or a logical vector specifying the
column of \code{data} containing the independent variable.}
\item{y}{a character string, an integer or a logical vector specifying the
column of \code{data} containing the dependent variable.}
\item{m}{a character, integer or logical vector specifying the columns of
\code{data} containing the hypothesized mediator variables.}
\item{covariates}{optional; a character, integer or logical vector
specifying the columns of \code{data} containing additional covariates to be
used as control variables.}
\item{method}{a character string specifying the method of
estimation. Possible values are \code{"regression"} (the default)
to estimate the effects via regressions, or \code{"covariance"} to
estimate the effects via the covariance matrix. Note that the effects are
always estimated via regressions if more than one hypothesized mediator is
supplied in \code{m}, or if control variables are specified via
\code{covariates}.}
\item{robust}{a logical indicating whether to robustly estimate the effects
(defaults to \code{TRUE}).}
\item{median}{a logical indicating if the effects should be estimated via
median regression (defaults to \code{FALSE}). This is ignored unless
\code{method} is \code{"regression"} and \code{robust} is \code{TRUE}.}
\item{control}{a list of tuning parameters for the corresponding robust
method. For robust regression (\code{method = "regression"},
\code{robust = TRUE} and \code{median = FALSE}), a list of tuning
parameters for \code{\link[robustbase]{lmrob}} as generated by
\code{\link{reg_control}}. For Huberized covariance matrix estimation
(\code{method = "covariance"} and \code{robust = TRUE}), a list of tuning
parameters for \code{\link{cov_Huber}} as generated by
\code{\link{cov_control}}. No tuning parameters are necessary for median
regression (\code{method = "regression"}, \code{robust = TRUE} and
\code{median = TRUE}).}
\item{\dots}{additional arguments can be used to specify tuning parameters
directly instead of via \code{control}.}
}
\value{
An object inheriting from class \code{"fit_mediation"} (class
\code{"reg_fit_mediation"} if \code{method} is \code{"regression"} or
\code{"cov_fit_mediation"} if \code{method} is \code{"covariance"}) with
the following components:
\item{a}{a numeric vector containing the point estimates of the effect of
the independent variable on the proposed mediator variables.}
\item{b}{a numeric vector containing the point estimates of the direct
effect of the proposed mediator variables on the dependent variable.}
\item{c}{numeric; the point estimate of the direct effect of the
independent variable on the dependent variable.}
\item{c_prime}{numeric; the point estimate of the total effect of the
independent variable on the dependent variable.}
\item{fit_mx}{an object of class \code{"\link[robustbase]{lmrob}"} or
\code{"\link[stats]{lm}"} containing the estimation results from the
regression of the proposed mediator variable on the independent variable, or
a list of such objects in case of more than one hypothesized mediator
(only \code{"reg_fit_mediation"}).}
\item{fit_ymx}{an object of class \code{"\link[robustbase]{lmrob}"} or
\code{"\link[stats]{lm}"} containing the estimation results from the
regression of the dependent variable on the proposed mediator and
independent variables (only \code{"reg_fit_mediation"}).}
\item{fit_yx}{an object of class \code{"\link[stats]{lm}"} containing the
estimation results from the regression of the dependent variable on the
independent variable (only \code{"reg_fit_mediation"} and if \code{robust}
is \code{FALSE}).}
\item{cov}{an object of class \code{"\link{cov_Huber}"} or
\code{"\link{cov_ML}"} containing the covariance matrix estimates
(only \code{"cov_fit_mediation"}).}
\item{x, y, m, covariates}{character vectors specifying the respective
variables used.}
\item{data}{a data frame containing the independent, dependent and
proposed mediator variables, as well as covariates.}
\item{robust}{a logical indicating whether the effects were estimated
robustly.}
\item{median}{a logical indicating whether the effects were estimated
via median regression (only \code{"reg_fit_mediation"}).}
\item{control}{a list of tuning parameters used (only if \code{robust} is
\code{TRUE}).}
}
\description{
(Robustly) estimate the effects in a mediation model.
}
\details{
If \code{method} is \code{"regression"}, \code{robust} is \code{TRUE} and
\code{median} is \code{FALSE} (the defaults), the effects are estimated via
robust regressions with \code{\link[robustbase]{lmrob}}.
If \code{method} is \code{"regression"}, \code{robust} is \code{TRUE} and
\code{median} is \code{TRUE}, the effects are estimated via median
regressions with \code{\link[quantreg]{rq}}. Unlike the robust regressions
above, median regressions are not robust against outliers in the explanatory
variables.
If \code{method} is \code{"covariance"} and \code{robust} is \code{TRUE},
the effects are estimated based on a Huber M-estimator of location and
scatter. Note that this covariance-based approach is less robust than the
approach based on robust regressions described above.
}
\examples{
data("BSG2014")
fit <- fit_mediation(BSG2014,
x = "ValueDiversity",
y = "TeamCommitment",
m = "TaskConflict")
test <- test_mediation(fit)
summary(test)
}
\references{
Alfons, A., Ates, N.Y. and Groenen, P.J.F. (2018) A robust bootstrap test
for mediation analysis. \emph{ERIM Report Series in Management}, Erasmus
Research Institute of Management. URL
\url{https://hdl.handle.net/1765/109594}.
Yuan, Y. and MacKinnon, D.P. (2014) Robust mediation analysis based on
median regression. \emph{Psychological Methods}, \bold{19}(1),
1--20.
Zu, J. and Yuan, K.-H. (2010) Local influence and robust procedures for
mediation analysis. \emph{Multivariate Behavioral Research}, \bold{45}(1),
1--44.
}
\seealso{
\code{\link{test_mediation}}
\code{\link[robustbase]{lmrob}}, \code{\link[stats]{lm}},
\code{\link{cov_Huber}}, \code{\link{cov_ML}}
}
\author{
Andreas Alfons
}
\keyword{multivariate}
| /man/fit_mediation.Rd | no_license | cnovoaneira/robmed | R | false | true | 6,640 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_mediation.R
\name{fit_mediation}
\alias{fit_mediation}
\alias{print.fit_mediation}
\alias{summary.reg_fit_mediation}
\alias{summary.cov_fit_mediation}
\title{(Robustly) fit a mediation model}
\usage{
fit_mediation(data, x, y, m, covariates = NULL,
method = c("regression", "covariance"), robust = TRUE,
median = FALSE, control, ...)
}
\arguments{
\item{data}{a data frame containing the variables.}
\item{x}{a character string, an integer or a logical vector specifying the
column of \code{data} containing the independent variable.}
\item{y}{a character string, an integer or a logical vector specifying the
column of \code{data} containing the dependent variable.}
\item{m}{a character, integer or logical vector specifying the columns of
\code{data} containing the hypothesized mediator variables.}
\item{covariates}{optional; a character, integer or logical vector
specifying the columns of \code{data} containing additional covariates to be
used as control variables.}
\item{method}{a character string specifying the method of
estimation. Possible values are \code{"regression"} (the default)
to estimate the effects via regressions, or \code{"covariance"} to
estimate the effects via the covariance matrix. Note that the effects are
always estimated via regressions if more than one hypothesized mediator is
supplied in \code{m}, or if control variables are specified via
\code{covariates}.}
\item{robust}{a logical indicating whether to robustly estimate the effects
(defaults to \code{TRUE}).}
\item{median}{a logical indicating if the effects should be estimated via
median regression (defaults to \code{FALSE}). This is ignored unless
\code{method} is \code{"regression"} and \code{robust} is \code{TRUE}.}
\item{control}{a list of tuning parameters for the corresponding robust
method. For robust regression (\code{method = "regression"},
\code{robust = TRUE} and \code{median = FALSE}), a list of tuning
parameters for \code{\link[robustbase]{lmrob}} as generated by
\code{\link{reg_control}}. For Huberized covariance matrix estimation
(\code{method = "covariance"} and \code{robust = TRUE}), a list of tuning
parameters for \code{\link{cov_Huber}} as generated by
\code{\link{cov_control}}. No tuning parameters are necessary for median
regression (\code{method = "regression"}, \code{robust = TRUE} and
\code{median = TRUE}).}
\item{\dots}{additional arguments can be used to specify tuning parameters
directly instead of via \code{control}.}
}
\value{
An object inheriting from class \code{"fit_mediation"} (class
\code{"reg_fit_mediation"} if \code{method} is \code{"regression"} or
\code{"cov_fit_mediation"} if \code{method} is \code{"covariance"}) with
the following components:
\item{a}{a numeric vector containing the point estimates of the effect of
the independent variable on the proposed mediator variables.}
\item{b}{a numeric vector containing the point estimates of the direct
effect of the proposed mediator variables on the dependent variable.}
\item{c}{numeric; the point estimate of the direct effect of the
independent variable on the dependent variable.}
\item{c_prime}{numeric; the point estimate of the total effect of the
independent variable on the dependent variable.}
\item{fit_mx}{an object of class \code{"\link[robustbase]{lmrob}"} or
\code{"\link[stats]{lm}"} containing the estimation results from the
regression of the proposed mediator variable on the independent variable, or
a list of such objects in case of more than one hypothesized mediator
(only \code{"reg_fit_mediation"}).}
\item{fit_ymx}{an object of class \code{"\link[robustbase]{lmrob}"} or
\code{"\link[stats]{lm}"} containing the estimation results from the
regression of the dependent variable on the proposed mediator and
independent variables (only \code{"reg_fit_mediation"}).}
\item{fit_yx}{an object of class \code{"\link[stats]{lm}"} containing the
estimation results from the regression of the dependent variable on the
independent variable (only \code{"reg_fit_mediation"} and if \code{robust}
is \code{FALSE}).}
\item{cov}{an object of class \code{"\link{cov_Huber}"} or
\code{"\link{cov_ML}"} containing the covariance matrix estimates
(only \code{"cov_fit_mediation"}).}
\item{x, y, m, covariates}{character vectors specifying the respective
variables used.}
\item{data}{a data frame containing the independent, dependent and
proposed mediator variables, as well as covariates.}
\item{robust}{a logical indicating whether the effects were estimated
robustly.}
\item{median}{a logical indicating whether the effects were estimated
via median regression (only \code{"reg_fit_mediation"}).}
\item{control}{a list of tuning parameters used (only if \code{robust} is
\code{TRUE}).}
}
\description{
(Robustly) estimate the effects in a mediation model.
}
\details{
If \code{method} is \code{"regression"}, \code{robust} is \code{TRUE} and
\code{median} is \code{FALSE} (the defaults), the effects are estimated via
robust regressions with \code{\link[robustbase]{lmrob}}.
If \code{method} is \code{"regression"}, \code{robust} is \code{TRUE} and
\code{median} is \code{TRUE}, the effects are estimated via median
regressions with \code{\link[quantreg]{rq}}. Unlike the robust regressions
above, median regressions are not robust against outliers in the explanatory
variables.
If \code{method} is \code{"covariance"} and \code{robust} is \code{TRUE},
the effects are estimated based on a Huber M-estimator of location and
scatter. Note that this covariance-based approach is less robust than the
approach based on robust regressions described above.
}
\examples{
data("BSG2014")
fit <- fit_mediation(BSG2014,
x = "ValueDiversity",
y = "TeamCommitment",
m = "TaskConflict")
test <- test_mediation(fit)
summary(test)
}
\references{
Alfons, A., Ates, N.Y. and Groenen, P.J.F. (2018) A robust bootstrap test
for mediation analysis. \emph{ERIM Report Series in Management}, Erasmus
Research Institute of Management. URL
\url{https://hdl.handle.net/1765/109594}.
Yuan, Y. and MacKinnon, D.P. (2014) Robust mediation analysis based on
median regression. \emph{Psychological Methods}, \bold{19}(1),
1--20.
Zu, J. and Yuan, K.-H. (2010) Local influence and robust procedures for
mediation analysis. \emph{Multivariate Behavioral Research}, \bold{45}(1),
1--44.
}
\seealso{
\code{\link{test_mediation}}
\code{\link[robustbase]{lmrob}}, \code{\link[stats]{lm}},
\code{\link{cov_Huber}}, \code{\link{cov_ML}}
}
\author{
Andreas Alfons
}
\keyword{multivariate}
|
# Train and evaluate models whose hyper-parameters are already set.
# The evaluation score is the mean metric value over the CV folds.
# df.model_with_param: list of parameterised models (list of parsnip::model_spec)
# df.cv: cross-validation data (rsample::vfold_cv)
# metrics: evaluation metric (default: Accuracy)
#
# NOTE(review): this function reads the globals `recipe` and
# `df.grid.params` instead of receiving them as arguments, and the model
# formula is hard-coded to `Species ~ .` -- confirm the caller defines all
# of these before calling.
# NOTE(review): library() inside a function attaches packages as a side
# effect; tidyr::spread() is superseded by pivot_wider().
train_and_evaluate <- function(df.model_with_param, df.cv, metrics = yardstick::accuracy) {
  library(tidyverse)
  library(tidymodels)
  df.scores <- df.model_with_param %>%
    # Loop over each hyper-parameter combination
    purrr::map(function(model.applied) {
      # Loop over each cross-validation split
      # (`model = model.applied` is forwarded through purrr's `...` into .f)
      purrr::map(df.cv$splits, model = model.applied, function(df.split, model) {
        # Build the preprocessed train/test data sets from the global recipe
        df.train <- recipe %>%
          recipes::prep() %>%
          recipes::bake(rsample::analysis(df.split))
        df.test <- recipe %>%
          recipes::prep() %>%
          recipes::bake(rsample::assessment(df.split))
        model %>%
          # Fit the model
          {
            model <- (.)
            parsnip::fit(model, Species ~ ., df.train)
          } %>%
          # Predict with the fitted model on both partitions
          {
            fit <- (.)
            list(
              train = predict(fit, df.train, type = "class")[[1]],
              test = predict(fit, df.test, type = "class")[[1]]
            )
          } %>%
          # Evaluate
          {
            lst.predicted <- (.)
            # Evaluate the model on the training data
            df.result.train <- df.train %>%
              dplyr::mutate(
                predicted = lst.predicted$train
              ) %>%
              metrics(truth = Species, estimate = predicted) %>%
              dplyr::select(-.estimator) %>%
              dplyr::mutate(
                .metric = stringr::str_c("train", .metric, sep = "_")
              ) %>%
              tidyr::spread(key = .metric, value = .estimate)
            # Evaluate the model on the test data
            df.result.test <- df.test %>%
              dplyr::mutate(
                predicted = lst.predicted$test
              ) %>%
              metrics(truth = Species, estimate = predicted) %>%
              dplyr::select(-.estimator) %>%
              dplyr::mutate(
                .metric = stringr::str_c("test", .metric, sep = "_")
              ) %>%
              tidyr::spread(key = .metric, value = .estimate)
            dplyr::bind_cols(
              df.result.train,
              df.result.test
            )
          }
      }) %>%
        # Use the mean over all CV splits as the evaluation score
        purrr::reduce(dplyr::bind_rows) %>%
        dplyr::summarise_all(mean)
    }) %>%
    # Stack the scores for all parameter combinations
    purrr::reduce(dplyr::bind_rows)
  # Bind the scores to the (global) parameter grid
  df.grid.params %>%
    dplyr::bind_cols(df.scores)
}
| /train_and_evaluate.R | no_license | you1025/train_and_evaluate | R | false | false | 2,926 | r | # パラメータ設定済みのモデルを訓練&評価
# CV 分割ごとの平均値を評価スコアとする
# df.model_with_param: パラメータ設定済みモデルの一覧(parsnip::model_spec のリスト)
# df.cv: クロスバリデーション用データ(rsample::vfold_cv)
# metrics: 評価指標(デフォルトは Accuracy)
# Train and evaluate models whose hyper-parameters are already set; the
# score is the mean metric value over the CV folds.
#
# NOTE(review): reads the globals `recipe` and `df.grid.params` and
# hard-codes the formula `Species ~ .` -- confirm the caller defines these.
# NOTE(review): library() inside a function attaches packages as a side
# effect; tidyr::spread() is superseded by pivot_wider().
train_and_evaluate <- function(df.model_with_param, df.cv, metrics = yardstick::accuracy) {
  library(tidyverse)
  library(tidymodels)
  df.scores <- df.model_with_param %>%
    # Loop over each hyper-parameter combination
    purrr::map(function(model.applied) {
      # Loop over each cross-validation split
      # (`model = model.applied` is forwarded through purrr's `...` into .f)
      purrr::map(df.cv$splits, model = model.applied, function(df.split, model) {
        # Build the preprocessed train/test data sets from the global recipe
        df.train <- recipe %>%
          recipes::prep() %>%
          recipes::bake(rsample::analysis(df.split))
        df.test <- recipe %>%
          recipes::prep() %>%
          recipes::bake(rsample::assessment(df.split))
        model %>%
          # Fit the model
          {
            model <- (.)
            parsnip::fit(model, Species ~ ., df.train)
          } %>%
          # Predict with the fitted model on both partitions
          {
            fit <- (.)
            list(
              train = predict(fit, df.train, type = "class")[[1]],
              test = predict(fit, df.test, type = "class")[[1]]
            )
          } %>%
          # Evaluate
          {
            lst.predicted <- (.)
            # Evaluate the model on the training data
            df.result.train <- df.train %>%
              dplyr::mutate(
                predicted = lst.predicted$train
              ) %>%
              metrics(truth = Species, estimate = predicted) %>%
              dplyr::select(-.estimator) %>%
              dplyr::mutate(
                .metric = stringr::str_c("train", .metric, sep = "_")
              ) %>%
              tidyr::spread(key = .metric, value = .estimate)
            # Evaluate the model on the test data
            df.result.test <- df.test %>%
              dplyr::mutate(
                predicted = lst.predicted$test
              ) %>%
              metrics(truth = Species, estimate = predicted) %>%
              dplyr::select(-.estimator) %>%
              dplyr::mutate(
                .metric = stringr::str_c("test", .metric, sep = "_")
              ) %>%
              tidyr::spread(key = .metric, value = .estimate)
            dplyr::bind_cols(
              df.result.train,
              df.result.test
            )
          }
      }) %>%
        # Use the mean over all CV splits as the evaluation score
        purrr::reduce(dplyr::bind_rows) %>%
        dplyr::summarise_all(mean)
    }) %>%
    # Stack the scores for all parameter combinations
    purrr::reduce(dplyr::bind_rows)
  # Bind the scores to the (global) parameter grid
  df.grid.params %>%
    dplyr::bind_cols(df.scores)
}
|
# The first function, makeCacheMatrix creates a special "matrix", which is really a # list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Wrap a matrix in a caching container.
#
# Returns a list of four accessor closures that share one enclosing
# environment: set/get for the matrix itself and setinverse/getinverse for
# its cached inverse. Replacing the matrix via set() clears the cached
# inverse so a stale result can never be returned.
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        replace_matrix <- function(y) {
                x <<- y
                cached_inverse <<- NULL  # stale inverse must not survive a new matrix
        }
        list(
                set = replace_matrix,
                get = function() x,
                setinverse = function(inverse) cached_inverse <<- inverse,
                getinverse = function() cached_inverse
        )
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# It assumes that the matrix is always invertible.
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# If the inverse has already been computed it is retrieved from the cache;
# otherwise it is computed with solve(), stored via setinverse(), and
# returned. Assumes the underlying matrix is always invertible.
#
# x   : a cache-matrix object as returned by makeCacheMatrix()
# ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
        inv <- x$getinverse()
        if(!is.null(inv)) {
                message("Getting cached data.")
                return(inv)
        }
        data <- x$get()
        # BUG FIX: forward '...' to solve() so extra arguments are not
        # silently ignored (the signature advertises them).
        inv <- solve(data, ...)
        x$setinverse(inv)
        message("First time run, no cached data.")
        inv
}
# Sample run: a 3x3 invertible matrix
x = rbind(c(1, 2, -2), c(2, 1, -1), c(2, -1, -2))
x
m = makeCacheMatrix(x)
m
m$get()
# No cache in the first run (inverse is computed and stored)
cacheSolve(m)
# Retrieving from the cache in the second run
cacheSolve(m)
| /cachematrix.R | no_license | huberyzhou/ProgrammingAssignment2 | R | false | false | 1,328 | r | # The first function, makeCacheMatrix creates a special "matrix", which is really a # list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
#
# makeCacheMatrix() wraps a matrix in a list of four closures that get/set
# the matrix and get/set its cached inverse; setting a new matrix
# invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL  # cached inverse; NULL until computed
        set <- function(y) {
                x <<- y
                inv <<- NULL  # invalidate the cache when the matrix changes
        }
        get <- function() x
        setinverse <- function(inverse) inv <<- inverse
        getinverse <- function() inv
        list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# It assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
        inv <- x$getinverse()
        if(!is.null(inv)) {
                message("Getting cached data.")
                return(inv)
        }
        data <- x$get()
        # NOTE(review): '...' is accepted but not forwarded to solve(), so
        # extra arguments are silently ignored.
        inv <- solve(data)
        x$setinverse(inv)
        message("First time run, no cached data.")
        inv
}
# Sample run: a 3x3 invertible matrix
x = rbind(c(1, 2, -2), c(2, 1, -1), c(2, -1, -2))
x
m = makeCacheMatrix(x)
m
m$get()
# No cache in the first run (inverse is computed and stored)
cacheSolve(m)
# Retrieving from the cache in the second run
cacheSolve(m)
|
#' @rdname ConsRegArima
#' @export
# Default ConsRegArima method: fits a linear regression with ARMA errors
# under coefficient constraints. Either estimates the coefficients from
# scratch (via the package-internal estimationArima) or reuses those of a
# previously fitted model (`model_fit`), then builds the state-space
# representation, estimates the innovation variance, computes AIC/BIC/AICc
# and attaches fitted values, residuals and accuracy metrics.
#
# NOTE(review): depends on package-internal helpers (estimationArima,
# ArimaTransf, ArimaMatrix, ArimaEstimation, getFittedArima) and on the
# 'forecast' package; their contracts are inferred from usage here.
ConsRegArima.default <- function(x, y, order,
                                 seasonal, optimizer,
                                 LOWER = NULL, UPPER = NULL, penalty = 1000,
                                 constraints = NULL,
                                 ini.pars.coef, model_fit = NULL,
                                 ...){
  fit = list()
  # Estimate coefficients unless a previously fitted model is supplied.
  if(is.null(model_fit)){
    fit <- estimationArima(x = x, y = y, order = order,
                           seasonal,
                           optimizer = optimizer,
                           LOWER = LOWER, UPPER = UPPER, penalty = penalty,
                           constraints = constraints,
                           ini.pars.coef = ini.pars.coef,
                           ...)
    coef = fit$coefficients
    arma <- fit$arma
  }else{
    # Reuse coefficients, order and ARMA structure from the supplied fit.
    coef = model_fit$coefficients
    fit$coefficients = coef
    order = model_fit$order
    arma <- model_fit$arma
    fit$arma = arma
  }
  # Regression coefficients come first in `coef`; ARMA coefficients follow.
  coef_arma = coef[(ncol(x)+1):length(coef)]
  coef_reg = coef[0:ncol(x)]  # index 0 is silently dropped by R, so this is 1:ncol(x)
  n = length(y)
  SSinit = "Gardner1980"  # state-space initialization method for makeARIMA
  kappa = 1e+06  # prior variance of the diffuse part (as in stats::arima)
  trarma <- ArimaTransf(coef_arma, arma)
  model <- stats::makeARIMA(trarma[[1L]], trarma[[2L]], numeric(0), kappa,
                            SSinit)
  # NOTE(review): return value discarded -- ArimaMatrix presumably updates
  # the state-space object in place; confirm against the package source.
  ArimaMatrix(y - x %*% coef_reg, 0L, TRUE, model$phi,
              model$theta, model$Delta, model$a,
              model$P, model$Pn)
  # Appears to be the AR polynomial span: p + period * seasonal P -- confirm.
  max.order = arma[1] + arma[5] * arma[3]
  val <- ArimaEstimation(y - x %*% coef_reg,
                         arma, trarma[[1L]], trarma[[2L]],
                         max.order, T)  # NOTE(review): prefer TRUE over T
  sigma2 <- val[[1L]]  # innovation variance estimate
  npar = length(coef)
  # Information criteria from the Gaussian likelihood approximation.
  fit$aic <- n * log(sigma2) + 2 * npar
  fit$bic <- fit$aic + npar * (log(n) - 2)
  fit$aicc <- fit$aic + 2 * npar * (npar + 1) /(n - npar - 1)
  fit$model = model
  fit$sigma2 = sigma2
  fit$n.used = n
  fit$order = order
  # Fitted values split into regression and ARIMA components.
  tmp = getFittedArima(object = fit, x = x, y = y)
  fit$fitted = tmp$fitted
  fit$metrics = forecast::accuracy(fit$fitted, y)
  fit$residuals = tmp$residuals
  fit$fitted_regression = tmp$fitted_regression
  fit$fitted_arima = tmp$fitted_arima
  fit$call = match.call()
  class(fit) = 'ConsRegArima'
  return(fit)
}
| /R/ConsRegArima_default.R | no_license | puigjos/ConsReg | R | false | false | 2,170 | r |
#' @rdname ConsRegArima
#' @export
ConsRegArima.default <- function(x, y, order,
seasonal, optimizer,
LOWER = NULL, UPPER = NULL, penalty = 1000,
constraints = NULL,
ini.pars.coef, model_fit = NULL,
...){
fit = list()
if(is.null(model_fit)){
fit <- estimationArima(x = x, y = y, order = order,
seasonal,
optimizer = optimizer,
LOWER = LOWER, UPPER = UPPER, penalty = penalty,
constraints = constraints,
ini.pars.coef = ini.pars.coef,
...)
coef = fit$coefficients
arma <- fit$arma
}else{
coef = model_fit$coefficients
fit$coefficients = coef
order = model_fit$order
arma <- model_fit$arma
fit$arma = arma
}
coef_arma = coef[(ncol(x)+1):length(coef)]
coef_reg = coef[0:ncol(x)]
n = length(y)
SSinit = "Gardner1980"
kappa = 1e+06
trarma <- ArimaTransf(coef_arma, arma)
model <- stats::makeARIMA(trarma[[1L]], trarma[[2L]], numeric(0), kappa,
SSinit)
ArimaMatrix(y - x %*% coef_reg, 0L, TRUE, model$phi,
model$theta, model$Delta, model$a,
model$P, model$Pn)
max.order = arma[1] + arma[5] * arma[3]
val <- ArimaEstimation(y - x %*% coef_reg,
arma, trarma[[1L]], trarma[[2L]],
max.order, T)
sigma2 <- val[[1L]]
npar = length(coef)
fit$aic <- n * log(sigma2) + 2 * npar
fit$bic <- fit$aic + npar * (log(n) - 2)
fit$aicc <- fit$aic + 2 * npar * (npar + 1) /(n - npar - 1)
fit$model = model
fit$sigma2 = sigma2
fit$n.used = n
fit$order = order
tmp = getFittedArima(object = fit, x = x, y = y)
fit$fitted = tmp$fitted
fit$metrics = forecast::accuracy(fit$fitted, y)
fit$residuals = tmp$residuals
fit$fitted_regression = tmp$fitted_regression
fit$fitted_arima = tmp$fitted_arima
fit$call = match.call()
class(fit) = 'ConsRegArima'
return(fit)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{GroupParameter}
\alias{GroupParameter}
\title{GroupParameter}
\usage{
GroupParameter(name, items, id = NULL, default = NULL,
optional = FALSE, include_in_default = TRUE)
}
\arguments{
\item{name}{Human-readable name for the parameter, must match argument of scoring function}
\item{items}{List of parameters to be included in group (can be empty).}
\item{id}{(optional) Unique id for parameter (no spaces allowed)}
\item{default}{(optional) Initial value for parameter (must be present in the choices list). If not defined, it will be the first item from the list.}
\item{optional}{(optional) Whether the parameter can be omitted in a Configuration}
\item{include_in_default}{(optional) Whether an optional parameter will be included in the default Configuration}
}
\description{
A set of parameters, all of which will be included in each Configuration (unless any of them are optional).
}
| /man/GroupParameter.Rd | permissive | MindFoundry/optaas-r-client | R | false | true | 994 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{GroupParameter}
\alias{GroupParameter}
\title{GroupParameter}
\usage{
GroupParameter(name, items, id = NULL, default = NULL,
optional = FALSE, include_in_default = TRUE)
}
\arguments{
\item{name}{Human-readable name for the parameter, must match argument of scoring function}
\item{items}{List of parameters to be included in group (can be empty).}
\item{id}{(optional) Unique id for parameter (no spaces allowed)}
\item{default}{(optional) Initial value for parameter (must be present in the choices list). If not defined, it will be the first item from the list.}
\item{optional}{(optional) Whether the parameter can be omitted in a Configuration}
\item{include_in_default}{(optional) Whether an optional parameter will be included in the default Configuration}
}
\description{
A set of parameters, all of which will be included in each Configuration (unless any of them are optional).
}
|
# ui.R
# Shiny UI for the "LO%Own3d DFS" app: a sidebar of player filters (sport,
# team, position, salary / projection / KJD ranges, number of lineups) and
# a main panel with player-pool, lineup and KJD tabs.
#
# NOTE(review): assumes the `players` data frame and the shiny / DT
# packages are made available elsewhere (e.g. in global.R) -- confirm.
library(rhandsontable)
shinyUI(fluidPage(
  titlePanel("LO%Own3d \"DFS\" App"),
  sidebarLayout(
    sidebarPanel(width = 3,
      # Filter controls for the player pool
      selectInput("Sport", label = ("Sport"),
        choices = list("Football", "Basketball", "Baseball", "Soccer", "LOL" ), selected = "Football"),
      selectInput("Team", "Team:", c("All",unique(as.character(players$Team) ))),
      selectInput("Pos", "Position:", c("All",unique(as.character(players$Pos) ))),
      sliderInput("Salary",
        label = ("Salary"),
        min = 2000, max = 12000, step = 100,value = c(2000, 12000)
      ),
      sliderInput("Projection", label = ("Projection"),
        min = 0, max = 50, value = c(1, 50), step = 1),
      sliderInput("KJD", label = ("KJD"),
        min = -5, max = 20, value = c(-5, 20), step = 0.1),
      sliderInput("NoLU", label = ("Number of Lineups"),
        min = 0, max = 10, value = 5)
      # Disabled controls kept for reference:
      #radioButtons("contest", label = ("Draftkings Contest"),
      # choices = list("Main" = 1, "Turbo" = 2, "Late" = 3), selected = 1),
      #submitButton("Solve for Best Lineups")
    ),
    mainPanel(
      #img(src='backdrop.jpg', align = "right"),
      #textOutput("text1"),
      # Create a new row for the table.
      tabsetPanel(
        id = 'dataset',
        tabPanel('Player Pool', fluidRow(DT::dataTableOutput("table")), textOutput("text2")),
        tabPanel('Lineups', fluidRow(DT::dataTableOutput("table2")),
          img(src="Lu1.PNG"),
          img(src="Lu2.PNG")),
        tabPanel('KJD', rHandsontableOutput("table3"))
      ),
      textOutput("text1")
    )
  )
)) | /ui.R | no_license | sangamc/_lowOwned | R | false | false | 1,733 | r | # ui.R
# Shiny UI for the "LO%Own3d DFS" app: a sidebar of player filters (sport,
# team, position, salary / projection / KJD ranges, number of lineups) and
# a main panel with player-pool, lineup and KJD tabs.
#
# NOTE(review): assumes the `players` data frame and the shiny / DT
# packages are made available elsewhere (e.g. in global.R) -- confirm.
library(rhandsontable)
shinyUI(fluidPage(
  titlePanel("LO%Own3d \"DFS\" App"),
  sidebarLayout(
    sidebarPanel(width = 3,
      # Filter controls for the player pool
      selectInput("Sport", label = ("Sport"),
        choices = list("Football", "Basketball", "Baseball", "Soccer", "LOL" ), selected = "Football"),
      selectInput("Team", "Team:", c("All",unique(as.character(players$Team) ))),
      selectInput("Pos", "Position:", c("All",unique(as.character(players$Pos) ))),
      sliderInput("Salary",
        label = ("Salary"),
        min = 2000, max = 12000, step = 100,value = c(2000, 12000)
      ),
      sliderInput("Projection", label = ("Projection"),
        min = 0, max = 50, value = c(1, 50), step = 1),
      sliderInput("KJD", label = ("KJD"),
        min = -5, max = 20, value = c(-5, 20), step = 0.1),
      sliderInput("NoLU", label = ("Number of Lineups"),
        min = 0, max = 10, value = 5)
      # Disabled controls kept for reference:
      #radioButtons("contest", label = ("Draftkings Contest"),
      # choices = list("Main" = 1, "Turbo" = 2, "Late" = 3), selected = 1),
      #submitButton("Solve for Best Lineups")
    ),
    mainPanel(
      #img(src='backdrop.jpg', align = "right"),
      #textOutput("text1"),
      # Create a new row for the table.
      tabsetPanel(
        id = 'dataset',
        tabPanel('Player Pool', fluidRow(DT::dataTableOutput("table")), textOutput("text2")),
        tabPanel('Lineups', fluidRow(DT::dataTableOutput("table2")),
          img(src="Lu1.PNG"),
          img(src="Lu2.PNG")),
        tabPanel('KJD', rHandsontableOutput("table3"))
      ),
      textOutput("text1")
    )
  )
))
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{jiebaR}
\alias{jiebaR}
\alias{jiebaR-package}
\title{A package for Chinese text segmentation}
\description{
This is a package for Chinese text segmentation, keyword extraction
and speech tagging with Rcpp and cppjieba. JiebaR supports four
types of segmentation mode: Maximum Probability, Hidden Markov Model,
Query Segment and Mix Segment.
}
\details{
You can use custom
dictionary to be included in the jiebaR default dictionary. JiebaR can
also identify new words, but adding your own new words will ensure a higher
accuracy.
}
\examples{
### Note: Can not display Chinese character here.
words = "hello world"
test1 = worker()
test1 <= words
\dontrun{
test <= "./temp.txt"
engine2 = worker("hmm")
engine2 <= "./temp.txt"
engine2$write = T
engine2 <= "./temp.txt"
engine3 = worker(type = "mix", dict = "dict_path",symbol = T)
engine3 <= "./temp.txt"
}
### Keyword Extraction
keys = worker("keywords", topn = 1)
keys <= words
### Speech Tagging
tagger = worker("tag")
tagger <= words
### Simhash
simhasher = worker("simhash", topn = 1)
simhasher <= words
distance("hello world" , "hello world!" , simhasher)
ShowDictPath()
}
\author{
Qin Wenfeng <\url{http://qinwenfeng.com}>
}
\references{
CppJieba \url{https://github.com/aszxqw/cppjieba};
}
\seealso{
JiebaR \url{https://github.com/qinwf/jiebaR};
}
| /man/jiebaR.Rd | permissive | wertion/jiebaR | R | false | false | 1,451 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{jiebaR}
\alias{jiebaR}
\alias{jiebaR-package}
\title{A package for Chinese text segmentation}
\description{
This is a package for Chinese text segmentation, keyword extraction
and speech tagging with Rcpp and cppjieba. JiebaR supports four
types of segmentation mode: Maximum Probability, Hidden Markov Model,
Query Segment and Mix Segment.
}
\details{
You can use custom
dictionary to be included in the jiebaR default dictionary. JiebaR can
also identify new words, but adding your own new words will ensure a higher
accuracy.
}
\examples{
### Note: Can not display Chinese character here.
words = "hello world"
test1 = worker()
test1 <= words
\dontrun{
test <= "./temp.txt"
engine2 = worker("hmm")
engine2 <= "./temp.txt"
engine2$write = T
engine2 <= "./temp.txt"
engine3 = worker(type = "mix", dict = "dict_path",symbol = T)
engine3 <= "./temp.txt"
}
### Keyword Extraction
keys = worker("keywords", topn = 1)
keys <= words
### Speech Tagging
tagger = worker("tag")
tagger <= words
### Simhash
simhasher = worker("simhash", topn = 1)
simhasher <= words
distance("hello world" , "hello world!" , simhasher)
ShowDictPath()
}
\author{
Qin Wenfeng <\url{http://qinwenfeng.com}>
}
\references{
CppJieba \url{https://github.com/aszxqw/cppjieba};
}
\seealso{
JiebaR \url{https://github.com/qinwf/jiebaR};
}
|
# ----------------------------------------------------------- #
# Install relevant packages (if not already done)
# ----------------------------------------------------------- #
Packages <- c("mlbench", "caret", "elasticnet", "klaR",
"xtable", "tikzDevice")
# install.packages(Packages)
# ----------------------------------------------------------- #
# Load relevant packages
# ----------------------------------------------------------- #
lapply(Packages, library, character.only = TRUE)
source("utilities.R")
# ----------------------------------------------------------- #
# Load data files
# ----------------------------------------------------------- #
allDataFiles <- c("HFfullImp", "HFfullOutcomes")
lapply(gsub(" ", "", paste("data_files/", allDataFiles,
".Rdat")), load,.GlobalEnv)
# ----------------------------------------------------------- #
# Add cross validation configuration
# ----------------------------------------------------------- #
kfold <- trainControl(method = "cv", number = 10)
seed <- 0123456789
metric <- "Accuracy"
preProcess <- "pca"
# ----------------------------------------------------------- #
# Train and evaluate the classification algorithms with kfold
# ----------------------------------------------------------- #
dataset <- HFfullImp[,-1]
mortality <- HFfullOutcomes[,3]
readmission <- HFfullOutcomes[,4]
# ----------------------------------------------------------- #
# Mortality
# ----------------------------------------------------------- #
# kfold CV evaluation of classifiers
# ----------------------------------------------------------- #
set.seed(seed)
fitKnnKfoldMort <- train(dataset, mortality, method="knn",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitLLKfoldMort <- train(dataset, mortality, method = "glm",
metric=metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitLDAKfoldMort <- train(dataset, mortality, method = "lda",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitNbKfoldMort <- train(dataset, mortality, method = "nb",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitSvmKfoldMort <- train(dataset, mortality,method="svmRadial",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitRfKfoldMort <- train(dataset, mortality, method="rf",
metric = metric, trControl = kfold,
preProcess = preProcess)
# ----------------------------------------------------------- #
# Produce summary statistics and plots
# ----------------------------------------------------------- #
# Kfold CV
# ----------------------------------------------------------- #
resultsMortalityKfold <- resamples(list(knn = fitKnnKfoldMort,
logr = fitLLKfoldMort,
lda = fitLDAKfoldMort,
nb = fitNbKfoldMort,
svm = fitSvmKfoldMort,
rf = fitRfKfoldMort))
xtable(summary(resultsMortalityKfold)$statistics$Accuracy,
digits = 3)
xtable(summary(resultsMortalityKfold)$statistics$Kappa,
digits = 3)
pathToImages <- "../../../doc/thesis/images/"
tikz(file=paste(pathToImages,"classificationMortality.tex",
sep = ""), height = 5.5, standAlone = F)
dotplot(resultsMortalityKfold, main = "Mortality")
dev.off()
# ----------------------------------------------------------- #
# Readmission
# ----------------------------------------------------------- #
# kfold CV evaluation of classifiers
# ----------------------------------------------------------- #
set.seed(seed)
fitKnnKfoldReadm <- train(dataset, readmission, method="knn",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitLLKfoldReadm <- train(dataset, readmission, method = "glm",
metric=metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitLDAKfoldReadm <- train(dataset, readmission,
method = "lda", metric = metric,
trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitNbKfoldReadm <- train(dataset, readmission, method = "nb",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitSvmKfoldReadm <- train(dataset, readmission,
method="svmRadial", metric=metric,
trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitRfKfoldReadm <- train(dataset, readmission, method="rf",
metric = metric, trControl = kfold,
preProcess = preProcess)
# ----------------------------------------------------------- #
# Produce summary statistics and plots
# ----------------------------------------------------------- #
# Kfold CV
# ----------------------------------------------------------- #
resultsReadmKfold <- resamples(list(knn = fitKnnKfoldReadm,
lda = fitLDAKfoldReadm,
nb = fitNbKfoldReadm,
logr = fitLLKfoldReadm,
svm = fitSvmKfoldReadm,
rf = fitRfKfoldReadm))
xtable(summary(resultsReadmKfold)$statistics$Accuracy,
digits = 3)
xtable(summary(resultsReadmKfold)$statistics$Kappa,
digits = 3)
pathToImages <- "../../../doc/thesis/images/"
tikz(file=paste(pathToImages,"classificationReadmission.tex",
sep = ""), height = 5.5, standAlone = F)
dotplot(resultsReadmKfold, main = "Re-admission")
dev.off()
# ----------------------------------------------------------- # | /data/data_sets/source/classification.R | no_license | seemir/bias | R | false | false | 6,212 | r | # ----------------------------------------------------------- #
# Install relevant packages (if not already done)
# ----------------------------------------------------------- #
Packages <- c("mlbench", "caret", "elasticnet", "klaR",
"xtable", "tikzDevice")
# install.packages(Packages)
# ----------------------------------------------------------- #
# Load relevant packages
# ----------------------------------------------------------- #
lapply(Packages, library, character.only = TRUE)
source("utilities.R")
# ----------------------------------------------------------- #
# Load data files
# ----------------------------------------------------------- #
allDataFiles <- c("HFfullImp", "HFfullOutcomes")
lapply(gsub(" ", "", paste("data_files/", allDataFiles,
".Rdat")), load,.GlobalEnv)
# ----------------------------------------------------------- #
# Add cross validation configuration
# ----------------------------------------------------------- #
kfold <- trainControl(method = "cv", number = 10)
seed <- 0123456789
metric <- "Accuracy"
preProcess <- "pca"
# ----------------------------------------------------------- #
# Train and evaluate the classification algorithms with kfold
# ----------------------------------------------------------- #
dataset <- HFfullImp[,-1]
mortality <- HFfullOutcomes[,3]
readmission <- HFfullOutcomes[,4]
# ----------------------------------------------------------- #
# Mortality
# ----------------------------------------------------------- #
# kfold CV evaluation of classifiers
# ----------------------------------------------------------- #
set.seed(seed)
fitKnnKfoldMort <- train(dataset, mortality, method="knn",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitLLKfoldMort <- train(dataset, mortality, method = "glm",
metric=metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitLDAKfoldMort <- train(dataset, mortality, method = "lda",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitNbKfoldMort <- train(dataset, mortality, method = "nb",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitSvmKfoldMort <- train(dataset, mortality,method="svmRadial",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitRfKfoldMort <- train(dataset, mortality, method="rf",
metric = metric, trControl = kfold,
preProcess = preProcess)
# ----------------------------------------------------------- #
# Produce summary statistics and plots
# ----------------------------------------------------------- #
# Kfold CV
# ----------------------------------------------------------- #
resultsMortalityKfold <- resamples(list(knn = fitKnnKfoldMort,
logr = fitLLKfoldMort,
lda = fitLDAKfoldMort,
nb = fitNbKfoldMort,
svm = fitSvmKfoldMort,
rf = fitRfKfoldMort))
xtable(summary(resultsMortalityKfold)$statistics$Accuracy,
digits = 3)
xtable(summary(resultsMortalityKfold)$statistics$Kappa,
digits = 3)
pathToImages <- "../../../doc/thesis/images/"
tikz(file=paste(pathToImages,"classificationMortality.tex",
sep = ""), height = 5.5, standAlone = F)
dotplot(resultsMortalityKfold, main = "Mortality")
dev.off()
# ----------------------------------------------------------- #
# Readmission
# ----------------------------------------------------------- #
# kfold CV evaluation of classifiers
# ----------------------------------------------------------- #
set.seed(seed)
fitKnnKfoldReadm <- train(dataset, readmission, method="knn",
metric=metric, trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitLLKfoldReadm <- train(dataset, readmission, method = "glm",
metric=metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitLDAKfoldReadm <- train(dataset, readmission,
method = "lda", metric = metric,
trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitNbKfoldReadm <- train(dataset, readmission, method = "nb",
metric = metric, trControl = kfold,
preProcess = preProcess)
set.seed(seed)
fitSvmKfoldReadm <- train(dataset, readmission,
method="svmRadial", metric=metric,
trControl=kfold,
preProcess = preProcess)
set.seed(seed)
fitRfKfoldReadm <- train(dataset, readmission, method="rf",
metric = metric, trControl = kfold,
preProcess = preProcess)
# ----------------------------------------------------------- #
# Produce summary statistics and plots
# ----------------------------------------------------------- #
# Kfold CV
# ----------------------------------------------------------- #
resultsReadmKfold <- resamples(list(knn = fitKnnKfoldReadm,
lda = fitLDAKfoldReadm,
nb = fitNbKfoldReadm,
logr = fitLLKfoldReadm,
svm = fitSvmKfoldReadm,
rf = fitRfKfoldReadm))
xtable(summary(resultsReadmKfold)$statistics$Accuracy,
digits = 3)
xtable(summary(resultsReadmKfold)$statistics$Kappa,
digits = 3)
pathToImages <- "../../../doc/thesis/images/"
tikz(file=paste(pathToImages,"classificationReadmission.tex",
sep = ""), height = 5.5, standAlone = F)
dotplot(resultsReadmKfold, main = "Re-admission")
dev.off()
# ----------------------------------------------------------- # |
library(gbRd)
### Name: Rdo_args2txt_list
### Title: Extract the descriptions of the arguments of a function
### Aliases: Rdo_args2txt_list
### Keywords: Rd
### ** Examples
# each arg always gets an individual entry in the list;
# compare:
Rdo_args2txt_list("seq", c("from", "to", "by"))
# to:
cat(Rdo_args2txt("seq", c("from", "to", "by")))
| /data/genthat_extracted_code/gbRd/examples/Rdo_args2txt_list.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 350 | r | library(gbRd)
### Name: Rdo_args2txt_list
### Title: Extract the descriptions of the arguments of a function
### Aliases: Rdo_args2txt_list
### Keywords: Rd
### ** Examples
# each arg always gets an individual entry in the list;
# compare:
Rdo_args2txt_list("seq", c("from", "to", "by"))
# to:
cat(Rdo_args2txt("seq", c("from", "to", "by")))
|
# Expression QTL analysis in expression genetic data
############################################################
# 1. install some packages
############################################################
install.packages("qtl")
install.packages("lineup")
############################################################
# 2. Download data
############################################################
## The data are at https://phenome.jax.org/projects/Attie1
zipurl <- "https://phenomedoc.jax.org/QTL_Archive/attie_2015/Attie_2015_eqtl_clean.zip"
dir_for_data <- "Attie_data"
zipfile <- file.path(dir_for_data, "Attie_2015_eqtl_clean.zip")
# check if directory exists; if not, create it
if(!dir.exists(dir_for_data))
dir.create(dir_for_data)
download.file(zipurl, zipfile) # about 913 MB
unzipped <- unzip(zipfile, exdir=dir_for_data) # about 2.6 GB expanded
## data gets placed in "Clean" subdirectory
data_dir <- file.path(dir_for_data, "Clean")
############################################################
# 3. load data
############################################################
## annotation file
library(data.table)
data_dir="./Attie_data/Clean"
annot <- fread(file.path(data_dir, "microarray_annot.csv"), data.table=FALSE)
# "a_gene_id" is the main probe identifier
# "chr", "pos.cM", and "pos.Mb" are the genomic positions
## QTL cross
library(qtl)
f2g <- read.cross("csv", data_dir, "genotypes_clean.csv",
genotypes=c("BB", "BR", "RR"), alleles=c("B", "R"))
f2g <- jittermap(f2g) # avoid having markers at exactly the same location
## load the islet expression data
islet <- fread(file.path(data_dir, "islet_mlratio_clean.csv"), header=TRUE, data.table=FALSE)
# make first column (mouse IDs) the row names
rownames(islet) <- islet[,1]
islet <- islet[,-1]
# 491 rows (the mice) and 40572 columns (the microarray probes)
############################################################
# 4. keep only probes that have genomic positions
# and are on an autosome (1-19)
############################################################
probeindex=which(!is.na(annot$pos.cM) & annot$chr!="X")
probes2keep=as.character(annot$a_gene_id[probeindex])
# 36364 probes kept
# subset the islet data to just these probes
islet1 <- islet[,probes2keep]
# probe location in cM
probeloc <- data.frame(chr=annot$chr[probeindex],
pos=annot$pos.cM[probeindex])
rownames(probeloc) <- probes2keep
############################################################
# 5. calculate conditional QTL genotype probabilities
############################################################
f2g <- calc.genoprob(f2g, step=0.5, error.prob=0.002, map.function="c-f")
# probabilities are now embedded inside f2g
# f2g$geno[[6]]$prob is a 3d array for chr 6, mouse x position x genotype
#pdf("datasummary_islet.pdf")
plot(f2g)
#dev.off()
############################################################
# 6. find pseudomarker nearest each gene
############################################################
library(lineup)
pmar <- find.gene.pseudomarker(f2g, pull.map(f2g), probeloc)
# doing all this with cM rather than Mbp
# some genes quite far from any marker, but we can ignore this for now
############################################################
# 7. calculate a and d for each sex in islet
############################################################
n=length(probes2keep)
result=matrix(0,nrow=n,ncol=6)
adjrsq=NULL
rsq=NULL
fit=NULL
for(i in 1:n) #36364
{
probe=probes2keep[i]
chr <- as.character(pmar[probe, "chr"])
this_pmar <- pmar[probe, "pmark"]
# probabilities are embedded in
pr <- f2g$geno[[chr]]$prob[,this_pmar,] # 544 x 3 matrix
# put IDs as row names
rownames(pr) <- f2g$pheno$MouseNum
# sex of the mice ("Male" and "Female")
sex <- f2g$pheno$Sex
# lineup the mice in the genotype data and the islet data
# (function in R/lineup package)
id <- findCommonID(rownames(pr), rownames(islet))
# subset the two; also subset sex
pr <- pr[id$first,]
islet <- islet[id$second,]
sex <- sex[id$first]
# the expression data for this particular probe
y <- islet[,probe]
# calculate X matrix; can leave out the intercept
x <- cbind(a = (pr[,3] - pr[,1])/2,
d = pr[,2] - (pr[,1] + pr[,3])/2)
# estimate a and d in females and males separately
lm_fem <- lm(y ~ x, subset=(sex=="Female"))
lm_mal <- lm(y ~ x, subset=(sex=="Male"))
# results in a vector
result[i,] <- c(a_fem=lm_fem$coef[2],
d_fem=lm_fem$coef[3],
sig_fem=summary(lm_fem)$sigma,
a_mal=lm_mal$coef[2],
d_mal=lm_mal$coef[3],
sig_mal=summary(lm_mal)$sigma)
adjrsq[i]=summary(lm_fem)$adj.r.squared
rsq[i]=summary(lm_mal)$r.squared
fit[[2*i-1]]=lm_fem
fit[[2*i]]=lm_mal
}
rownames(result)=probes2keep
colnames(result)=c("a_fem","d_fem","sig_fem","a_mal","d_mal","sig_mal")
############################################################
# 7. some histograms plots to explore a vs d for each sex in islet
############################################################
par(mfrow=c(1,2),pty = "s")
#pdf("Histogram_islet.pdf")
hist(result[,1],breaks=300,main="Histogram of Additive effect in female",xlab="Additive effect in female",cex.main=0.8)
rug(result[,1])
hist(result[,2],breaks=300,main="Histogram of Dominance effect in female",xlab="Dominance effect in female",cex.main=0.8)
rug(result[,2])
hist(result[,4],breaks=300,main="Histogram of Additive effect in male",xlab="Additive effect in male",cex.main=0.8)
rug(result[,4])
hist(result[,5],breaks=300,main="Histogram of Dominance effect in male",xlab="Dominance effect in male",cex.main=0.8)
rug(result[,5])
hist(result[,1]/result[,3],breaks=300,main="Histogram of a/sig female")
rug(result[,1]/result[,3],cex.main=0.8)
hist(result[,2]/result[,3],breaks=300,main="Histogram of d/sig female")
rug(result[,2]/result[,3],cex.main=0.8)
hist(result[,4]/result[,6],breaks=300,main="Histogram of a/sig male")
rug(result[,4]/result[,6],cex.main=0.8)
hist(result[,5]/result[,6],breaks=300,main="Histogram of d/sig male")
rug(result[,5]/result[,6],cex.main=0.8)
############################################################
# 8. plot a vs d for each sex in islet
############################################################
#plot a vs d
#pdf("Scatterplot_islet.pdf",height=6,width=10)
par(mfrow=c(1,2),pty = "s")
plot(result[,1],result[,2],pch=16,cex=0.5,xlab="Additive effect in female",ylab="Dominance effect in female",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
abline(0,-1)
plot(result[,4],result[,5],pch=16,cex=0.5,xlab="Additive effect in male",ylab="Dominance effect in male",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
abline(0,-1)
plot(result[,1],result[,4],pch=16,cex=0.5,xlab="Additive effect in female",ylab="Additive effect in male",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
plot(result[,2],result[,5],pch=16,cex=0.5,xlab="Dominance effect in female",ylab="Dominance effect in male",ylim=c(-0.5,0.5),xlim=c(-0.5,0.5))
abline(0,1)
plot(result[,1]/result[,3],result[,2]/result[,1],pch=16,cex=0.5,xlab="a/sig female",ylab="d/a female")
abline(0,1)
abline(0,-1)
plot(result[,4]/result[,6],result[,5]/result[,4],pch=16,cex=0.5,xlab="a/sig male",ylab="d/a male")
abline(0,1)
abline(0,-1)
#dev.off()
| /DataAnalysis.R | no_license | shiluzhang/eQTL-analysis | R | false | false | 7,251 | r |
# Expression QTL analysis in expression genetic data
############################################################
# 1. install some packages
############################################################
install.packages("qtl")
install.packages("lineup")
############################################################
# 2. Download data
############################################################
## The data are at https://phenome.jax.org/projects/Attie1
zipurl <- "https://phenomedoc.jax.org/QTL_Archive/attie_2015/Attie_2015_eqtl_clean.zip"
dir_for_data <- "Attie_data"
zipfile <- file.path(dir_for_data, "Attie_2015_eqtl_clean.zip")
# check if directory exists; if not, create it
if(!dir.exists(dir_for_data))
dir.create(dir_for_data)
download.file(zipurl, zipfile) # about 913 MB
unzipped <- unzip(zipfile, exdir=dir_for_data) # about 2.6 GB expanded
## data gets placed in "Clean" subdirectory
data_dir <- file.path(dir_for_data, "Clean")
############################################################
# 3. load data
############################################################
## annotation file
library(data.table)
data_dir="./Attie_data/Clean"
annot <- fread(file.path(data_dir, "microarray_annot.csv"), data.table=FALSE)
# "a_gene_id" is the main probe identifier
# "chr", "pos.cM", and "pos.Mb" are the genomic positions
## QTL cross
library(qtl)
f2g <- read.cross("csv", data_dir, "genotypes_clean.csv",
genotypes=c("BB", "BR", "RR"), alleles=c("B", "R"))
f2g <- jittermap(f2g) # avoid having markers at exactly the same location
## load the islet expression data
islet <- fread(file.path(data_dir, "islet_mlratio_clean.csv"), header=TRUE, data.table=FALSE)
# make first column (mouse IDs) the row names
rownames(islet) <- islet[,1]
islet <- islet[,-1]
# 491 rows (the mice) and 40572 columns (the microarray probes)
############################################################
# 4. keep only probes that have genomic positions
# and are on an autosome (1-19)
############################################################
probeindex=which(!is.na(annot$pos.cM) & annot$chr!="X")
probes2keep=as.character(annot$a_gene_id[probeindex])
# 36364 probes kept
# subset the islet data to just these probes
islet1 <- islet[,probes2keep]
# probe location in cM
probeloc <- data.frame(chr=annot$chr[probeindex],
pos=annot$pos.cM[probeindex])
rownames(probeloc) <- probes2keep
############################################################
# 5. calculate conditional QTL genotype probabilities
############################################################
f2g <- calc.genoprob(f2g, step=0.5, error.prob=0.002, map.function="c-f")
# probabilities are now embedded inside f2g
# f2g$geno[[6]]$prob is a 3d array for chr 6, mouse x position x genotype
#pdf("datasummary_islet.pdf")
plot(f2g)
#dev.off()
############################################################
# 6. find pseudomarker nearest each gene
############################################################
library(lineup)
pmar <- find.gene.pseudomarker(f2g, pull.map(f2g), probeloc)
# doing all this with cM rather than Mbp
# some genes quite far from any marker, but we can ignore this for now
############################################################
# 7. calculate a and d for each sex in islet
############################################################
n=length(probes2keep)
result=matrix(0,nrow=n,ncol=6)
adjrsq=NULL
rsq=NULL
fit=NULL
for(i in 1:n) #36364
{
probe=probes2keep[i]
chr <- as.character(pmar[probe, "chr"])
this_pmar <- pmar[probe, "pmark"]
# probabilities are embedded in
pr <- f2g$geno[[chr]]$prob[,this_pmar,] # 544 x 3 matrix
# put IDs as row names
rownames(pr) <- f2g$pheno$MouseNum
# sex of the mice ("Male" and "Female")
sex <- f2g$pheno$Sex
# lineup the mice in the genotype data and the islet data
# (function in R/lineup package)
id <- findCommonID(rownames(pr), rownames(islet))
# subset the two; also subset sex
pr <- pr[id$first,]
islet <- islet[id$second,]
sex <- sex[id$first]
# the expression data for this particular probe
y <- islet[,probe]
# calculate X matrix; can leave out the intercept
x <- cbind(a = (pr[,3] - pr[,1])/2,
d = pr[,2] - (pr[,1] + pr[,3])/2)
# estimate a and d in females and males separately
lm_fem <- lm(y ~ x, subset=(sex=="Female"))
lm_mal <- lm(y ~ x, subset=(sex=="Male"))
# results in a vector
result[i,] <- c(a_fem=lm_fem$coef[2],
d_fem=lm_fem$coef[3],
sig_fem=summary(lm_fem)$sigma,
a_mal=lm_mal$coef[2],
d_mal=lm_mal$coef[3],
sig_mal=summary(lm_mal)$sigma)
adjrsq[i]=summary(lm_fem)$adj.r.squared
rsq[i]=summary(lm_mal)$r.squared
fit[[2*i-1]]=lm_fem
fit[[2*i]]=lm_mal
}
rownames(result)=probes2keep
colnames(result)=c("a_fem","d_fem","sig_fem","a_mal","d_mal","sig_mal")
############################################################
# 7. some histograms plots to explore a vs d for each sex in islet
############################################################
par(mfrow=c(1,2),pty = "s")
#pdf("Histogram_islet.pdf")
hist(result[,1],breaks=300,main="Histogram of Additive effect in female",xlab="Additive effect in female",cex.main=0.8)
rug(result[,1])
hist(result[,2],breaks=300,main="Histogram of Dominance effect in female",xlab="Dominance effect in female",cex.main=0.8)
rug(result[,2])
hist(result[,4],breaks=300,main="Histogram of Additive effect in male",xlab="Additive effect in male",cex.main=0.8)
rug(result[,4])
hist(result[,5],breaks=300,main="Histogram of Dominance effect in male",xlab="Dominance effect in male",cex.main=0.8)
rug(result[,5])
hist(result[,1]/result[,3],breaks=300,main="Histogram of a/sig female")
rug(result[,1]/result[,3],cex.main=0.8)
hist(result[,2]/result[,3],breaks=300,main="Histogram of d/sig female")
rug(result[,2]/result[,3],cex.main=0.8)
hist(result[,4]/result[,6],breaks=300,main="Histogram of a/sig male")
rug(result[,4]/result[,6],cex.main=0.8)
hist(result[,5]/result[,6],breaks=300,main="Histogram of d/sig male")
rug(result[,5]/result[,6],cex.main=0.8)
############################################################
# 8. plot a vs d for each sex in islet
############################################################
#plot a vs d
#pdf("Scatterplot_islet.pdf",height=6,width=10)
par(mfrow=c(1,2),pty = "s")
plot(result[,1],result[,2],pch=16,cex=0.5,xlab="Additive effect in female",ylab="Dominance effect in female",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
abline(0,-1)
plot(result[,4],result[,5],pch=16,cex=0.5,xlab="Additive effect in male",ylab="Dominance effect in male",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
abline(0,-1)
plot(result[,1],result[,4],pch=16,cex=0.5,xlab="Additive effect in female",ylab="Additive effect in male",ylim=c(-1,1),xlim=c(-1,1))
abline(0,1)
plot(result[,2],result[,5],pch=16,cex=0.5,xlab="Dominance effect in female",ylab="Dominance effect in male",ylim=c(-0.5,0.5),xlim=c(-0.5,0.5))
abline(0,1)
plot(result[,1]/result[,3],result[,2]/result[,1],pch=16,cex=0.5,xlab="a/sig female",ylab="d/a female")
abline(0,1)
abline(0,-1)
plot(result[,4]/result[,6],result[,5]/result[,4],pch=16,cex=0.5,xlab="a/sig male",ylab="d/a male")
abline(0,1)
abline(0,-1)
#dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coeff_lasso.R
\name{lasso_f}
\alias{lasso_f}
\title{LASSO penalty term}
\usage{
lasso_f(lambda, beta)
}
\description{
LASSO penalty term
}
| /man/lasso_f.Rd | no_license | mcmtroffaes/bootlasso | R | false | true | 217 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coeff_lasso.R
\name{lasso_f}
\alias{lasso_f}
\title{LASSO penalty term}
\usage{
lasso_f(lambda, beta)
}
\description{
LASSO penalty term
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger.R
\name{logger.setLevel}
\alias{logger.setLevel}
\title{Configure logging level.}
\usage{
logger.setLevel(level)
}
\arguments{
\item{level}{the level of the message (ALL, DEBUG, INFO, WARN, ERROR, OFF)}
}
\description{
This will configure the logger level. This allows to turn DEBUG, INFO,
WARN and ERROR messages on and off.
}
\examples{
\dontrun{
logger.setLevel('DEBUG')
}
}
\author{
Rob Kooper
}
| /utils/man/logger.setLevel.Rd | permissive | yogeshdarji/pecan | R | false | true | 486 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger.R
\name{logger.setLevel}
\alias{logger.setLevel}
\title{Configure logging level.}
\usage{
logger.setLevel(level)
}
\arguments{
\item{level}{the level of the message (ALL, DEBUG, INFO, WARN, ERROR, OFF)}
}
\description{
This will configure the logger level. This allows to turn DEBUG, INFO,
WARN and ERROR messages on and off.
}
\examples{
\dontrun{
logger.setLevel('DEBUG')
}
}
\author{
Rob Kooper
}
|
#install.packages("devtools", dependencies = TRUE, repos = "http://cran.us.r-project.org")
library(devtools)
#install_github("bmckuw/UWbe536")
library(ggplot2)
library(lmtest)
library(beeswarm)
library(data.table)
library(tidyverse)
library(UWbe536)
#setwd("C:/Users/arthur/Desktop/AU2019/Biost 536")
#data <- read_rds("Project536-2019.rds")
link = "https://github.com/dmccoomes/Biostats_536/raw/master/Final%20project/Project536-2019.rds"
data <- readRDS(url(link))
#set up smoking status variable & BMI
#data$BMI <- ((data$weight)^2) / data$height
#Converting height to meters and weight to kilograms, then constructing BMI
data$height_m <- data$height/100
data$weight_kg <- data$weight*0.453592
data$BMI <- data$weight_kg / (data$height_m)^2
data$smkst <- ifelse(data$packyrs>0 & data$yrsquit ==0, 2, ifelse(data$packyrs>0&data$yrsquit>0,1,0))
#blood test and blood pressure indicator
data$abldl <- ifelse(data$ldl > 150, 1, 0)
data$abglu <- ifelse(data$glu <70 | data$glu >105, 1, 0)
data$abalb <- ifelse(data$alb <2.4 | data$alb > 5.4, 1, 0)
data$abfib <- ifelse(data$fib <150 | data$fib >350, 1, 0)
data$abcrt <- ifelse(data$crt <0.7 | data$crt >1.3, 1, 0)
data$abwbc <- ifelse(data$wbc <4.5 | data$wbc >11, 1, 0)
data$abplt <- ifelse(data$plt <150 | data$plt >350, 1, 0)
data$hbp <- ifelse(data$sbp > 120 & data$dbp >80, 1, 0)
#data$badblood <-ifelse(as.numeric(apply(data[,25:30], 1, sum)) > 2, 1, 0)
#DMC - Using column names in case we need to add some more variables
data$badblood <- ifelse(as.numeric(apply(data[,c("abldl", "abglu", "abalb", "abfib", "abcrt", "abwbc", "abplt", "hbp")], 1, sum)) > 2, 1, 0)
# --- Table 1: descriptive statistics by case status --------------------------
#Generating summary statistics
# Age: distribution by case status plus two-sample t-test
summary(data$age[data$case==1])
summary(data$age[data$case==0])
t.test(age ~ case, data=data)
# Age category indicators (65-74, 75-84, 85+)
data$age_65_74 <- ifelse(data$age > 64 & data$age<75, 1, 0)
data$age_75_84 <- ifelse(data$age > 74 & data$age<85, 1, 0)
data$age_85 <- ifelse(data$age > 84, 1, 0)
# summary() of a 0/1 indicator: the mean is the proportion in that category
summary(data$age_65_74[data$case==1])
summary(data$age_75_84[data$case==1])
summary(data$age_85[data$case==1])
summary(data$age_65_74[data$case==0])
summary(data$age_75_84[data$case==0])
summary(data$age_85[data$case==0])
# Sex
summary(data$male[data$case==1])
summary(data$male[data$case==0])
t.test(male ~ case, data=data)
# Race
summary(data$nonwhite[data$case==1])
summary(data$nonwhite[data$case==0])
# Education indicators (0 = no high school, 1 = high school, 2 = college)
data$educ_nohs <- ifelse(data$educ ==0, 1, 0)
data$educ_hs <- ifelse(data$educ ==1, 1, 0)
data$educ_coll <- ifelse(data$educ ==2, 1, 0)
summary(data$educ_nohs[data$case==1])
summary(data$educ_nohs[data$case==0])
summary(data$educ_hs[data$case==1])
summary(data$educ_hs[data$case==0])
summary(data$educ_coll[data$case==1])
summary(data$educ_coll[data$case==0])
# Alcohol consumption
summary(data$gmalcoh[data$case==1])
summary(data$gmalcoh[data$case==0])
t.test(gmalcoh ~ case, data=data)
# Smoking status indicators from smkst (2 = current, 1 = former, 0 = never)
data$smkr_curr <- ifelse(data$smkst ==2, 1, 0)
data$smkr_form <- ifelse(data$smkst ==1, 1, 0)
data$smkr_nev <- ifelse(data$smkst ==0, 1, 0)
summary(data$smkr_curr[data$case==1])
summary(data$smkr_curr[data$case==0])
summary(data$smkr_form[data$case==1])
summary(data$smkr_form[data$case==0])
summary(data$smkr_nev[data$case==1])
summary(data$smkr_nev[data$case==0])
# Pack-years of smoking
summary(data$packyrs[data$case==1])
summary(data$packyrs[data$case==0])
t.test(packyrs ~ case, data=data)
# BMI
summary(data$BMI[data$case==1])
summary(data$BMI[data$case==0])
t.test(BMI ~ case, data=data)
# "Bad blood" composite indicator
summary(data$badblood[data$case==1])
summary(data$badblood[data$case==0])
t.test(badblood ~ case, data=data)
# set up linear spline variables (knots at dsst = 20, 40, 60, 80) plus
# quadratic/cubic terms and squared spline terms for a quadratic spline
data <- data %>%
  mutate(s1 = dsst,
         s2 = (dsst-20)*(dsst>20),
         s3 = (dsst-40)*(dsst>40),
         s4 = (dsst-60)*(dsst>60),
         s5 = (dsst-80)*(dsst>80),
         dsst2 = dsst^2,
         dsst3 = dsst*dsst2,
         s12 = s1*s1,
         s22 = s2*s2,
         s32 = s3*s3,
         s42 = s4*s4,
         s52 = s5*s5)
# Crude (unadjusted) logistic models: linear, quadratic, cubic,
# quadratic spline, and linear spline in dsst
crmd.l <- glm(case~dsst,data=data, family= binomial)
crmd.lnq <- glm(case~dsst + dsst2,data =data, family = binomial)
#DMC adding in cubic and quadratic spline
crmd.lncu <- glm(case~dsst + dsst2 + dsst3, data=data, family = binomial)
crmd.spq <- glm(case~s1 + s12 + s22 + s32 + s42 + s52, data=data, family=binomial)
crmd.sp <- glm(case~s1+s2+s3+s4+s5, data =data, family = binomial)
# lincom() (UWbe536): linear combinations of model coefficients
lincom(crmd.l)
lincom(crmd.lnq,c("40*dsst+4000*dsst2==0"))
lincom(crmd.sp,c("40*s1+40*s2+30*s3+10*s4==0"))
# NOTE(review): empty combination spec below -- presumably a placeholder;
# confirm the intended contrast for the cubic model.
lincom(crmd.lncu,c())
#Setting up models with confounder adjustment
cfmd.l <- glm(case~dsst+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
              data=data,family = binomial)
cfmd.lnq <- glm(case~dsst+dsst2+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
                data=data,family=binomial)
cfmd.sp <- glm(case~s1+s2+s3+s4+s5+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
               data=data,family=binomial)
lincom(cfmd.l)
lincom(cfmd.lnq,c("40*dsst+4000*dsst2==0"))
lincom(cfmd.sp,c("40*s1+40*s2+30*s3+10*s4==0"))
# Effect-modification models: dsst terms interacted with age and alcohol
emmd.l <- glm(case~dsst+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood+dsst:age+dsst:gmalcoh,
              data=data,family = binomial)
emmd.lnq <- glm(case~dsst+dsst2+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood+dsst:age+dsst:gmalcoh
                +dsst2:age+dsst2:gmalcoh,
                data=data,family=binomial)
emmd.sp <- glm(case~s1+s2+s3+s4+s5+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood
               +s1:age+s1:gmalcoh+s2:age+s2:gmalcoh
               +s3:age+s3:gmalcoh+s4:age+s4:gmalcoh
               +s5:age+s5:gmalcoh,
               data=data, family = binomial)
summary(emmd.sp)
lincom(emmd.l,c("dsst+dsst:age+dsst:gmalcoh==0"))
lincom(emmd.lnq,c("40*dsst+40*dsst:age+40*dsst:gmalcoh+4000*dsst2+4000*dsst2:age+4000*dsst2:gmalcoh==0"))
# --- Model comparison: likelihood-ratio tests on nested glms -----------------
#Compare models
#Comparing base models linear, quadratic, and spline
anova(crmd.l, crmd.sp, test="LRT") #linear and spline
#p-value = 0.2711 : no difference
anova(crmd.l, crmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.1176 : no difference
anova(crmd.sp, crmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.4382 : no difference
#Comparing full models linear, quadratic, and spline
anova(cfmd.l, cfmd.sp, test="LRT") #linear and spline
#p-value = 0.113 : no difference
anova(cfmd.l, cfmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.05309 : no difference, but real close
anova(cfmd.sp, cfmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.2922 : no difference
#Comparing full models with interactions linear, spline, and quadratic
anova(emmd.l, emmd.sp, test="LRT") #linear and spline
#p-value = 0.09784 : no difference
anova(emmd.l, emmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.2687 : no difference
#Can also do a Wald's test since they're nested
waldtest(emmd.l, emmd.lnq, test="Chisq")
#p-value = 0.2571 : no difference
anova(emmd.sp, emmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.09819 : no difference
#Deletion diagnostics
#plots (exploratory scatter/beeswarm plots of key covariates)
with(data, plot(age, gmalcoh))
qplot(age, gmalcoh, data=data, color=case)
qplot(gmalcoh, packyrs, data=data, color=educ)
beeswarm(data$gmalcoh, vertical=FALSE, method="square")
# NOTE(review): plot() on a glm object with a second positional argument
# ('age') is unusual -- plot.lm does not take a covariate this way; confirm
# what these two calls were meant to display.
with(data, plot(crmd.l, age))
with(data, plot(crmd.lnq, age))
| /Final project/main_analysis.R | no_license | dmccoomes/Biostats_536 | R | false | false | 7,275 | r |
#install.packages("devtools", dependencies = TRUE, repos = "http://cran.us.r-project.org")
library(devtools)
#install_github("bmckuw/UWbe536")
library(ggplot2)
library(lmtest)
library(beeswarm)
library(data.table)
library(tidyverse)
library(UWbe536)
#setwd("C:/Users/arthur/Desktop/AU2019/Biost 536")
#data <- read_rds("Project536-2019.rds")
link = "https://github.com/dmccoomes/Biostats_536/raw/master/Final%20project/Project536-2019.rds"
data <- readRDS(url(link))
#set up smoking status variable & BMI
#data$BMI <- ((data$weight)^2) / data$height
#Converting height to meters and weight to kilograms, then constructing BMI
data$height_m <- data$height/100
data$weight_kg <- data$weight*0.453592
data$BMI <- data$weight_kg / (data$height_m)^2
data$smkst <- ifelse(data$packyrs>0 & data$yrsquit ==0, 2, ifelse(data$packyrs>0&data$yrsquit>0,1,0))
#blood test and blood pressure indicator
data$abldl <- ifelse(data$ldl > 150, 1, 0)
data$abglu <- ifelse(data$glu <70 | data$glu >105, 1, 0)
data$abalb <- ifelse(data$alb <2.4 | data$alb > 5.4, 1, 0)
data$abfib <- ifelse(data$fib <150 | data$fib >350, 1, 0)
data$abcrt <- ifelse(data$crt <0.7 | data$crt >1.3, 1, 0)
data$abwbc <- ifelse(data$wbc <4.5 | data$wbc >11, 1, 0)
data$abplt <- ifelse(data$plt <150 | data$plt >350, 1, 0)
data$hbp <- ifelse(data$sbp > 120 & data$dbp >80, 1, 0)
#data$badblood <-ifelse(as.numeric(apply(data[,25:30], 1, sum)) > 2, 1, 0)
#DMC - Using column names in case we need to add some more variables
data$badblood <- ifelse(as.numeric(apply(data[,c("abldl", "abglu", "abalb", "abfib", "abcrt", "abwbc", "abplt", "hbp")], 1, sum)) > 2, 1, 0)
#Generating summary statistics
summary(data$age[data$case==1])
summary(data$age[data$case==0])
t.test(age ~ case, data=data)
data$age_65_74 <- ifelse(data$age > 64 & data$age<75, 1, 0)
data$age_75_84 <- ifelse(data$age > 74 & data$age<85, 1, 0)
data$age_85 <- ifelse(data$age > 84, 1, 0)
summary(data$age_65_74[data$case==1])
summary(data$age_75_84[data$case==1])
summary(data$age_85[data$case==1])
summary(data$age_65_74[data$case==0])
summary(data$age_75_84[data$case==0])
summary(data$age_85[data$case==0])
summary(data$male[data$case==1])
summary(data$male[data$case==0])
t.test(male ~ case, data=data)
summary(data$nonwhite[data$case==1])
summary(data$nonwhite[data$case==0])
data$educ_nohs <- ifelse(data$educ ==0, 1, 0)
data$educ_hs <- ifelse(data$educ ==1, 1, 0)
data$educ_coll <- ifelse(data$educ ==2, 1, 0)
summary(data$educ_nohs[data$case==1])
summary(data$educ_nohs[data$case==0])
summary(data$educ_hs[data$case==1])
summary(data$educ_hs[data$case==0])
summary(data$educ_coll[data$case==1])
summary(data$educ_coll[data$case==0])
summary(data$gmalcoh[data$case==1])
summary(data$gmalcoh[data$case==0])
t.test(gmalcoh ~ case, data=data)
data$smkr_curr <- ifelse(data$smkst ==2, 1, 0)
data$smkr_form <- ifelse(data$smkst ==1, 1, 0)
data$smkr_nev <- ifelse(data$smkst ==0, 1, 0)
summary(data$smkr_curr[data$case==1])
summary(data$smkr_curr[data$case==0])
summary(data$smkr_form[data$case==1])
summary(data$smkr_form[data$case==0])
summary(data$smkr_nev[data$case==1])
summary(data$smkr_nev[data$case==0])
summary(data$packyrs[data$case==1])
summary(data$packyrs[data$case==0])
t.test(packyrs ~ case, data=data)
summary(data$BMI[data$case==1])
summary(data$BMI[data$case==0])
t.test(BMI ~ case, data=data)
summary(data$badblood[data$case==1])
summary(data$badblood[data$case==0])
t.test(badblood ~ case, data=data)
# set up linear spline varibale
data <- data %>%
mutate(s1 = dsst,
s2 = (dsst-20)*(dsst>20),
s3 = (dsst-40)*(dsst>40),
s4 = (dsst-60)*(dsst>60),
s5 = (dsst-80)*(dsst>80),
dsst2 = dsst^2,
dsst3 = dsst*dsst2,
s12 = s1*s1,
s22 = s2*s2,
s32 = s3*s3,
s42 = s4*s4,
s52 = s5*s5)
crmd.l <- glm(case~dsst,data=data, family= binomial)
crmd.lnq <- glm(case~dsst + dsst2,data =data, family = binomial)
#DMC adding in cubic and quadratic spline
crmd.lncu <- glm(case~dsst + dsst2 + dsst3, data=data, family = binomial)
crmd.spq <- glm(case~s1 + s12 + s22 + s32 + s42 + s52, data=data, family=binomial)
crmd.sp <- glm(case~s1+s2+s3+s4+s5, data =data, family = binomial)
lincom(crmd.l)
lincom(crmd.lnq,c("40*dsst+4000*dsst2==0"))
lincom(crmd.sp,c("40*s1+40*s2+30*s3+10*s4==0"))
lincom(crmd.lncu,c())
#Setting up models with confounder adjustment
cfmd.l <- glm(case~dsst+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
data=data,family = binomial)
cfmd.lnq <- glm(case~dsst+dsst2+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
data=data,family=binomial)
cfmd.sp <- glm(case~s1+s2+s3+s4+s5+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood,
data=data,family=binomial)
lincom(cfmd.l)
lincom(cfmd.lnq,c("40*dsst+4000*dsst2==0"))
lincom(cfmd.sp,c("40*s1+40*s2+30*s3+10*s4==0"))
emmd.l <- glm(case~dsst+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood+dsst:age+dsst:gmalcoh,
data=data,family = binomial)
emmd.lnq <- glm(case~dsst+dsst2+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood+dsst:age+dsst:gmalcoh
+dsst2:age+dsst2:gmalcoh,
data=data,family=binomial)
emmd.sp <- glm(case~s1+s2+s3+s4+s5+age+male+nonwhite+factor(educ)+BMI+packyrs+factor(smkst)+gmalcoh+badblood
+s1:age+s1:gmalcoh+s2:age+s2:gmalcoh
+s3:age+s3:gmalcoh+s4:age+s4:gmalcoh
+s5:age+s5:gmalcoh,
data=data, family = binomial)
summary(emmd.sp)
lincom(emmd.l,c("dsst+dsst:age+dsst:gmalcoh==0"))
lincom(emmd.lnq,c("40*dsst+40*dsst:age+40*dsst:gmalcoh+4000*dsst2+4000*dsst2:age+4000*dsst2:gmalcoh==0"))
#Compare models
#Comparing base models linear, quadratic, and spline
anova(crmd.l, crmd.sp, test="LRT") #linear and spline
#p-value = 0.2711 : no difference
anova(crmd.l, crmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.1176 : no difference
anova(crmd.sp, crmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.4382 : no difference
#Comparing full models linear, quadratic, and spline
anova(cfmd.l, cfmd.sp, test="LRT") #linear and spline
#p-value = 0.113 : no difference
anova(cfmd.l, cfmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.05309 : no difference, but real close
anova(cfmd.sp, cfmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.2922 : no difference
#Comparing full models with interactions linear, spline, and quadratic
anova(emmd.l, emmd.sp, test="LRT") #linear and spline
#p-value = 0.09784 : no difference
anova(emmd.l, emmd.lnq, test="LRT") #linear and quadratic
#p-value = 0.2687 : no difference
#Can also do a Wald's test since they're nested
waldtest(emmd.l, emmd.lnq, test="Chisq")
#p-value = 0.2571 : no difference
anova(emmd.sp, emmd.lnq, test="LRT") #spline and quadratic
#p-value = 0.09819 : no difference
#Deletion diagnostics
#plots
with(data, plot(age, gmalcoh))
qplot(age, gmalcoh, data=data, color=case)
qplot(gmalcoh, packyrs, data=data, color=educ)
beeswarm(data$gmalcoh, vertical=FALSE, method="square")
with(data, plot(crmd.l, age))
with(data, plot(crmd.lnq, age))
|
# Extracted example from the 'aemo' package documentation:
# load the bundled AEMO data set and preview its first rows.
library(aemo)
### Name: aemo
### Title: AEMO data set May 2009 - May 2014
### Aliases: aemo
### Keywords: datasets
### ** Examples
data(aemo)
head(aemo)
| /data/genthat_extracted_code/aemo/examples/aemo.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 160 | r | library(aemo)
### Name: aemo
### Title: AEMO data set May 2009 - May 2014
### Aliases: aemo
### Keywords: datasets
### ** Examples
data(aemo)
head(aemo)
|
## Smoke test of a PostgreSQL connection via RPostgreSQL:
## connects, inspects the schema, then compares dbGetQuery (eager) with
## dbSendQuery + fetch (chunked) before closing everything down.
library(RPostgreSQL)
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, host="postgres", dbname='dataexpo')
# con <- dbConnect(drv, host="rpsql", dbname='dataexpo')
dbListConnections(drv)
dbListTables(con)
dbListFields(con, "location_table")
# dbGetQuery returns a data.frame which can be used directly
meas <- dbGetQuery(con, "select * from location_table")
head(meas)
class(meas)
rm(meas)
# dbSendQuery returns a PostgreSQLResult
measures <- dbSendQuery(con, "select * from location_table")
dbGetStatement(measures)
# The default number of records to retrieve is 500 per fetch
while (!dbHasCompleted(measures)) {
  chunk <- fetch(measures, n=50)   # pull 50 rows per round trip
  print(nrow(chunk))
}
class(measures)
dbClearResult(measures)
# n=-1 fetches all the remaining records
# dbFetch(measures, n=-1)
# Tear down the connection and driver, then drop the handles
dbDisconnect(con)
dbUnloadDriver(drv)
rm(con, drv)
| /db-test.R | no_license | wwtung/rspark-tests | R | false | false | 837 | r | library(RPostgreSQL)
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, host="postgres", dbname='dataexpo')
# con <- dbConnect(drv, host="rpsql", dbname='dataexpo')
dbListConnections(drv)
dbListTables(con)
dbListFields(con, "location_table")
# dbGetQuery returns a data.frame which can be used directly
meas <- dbGetQuery(con, "select * from location_table")
head(meas)
class(meas)
rm(meas)
# dbSendQuery returns a PostgreSQLResult
measures <- dbSendQuery(con, "select * from location_table")
dbGetStatement(measures)
# The default number of records to retrieve is 500 per fetch
while (!dbHasCompleted(measures)) {
chunk <- fetch(measures, n=50)
print(nrow(chunk))
}
class(measures)
dbClearResult(measures)
# n=-1 fetches all the remaining records
# dbFetch(measures, n=-1)
dbDisconnect(con)
dbUnloadDriver(drv)
rm(con, drv)
|
## Common Correlated Effects Pooled/MG estimators
## ref. Holly, Pesaran and Yamagata JoE 158 (2010)
## (also Kapetanios, Pesaran and Yamagata JoE 2011)
## CCEP and CCEMG together in the same SW framework
## based on generalized FEs
## this version 6: includes both defactored (cce) and raw (standard) residuals,
## leaving to a special residuals.pcce method the choice of which to retrieve
## NB the effect of including a trend is exactly the same as for
## including as.numeric(<timeindex>) in the model specification
## If the panel is unbalanced, though, then for some i the trend becomes
## (3,4,5, ...) instead of (1,2,3, ...); the difference is absorbed by
## the individual intercept, and *the group intercept* changes.
## needed for standalone operation:
#plm <- plm:::plm
#pdim <- plm:::pdim
#model.matrix.plm <- plm:::model.matrix.plm
#pmodel.response.plm <- plm:::pmodel.response.plm
#tss <- plm:::tss
#' Common Correlated Effects estimators
#'
#' Common Correlated Effects Mean Groups (CCEMG) and Pooled (CCEP)
#' estimators for panel data with common factors (balanced or
#' unbalanced)
#'
#' `pcce` is a function for the estimation of linear panel models by
#' the Common Correlated Effects Mean Groups or Pooled estimator,
#' consistent under the hypothesis of unobserved common factors and
#' idiosyncratic factor loadings. The CCE estimator works by
#' augmenting the model by cross-sectional averages of the dependent
#' variable and regressors in order to account for the common factors,
#' and adding individual intercepts and possibly trends.
#'
#' @aliases pcce
#' @param formula a symbolic description of the model to be estimated,
#' @param object,x an object of class `"pcce"`,
#' @param data a `data.frame`,
#' @param subset see `lm`,
#' @param na.action see `lm`,
#' @param model one of `"mg"`, `"p"`, selects Mean Groups vs. Pooled
#' CCE model,
#' @param index the indexes, see [pdata.frame()],
#' @param trend logical specifying whether an individual-specific
#' trend has to be included,
#' @param digits digits,
#' @param width the maximum length of the lines in the print output,
#' @param type one of `"defactored"` or `"standard"`,
#' @param vcov a variance-covariance matrix furnished by the user or a function to calculate one,
#' @param \dots further arguments.
#' @return An object of class `c("pcce", "panelmodel")` containing:
#' \item{coefficients}{the vector of coefficients,}
#' \item{residuals}{the vector of (defactored) residuals,}
#' \item{stdres}{the vector of (raw) residuals,}
#' \item{tr.model}{the transformed data after projection on H,}
#' \item{fitted.values}{the vector of fitted values,}
#' \item{vcov}{the covariance matrix of the coefficients,}
#' \item{df.residual}{degrees of freedom of the residuals,}
#' \item{model}{a data.frame containing the variables used for the
#' estimation,}
#' \item{call}{the call,}
#' \item{indcoef}{the matrix of individual coefficients from
#' separate time series regressions,}
#' \item{r.squared}{numeric, the R squared.}
#' @export
#' @importFrom MASS ginv
#' @importFrom collapse rsplit gsplit GRP
#' @author Giovanni Millo
#' @references
#'
#' \insertRef{kappesyam11}{plm}
#'
#' \insertRef{HOLL:PESA:YAMA:10}{plm}
#'
#' @keywords regression
#' @examples
#'
#' data("Produc", package = "plm")
#' ccepmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="p")
#' summary(ccepmod)
#' summary(ccepmod, vcov = vcovHC) # use argument vcov for robust std. errors
#'
#' ccemgmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="mg")
#' summary(ccemgmod)
#'
pcce <- function (formula, data, subset, na.action,
                  model = c("mg", "p"),
                  #residuals = c("defactored", "standard"),
                  index = NULL, trend = FALSE, ...) {
    ## TODO: in general:
    ## * consider parallel execution via mclapply/mcmapply (aligns with the
    ##   split-only-once aspect mentioned above).
    ## Create a Formula object if necessary (from plm)
    if (!inherits(formula, "Formula")) formula <- as.Formula(formula)
    ## same as pggls but for effect, fixed at "individual" for compatibility
    ## ind for id, tind for time, k for K, coefnam for coef.names
    effect <- "individual"
    ## record call etc.
    model <- match.arg(model)
    model.name <- paste("cce", model, sep="")
    data.name <- paste(deparse(substitute(data)))
    cl <- match.call()
    ## Rebuild the call as a plm() call restricted to the arguments plm knows
    plm.model <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "na.action", "effect",
                 "model", "index"), names(plm.model), 0)
    plm.model <- plm.model[c(1L, m)]
    plm.model[[1L]] <- as.name("plm")
    ## change the 'model' in call
    plm.model$model <- "pooling"
    ## evaluates the call, modified with model == "pooling", inside the
    ## parent frame resulting in the pooling model on formula, data
    plm.model <- eval(plm.model, parent.frame())
    mf <- model.frame(plm.model)
    index <- unclass(attr(mf, "index")) # unclass for speed
    ind <- index[[1L]]  ## individual index
    tind <- index[[2L]] ## time index
    ## set dimension variables
    pdim <- pdim(plm.model)
    nt <- pdim$Tint$nt
    Ti <- pdim$Tint$Ti
    T. <- pdim$nT$T
    n <- pdim$nT$n
    N <- pdim$nT$N
    ## set index names
    time.names <- pdim$panel.names$time.names
    id.names <- pdim$panel.names$id.names
    coef.names <- names(coef(plm.model))
    ## number of coefficients
    k <- length(coef.names)
    ## model data
    X <- model.matrix(plm.model)
    y <- model.response(mf)
    ## det. *minimum* group numerosity
    t <- min(Ti)
    ## check min. t numerosity
    ## NB it is also possible to allow estimation if there *is* one group
    ## with t large enough and average on coefficients removing NAs
    ## Here we choose the explicit way: let estimation fail if we lose df
    ## but a warning would do...
    if(t < (k+1)) stop("Insufficient number of time periods")
    ## one regression for each group i in 1..n
    ## and retrieve coefficients putting them into a matrix
    ## (might be unbalanced => t1 != t2 but we don't care as long
    ## as min(t) > k+1)
    has.int <- attr(terms(plm.model), "intercept")
    if(has.int) {
        ## subtract intercept from parms number and names
        k <- k - 1
        coef.names <- coef.names[-1L]
        ## must put the intercept into the group-invariant part!!
        ## so first drop it from X
        X <- X[ , -1L, drop = FALSE]
    }
    ## "pre-allocate" coefficients matrix for the n models
    ## (dimensions are known in advance/by now)
    tcoef <- matrix(NA_real_, nrow = k, ncol = n)
    ## pre-allocate residuals lists for individual regressions
    ## (lists allow for unbalanced panels)
    cceres <- vector("list", n)
    stdres <- vector("list", n)
    ## CCE by-group estimation
    ## group-invariant part, goes in Hhat
    ## between-periods transformation (take means over groups for each t)
    ## NOTE(review): the two Between() calls use different 'effect'
    ## specifications (the raw time index vs. the string "time") -- they are
    ## presumably equivalent here, but confirm against plm::Between.
    Xm <- Between(X, effect = tind, na.rm = TRUE)
    ym <- as.numeric(Between(y, effect = "time", na.rm = TRUE))
    Hhat <- if(has.int) cbind(ym, Xm, 1L) else cbind(ym, Xm)
    ## pre-allocate XMX, XMy arrays
    ## (dimensions are known in advance/by now)
    XMX <- array(data = NA_real_, dim = c(k, k, n))
    XMy <- array(data = NA_real_, dim = c(k, 1L, n))
    ## pre-allocate MX, My for list of transformed data,
    ## later reduced to matrix and numeric, respectively
    ## (dimensions of n matrices/numerics to be hold by MX/My are not known in
    ## advance, depend on time periods per individual -> hence use list)
    MX <- vector("list", length = n)
    My <- vector("list", length = n)
    ## hence calc. beta_i anyway because of vcov
    ## for each x-sect. i=1..n estimate (over t) the CCE for every TS
    ## as in KPY, eq. 15
    # split X, y, Hhat by individual and store in lists
    ind.GRP <- collapse::GRP(ind)
    tX.list <- collapse::rsplit(X, ind.GRP, use.names = FALSE)
    ty.list <- collapse::gsplit(y, ind.GRP)
    tHhat.list <- collapse::rsplit(Hhat, ind.GRP, use.names = FALSE)
    tMhat.list <- vector("list", length = n) # pre-allocate
    for(i in seq_len(n)) {
        tX <- tX.list[[i]]
        ty <- ty.list[[i]]
        tHhat <- tHhat.list[[i]]
        ## if 'trend' then augment the xs-invariant component
        if(trend) tHhat <- cbind(tHhat, seq_len(dim(tHhat)[[1L]]))
        ## NB tHHat, tMhat should be i-invariant (but for the
        ## group size if unbalanced)
        ## Annihilator matrix M_i = I - H(H'H)^{-1}H' for this group
        tMhat <- diag(1, length(ty)) -
            crossprod(t(tHhat), solve(crossprod(tHhat), t(tHhat)))
        ## tMhat is needed again later, so save in list
        tMhat.list[[i]] <- tMhat
        CP.tXtMhat <- crossprod(tX, tMhat)
        tXMX <- tcrossprod(CP.tXtMhat, t(tX))
        tXMy <- tcrossprod(CP.tXtMhat, t(ty))
        ## XMX_i, XMy_i
        XMX[ , , i] <- tXMX
        XMy[ , , i] <- tXMy
        ## save transformed data My, MX for vcovHC use
        ## (NB M is symmetric)
        MX[[i]] <- t(CP.tXtMhat)
        My[[i]] <- crossprod(tMhat, ty)
        ## single CCE coefficients
        tcoef[ , i] <- crossprod(ginv(tXMX), tXMy) # solve(tXMX, tXMy)
        ## USED A GENERALIZED INVERSE HERE BECAUSE OF PBs WITH ECM SPECS
        ## Notice remark in Pesaran (2006, p.977, between (27) and (28))
        ## that XMX.i is invariant to the choice of a g-inverse for H'H
    }
    # Reduce transformed data to matrix and numeric, respectively
    MX <- Reduce(rbind, MX)
    My <- Reduce(c, My)
    # set names lost in processing above
    dimnames(MX) <- list(rownames(X), colnames(X))
    names(My) <- names(y)
    ## end data module
    ## CCEMG coefs are averages across individual regressions
    ## (here: coefs of xs-variants only!)
    coefmg <- rowMeans(tcoef)
    ## make demeaned coefficients: b_i - b_CCEMG
    demcoef <- tcoef - coefmg # coefmg gets recycled n times by column
    ## pre-allocate matrix of cross-products of demeaned individual coefficients
    Rmat <- array(data = NA_real_, dim = c(k, k, n))
    ## calc. coef, vcov, and residuals according to model
    switch(model,
           "mg" = {
               ## assign beta CCEMG
               coef <- coefmg
               ## calc CCEMG covariance:
               ## (HPY 2010, p. 163, between (3.10) and (3.11) / KPY 2011, p. 330 (38))
               for(i in seq_len(n)) Rmat[ , , i] <- outer(demcoef[ , i], demcoef[ , i])
               vcov <- 1/(n*(n-1)) * rowSums(Rmat, dims = 2L) # == 1/(n*(n-1)) * apply(Rmat, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               ## calc CCEMG residuals, both defactored and raw
               for(i in seq_len(n)) {
                   ## must redo all this because needs b_CCEP, which is
                   ## not known at by-groups step
                   tX <- tX.list[[i]]
                   ty <- ty.list[[i]]
                   tMhat <- tMhat.list[[i]]
                   tb <- tcoef[ , i]
                   ## cce (defactored) residuals as M_i(y_i - X_i * bCCEMG_i)
                   tytXtb <- ty - tcrossprod(tX, t(tb))
                   cceres[[i]] <- tcrossprod(tMhat, t(tytXtb))
                   ## std. (raw) residuals as y_i - X_i * bCCEMG_i - a_i
                   ## NOTE(review): 'ta' averages y minus the *raw* regressors,
                   ## not the fitted values (mean(tytXtb)) -- confirm against
                   ## HPY (2010) before relying on the raw residuals.
                   ta <- mean(ty - tX)
                   stdres[[i]] <- tytXtb - ta
               }
           },
           "p" = {
               ## calc beta_CCEP
               sXMX <- rowSums(XMX, dims = 2L) # == apply(XMX, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               sXMy <- rowSums(XMy, dims = 2L) # == apply(XMy, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               coef <- solve(sXMX, sXMy) # bCCEP in HPY
               ## calc CCEP covariance:
               ## (HPY 2010, p. 163-4, (3.12, 3.13)
               for(i in seq_len(n)) {
                   Rmat[ , , i] <- crossprod(XMX[ , , i],
                                             crossprod(outer(demcoef[ , i],
                                                             demcoef[ , i]),
                                                       XMX[ , , i]))
               }
               ## summing over the n-dimension of the array we get the
               ## covariance matrix of coefs
               R.star <- 1/(n-1) * rowSums(Rmat, dims = 2L) * 1/(t^2) # rowSums(Rmat, dims = 2L) faster than == apply(Rmat, 1:2, sum)
               psi.star <- 1/N * sXMX
               Sigmap.star <- tcrossprod(solve(psi.star, R.star), solve(psi.star))
               vcov <- Sigmap.star/n
               ## calc CCEP residuals, both defactored and raw
               for(i in seq_len(n)) {
                   tX <- tX.list[[i]]
                   ty <- ty.list[[i]]
                   tMhat <- tMhat.list[[i]]
                   ## cce residuals as M_i(y_i - X_i * bCCEP)
                   tytXcoef <- ty - tcrossprod(tX, t(coef))
                   cceres[[i]] <- tcrossprod(tMhat, t(tytXcoef))
                   ## std. (raw) residuals as y_i - X_i * bCCEP - a_i
                   # (HPY, p. 165 (left column at the bottom))
                   ## NOTE(review): as in the "mg" branch, 'ta' averages y minus
                   ## the raw regressors rather than the fitted values -- confirm.
                   ta <- mean(ty - tX)
                   stdres[[i]] <- tytXcoef - ta
               }
           })
    ## calc. measures of fit according to model type
    switch(model,
           "mg" = {
               ## R2 as in HPY 2010: sigma2ccemg = average (over n) of variances
               ## of defactored residuals
               ## (for unbalanced panels, each variance is correctly normalized
               ## by group dimension T.i)
               ##
               ## If balanced, would simply be
               ## sum(unlist(cceres)^2)/(n*(T.-2*k-2))
               ## average variance of defactored residuals sigma2ccemg as in
               ## Holly, Pesaran and Yamagata, (3.14)
               sigma2cce.i <- vapply(cceres,
                                     function(cceres.i)
                                         crossprod(cceres.i) * 1/(length(cceres.i)-2*k-2),
                                     FUN.VALUE = 0.0, USE.NAMES = FALSE)
               sigma2cce <- 1/n*sum(sigma2cce.i)
           },
           "p" = {
               ## variance of defactored residuals sigma2ccep as in Holly,
               ## Pesaran and Yamagata, (3.15)
               sigma2cce <- 1/(n*(T.-k-2)-k)*
                   sum(vapply(cceres, crossprod, FUN.VALUE = 0.0, USE.NAMES = FALSE))
               ## is the same as sum(unlist(cceres)^2)
           })
    ## calc. overall R2, CCEMG or CCEP depending on 'model'
    ## (per-group sample variances of y, then averaged over groups)
    sigma2.i <- collapse::gsplit(y, ind.GRP)
    sigma2.i <- lapply(sigma2.i, function(y.i) {
        as.numeric(crossprod(y.i - mean(y.i)))/(length(y.i)-1)})
    sigma2y <- mean(unlist(sigma2.i, use.names = FALSE))
    r2cce <- 1 - sigma2cce/sigma2y
    ## allow outputting different types of residuals, defactored residuals are
    ## default/go into slot 'residuals'
    stdres <- unlist(stdres)
    residuals <- unlist(cceres)
    ## add transformed data (for now a simple list)
    tr.model <- list(y = My, X = MX)
    ## so that if the model is ccepmod,
    ## > lm(ccepmod$tr.model[["y"]] ~ ccepmod$tr.model[["X"]]-1)
    ## reproduces the model results
    ## Final model object:
    ## code as in pggls, differences:
    ## - here there is no 'sigma'
    ## - there are two types of residuals
    ## - transformed data My, MX are included for vcovHC usage
    df.residual <- nrow(X) - ncol(X)
    fitted.values <- y - residuals
    coef <- as.numeric(coef)
    names(coef) <- rownames(vcov) <- colnames(vcov) <- coef.names
    dimnames(tcoef) <- list(coef.names, id.names)
    pmodel <- list(model.name = model.name)
    pccemod <- list(coefficients = coef,
                    residuals = residuals,
                    stdres = stdres,
                    tr.model = tr.model,
                    fitted.values = fitted.values,
                    vcov = vcov,
                    df.residual = df.residual,
                    model = mf,
                    indcoef = tcoef,
                    r.squared = r2cce,
                    #cceres = as.vector(cceres),
                    #ccemgres = as.vector(ccemgres),
                    formula = formula,
                    call = cl)
    pccemod <- structure(pccemod, pdim = pdim, pmodel = pmodel)
    class(pccemod) <- c("pcce", "panelmodel")
    pccemod
}
#' @rdname pcce
#' @export
summary.pcce <- function(object, vcov = NULL, ...){
    ## Standard errors come from the user-supplied covariance (a matrix, or a
    ## function computing one from the model) when given, otherwise from the
    ## model's own covariance matrix.
    supplied <- !is.null(vcov)
    if (supplied) {
        if (is.matrix(vcov))   rvcov <- vcov
        if (is.function(vcov)) rvcov <- vcov(object)
        std.err <- sqrt(diag(rvcov))
    } else {
        std.err <- sqrt(diag(stats::vcov(object)))
    }
    ## z-statistics and two-sided normal p-values
    est <- object$coefficients
    zstat <- est / std.err
    pval <- 2 * pnorm(abs(zstat), lower.tail = FALSE)
    object$CoefTable <- cbind("Estimate"   = est,
                              "Std. Error" = std.err,
                              "z-value"    = zstat,
                              "Pr(>|z|)"   = pval)
    ## Measures of fit
    y <- object$model[[1L]]
    object$tss  <- tss(y)
    object$ssr  <- as.numeric(crossprod(residuals(object)))
    object$rsqr <- object$r.squared #1-object$ssr/object$tss
    ## Keep the robust vcov (plus its deparsed name) for the print method
    if (supplied) {
        object$rvcov <- rvcov
        attr(object$rvcov, which = "rvcov.name") <- paste0(deparse(substitute(vcov)))
    }
    class(object) <- "summary.pcce"
    object
}
#' @rdname pcce
#' @export
print.summary.pcce <- function(x, digits = max(3, getOption("digits") - 2), width = getOption("width"), ...){
    model.info <- attr(x, "pmodel")
    dim.info   <- attr(x, "pdim")
    ## Header: which CCE estimator (Mean Groups vs. Pooled)
    cat("Common Correlated Effects ")
    cat(paste0(model.pcce.list[model.info$model.name], "\n"))
    ## Flag a user-supplied (robust) covariance matrix, if any
    if (!is.null(x$rvcov)) {
        cat("\nNote: Coefficient variance-covariance matrix supplied: ",
            attr(x$rvcov, which = "rvcov.name"), "\n", sep = "")
    }
    cat("\nCall:\n")
    print(x$call)
    cat("\n")
    print(dim.info)
    cat("\nResiduals:\n")
    print(sumres(x))
    cat("\nCoefficients:\n")
    printCoefmat(x$CoefTable, digits = digits)
    ## Goodness-of-fit summary
    cat(paste0("Total Sum of Squares: ",    signif(x$tss,  digits), "\n"))
    cat(paste0("Residual Sum of Squares: ", signif(x$ssr,  digits), "\n"))
    cat(paste0("HPY R-squared: ",           signif(x$rsqr, digits), "\n"))
    invisible(x)
}
#' @rdname pcce
#' @export
residuals.pcce <- function(object,
                           type = c("defactored", "standard"),
                           ...) {
    ## Special resid() method for pcce: extract either the defactored
    ## residuals (default) or the raw ("standard") residuals. The raw
    ## residuals get the panel index and names of the defactored ones.
    defres <- pres(object)
    if (match.arg(type) == "standard") {
        res <- add_pseries_features(object$stdres, index(defres))
        names(res) <- names(defres)
    } else {
        res <- defres
    }
    res
}
#' @rdname pcce
#' @export
model.matrix.pcce <- function(object, ...) {
    ## Regressors after projection on H (the transformed data MX)
    object[["tr.model"]][["X"]]
}
#' @rdname pcce
#' @export
pmodel.response.pcce <- function(object, ...) {
    ## Response after projection on H (the transformed data My)
    object[["tr.model"]][["y"]]
}
| /R/est_cce.R | no_license | cran/plm | R | false | false | 19,494 | r | ## Common Correlated Effects Pooled/MG estimators
## ref. Holly, Pesaran and Yamagata JoE 158 (2010)
## (also Kapetanios, Pesaran and Yamagata JoE 2011)
## CCEP and CCEMG together in the same SW framework
## based on generalized FEs
## this version 6: includes both defactored (cce) and raw (standard) residuals,
## leaving to a special residuals.pcce method the choice of which to retrieve
## NB the effect of including a trend is exactly the same as for
## including as.numeric(<timeindex>) in the model specification
## If the panel is unbalanced, though, then for some i the trend becomes
## (3,4,5, ...) instead of (1,2,3, ...); the difference is absorbed by
## the individual intercept, and *the group intercept* changes.
## needed for standalone operation:
#plm <- plm:::plm
#pdim <- plm:::pdim
#model.matrix.plm <- plm:::model.matrix.plm
#pmodel.response.plm <- plm:::pmodel.response.plm
#tss <- plm:::tss
#' Common Correlated Effects estimators
#'
#' Common Correlated Effects Mean Groups (CCEMG) and Pooled (CCEP)
#' estimators for panel data with common factors (balanced or
#' unbalanced)
#'
#' `pcce` is a function for the estimation of linear panel models by
#' the Common Correlated Effects Mean Groups or Pooled estimator,
#' consistent under the hypothesis of unobserved common factors and
#' idiosyncratic factor loadings. The CCE estimator works by
#' augmenting the model by cross-sectional averages of the dependent
#' variable and regressors in order to account for the common factors,
#' and adding individual intercepts and possibly trends.
#'
#' @aliases pcce
#' @param formula a symbolic description of the model to be estimated,
#' @param object,x an object of class `"pcce"`,
#' @param data a `data.frame`,
#' @param subset see `lm`,
#' @param na.action see `lm`,
#' @param model one of `"mg"`, `"p"`, selects Mean Groups vs. Pooled
#' CCE model,
#' @param index the indexes, see [pdata.frame()],
#' @param trend logical specifying whether an individual-specific
#' trend has to be included,
#' @param digits digits,
#' @param width the maximum length of the lines in the print output,
#' @param type one of `"defactored"` or `"standard"`,
#' @param vcov a variance-covariance matrix furnished by the user or a function to calculate one,
#' @param \dots further arguments.
#' @return An object of class `c("pcce", "panelmodel")` containing:
#' \item{coefficients}{the vector of coefficients,}
#' \item{residuals}{the vector of (defactored) residuals,}
#' \item{stdres}{the vector of (raw) residuals,}
#' \item{tr.model}{the transformed data after projection on H,}
#' \item{fitted.values}{the vector of fitted values,}
#' \item{vcov}{the covariance matrix of the coefficients,}
#' \item{df.residual}{degrees of freedom of the residuals,}
#' \item{model}{a data.frame containing the variables used for the
#' estimation,}
#' \item{call}{the call,}
#' \item{indcoef}{the matrix of individual coefficients from
#' separate time series regressions,}
#' \item{r.squared}{numeric, the R squared.}
#' @export
#' @importFrom MASS ginv
#' @importFrom collapse rsplit gsplit GRP
#' @author Giovanni Millo
#' @references
#'
#' \insertRef{kappesyam11}{plm}
#'
#' \insertRef{HOLL:PESA:YAMA:10}{plm}
#'
#' @keywords regression
#' @examples
#'
#' data("Produc", package = "plm")
#' ccepmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="p")
#' summary(ccepmod)
#' summary(ccepmod, vcov = vcovHC) # use argument vcov for robust std. errors
#'
#' ccemgmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="mg")
#' summary(ccemgmod)
#'
pcce <- function (formula, data, subset, na.action,
                  model = c("mg", "p"),
                  #residuals = c("defactored", "standard"),
                  index = NULL, trend = FALSE, ...) {
    ## Estimate the Common Correlated Effects Mean Groups ("mg") or
    ## Pooled ("p") model: fit an auxiliary pooling plm, run group-wise
    ## defactored regressions, then combine them according to 'model'.
    ## TODO: in general:
    ## * consider parallel execution via mclapply/mcmapply (aligns with the
    ## split-only-once aspect mentioned above).
    ## Create a Formula object if necessary (from plm)
    if (!inherits(formula, "Formula")) formula <- as.Formula(formula)
    ## same as pggls but for effect, fixed at "individual" for compatibility
    ## ind for id, tind for time, k for K, coefnam for coef.names
    effect <- "individual"
    ## record call etc.
    model <- match.arg(model)
    model.name <- paste("cce", model, sep="")
    ## NOTE(review): 'data.name' (and 'nt' below) are computed but never
    ## used again inside this function.
    data.name <- paste(deparse(substitute(data)))
    cl <- match.call()
    ## rebuild the call keeping only the plm-relevant arguments
    plm.model <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "na.action", "effect",
                 "model", "index"), names(plm.model), 0)
    plm.model <- plm.model[c(1L, m)]
    plm.model[[1L]] <- as.name("plm")
    ## change the 'model' in call
    plm.model$model <- "pooling"
    ## evaluates the call, modified with model == "pooling", inside the
    ## parent frame resulting in the pooling model on formula, data
    plm.model <- eval(plm.model, parent.frame())
    mf <- model.frame(plm.model)
    index <- unclass(attr(mf, "index")) # unclass for speed
    ind <- index[[1L]] ## individual index
    tind <- index[[2L]] ## time index
    ## set dimension variables
    pdim <- pdim(plm.model)
    nt <- pdim$Tint$nt
    Ti <- pdim$Tint$Ti
    T. <- pdim$nT$T
    n <- pdim$nT$n
    N <- pdim$nT$N
    ## set index names
    time.names <- pdim$panel.names$time.names
    id.names <- pdim$panel.names$id.names
    coef.names <- names(coef(plm.model))
    ## number of coefficients
    k <- length(coef.names)
    ## model data
    X <- model.matrix(plm.model)
    y <- model.response(mf)
    ## det. *minimum* group numerosity
    t <- min(Ti)
    ## check min. t numerosity
    ## NB it is also possible to allow estimation if there *is* one group
    ## with t large enough and average on coefficients removing NAs
    ## Here we choose the explicit way: let estimation fail if we lose df
    ## but a warning would do...
    if(t < (k+1)) stop("Insufficient number of time periods")
    ## one regression for each group i in 1..n
    ## and retrieve coefficients putting them into a matrix
    ## (might be unbalanced => t1 != t2 but we don't care as long
    ## as min(t) > k+1)
    has.int <- attr(terms(plm.model), "intercept")
    if(has.int) {
        ## subtract intercept from parms number and names
        k <- k - 1
        coef.names <- coef.names[-1L]
        ## must put the intercept into the group-invariant part!!
        ## so first drop it from X
        X <- X[ , -1L, drop = FALSE]
    }
    ## "pre-allocate" coefficients matrix for the n models
    ## (dimensions are known in advance/by now)
    tcoef <- matrix(NA_real_, nrow = k, ncol = n)
    ## pre-allocate residuals lists for individual regressions
    ## (lists allow for unbalanced panels)
    cceres <- vector("list", n)
    stdres <- vector("list", n)
    ## CCE by-group estimation
    ## group-invariant part, goes in Hhat
    ## between-periods transformation (take means over groups for each t)
    ## NOTE(review): X is a plain matrix here (index vector 'tind' passed),
    ## while y is a pseries (effect = "time" passed) -- presumably the two
    ## calls dispatch to equivalent between-time transformations; confirm.
    Xm <- Between(X, effect = tind, na.rm = TRUE)
    ym <- as.numeric(Between(y, effect = "time", na.rm = TRUE))
    Hhat <- if(has.int) cbind(ym, Xm, 1L) else cbind(ym, Xm)
    ## pre-allocate XMX, XMy arrays
    ## (dimensions are known in advance/by now)
    XMX <- array(data = NA_real_, dim = c(k, k, n))
    XMy <- array(data = NA_real_, dim = c(k, 1L, n))
    ## pre-allocate MX, My for list of transformed data,
    ## later reduced to matrix and numeric, respectively
    ## (dimensions of n matrices/numerics to be hold by MX/My are not known in
    ## advance, depend on time periods per individual -> hence use list)
    MX <- vector("list", length = n)
    My <- vector("list", length = n)
    ## hence calc. beta_i anyway because of vcov
    ## for each x-sect. i=1..n estimate (over t) the CCE for every TS
    ## as in KPY, eq. 15
    # split X, y, Hhat by individual and store in lists
    ind.GRP <- collapse::GRP(ind)
    tX.list <- collapse::rsplit(X, ind.GRP, use.names = FALSE)
    ty.list <- collapse::gsplit(y, ind.GRP)
    tHhat.list <- collapse::rsplit(Hhat, ind.GRP, use.names = FALSE)
    tMhat.list <- vector("list", length = n) # pre-allocate
    for(i in seq_len(n)) {
        tX <- tX.list[[i]]
        ty <- ty.list[[i]]
        tHhat <- tHhat.list[[i]]
        ## if 'trend' then augment the xs-invariant component
        if(trend) tHhat <- cbind(tHhat, seq_len(dim(tHhat)[[1L]]))
        ## NB tHHat, tMhat should be i-invariant (but for the
        ## group size if unbalanced)
        ## annihilator matrix M_i = I - H(H'H)^-1 H'
        tMhat <- diag(1, length(ty)) -
            crossprod(t(tHhat), solve(crossprod(tHhat), t(tHhat)))
        ## tMhat is needed again later, so save in list
        tMhat.list[[i]] <- tMhat
        CP.tXtMhat <- crossprod(tX, tMhat)
        tXMX <- tcrossprod(CP.tXtMhat, t(tX))
        tXMy <- tcrossprod(CP.tXtMhat, t(ty))
        ## XMX_i, XMy_i
        XMX[ , , i] <- tXMX
        XMy[ , , i] <- tXMy
        ## save transformed data My, MX for vcovHC use
        ## (NB M is symmetric)
        MX[[i]] <- t(CP.tXtMhat)
        My[[i]] <- crossprod(tMhat, ty)
        ## single CCE coefficients
        tcoef[ , i] <- crossprod(ginv(tXMX), tXMy) # solve(tXMX, tXMy)
        ## USED A GENERALIZED INVERSE HERE BECAUSE OF PBs WITH ECM SPECS
        ## Notice remark in Pesaran (2006, p.977, between (27) and (28))
        ## that XMX.i is invariant to the choice of a g-inverse for H'H
    }
    # Reduce transformed data to matrix and numeric, respectively
    MX <- Reduce(rbind, MX)
    My <- Reduce(c, My)
    # set names lost in processing above
    dimnames(MX) <- list(rownames(X), colnames(X))
    names(My) <- names(y)
    ## end data module
    ## CCEMG coefs are averages across individual regressions
    ## (here: coefs of xs-variants only!)
    coefmg <- rowMeans(tcoef)
    ## make demeaned coefficients: b_i - b_CCEMG
    demcoef <- tcoef - coefmg # coefmg gets recycled n times by column
    ## pre-allocate matrix of cross-products of demeaned individual coefficients
    Rmat <- array(data = NA_real_, dim = c(k, k, n))
    ## calc. coef, vcov, and residuals according to model
    switch(model,
           "mg" = {
               ## assign beta CCEMG
               coef <- coefmg
               ## calc CCEMG covariance:
               ## (HPY 2010, p. 163, between (3.10) and (3.11) / KPY 2011, p. 330 (38))
               for(i in seq_len(n)) Rmat[ , , i] <- outer(demcoef[ , i], demcoef[ , i])
               vcov <- 1/(n*(n-1)) * rowSums(Rmat, dims = 2L) # == 1/(n*(n-1)) * apply(Rmat, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               ## calc CCEMG residuals, both defactored and raw
               for(i in seq_len(n)) {
                   ## must redo all this because needs b_CCEP, which is
                   ## not known at by-groups step
                   tX <- tX.list[[i]]
                   ty <- ty.list[[i]]
                   tMhat <- tMhat.list[[i]]
                   tb <- tcoef[ , i]
                   ## cce (defactored) residuals as M_i(y_i - X_i * bCCEMG_i)
                   tytXtb <- ty - tcrossprod(tX, t(tb))
                   cceres[[i]] <- tcrossprod(tMhat, t(tytXtb))
                   ## std. (raw) residuals as y_i - X_i * bCCEMG_i - a_i
                   ## NOTE(review): 'ty - tX' recycles the vector ty across the
                   ## columns of the matrix tX before averaging; verify this is
                   ## the intended intercept estimate (vs. mean(tytXtb), i.e.
                   ## mean(y_i - X_i b_i)).
                   ta <- mean(ty - tX)
                   stdres[[i]] <- tytXtb - ta
               }
           },
           "p" = {
               ## calc beta_CCEP
               sXMX <- rowSums(XMX, dims = 2L) # == apply(XMX, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               sXMy <- rowSums(XMy, dims = 2L) # == apply(XMy, 1:2, sum), but rowSums(., dims = 2L)-construct is way faster
               coef <- solve(sXMX, sXMy) # bCCEP in HPY
               ## calc CCEP covariance:
               ## (HPY 2010, p. 163-4, (3.12, 3.13)
               for(i in seq_len(n)) {
                   Rmat[ , , i] <- crossprod(XMX[ , , i],
                                             crossprod(outer(demcoef[ , i],
                                                             demcoef[ , i]),
                                                       XMX[ , , i]))
               }
               ## summing over the n-dimension of the array we get the
               ## covariance matrix of coefs
               R.star <- 1/(n-1) * rowSums(Rmat, dims = 2L) * 1/(t^2) # rowSums(Rmat, dims = 2L) faster than == apply(Rmat, 1:2, sum)
               psi.star <- 1/N * sXMX
               Sigmap.star <- tcrossprod(solve(psi.star, R.star), solve(psi.star))
               vcov <- Sigmap.star/n
               ## calc CCEP residuals, both defactored and raw
               for(i in seq_len(n)) {
                   tX <- tX.list[[i]]
                   ty <- ty.list[[i]]
                   tMhat <- tMhat.list[[i]]
                   ## cce residuals as M_i(y_i - X_i * bCCEP)
                   tytXcoef <- ty - tcrossprod(tX, t(coef))
                   cceres[[i]] <- tcrossprod(tMhat, t(tytXcoef))
                   ## std. (raw) residuals as y_i - X_i * bCCEP - a_i
                   # (HPY, p. 165 (left column at the bottom))
                   ## NOTE(review): same recycling concern as in the "mg"
                   ## branch above -- 'ty - tX' averages a vector-minus-matrix;
                   ## confirm vs. mean(tytXcoef).
                   ta <- mean(ty - tX)
                   stdres[[i]] <- tytXcoef - ta
               }
           })
    ## calc. measures of fit according to model type
    switch(model,
           "mg" = {
               ## R2 as in HPY 2010: sigma2ccemg = average (over n) of variances
               ## of defactored residuals
               ## (for unbalanced panels, each variance is correctly normalized
               ## by group dimension T.i)
               ##
               ## If balanced, would simply be
               ## sum(unlist(cceres)^2)/(n*(T.-2*k-2))
               ## average variance of defactored residuals sigma2ccemg as in
               ## Holly, Pesaran and Yamagata, (3.14)
               sigma2cce.i <- vapply(cceres,
                                     function(cceres.i)
                                         crossprod(cceres.i) * 1/(length(cceres.i)-2*k-2),
                                     FUN.VALUE = 0.0, USE.NAMES = FALSE)
               sigma2cce <- 1/n*sum(sigma2cce.i)
           },
           "p" = {
               ## variance of defactored residuals sigma2ccep as in Holly,
               ## Pesaran and Yamagata, (3.15)
               sigma2cce <- 1/(n*(T.-k-2)-k)*
                   sum(vapply(cceres, crossprod, FUN.VALUE = 0.0, USE.NAMES = FALSE))
               ## is the same as sum(unlist(cceres)^2)
           })
    ## calc. overall R2, CCEMG or CCEP depending on 'model'
    ## (per-group variance of y, then averaged over groups)
    sigma2.i <- collapse::gsplit(y, ind.GRP)
    sigma2.i <- lapply(sigma2.i, function(y.i) {
        as.numeric(crossprod(y.i - mean(y.i)))/(length(y.i)-1)})
    sigma2y <- mean(unlist(sigma2.i, use.names = FALSE))
    r2cce <- 1 - sigma2cce/sigma2y
    ## allow outputting different types of residuals, defactored residuals are
    ## default/go into slot 'residuals'
    stdres <- unlist(stdres)
    residuals <- unlist(cceres)
    ## add transformed data (for now a simple list)
    tr.model <- list(y = My, X = MX)
    ## so that if the model is ccepmod,
    ## > lm(ccepmod$tr.model[["y"]] ~ ccepmod$tr.model[["X"]]-1)
    ## reproduces the model results
    ## Final model object:
    ## code as in pggls, differences:
    ## - here there is no 'sigma'
    ## - there are two types of residuals
    ## - transformed data My, MX are included for vcovHC usage
    df.residual <- nrow(X) - ncol(X)
    fitted.values <- y - residuals
    coef <- as.numeric(coef)
    names(coef) <- rownames(vcov) <- colnames(vcov) <- coef.names
    dimnames(tcoef) <- list(coef.names, id.names)
    pmodel <- list(model.name = model.name)
    pccemod <- list(coefficients = coef,
                    residuals = residuals,
                    stdres = stdres,
                    tr.model = tr.model,
                    fitted.values = fitted.values,
                    vcov = vcov,
                    df.residual = df.residual,
                    model = mf,
                    indcoef = tcoef,
                    r.squared = r2cce,
                    #cceres = as.vector(cceres),
                    #ccemgres = as.vector(ccemgres),
                    formula = formula,
                    call = cl)
    pccemod <- structure(pccemod, pdim = pdim, pmodel = pmodel)
    class(pccemod) <- c("pcce", "panelmodel")
    pccemod
}
#' @rdname pcce
#' @export
summary.pcce <- function(object, vcov = NULL, ...){
    ## summary() method for pcce objects: builds the coefficient table
    ## (normal-based z tests) and adds fit measures. An optional
    ## user-supplied covariance (matrix or function of the model) is used
    ## for the standard errors instead of the model's own vcov.
    user.vcov <- vcov
    std.err <- if (!is.null(user.vcov)) {
        if (is.matrix(user.vcov))   rvcov <- user.vcov
        if (is.function(user.vcov)) rvcov <- user.vcov(object)
        sqrt(diag(rvcov))
    } else {
        sqrt(diag(stats::vcov(object)))
    }
    est   <- object$coefficients
    zstat <- est / std.err
    pval  <- 2 * pnorm(abs(zstat), lower.tail = FALSE)
    CoefTable <- cbind(est, std.err, zstat, pval)
    colnames(CoefTable) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
    object$CoefTable <- CoefTable
    ## fit measures (response is the first column of the model frame)
    resp <- object$model[[1L]]
    object$tss  <- tss(resp)
    object$ssr  <- as.numeric(crossprod(residuals(object)))
    object$rsqr <- object$r.squared # equals 1 - ssr/tss only approximately
    ## when a robust vcov was supplied, store it (next to the "normal" one)
    ## together with the expression it was supplied as
    if (!is.null(user.vcov)) {
        object$rvcov <- rvcov
        attr(object$rvcov, which = "rvcov.name") <- paste0(deparse(substitute(vcov)))
    }
    class(object) <- c("summary.pcce")
    return(object)
}
#' @rdname pcce
#' @export
print.summary.pcce <- function(x, digits = max(3, getOption("digits") - 2), width = getOption("width"), ...){
    ## print() method for summary.pcce objects: writes the model header,
    ## call, panel dimensions, residual summary, coefficient table and fit
    ## statistics to the console; returns the input invisibly.
    pmodel <- attr(x, "pmodel")
    pdim <- attr(x, "pdim")
    cat("Common Correlated Effects ")
    ## model.pcce.list maps the internal model code (e.g. "ccemg"/"ccep")
    ## to a display name -- defined elsewhere in the package
    cat(paste(model.pcce.list[pmodel$model.name], "\n", sep = ""))
    ## note any user-supplied (robust) vcov recorded by summary.pcce()
    if (!is.null(x$rvcov)) {
        cat("\nNote: Coefficient variance-covariance matrix supplied: ", attr(x$rvcov, which = "rvcov.name"), "\n", sep = "")
    }
    cat("\nCall:\n")
    print(x$call)
    cat("\n")
    print(pdim)
    cat("\nResiduals:\n")
    ## sumres() is a package-internal residual summary helper
    print(sumres(x))
    cat("\nCoefficients:\n")
    printCoefmat(x$CoefTable, digits = digits)
    cat(paste("Total Sum of Squares: ", signif(x$tss, digits), "\n", sep=""))
    cat(paste("Residual Sum of Squares: ", signif(x$ssr, digits), "\n", sep=""))
    cat(paste("HPY R-squared: ", signif(x$rsqr, digits), "\n", sep=""))
    invisible(x)
}
#' @rdname pcce
#' @export
residuals.pcce <- function(object,
                           type = c("defactored", "standard"),
                           ...) {
    ## resid() method for pcce objects: returns either the defactored
    ## residuals (the default) or the raw ("standard") residuals.
    kind <- match.arg(type)
    defactored <- pres(object)
    if (kind == "standard") {
        ## raw residuals: re-attach the panel structure and names taken
        ## from the defactored residuals
        out <- add_pseries_features(object$stdres, index(defactored))
        names(out) <- names(defactored)
    } else {
        out <- defactored
    }
    out
}
#' @rdname pcce
#' @export
model.matrix.pcce <- function(object, ...) {
    ## Extract the projected (defactored) regressor matrix stored on the
    ## fitted pcce object.
    object[["tr.model"]][["X"]]
}
#' @rdname pcce
#' @export
pmodel.response.pcce <- function(object, ...) {
    ## Extract the projected (defactored) response vector stored on the
    ## fitted pcce object.
    object[["tr.model"]][["y"]]
}
|
get_label <- function(data, cols) {
  ## Return a display label for each column in `cols`: the column's
  ## "label" attribute when present, otherwise the column name itself.
  ##
  ## vapply() (rather than sapply()) guarantees a character vector even
  ## when `cols` is empty -- sapply() would return an empty list there,
  ## which is type-unstable for downstream consumers.
  vapply(cols, function(col) {
    lbl <- attr(data[[col]], "label")
    if (is.null(lbl))
      lbl <- col
    lbl
  }, character(1))
}
stop_nice <- function(...) {
  ## Signal an error whose message is wrapped to the console width
  ## (continuation lines indented by 7 spaces), omitting the call from
  ## the message.
  msg <- paste(strwrap(paste(...), exdent = 7), collapse = "\n")
  stop(msg, call. = FALSE)
}
#' Compute automatic cognostics
#'
#' @param data a list of data frames (one per subset), a grouped data frame, or a nested data frame
#' @return If the input is a list of data frames, the return value is a list of data frames containing the cognostics. If the input is a grouped or nested df, the result will be a nested df with a new column containing the cognostics.
#' @importFrom purrr map map_df
#' @export
#' @seealso \code{\link{trelliscope}}
auto_cogs <- function(data) {
  ## Computes automatic cognostics. Accepts one of three input shapes:
  ##   - a grouped data frame (nested below via tidyr::nest),
  ##   - a nested data frame (atomic splitting columns + one list-column
  ##     of data frames),
  ##   - a plain list of data frames.
  # if a grouped df, nest it so we have a nested df
  if (inherits(data, "grouped_df")) {
    # nesting causes label attributes to be lost, so preserve them...
    # (need to find a better way to deal with this)
    labels <- lapply(data, function(x) attr(x, "label"))
    data <- nest(data)
    # set first subset label attributes (auto_cogs will look for them here)
    for (nm in names(data$data[[1]]))
      attr(data$data[[1]][[nm]], "label") <- labels[[nm]]
  }
  # in the case of nested df, there should be atomic columns indicating splitting variables
  # and then a single "list" column of data frames
  data_is_df <- FALSE
  if (is.data.frame(data)) {
    data_is_df <- TRUE
    is_atomic <- sapply(data, is.atomic)
    # at_least_one_atomic <- length(which(is_atomic)) > 0
    exactly_one_non_atomic <- length(which(!is_atomic)) == 1
    if (!exactly_one_non_atomic)
      stop_nice("Data supplied to auto_cogs must be a data frame with a single",
        "nested data frame column.")
    nest_nm <- names(data)[which(!is_atomic)]
    # only the first subset is inspected; assumes all subsets have the
    # same structure
    if (! inherits(data[[nest_nm]][[1]], "data.frame"))
      stop_nice("Data in nested column supplied to auto_cogs must contain data frames.")
    cog_data <- data[[nest_nm]]
  } else {
    cog_data <- data
  }
  # cog_spec is a list specifying the cognostics and their descriptions
  # so that we can add these in later
  ## determine which columns to compute what kind of cognostics for
  # NOTE(review): data_frame() is soft-deprecated in tibble; tibble() is
  # the modern equivalent -- left as-is here.
  cog_spec <- list(
    count = data_frame(col = NA, cogname = "count", desc = "number of observations")
  )
  # if any columns are unique per group, add them as an "identity" cognostic
  # (tmp holds, per subset, the number of distinct values of each column)
  tmp <- cog_data %>% purrr::map_df(. %>% summarise_all(n_distinct))
  unique_cols <- names(tmp)[sapply(tmp, function(x) all(x == 1))]
  if (length(unique_cols) > 0) {
    cog_spec$unique <- data_frame(
      col = unique_cols,
      cogname = sanitize(unique_cols),
      desc = get_label(cog_data[[1]], unique_cols))
  }
  # if numeric and not unique, get the mean (TODO - other summary stats and group them)
  num_cols <- names(cog_data[[1]])[sapply(cog_data[[1]], is.numeric)]
  num_cols <- setdiff(num_cols, unique_cols)
  if (length(num_cols) > 0)
    cog_spec$num <- data_frame(
      col = num_cols,
      cogname = paste0(sanitize(num_cols), "_mean"),
      desc = paste("mean", get_label(cog_data[[1]], num_cols)))
  tmp <- bind_rows(cog_spec)
  # NOTE(review): cog_desc is built but never used below in this
  # function -- verify whether it was meant to be attached to the result.
  cog_desc <- as.list(tmp$desc)
  names(cog_desc) <- tmp$cogname
  # build one row of cognostics per subset (map_cog is a package helper)
  res <- map_cog(cog_data, function(x) {
    res <- data_frame(count = nrow(x))
    for (ii in seq_along(cog_spec$unique$col))
      res[[cog_spec$unique$cogname[ii]]] <- x[[cog_spec$unique$col[ii]]][1]
    for (ii in seq_along(cog_spec$num$col))
      res[[cog_spec$num$cogname[ii]]] <- mean(x[[cog_spec$num$col[ii]]])
    res
  })
  if (data_is_df) {
    return(
      data %>%
        mutate(auto_cogs = res)
    )
  } else {
    return(res)
  }
}
| /R/auto_cogs.R | no_license | beansrowning/trelliscopejs | R | false | false | 3,658 | r |
get_label <- function(data, cols) {
  ## Return a display label for each column in `cols`: the column's
  ## "label" attribute when present, otherwise the column name itself.
  ##
  ## vapply() (rather than sapply()) guarantees a character vector even
  ## when `cols` is empty -- sapply() would return an empty list there,
  ## which is type-unstable for downstream consumers.
  vapply(cols, function(col) {
    lbl <- attr(data[[col]], "label")
    if (is.null(lbl))
      lbl <- col
    lbl
  }, character(1))
}
stop_nice <- function(...) {
  ## Signal an error whose message is wrapped to the console width
  ## (continuation lines indented by 7 spaces), omitting the call from
  ## the message.
  msg <- paste(strwrap(paste(...), exdent = 7), collapse = "\n")
  stop(msg, call. = FALSE)
}
#' Compute automatic cognostics
#'
#' @param data a list of data frames (one per subset), a grouped data frame, or a nested data frame
#' @return If the input is a list of data frames, the return value is a list of data frames containing the cognostics. If the input is a grouped or nested df, the result will be a nested df with a new column containing the cognostics.
#' @importFrom purrr map map_df
#' @export
#' @seealso \code{\link{trelliscope}}
auto_cogs <- function(data) {
  ## Computes automatic cognostics. Accepts one of three input shapes:
  ##   - a grouped data frame (nested below via tidyr::nest),
  ##   - a nested data frame (atomic splitting columns + one list-column
  ##     of data frames),
  ##   - a plain list of data frames.
  # if a grouped df, nest it so we have a nested df
  if (inherits(data, "grouped_df")) {
    # nesting causes label attributes to be lost, so preserve them...
    # (need to find a better way to deal with this)
    labels <- lapply(data, function(x) attr(x, "label"))
    data <- nest(data)
    # set first subset label attributes (auto_cogs will look for them here)
    for (nm in names(data$data[[1]]))
      attr(data$data[[1]][[nm]], "label") <- labels[[nm]]
  }
  # in the case of nested df, there should be atomic columns indicating splitting variables
  # and then a single "list" column of data frames
  data_is_df <- FALSE
  if (is.data.frame(data)) {
    data_is_df <- TRUE
    is_atomic <- sapply(data, is.atomic)
    # at_least_one_atomic <- length(which(is_atomic)) > 0
    exactly_one_non_atomic <- length(which(!is_atomic)) == 1
    if (!exactly_one_non_atomic)
      stop_nice("Data supplied to auto_cogs must be a data frame with a single",
        "nested data frame column.")
    nest_nm <- names(data)[which(!is_atomic)]
    # only the first subset is inspected; assumes all subsets have the
    # same structure
    if (! inherits(data[[nest_nm]][[1]], "data.frame"))
      stop_nice("Data in nested column supplied to auto_cogs must contain data frames.")
    cog_data <- data[[nest_nm]]
  } else {
    cog_data <- data
  }
  # cog_spec is a list specifying the cognostics and their descriptions
  # so that we can add these in later
  ## determine which columns to compute what kind of cognostics for
  # NOTE(review): data_frame() is soft-deprecated in tibble; tibble() is
  # the modern equivalent -- left as-is here.
  cog_spec <- list(
    count = data_frame(col = NA, cogname = "count", desc = "number of observations")
  )
  # if any columns are unique per group, add them as an "identity" cognostic
  # (tmp holds, per subset, the number of distinct values of each column)
  tmp <- cog_data %>% purrr::map_df(. %>% summarise_all(n_distinct))
  unique_cols <- names(tmp)[sapply(tmp, function(x) all(x == 1))]
  if (length(unique_cols) > 0) {
    cog_spec$unique <- data_frame(
      col = unique_cols,
      cogname = sanitize(unique_cols),
      desc = get_label(cog_data[[1]], unique_cols))
  }
  # if numeric and not unique, get the mean (TODO - other summary stats and group them)
  num_cols <- names(cog_data[[1]])[sapply(cog_data[[1]], is.numeric)]
  num_cols <- setdiff(num_cols, unique_cols)
  if (length(num_cols) > 0)
    cog_spec$num <- data_frame(
      col = num_cols,
      cogname = paste0(sanitize(num_cols), "_mean"),
      desc = paste("mean", get_label(cog_data[[1]], num_cols)))
  tmp <- bind_rows(cog_spec)
  # NOTE(review): cog_desc is built but never used below in this
  # function -- verify whether it was meant to be attached to the result.
  cog_desc <- as.list(tmp$desc)
  names(cog_desc) <- tmp$cogname
  # build one row of cognostics per subset (map_cog is a package helper)
  res <- map_cog(cog_data, function(x) {
    res <- data_frame(count = nrow(x))
    for (ii in seq_along(cog_spec$unique$col))
      res[[cog_spec$unique$cogname[ii]]] <- x[[cog_spec$unique$col[ii]]][1]
    for (ii in seq_along(cog_spec$num$col))
      res[[cog_spec$num$cogname[ii]]] <- mean(x[[cog_spec$num$col[ii]]])
    res
  })
  if (data_is_df) {
    return(
      data %>%
        mutate(auto_cogs = res)
    )
  } else {
    return(res)
  }
}
|
## 23 May 2019 - R 3.5.3 - planetfish2 version 0.6.1
## Let's find that bug!
## Script for generating data and casal assessment output using Planetfish2
## Goal: Use a scenario that performs poorly to inspect attributes between OM and AM that may cause discrepency. Namely, inspect
## perfect knowledge of population parameters, correspondence of catch quantity and survey scanns, etc.
## Packages ----
library(earthfish)
library(casal)
## House ----
# rm(list = ls())
## number of iterations and scenario name
n_iters <- 2
scenario <- "TOA_bug_1"
## define a file name
file_name <- scenario
## Brett's file path
file_path <- paste0("C:/Users/STA384/Documents/GitHub/planetfish2_bugs/Output/", scenario, "/")
# file_path <- paste0("C:/Users/bstacy/Documents/GitHub/planetfish2_bugs/Output/", scenario, "/")
## PB file path
# file_path <- paste0("C:/Work/Manuscripts/2019_Stacy_etal_Exploratory_fisheries/Antarctic_toothfish_scenario/single_region/", scenario, "/")
## not sure what requires the WD to be set
setwd(file_path)
getwd()
### Specify Antarctic toothfish biological parameters ----
# Ages
TOA_max_age = 35 # Yates and Ziegler 2018
# Growth. Von Bertalanfy. Yates and Ziegler 2018
TOA_L_inf = 1565
TOA_K = 0.146
TOA_t_0 = 0.015
TOA_CV = 0.122
# # Growth. Von Bertalanfy. TOP
# TOA_L_inf = 2870
# TOA_K = 0.02056
# TOA_t_0 = -4.28970
# TOA_CV = 0.100
# Growth. Von Bertalanfy. TOA Mormede et al. 2014
# TOA_L_inf = 1690.7
# TOA_K = 0.093
# TOA_t_0 = -0.256
# TOA_CV = 0.102
# Growth. Von Bertalanfy. TOA Other
# TOA_L_inf = 2265
# TOA_K = 0.093
# TOA_t_0 = -0.256
# TOA_CV = 0.102
# Weight-Length. Yates and Ziegler 2018
TOA_wl_c = 3.0088e-12
TOA_wl_d = 3.2064
# Maturity. Yates and Ziegler 2018
TOA_maturity_ogive = "logistic"
TOA_a_50 = 14.45 # 14.45
TOA_a_95 = 6.5 # 6.5
# Natural Mortality. Yates and Ziegler 2018
TOA_M = 0.13
# Stock-recruitment Steepness h from Beverton-Holt
TOA_h = 0.75
### Model parameters ----
## mean recruitment
########## STUDY PERIOD
study_year_range = c(1990, 2010) # c(1968, 2018)
no_fish_range = 1 ##** Don't fish the first 10 years
R_mu <- 1e6 # 1e6
## recruitment variability
R_sigma <- 0 ############################################### 3e-01
# Total catch across single area
total_catch <- 6000
########## SAMPLING
n_years_aged = 10 ##** age fish for last 20 years. used in para$ass$sample_years
age_years = if(n_years_aged == 0) NULL else((study_year_range[2] - n_years_aged):study_year_range[2])
### BS: 30/05/19 add len_years
n_years_lengthed = 0
len_years = if(n_years_lengthed == 0) NULL else((study_year_range[2] - n_years_lengthed):study_year_range[2])
# The number of tags released in area 1 each year ##### just area 1?
n_tags = 2500 # 2500
# Number of years to release tags. leave out last year.
n_years_tags = 5 # 5
tag_years = (study_year_range[2] - n_years_tags + 1):study_year_range[2] - 1
## define longline selectivity
LL_sel <- list(top=10, sigma_left=2, sigma_right=10)
## add a logistic selectivity
### OM ----
## specify the default parameters
para <- get_om_data()
para$control$Assyr_range = para$om$year
# Set age parameters
para$om$age = c(1, TOA_max_age)
para$om$ages = 1:TOA_max_age
para$om$n_ages = length(para$om$ages)
para$om$names_ages = paste0("age", "_", para$om$ages, sep = "")
# Set growth parameters
para$om$growth = list(f = c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV),
m = c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV))
# Set WL parameters
para$om$WL = list(f = list(a = TOA_wl_c, b = TOA_wl_d), m = list(a = TOA_wl_c, b = TOA_wl_d))
# Set maturity parameters
para$om$pin_mat = TOA_maturity_ogive
para$om$maturity = list(f = list(x50 = TOA_a_50, x95 = TOA_a_95), m = list(x50 = TOA_a_50, x95 = TOA_a_95))
# Set natural mortality parameters
para$om$natM = rep(TOA_M, para$om$n_ages)
# Set Stock-recruitment h
para$om$rec_h = TOA_h
## redefine the recruitment used for spawning biomass
para$om$rec_mu <- R_mu
para$om$rec_sigma <- R_sigma
## redefine the number of areas
para$om$region <- c(1,1)
para$om$regions <- c(1)
para$om$n_regions <- length(para$om$regions)
## B0 is determined by method 3
## redefine the fisheries
para$om$fishery <- c("LL1")
para$om$n_fisheries <- length(para$om$fishery)
## set selectivity to NULL then define selectivities
para$om$pin_sel <- NULL
para$om$pin_sel$LL1 <- "double_normal"
para$om$select$LL1 <- LL_sel
## catches for the two fisheries
para$om$catch <- array(data=0, dim=c(para$om$n_years, para$om$n_fisheries,
para$om$n_seasons, para$om$n_regions),
dimnames=list("Year"=para$om$years,"Fishery"=para$om$fishery,
"Season"=para$om$seasons,"Region"=para$om$regions))
## fille the arrays with the catch in the specified proportions
para$om$catch[,"LL1", 1,1] <- rep(total_catch, para$om$n_years)
para$om$catch[1,,,] <- 0 # Catch in first year set to 0, such that SSB in first year is unfished biomass
## overwrite the effort (not sure it is used)
para$om$effort <- para$om$catch
## catch splits (I don't think it is used)
para$om$catchsplits <- array(data=0, dim=c(para$om$n_years, para$om$n_fisheries,
para$om$n_seasons,para$om$n_regions),
dimnames=list("Year"=para$om$years,"Fishery"=para$om$fishery,
"Season"=para$om$seasons,"Region"=para$om$regions))
para$om$catchsplits[,"LL1", 1,1] <- rep(1, para$om$n_years)
para$om$catchsplits[1,,,] <- 0 # Catch in first year set to 0, such that SSB in first year is unfished biomass
## remove Trawl selectivity
##** better to just specify the complete selectivity
para$om$select$Trawl <- NULL
## define the movement matrix for the population ################## BS 8/5: may have to set these == 0 because there are default values in there?
###* do we need something in the movement
# move_by_age <- as.data.frame(matrix(c(rep(move_1_to_2, para$om$n_ages),
# rep(move_2_to_1, para$om$n_ages)),
# nrow=2, ncol=para$om$n_ages, byrow=TRUE))
## for Sex, Year and Season, 0 means the movement rule represents all of that category
# para$om$move_rules <- data.frame("Origin" = c(1,2),"Destination" = c(2,1), "Sex" = c(0,0),
# "Year" = c(0,0), "Season" = c(0, 0),
# setNames(move_by_age, para$om$names_ages))
## define the movement matrix
# move_tag_by_age <- as.data.frame(matrix(c(rep(move_tags_1_to_2, para$om$n_ages),
# rep(move_tags_2_to_1, para$om$n_ages)),
# nrow=2, ncol=para$om$n_ages, byrow=TRUE))
## separate dataframe for the movement of tags
# para$om$move_tag_rules <- data.frame("Origin" = c(1,2),"Destination" = c(2,1), "Sex" = c(0,0),
# "Year" = c(0,0), "Season" = c(0, 0),
# setNames(move_tag_by_age, para$om$names_ages))
### Sampling ----
## only release tags in Area 1
para$sampling$pin_tag_N <- "Fixed" # this defines how tagging specified
para$sampling$tag_N <- c(n_tags, 0)
para$sampling$tag_rate <- c(2,0)
# Change sampling length classes and n_classes
# para$sampling$len_classes = seq(100, round(L_inf, digits = -2), 50)
# para$sampling$len_classes = seq(100, 3000, 50) # seq(300, 2000, 50) # yates and ziegler 2018
# para$sampling$n_lengths = length(para$sampling$len_classes)
## Tagging selectivity
para$sampling$pin_tag_sel <- list()
para$sampling$pin_tag_sel[[para$om$fishery[1]]] <- para$om$pin_sel[[para$om$fishery[1]]]
## define the selectivity parameters
para$sampling$tag_select <- list()
para$sampling$tag_select[[para$om$fishery[1]]] <- para$om$select[[para$om$fishery[1]]]
###* Brett thinks this isn't being implemented. 23/5/19: It is being implemented but it isn't clear where it shows up in CASAL files.
## age 1000 fish in Region 1
para$sampling$catchage_N <- 1000 # 1000
#### BS 30/05/19 add catchlen_N too
# para$sampling$catchlen_N = 1000 # 1000
### Assessment ----
para <- get_casal_para(para)
## Plust Group? BS 6/6/19 CONCLUSION: no effect.
# para$ass$age_plus_group = "False"
### add TOA LHPs to ASSESSMENT
para$ass$estgrowth = list(c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV))
para$ass$estWL = list(c(TOA_wl_c, TOA_wl_d))
para$ass$estpin.mat = TOA_maturity_ogive
para$ass$estmaturity = list(c(TOA_a_50, TOA_a_95))
para$ass$maturity_props_all = c("allvalues ", round(ogive("logistic", para$ass$ages, list(x50 = TOA_a_50, x95 = TOA_a_95)), 4))
para$ass$estnatM[[1]] = TOA_M
para$ass$rec_steepness = TOA_h
### specify regions and fisheries in the AM
para$ass$regions <- "R1"
para$ass$Fish <- matrix(c("LL", 1, "R1", "SelLL", "qLL", 1, 100, 100),
ncol=8, byrow=TRUE,
dimnames=list(c(),c("Fishery","Season","Region","Sel","q",
"ProjCatch", "catchage_N", "catchlen_N")))
## match the fisheries in the OM with the AM
para$ass$match_fishery <- matrix(c("LL1", "LL"), ncol=1, byrow=TRUE,
dimnames=list(c("OM","Ass"),c()))
## match the regions in the OM with the AM
para$ass$match_region <- matrix(c(1, "R1"), ncol=1, byrow=TRUE,
dimnames=list(c("OM","Ass"),c()))
## Initial values and bounds for B0
## make these boundries wider
## lower upper, casal lower casal upper
para$ass$initialB0 <- c(5e3, 5e5, 5e3, 5e5)
# para$ass$initialB0 <- c(2e5, 3e5, 5e4, 1e6)
para$ass$B0 <- runif(1, para$ass$initialB0[1],para$ass$initialB0[2])
para$ass$estim_initialization.B0 <- c("1", (para$ass$initialB0[3]),
(para$ass$initialB0[4]),
"uniform", "initialization.B0")
para$ass$list_fishery <- "LL"
para$ass$list_sel <- "SelLL"
para$ass$list_season <- "1" #(this was "1" "1")
para$ass$list_region <- "R1"
para$ass$list_q <- "qLL"
para$ass$selectivity_names <- "SelLL"
## Set to NULL to have no selectivity estimated selectivity
para$ass$estimate_selectivity <- para$ass$selectivity_names
para$ass$qq_names <- "qLL"
## specify the selectivity in the assessment
para$ass$selN_all <- list()
para$ass$selN_all[[1]] <- c("double_normal", as.character(LL_sel))
##** removing Trawl selectivity here, not sure what these values should be
para$ass$est_selN_all[[1]] <- NULL
para$ass$qqvalues ## there are 3 of these, should there only be one?
para$ass$future_constant_catches <- 200 # what does this specify?
# BS: ##### change para$ass$sample_years to accomodate edit above: expand study years.
# BS: 30/05/19 add catchlen_yrs to activate sampling lengths of fish ass well as age
para$ass$sample_years = am_sampling(years = para$om$years,
ycurr = para$om$year[2],
catchage_yrs = age_years,
catchlen_yrs = len_years,
tagging_yrs = tag_years)$sample_years ##** This is the range of years various sampling (sizing, ageing, tagging, etc.) took place. note, when long year range, tagging row doesn't show up when print.
## movement parameters
para$ass$migration_est_pin <- FALSE
para$ass$n_migrations <- 0
## for safety set the other parameters to NULL
para$ass$migration_names <- NULL
para$ass$migration_times <- NULL
para$ass$migrate_from <- NULL
para$ass$migrate_to <- NULL
para$ass$migrators <- NULL
para$ass$rates_all <- NULL
para$ass$migration_rates_all_low <- NULL
para$ass$migration_rates_all_upp <- NULL
para$ass$migration_rates_all_prior <- NULL
## might need modify R1 values as well
para$ass$output$`numbers_at[Numbers_at_age_R2]` <- NULL
## Modify the CASAL control parameters
para[["control"]]$casal_path <- file_path
para[["control"]] <- update_casal_file_names(para[["control"]]) # Update casal file names with in/outputprefix
## turn the TAC finder off (quicker running) and we dont need the TAC finder
para[["control"]]$pin_TAC_finder <- 0
## set CASAL to calculate SSB after 100% F and M
para$ass$spawning_part_mort <- 1
##*** now we modify the tag loss rate in the AM
para$ass$tag_shedding_rate <- 0.0084
#### Turn off estimations
para$ass$estimate_selectivity = NULL
para$ass$estim_recruitment.YCS[[2]] = rep(1, para$om$n_years)
para$ass$estim_recruitment.YCS[[3]] = rep(1, para$om$n_years)
# para$ass$estim_recruitment.YCS[4] = "uniform"
#### Turn off other stuff?
# para$ass$size_based = "True"
para$ass$rec_sigma = 0
# ## this appears to be correctly allocating arrays
# res <- setup_om_objects(para=para)
# ## now populate the OM arrays
# res <- populate_om_objects(para=para, res=res)
# ## specify the initial population
# res <- get_initial_pop(para, res=res)
# res <- run_annual_om(para, res=res, FALSE)
## Check if OM matches AM parameters ----
## source check_lhps and check the life history parameters
# source("../../check_lhps.R")
# check_lhps(para)
check_match(para)
### Loop ----
## specify objects to save simulation outputs
dim_names <- c("OM_ssb0", paste0("OM_ssb_R1_", para$om$years),
paste0("OM_rec_R1_", para$om$years),
"AM_ssb0_",paste0("AM_ssb_", para$om$years),
paste0("AM_rec_", para$om$years),
"SelLL_P1", "SelLL_P2", "SelLL_P3", "Starting_AM_B0")
# #################################################################### add different dim_names if using AT selectivity
# dim_names <- c("OM_ssb0", paste0("OM_ssb_R1_", para$om$years), paste0("OM_ssb_R2_",para$om$years),
# paste0("OM_rec_R1_", para$om$years), paste0("OM_rec_R2_", para$om$years),
# "AM_ssb0_",paste0("AM_ssb_", para$ass$years), paste0("AM_rec_", para$ass$years),
# "SelLL_P1", "SelLL_P2", "SelLL_P3", "SelLL_P4", "SelLL_P5", "Starting_AM_B0")
#
dim_length <- length(dim_names)
## construct the output array
output <- array(data = 0, dim = c(n_iters, dim_length),
dimnames = list("Iter"=1:n_iters, dim_names))
## some conveniences for accessing arrays in the OM
R1 <- 1
S1 <- para$om$season[2]
## paths for the CASAL outputs
casal_path <- para[["control"]]$casal_path
mpd_dat <- para[["control"]]$mpd_dat
output_log <- para[["control"]]$output_log
##* Save the parameter specifications (in case something goes wrong)
# sink(paste0(file_name, "_Para.txt"))
# para
# sink()
## add the scenario name to para
para$scenario$name <- scenario
## a better way to save the parameters is to save an Rds
# saveRDS(para, file = paste0(file_path, "baseline_para_PT.Rds"))
## loop over the number of iterations
for(i_iter in 1:n_iters){
## Set up om objects
res <- setup_om_objects(para=para)
#### Populate om objects (mod) with biological data (M and fecundity), fishery effort & selectivity, and observation sample sizes
res <- populate_om_objects(para=para, res=res)
#### Get initial population numbers and calculate SSB0
## the warning about init_age_comp' is getting anoying
res <- suppressWarnings(get_initial_pop(para=para, res=res))
#### Run Annual OM loops with/without assessment
#para[["control"]]$pin_casal_assess <- 1
res <- run_annual_om(para=para, res=res) #, intern=TRUE) #
## set output quantities to NULL, could use rm() instead
ssb <- rec <- SSB0 <- OM_SSB_R1 <- OM_Rec_R1 <- NULL
## calculated quantities
## ssb[quant, year, sex, season, area] # rec is the same
ssb <- res$mod$ssb
rec <- res$mod$rec
## Spawning Biomass
OM_SSB0 <- res$mod$ssb0
OM_SSB_R1 <- apply(ssb[1,,,S1,R1],c(1),sum)
## Recruitment
OM_Rec_R1 <- apply(rec[1,,,S1,R1],c(1),sum)
##*** Potentially add selectivity, however, perhaps not required
## length of output
om_ncols <- length(OM_SSB0) + length(OM_SSB_R1) + length(OM_Rec_R1)
# apply(ssb(stock)[,,,om$season[2],],c(2),sum)
##** I actually want the value for area 1 in my simulations
##** I'll need to sum up ssb for the first year and area (example below)
output[i_iter, 1:om_ncols] <- c(OM_SSB0, OM_SSB_R1, OM_Rec_R1)
## add the AM output if it exists for this iteration
if(file.exists(paste0(casal_path, para[["control"]]$mpd_dat)) &
length(scan(file = paste0(casal_path, para[["control"]]$mpd_dat),
what = "character")) > 0){
nname1 <- para[["control"]]$output_log # instead of output_logYear = take output log file from last assessment year only
casal_quants <- casal::extract.quantities(file=nname1, path=casal_path)
casal_freeparams <- casal::extract.free.parameters(file=nname1, path=casal_path)
## quantities form the Assessment model
AM_SSB0 <- casal_quants$B0
AM_SSB_R1 <- casal_quants$SSBs$SSB
AM_Rec_R1 <- casal_quants$recruitments$recruitment
## save the selectivity as it can be useful for investigating bias
if(length(casal_freeparams$`selectivity[SelLL].all`) > 0){
AM_SelLL <- c(casal_freeparams$`selectivity[SelLL].all`, para$ass$B0)
}else AM_SelLL <- c(0, 0, 0, 0)
## add the rest of the output
output[i_iter, (om_ncols+1):ncol(output)] <- c(AM_SSB0, AM_SSB_R1, AM_Rec_R1, AM_SelLL)
}
# print(round(c(OM_SSB0, OM_SSB_R1, OM_SSB_R2, OM_Rec_R1, OM_Rec_R2),0))
# print(round(c(AM_SSB0, AM_SSB_R1, AM_Rec_R1, AM_SelLL),0))
# OM_SSB_R1 - AM_SSB_R1
# OM_Rec_R1 - AM_Rec_R1
} # end MCMC loop
### Save Output ----
## write to file
# write.csv(output, file=paste0(casal_path,file_name, "_Niter_", n_iters, ".csv"),
# quote=FALSE, na="NA", row.names=FALSE)
################################## BS Plots ----
library(fishplot)
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1")
plot_SSB(output, item = "AM_ssb_", mean = F)
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(9000, 60000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(9000, 60000))
temp = read.csv("TOA_bug_1_Niter_1000.csv")
par(mfrow = c(1,2))
plot_SSB(temp, item = "OM_ssb_R1", ylim = c(9000, 60000), main = "TOA_bug_1_Niter_1000")
plot_SSB(temp, item = "AM_ssb_", mean = F, ylim = c(9000, 60000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(350000, 500000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(350000, 500000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(0, 30000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(0, 30000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(70000, 140000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(70000, 140000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(200000, 330000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(200000, 330000))
| /scripts/TOA_bug_1.R | no_license | Brett-Stacy/planetfish2_bugs | R | false | false | 18,774 | r | ## 23 May 2019 - R 3.5.3 - planetfish2 version 0.6.1
## Let's find that bug!
## Script for generating data and casal assessment output using Planetfish2
## Goal: Use a scenario that performs poorly to inspect attributes between OM and AM that may cause discrepency. Namely, inspect
## perfect knowledge of population parameters, correspondence of catch quantity and survey scanns, etc.
## Packages ----
library(earthfish)
library(casal)
## House ----
# rm(list = ls())
## number of iterations and scenario name
n_iters <- 2
scenario <- "TOA_bug_1"
## define a file name
file_name <- scenario
## Brett's file path
file_path <- paste0("C:/Users/STA384/Documents/GitHub/planetfish2_bugs/Output/", scenario, "/")
# file_path <- paste0("C:/Users/bstacy/Documents/GitHub/planetfish2_bugs/Output/", scenario, "/")
## PB file path
# file_path <- paste0("C:/Work/Manuscripts/2019_Stacy_etal_Exploratory_fisheries/Antarctic_toothfish_scenario/single_region/", scenario, "/")
## not sure what requires the WD to be set
setwd(file_path)
getwd()
### Specify Antarctic toothfish biological parameters ----
# Ages
TOA_max_age = 35 # Yates and Ziegler 2018
# Growth. Von Bertalanfy. Yates and Ziegler 2018
TOA_L_inf = 1565
TOA_K = 0.146
TOA_t_0 = 0.015
TOA_CV = 0.122
# # Growth. Von Bertalanfy. TOP
# TOA_L_inf = 2870
# TOA_K = 0.02056
# TOA_t_0 = -4.28970
# TOA_CV = 0.100
# Growth. Von Bertalanfy. TOA Mormede et al. 2014
# TOA_L_inf = 1690.7
# TOA_K = 0.093
# TOA_t_0 = -0.256
# TOA_CV = 0.102
# Growth. Von Bertalanfy. TOA Other
# TOA_L_inf = 2265
# TOA_K = 0.093
# TOA_t_0 = -0.256
# TOA_CV = 0.102
# Weight-Length. Yates and Ziegler 2018
TOA_wl_c = 3.0088e-12
TOA_wl_d = 3.2064
# Maturity. Yates and Ziegler 2018
TOA_maturity_ogive = "logistic"
TOA_a_50 = 14.45 # 14.45
TOA_a_95 = 6.5 # 6.5
# Natural Mortality. Yates and Ziegler 2018
TOA_M = 0.13
# Stock-recruitment Steepness h from Beverton-Holt
TOA_h = 0.75
### Model parameters ----
## mean recruitment
########## STUDY PERIOD
study_year_range = c(1990, 2010) # c(1968, 2018)
no_fish_range = 1 ##** Don't fish the first 10 years
R_mu <- 1e6 # 1e6
## recruitment variability
R_sigma <- 0 ############################################### 3e-01
# Total catch across single area
total_catch <- 6000
########## SAMPLING
n_years_aged = 10 ##** age fish for last 20 years. used in para$ass$sample_years
age_years = if(n_years_aged == 0) NULL else((study_year_range[2] - n_years_aged):study_year_range[2])
### BS: 30/05/19 add len_years
n_years_lengthed = 0
len_years = if(n_years_lengthed == 0) NULL else((study_year_range[2] - n_years_lengthed):study_year_range[2])
# The number of tags released in area 1 each year ##### just area 1?
n_tags = 2500 # 2500
# Number of years to release tags. leave out last year.
n_years_tags = 5 # 5
tag_years = (study_year_range[2] - n_years_tags + 1):study_year_range[2] - 1
## define longline selectivity
LL_sel <- list(top=10, sigma_left=2, sigma_right=10)
## add a logistic selectivity
### OM ----
## specify the default parameters
para <- get_om_data()
para$control$Assyr_range = para$om$year
# Set age parameters
para$om$age = c(1, TOA_max_age)
para$om$ages = 1:TOA_max_age
para$om$n_ages = length(para$om$ages)
para$om$names_ages = paste0("age", "_", para$om$ages, sep = "")
# Set growth parameters
para$om$growth = list(f = c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV),
m = c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV))
# Set WL parameters
para$om$WL = list(f = list(a = TOA_wl_c, b = TOA_wl_d), m = list(a = TOA_wl_c, b = TOA_wl_d))
# Set maturity parameters
para$om$pin_mat = TOA_maturity_ogive
para$om$maturity = list(f = list(x50 = TOA_a_50, x95 = TOA_a_95), m = list(x50 = TOA_a_50, x95 = TOA_a_95))
# Set natural mortality parameters
para$om$natM = rep(TOA_M, para$om$n_ages)
# Set Stock-recruitment h
para$om$rec_h = TOA_h
## redefine the recruitment used for spawning biomass
para$om$rec_mu <- R_mu
para$om$rec_sigma <- R_sigma
## redefine the number of areas
para$om$region <- c(1,1)
para$om$regions <- c(1)
para$om$n_regions <- length(para$om$regions)
## B0 is determined by method 3
## redefine the fisheries
para$om$fishery <- c("LL1")
para$om$n_fisheries <- length(para$om$fishery)
## set selectivity to NULL then define selectivities
para$om$pin_sel <- NULL
para$om$pin_sel$LL1 <- "double_normal"
para$om$select$LL1 <- LL_sel
## catches for the two fisheries
para$om$catch <- array(data=0, dim=c(para$om$n_years, para$om$n_fisheries,
para$om$n_seasons, para$om$n_regions),
dimnames=list("Year"=para$om$years,"Fishery"=para$om$fishery,
"Season"=para$om$seasons,"Region"=para$om$regions))
## fille the arrays with the catch in the specified proportions
para$om$catch[,"LL1", 1,1] <- rep(total_catch, para$om$n_years)
para$om$catch[1,,,] <- 0 # Catch in first year set to 0, such that SSB in first year is unfished biomass
## overwrite the effort (not sure it is used)
para$om$effort <- para$om$catch
## catch splits (I don't think it is used)
para$om$catchsplits <- array(data=0, dim=c(para$om$n_years, para$om$n_fisheries,
para$om$n_seasons,para$om$n_regions),
dimnames=list("Year"=para$om$years,"Fishery"=para$om$fishery,
"Season"=para$om$seasons,"Region"=para$om$regions))
para$om$catchsplits[,"LL1", 1,1] <- rep(1, para$om$n_years)
para$om$catchsplits[1,,,] <- 0 # Catch in first year set to 0, such that SSB in first year is unfished biomass
## remove Trawl selectivity
##** better to just specify the complete selectivity
para$om$select$Trawl <- NULL
## define the movement matrix for the population ################## BS 8/5: may have to set these == 0 because there are default values in there?
###* do we need something in the movement
# move_by_age <- as.data.frame(matrix(c(rep(move_1_to_2, para$om$n_ages),
# rep(move_2_to_1, para$om$n_ages)),
# nrow=2, ncol=para$om$n_ages, byrow=TRUE))
## for Sex, Year and Season, 0 means the movement rule represents all of that category
# para$om$move_rules <- data.frame("Origin" = c(1,2),"Destination" = c(2,1), "Sex" = c(0,0),
# "Year" = c(0,0), "Season" = c(0, 0),
# setNames(move_by_age, para$om$names_ages))
## define the movement matrix
# move_tag_by_age <- as.data.frame(matrix(c(rep(move_tags_1_to_2, para$om$n_ages),
# rep(move_tags_2_to_1, para$om$n_ages)),
# nrow=2, ncol=para$om$n_ages, byrow=TRUE))
## separate dataframe for the movement of tags
# para$om$move_tag_rules <- data.frame("Origin" = c(1,2),"Destination" = c(2,1), "Sex" = c(0,0),
# "Year" = c(0,0), "Season" = c(0, 0),
# setNames(move_tag_by_age, para$om$names_ages))
### Sampling ----
## only release tags in Area 1
para$sampling$pin_tag_N <- "Fixed" # this defines how tagging specified
para$sampling$tag_N <- c(n_tags, 0)
para$sampling$tag_rate <- c(2,0)
# Change sampling length classes and n_classes
# para$sampling$len_classes = seq(100, round(L_inf, digits = -2), 50)
# para$sampling$len_classes = seq(100, 3000, 50) # seq(300, 2000, 50) # yates and ziegler 2018
# para$sampling$n_lengths = length(para$sampling$len_classes)
## Tagging selectivity
para$sampling$pin_tag_sel <- list()
para$sampling$pin_tag_sel[[para$om$fishery[1]]] <- para$om$pin_sel[[para$om$fishery[1]]]
## define the selectivity parameters
para$sampling$tag_select <- list()
para$sampling$tag_select[[para$om$fishery[1]]] <- para$om$select[[para$om$fishery[1]]]
###* Brett thinks this isn't being implemented. 23/5/19: It is being implemented but it isn't clear where it shows up in CASAL files.
## age 1000 fish in Region 1
para$sampling$catchage_N <- 1000 # 1000
#### BS 30/05/19 add catchlen_N too
# para$sampling$catchlen_N = 1000 # 1000
### Assessment ----
para <- get_casal_para(para)
## Plust Group? BS 6/6/19 CONCLUSION: no effect.
# para$ass$age_plus_group = "False"
### add TOA LHPs to ASSESSMENT
para$ass$estgrowth = list(c(TOA_L_inf, TOA_K, TOA_t_0, TOA_CV))
para$ass$estWL = list(c(TOA_wl_c, TOA_wl_d))
para$ass$estpin.mat = TOA_maturity_ogive
para$ass$estmaturity = list(c(TOA_a_50, TOA_a_95))
para$ass$maturity_props_all = c("allvalues ", round(ogive("logistic", para$ass$ages, list(x50 = TOA_a_50, x95 = TOA_a_95)), 4))
para$ass$estnatM[[1]] = TOA_M
para$ass$rec_steepness = TOA_h
### specify regions and fisheries in the AM
para$ass$regions <- "R1"
para$ass$Fish <- matrix(c("LL", 1, "R1", "SelLL", "qLL", 1, 100, 100),
ncol=8, byrow=TRUE,
dimnames=list(c(),c("Fishery","Season","Region","Sel","q",
"ProjCatch", "catchage_N", "catchlen_N")))
## match the fisheries in the OM with the AM
para$ass$match_fishery <- matrix(c("LL1", "LL"), ncol=1, byrow=TRUE,
dimnames=list(c("OM","Ass"),c()))
## match the regions in the OM with the AM
para$ass$match_region <- matrix(c(1, "R1"), ncol=1, byrow=TRUE,
dimnames=list(c("OM","Ass"),c()))
## Initial values and bounds for B0
## make these boundries wider
## lower upper, casal lower casal upper
para$ass$initialB0 <- c(5e3, 5e5, 5e3, 5e5)
# para$ass$initialB0 <- c(2e5, 3e5, 5e4, 1e6)
para$ass$B0 <- runif(1, para$ass$initialB0[1],para$ass$initialB0[2])
para$ass$estim_initialization.B0 <- c("1", (para$ass$initialB0[3]),
(para$ass$initialB0[4]),
"uniform", "initialization.B0")
para$ass$list_fishery <- "LL"
para$ass$list_sel <- "SelLL"
para$ass$list_season <- "1" #(this was "1" "1")
para$ass$list_region <- "R1"
para$ass$list_q <- "qLL"
para$ass$selectivity_names <- "SelLL"
## Set to NULL to have no selectivity estimated selectivity
para$ass$estimate_selectivity <- para$ass$selectivity_names
para$ass$qq_names <- "qLL"
## specify the selectivity in the assessment
para$ass$selN_all <- list()
para$ass$selN_all[[1]] <- c("double_normal", as.character(LL_sel))
##** removing Trawl selectivity here, not sure what these values should be
para$ass$est_selN_all[[1]] <- NULL
para$ass$qqvalues ## there are 3 of these, should there only be one?
para$ass$future_constant_catches <- 200 # what does this specify?
# BS: ##### change para$ass$sample_years to accomodate edit above: expand study years.
# BS: 30/05/19 add catchlen_yrs to activate sampling lengths of fish ass well as age
para$ass$sample_years = am_sampling(years = para$om$years,
ycurr = para$om$year[2],
catchage_yrs = age_years,
catchlen_yrs = len_years,
tagging_yrs = tag_years)$sample_years ##** This is the range of years various sampling (sizing, ageing, tagging, etc.) took place. note, when long year range, tagging row doesn't show up when print.
## movement parameters
para$ass$migration_est_pin <- FALSE
para$ass$n_migrations <- 0
## for safety set the other parameters to NULL
para$ass$migration_names <- NULL
para$ass$migration_times <- NULL
para$ass$migrate_from <- NULL
para$ass$migrate_to <- NULL
para$ass$migrators <- NULL
para$ass$rates_all <- NULL
para$ass$migration_rates_all_low <- NULL
para$ass$migration_rates_all_upp <- NULL
para$ass$migration_rates_all_prior <- NULL
## might need modify R1 values as well
para$ass$output$`numbers_at[Numbers_at_age_R2]` <- NULL
## Modify the CASAL control parameters
para[["control"]]$casal_path <- file_path
para[["control"]] <- update_casal_file_names(para[["control"]]) # Update casal file names with in/outputprefix
## turn the TAC finder off (quicker running) and we dont need the TAC finder
para[["control"]]$pin_TAC_finder <- 0
## set CASAL to calculate SSB after 100% F and M
para$ass$spawning_part_mort <- 1
##*** now we modify the tag loss rate in the AM
para$ass$tag_shedding_rate <- 0.0084
#### Turn off estimations
para$ass$estimate_selectivity = NULL
para$ass$estim_recruitment.YCS[[2]] = rep(1, para$om$n_years)
para$ass$estim_recruitment.YCS[[3]] = rep(1, para$om$n_years)
# para$ass$estim_recruitment.YCS[4] = "uniform"
#### Turn off other stuff?
# para$ass$size_based = "True"
para$ass$rec_sigma = 0
# ## this appears to be correctly allocating arrays
# res <- setup_om_objects(para=para)
# ## now populate the OM arrays
# res <- populate_om_objects(para=para, res=res)
# ## specify the initial population
# res <- get_initial_pop(para, res=res)
# res <- run_annual_om(para, res=res, FALSE)
## Check if OM matches AM parameters ----
## source check_lhps and check the life history parameters
# source("../../check_lhps.R")
# check_lhps(para)
check_match(para)
### Loop ----
## specify objects to save simulation outputs
dim_names <- c("OM_ssb0", paste0("OM_ssb_R1_", para$om$years),
paste0("OM_rec_R1_", para$om$years),
"AM_ssb0_",paste0("AM_ssb_", para$om$years),
paste0("AM_rec_", para$om$years),
"SelLL_P1", "SelLL_P2", "SelLL_P3", "Starting_AM_B0")
# #################################################################### add different dim_names if using AT selectivity
# dim_names <- c("OM_ssb0", paste0("OM_ssb_R1_", para$om$years), paste0("OM_ssb_R2_",para$om$years),
# paste0("OM_rec_R1_", para$om$years), paste0("OM_rec_R2_", para$om$years),
# "AM_ssb0_",paste0("AM_ssb_", para$ass$years), paste0("AM_rec_", para$ass$years),
# "SelLL_P1", "SelLL_P2", "SelLL_P3", "SelLL_P4", "SelLL_P5", "Starting_AM_B0")
#
dim_length <- length(dim_names)
## construct the output array
output <- array(data = 0, dim = c(n_iters, dim_length),
dimnames = list("Iter"=1:n_iters, dim_names))
## some conveniences for accessing arrays in the OM
R1 <- 1
S1 <- para$om$season[2]
## paths for the CASAL outputs
casal_path <- para[["control"]]$casal_path
mpd_dat <- para[["control"]]$mpd_dat
output_log <- para[["control"]]$output_log
##* Save the parameter specifications (in case something goes wrong)
# sink(paste0(file_name, "_Para.txt"))
# para
# sink()
## add the scenario name to para
para$scenario$name <- scenario
## a better way to save the parameters is to save an Rds
# saveRDS(para, file = paste0(file_path, "baseline_para_PT.Rds"))
## loop over the number of iterations
for(i_iter in 1:n_iters){
## Set up om objects
res <- setup_om_objects(para=para)
#### Populate om objects (mod) with biological data (M and fecundity), fishery effort & selectivity, and observation sample sizes
res <- populate_om_objects(para=para, res=res)
#### Get initial population numbers and calculate SSB0
## the warning about init_age_comp' is getting anoying
res <- suppressWarnings(get_initial_pop(para=para, res=res))
#### Run Annual OM loops with/without assessment
#para[["control"]]$pin_casal_assess <- 1
res <- run_annual_om(para=para, res=res) #, intern=TRUE) #
## set output quantities to NULL, could use rm() instead
ssb <- rec <- SSB0 <- OM_SSB_R1 <- OM_Rec_R1 <- NULL
## calculated quantities
## ssb[quant, year, sex, season, area] # rec is the same
ssb <- res$mod$ssb
rec <- res$mod$rec
## Spawning Biomass
OM_SSB0 <- res$mod$ssb0
OM_SSB_R1 <- apply(ssb[1,,,S1,R1],c(1),sum)
## Recruitment
OM_Rec_R1 <- apply(rec[1,,,S1,R1],c(1),sum)
##*** Potentially add selectivity, however, perhaps not required
## length of output
om_ncols <- length(OM_SSB0) + length(OM_SSB_R1) + length(OM_Rec_R1)
# apply(ssb(stock)[,,,om$season[2],],c(2),sum)
##** I actually want the value for area 1 in my simulations
##** I'll need to sum up ssb for the first year and area (example below)
output[i_iter, 1:om_ncols] <- c(OM_SSB0, OM_SSB_R1, OM_Rec_R1)
## add the AM output if it exists for this iteration
if(file.exists(paste0(casal_path, para[["control"]]$mpd_dat)) &
length(scan(file = paste0(casal_path, para[["control"]]$mpd_dat),
what = "character")) > 0){
nname1 <- para[["control"]]$output_log # instead of output_logYear = take output log file from last assessment year only
casal_quants <- casal::extract.quantities(file=nname1, path=casal_path)
casal_freeparams <- casal::extract.free.parameters(file=nname1, path=casal_path)
## quantities form the Assessment model
AM_SSB0 <- casal_quants$B0
AM_SSB_R1 <- casal_quants$SSBs$SSB
AM_Rec_R1 <- casal_quants$recruitments$recruitment
## save the selectivity as it can be useful for investigating bias
if(length(casal_freeparams$`selectivity[SelLL].all`) > 0){
AM_SelLL <- c(casal_freeparams$`selectivity[SelLL].all`, para$ass$B0)
}else AM_SelLL <- c(0, 0, 0, 0)
## add the rest of the output
output[i_iter, (om_ncols+1):ncol(output)] <- c(AM_SSB0, AM_SSB_R1, AM_Rec_R1, AM_SelLL)
}
# print(round(c(OM_SSB0, OM_SSB_R1, OM_SSB_R2, OM_Rec_R1, OM_Rec_R2),0))
# print(round(c(AM_SSB0, AM_SSB_R1, AM_Rec_R1, AM_SelLL),0))
# OM_SSB_R1 - AM_SSB_R1
# OM_Rec_R1 - AM_Rec_R1
} # end MCMC loop
### Save Output ----
## write to file
# write.csv(output, file=paste0(casal_path,file_name, "_Niter_", n_iters, ".csv"),
# quote=FALSE, na="NA", row.names=FALSE)
################################## BS Plots ----
library(fishplot)
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1")
plot_SSB(output, item = "AM_ssb_", mean = F)
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(9000, 60000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(9000, 60000))
temp = read.csv("TOA_bug_1_Niter_1000.csv")
par(mfrow = c(1,2))
plot_SSB(temp, item = "OM_ssb_R1", ylim = c(9000, 60000), main = "TOA_bug_1_Niter_1000")
plot_SSB(temp, item = "AM_ssb_", mean = F, ylim = c(9000, 60000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(350000, 500000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(350000, 500000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(0, 30000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(0, 30000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(70000, 140000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(70000, 140000))
par(mfrow = c(1,2))
plot_SSB(output, item = "OM_ssb_R1", ylim = c(200000, 330000))
plot_SSB(output, item = "AM_ssb_", mean = F, ylim = c(200000, 330000))
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170364e+295, 2.39632081294858e-312, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613102657-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170364e+295, 2.39632081294858e-312, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fifo.R
\name{add}
\alias{add}
\title{Add a value to the queue}
\usage{
add(q, val)
}
\arguments{
\item{q}{is the current queue object}
\item{val}{is the value to be added to the queue}
}
\value{
The updated queue object
}
\description{
Add a value to the queue
}
\examples{
q <- qfifo()
q <- add(q,1234)
}
| /Practice/qfifo/man/add.Rd | no_license | shyamks111/R | R | false | true | 385 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fifo.R
\name{add}
\alias{add}
\title{Add a value to the queue}
\usage{
add(q, val)
}
\arguments{
\item{q}{is the current queue object}
\item{val}{is the value to be added to the queue}
}
\value{
The updated queue object
}
\description{
Add a value to the queue
}
\examples{
q <- qfifo()
q <- add(q,1234)
}
|
testlist <- list(A = structure(c(1.38523985028333e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126546-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 226 | r | testlist <- list(A = structure(c(1.38523985028333e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# 2. faza: Uvoz podatkov
sl <- locale("sl", decimal_mark=",", grouping_mark=".")
# install.packages("maptools")
# install.packages("httr")
# install.packages("rvest")
library(rvest)
library(reshape2)
library(rgeos)
library(maptools)
library(httr)
# Funkcija, ki uvozi občine iz Wikipedije
uvozi.obcine <- function() {
link <- "http://sl.wikipedia.org/wiki/Seznam_ob%C4%8Din_v_Sloveniji"
stran <- html_session(link) %>% read_html()
tabela <- stran %>% html_nodes(xpath="//table[@class='wikitable sortable']") %>%
.[[1]] %>% html_table(dec=",")
for (i in 1:ncol(tabela)) {
if (is.character(tabela[[i]])) {
Encoding(tabela[[i]]) <- "UTF-8"
}
if (is.numeric(tabela[[i]])){
tabela[[i]] <- as.character(tabela[[i]])
Encoding(tabela[[i]]) <- "UTF-8"
}
}
colnames(tabela) <- c("obcina", "povrsina", "prebivalci", "gostota", "naselja",
"ustanovitev", "pokrajina", "regija", "odcepitev")
tabela$obcina <- gsub("Slovenskih", "Slov.", tabela$obcina)
tabela$obcina[tabela$obcina == "Kanal ob Soči"] <- "Kanal"
tabela$obcina[tabela$obcina == "Loški potok"] <- "Loški Potok"
for (col in c("povrsina", "prebivalci", "gostota", "naselja", "ustanovitev")) {
tabela[[col]] <- parse_number(tabela[[col]], na="-", locale=sl)
}
for (col in c("obcina", "pokrajina", "regija")) {
tabela[[col]] <- factor(tabela[[col]])
}
return(tabela)
}
# Funkcija, ki uvozi podatke iz datoteke druzine.csv
uvozi.druzine <- function(obcine) {
data <- read_csv2("podatki/druzine.csv", col_names=c("obcina", 1:4),
locale=locale(encoding="Windows-1250"))
data$obcina <- data$obcina %>% strapplyc("^([^/]*)") %>% unlist() %>%
strapplyc("([^ ]+)") %>% sapply(paste, collapse=" ") %>% unlist()
data$obcina[data$obcina == "Sveti Jurij"] <- "Sveti Jurij ob Ščavnici"
data <- data %>% melt(id.vars="obcina", variable.name="velikost.druzine",
value.name="stevilo.druzin")
data$velikost.druzine <- parse_number(as.character(data$velikost.druzine), na="-", locale=sl)
data$obcina <- factor(data$obcina, levels=obcine)
return(data)
}
# Zapišimo podatke v razpredelnico obcine
obcine <- uvozi.obcine()
# Zapišimo podatke v razpredelnico druzine.
druzine <- uvozi.druzine(levels(obcine$obcina))
# Če bi imeli več funkcij za uvoz in nekaterih npr. še ne bi
# potrebovali v 3. fazi, bi bilo smiselno funkcije dati v svojo
# datoteko, tukaj pa bi klicali tiste, ki jih potrebujemo v
# 2. fazi. Seveda bi morali ustrezno datoteko uvoziti v prihodnjih
# fazah.
| /uvoz/uvoz.r | permissive | AnjaTrobec/APPR-2018-19 | R | false | false | 2,577 | r | # 2. faza: Uvoz podatkov
sl <- locale("sl", decimal_mark=",", grouping_mark=".")
# install.packages("maptools")
# install.packages("httr")
# install.packages("rvest")
library(rvest)
library(reshape2)
library(rgeos)
library(maptools)
library(httr)
# Funkcija, ki uvozi občine iz Wikipedije
uvozi.obcine <- function() {
link <- "http://sl.wikipedia.org/wiki/Seznam_ob%C4%8Din_v_Sloveniji"
stran <- html_session(link) %>% read_html()
tabela <- stran %>% html_nodes(xpath="//table[@class='wikitable sortable']") %>%
.[[1]] %>% html_table(dec=",")
for (i in 1:ncol(tabela)) {
if (is.character(tabela[[i]])) {
Encoding(tabela[[i]]) <- "UTF-8"
}
if (is.numeric(tabela[[i]])){
tabela[[i]] <- as.character(tabela[[i]])
Encoding(tabela[[i]]) <- "UTF-8"
}
}
colnames(tabela) <- c("obcina", "povrsina", "prebivalci", "gostota", "naselja",
"ustanovitev", "pokrajina", "regija", "odcepitev")
tabela$obcina <- gsub("Slovenskih", "Slov.", tabela$obcina)
tabela$obcina[tabela$obcina == "Kanal ob Soči"] <- "Kanal"
tabela$obcina[tabela$obcina == "Loški potok"] <- "Loški Potok"
for (col in c("povrsina", "prebivalci", "gostota", "naselja", "ustanovitev")) {
tabela[[col]] <- parse_number(tabela[[col]], na="-", locale=sl)
}
for (col in c("obcina", "pokrajina", "regija")) {
tabela[[col]] <- factor(tabela[[col]])
}
return(tabela)
}
# Funkcija, ki uvozi podatke iz datoteke druzine.csv
uvozi.druzine <- function(obcine) {
data <- read_csv2("podatki/druzine.csv", col_names=c("obcina", 1:4),
locale=locale(encoding="Windows-1250"))
data$obcina <- data$obcina %>% strapplyc("^([^/]*)") %>% unlist() %>%
strapplyc("([^ ]+)") %>% sapply(paste, collapse=" ") %>% unlist()
data$obcina[data$obcina == "Sveti Jurij"] <- "Sveti Jurij ob Ščavnici"
data <- data %>% melt(id.vars="obcina", variable.name="velikost.druzine",
value.name="stevilo.druzin")
data$velikost.druzine <- parse_number(as.character(data$velikost.druzine), na="-", locale=sl)
data$obcina <- factor(data$obcina, levels=obcine)
return(data)
}
# Zapišimo podatke v razpredelnico obcine
obcine <- uvozi.obcine()
# Zapišimo podatke v razpredelnico druzine.
druzine <- uvozi.druzine(levels(obcine$obcina))
# Če bi imeli več funkcij za uvoz in nekaterih npr. še ne bi
# potrebovali v 3. fazi, bi bilo smiselno funkcije dati v svojo
# datoteko, tukaj pa bi klicali tiste, ki jih potrebujemo v
# 2. fazi. Seveda bi morali ustrezno datoteko uvoziti v prihodnjih
# fazah.
|
# Command-line interface (positions within commandArgs()):
#   arg6 = region
#   arg7 = number of MCI
#   arg8 = number of normal
args=commandArgs()
REGION=args[6]
# NOTE(review): only the scalar DIAG is read here, but the code below refers
# to a vector `DIAGs` (e.g. DIAGs[1], DIAGs[2]) that is never defined in
# this file -- confirm where DIAGs should come from (possibly args[7:8]).
DIAG=args[7]
# Reads per-vertex LCDM distance files (one file per group: wave1 / wave2),
# draws density and ECDF summary plots to a PDF, and runs the tests enabled
# in `condTests` on the two distance distributions.
#
# fileNames : character vector of input files, one per group; each file is
#             read with read.table() and its first column is used.
# condTests : logical vector of enabled tests; only condTests[2]
#             (Kolmogorov-Smirnov) is acted on in this function.
# REGION    : region label used in the output PDF file name.
#
# Returns a list of htest objects (two-sided, "less" and "greater" KS tests)
# or an empty list when the KS test is disabled.
#
# NOTE(review): the PDF file name below reads the global `DIAGs`, which is
# not defined in this file -- confirm the caller provides it.
LCDM <- function(fileNames, condTests, REGION) {
  ### Pre-Processing ###
  library(MASS)
  b <- list()
  dists <- c()
  distsl <- c()
  for (i in 1:length(fileNames)) {
    # Bug fix: the original `distsl[i] <- read.table(...)` relied on R's
    # list coercion of single-bracket assignment and, for multi-column
    # files, silently kept only the first column (with a warning).
    # Extract the first column explicitly instead.
    distsl[[i]] <- read.table(fileNames[i])[[1]]
    # Trim implausible distances to the (-2, 8) mm window.
    distsl[[i]] = distsl[[i]][distsl[[i]] > -2]
    distsl[[i]] = distsl[[i]][distsl[[i]] < 8]
  }
  # Subsample every group down to the smallest group size
  # (`dists` is kept for tests requiring equal group sizes).
  minlength = min(as.numeric(lapply(distsl, length)))
  dists = sapply(distsl, sample, minlength) # for tests requiring same number in group
  ######### Simple Summary Statistics ########
  pdf(file=paste("wave1_wave2/summarystat_", REGION, "_", DIAGs, ".pdf", sep=''))
  col = c('black', 'red')
  thk95 <- c()
  thk99 <- c()
  vol95 <- c()
  vol99 <- c()
  # Overlaid density curves plus the 95th / 99th percentile per group.
  for (i in 1:length(fileNames)) {
    if (i == 1)
      hist(distsl[[i]], 100, freq=FALSE, col="white", border="white", xlim=c(-4,10), ylim=c(0,.4), main=NA, xlab="Distance (mm)", ylab="Probability Density")
    par(new=T);
    dens1 = density(distsl[[i]])
    lines(dens1, lwd=1, col=col[i])
    thk95[i] = quantile(distsl[[i]], .95)
    thk99[i] = quantile(distsl[[i]], .99)
  }
  legend(6, .3, paste(c('wave1','wave2')), cex=1, col= c('black','red','blue'), lwd=2, bty="n");
  # Overlaid empirical CDFs.
  for (i in 1:length(fileNames)) {
    cdf1 = ecdf(round(distsl[[i]], 3))
    if (i == 1)
    {
      plot(cdf1, verticals=TRUE, xlim=c(-2,7), do.points=F, lwd=.5, col=col[i], main=NA, xlab="Distance (mm)", ylab="Cumulative Probability Density")
      par(new=T);
    }
    else { lines(cdf1, lwd=.5, col=col[i]) }
  }
  legend(5, .3, paste(c('wave1','wave2'), sep=", "), cex=1, col= c('black','red','blue'), lwd=2, bty="n");
  dev.off()
  ########## 2-group tests ###########
  ## Kolmogorov-Smirnov Test (KS Test) ##
  if (condTests[2] == TRUE) {
    b[[1]] <- ks.test(distsl[[2]], distsl[[1]], alternative="t", exact=NULL)
    b[[2]] <- ks.test(distsl[[2]], distsl[[1]], alternative="l", exact=NULL)
    b[[3]] <- ks.test(distsl[[2]], distsl[[1]], alternative="g", exact=NULL)
  }
  return(b)
}
# Build the two pooled input file paths (one per diagnosis group).
# NOTE(review): `DIAGs` is indexed as a vector here, but only the scalar
# `DIAG` is assigned from commandArgs() above; as written this fails with
# "object 'DIAGs' not found" -- confirm the intended definition (args[7:8]?).
file1=paste("./pooled_files_wave1wave2/",REGION,"_",DIAGs[1],"_wave1_pooled_antsy.txt",sep="")
file2=paste("./pooled_files_wave1wave2/",REGION,"_",DIAGs[2],"_wave1_pooled_antsy.txt",sep="")
fileNames=c(file1,file2)
# Select which tests to run; `b` is first the vector of requested test
# names and is then reused to hold the logical enable flags.
b <- c("Kolmogorov-Smirnov Test")
condTests<-c()
condTests[1]="Mann Whitney U Test"%in%b
condTests[2]="Kolmogorov-Smirnov Test"%in%b
condTests[3]="Welch's t-test"%in%b
condTests[4]="Kruskal-Wallis Test"%in%b
condTests[5]="ANOVA F-test"%in%b
b<-condTests
out<-LCDM(fileNames,condTests,REGION)
### write results into file ###
OUTFILE=paste("./results/Pooled_test_results_",REGION,"_",DIAGs,".txt",sep='')
#cat("Statistical Summary for ",REGION,"\n",file=OUTFILE)
# Write the three KS p-values (two-sided, less, greater) on a single line.
if(length(out)!=0)
{
if(b[2]){
cat(out[[1]]$p.value,"\t",out[[2]]$p.value,"\t",out[[3]]$p.value,"\n",file=OUTFILE,append=TRUE)
}
} else { cat("No test was performed",file=OUTFILE,append=TRUE)}
| /src/LCDM_wave1_wave2_pooled.r | no_license | adalisan/MAS_Score_analysis | R | false | false | 2,946 | r | #arg6=region
#arg7=number of MCI
#arg8=number of normal
args=commandArgs()
REGION=args[6]
DIAG=args[7]
# Reads per-vertex LCDM distance files (one file per group: wave1 / wave2),
# draws density and ECDF summary plots to a PDF, and runs the tests enabled
# in `condTests` on the two distance distributions.
#
# fileNames : character vector of input files, one per group; each file is
#             read with read.table() and its first column is used.
# condTests : logical vector of enabled tests; only condTests[2]
#             (Kolmogorov-Smirnov) is acted on in this function.
# REGION    : region label used in the output PDF file name.
#
# Returns a list of htest objects (two-sided, "less" and "greater" KS tests)
# or an empty list when the KS test is disabled.
#
# NOTE(review): the PDF file name below reads the global `DIAGs`, which is
# not defined in this file -- confirm the caller provides it.
LCDM <- function(fileNames, condTests, REGION) {
  ### Pre-Processing ###
  library(MASS)
  b <- list()
  dists <- c()
  distsl <- c()
  for (i in 1:length(fileNames)) {
    # Bug fix: the original `distsl[i] <- read.table(...)` relied on R's
    # list coercion of single-bracket assignment and, for multi-column
    # files, silently kept only the first column (with a warning).
    # Extract the first column explicitly instead.
    distsl[[i]] <- read.table(fileNames[i])[[1]]
    # Trim implausible distances to the (-2, 8) mm window.
    distsl[[i]] = distsl[[i]][distsl[[i]] > -2]
    distsl[[i]] = distsl[[i]][distsl[[i]] < 8]
  }
  # Subsample every group down to the smallest group size
  # (`dists` is kept for tests requiring equal group sizes).
  minlength = min(as.numeric(lapply(distsl, length)))
  dists = sapply(distsl, sample, minlength) # for tests requiring same number in group
  ######### Simple Summary Statistics ########
  pdf(file=paste("wave1_wave2/summarystat_", REGION, "_", DIAGs, ".pdf", sep=''))
  col = c('black', 'red')
  thk95 <- c()
  thk99 <- c()
  vol95 <- c()
  vol99 <- c()
  # Overlaid density curves plus the 95th / 99th percentile per group.
  for (i in 1:length(fileNames)) {
    if (i == 1)
      hist(distsl[[i]], 100, freq=FALSE, col="white", border="white", xlim=c(-4,10), ylim=c(0,.4), main=NA, xlab="Distance (mm)", ylab="Probability Density")
    par(new=T);
    dens1 = density(distsl[[i]])
    lines(dens1, lwd=1, col=col[i])
    thk95[i] = quantile(distsl[[i]], .95)
    thk99[i] = quantile(distsl[[i]], .99)
  }
  legend(6, .3, paste(c('wave1','wave2')), cex=1, col= c('black','red','blue'), lwd=2, bty="n");
  # Overlaid empirical CDFs.
  for (i in 1:length(fileNames)) {
    cdf1 = ecdf(round(distsl[[i]], 3))
    if (i == 1)
    {
      plot(cdf1, verticals=TRUE, xlim=c(-2,7), do.points=F, lwd=.5, col=col[i], main=NA, xlab="Distance (mm)", ylab="Cumulative Probability Density")
      par(new=T);
    }
    else { lines(cdf1, lwd=.5, col=col[i]) }
  }
  legend(5, .3, paste(c('wave1','wave2'), sep=", "), cex=1, col= c('black','red','blue'), lwd=2, bty="n");
  dev.off()
  ########## 2-group tests ###########
  ## Kolmogorov-Smirnov Test (KS Test) ##
  if (condTests[2] == TRUE) {
    b[[1]] <- ks.test(distsl[[2]], distsl[[1]], alternative="t", exact=NULL)
    b[[2]] <- ks.test(distsl[[2]], distsl[[1]], alternative="l", exact=NULL)
    b[[3]] <- ks.test(distsl[[2]], distsl[[1]], alternative="g", exact=NULL)
  }
  return(b)
}
file1=paste("./pooled_files_wave1wave2/",REGION,"_",DIAGs[1],"_wave1_pooled_antsy.txt",sep="")
file2=paste("./pooled_files_wave1wave2/",REGION,"_",DIAGs[2],"_wave1_pooled_antsy.txt",sep="")
fileNames=c(file1,file2)
b <- c("Kolmogorov-Smirnov Test")
condTests<-c()
condTests[1]="Mann Whitney U Test"%in%b
condTests[2]="Kolmogorov-Smirnov Test"%in%b
condTests[3]="Welch's t-test"%in%b
condTests[4]="Kruskal-Wallis Test"%in%b
condTests[5]="ANOVA F-test"%in%b
b<-condTests
out<-LCDM(fileNames,condTests,REGION)
### write results into file ###
OUTFILE=paste("./results/Pooled_test_results_",REGION,"_",DIAGs,".txt",sep='')
#cat("Statistical Summary for ",REGION,"\n",file=OUTFILE)
if(length(out)!=0)
{
if(b[2]){
cat(out[[1]]$p.value,"\t",out[[2]]$p.value,"\t",out[[3]]$p.value,"\n",file=OUTFILE,append=TRUE)
}
} else { cat("No test was performed",file=OUTFILE,append=TRUE)}
|
#' @title r.iplot
#' @description Interactive plot of `y` (optionally against `x`) with
#'   manipulate sliders controlling the x and y axis window.
#' @param y matrix-like data; coerced to column form via rmodel::r.toColumns.
#' @param x optional x coordinates; when omitted the row index is used for
#'   the default x range.
#' @param xlim,ylim initial axis limits. Note that `missing()` is used
#'   below, so passing these defaults explicitly behaves differently from
#'   omitting the arguments.
#' @param ... further arguments passed on to r.plot.
#' @export
r.iplot <- function (
y,
x = NULL,
xlim = c(0,1), ylim = c(0,1),
...)
{
require(manipulate)
y = rmodel::r.toColumns(y)
n <- length(y[,1])
m <- length(y[1,])
# Derive x limits from x when it was supplied, otherwise span row indices.
if(missing(xlim) && !missing(x)) {
xlim = c(min(x), max(x))
} else if(missing(xlim)) {
xlim = c(1,n)
}
# y is a required argument, so !missing(y) is always TRUE when reached;
# the ylim default above therefore only applies when ylim is passed
# explicitly.
if(missing(ylim) && !missing(y)) {
ylim = c(min(y), max(y))
}
# Four sliders: window start / end for each axis.
manipulate(r.plot(x=x, y=y, xlim=c(xa,xb), ylim=c(ya,yb), ...),
xa=slider(xlim[1],xlim[2],initial=xlim[1]),
xb=slider(xlim[1],xlim[2],initial=xlim[2]),
ya=slider(ylim[1],ylim[2],initial=ylim[1]),
yb=slider(ylim[1],ylim[2],initial=ylim[2])
)
}
#' @title r.iplot.kmeans.shapes
#' @description Interactive k-means shape plot: controls select the number
#'   of clusters, the filtering fraction and whether centroids are drawn.
#' @export
r.iplot.kmeans.shapes <- function (
x,
fmin = 0.05, fmax=1.0, fstep = 0.05,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL)
{
require(manipulate)
manipulate(
r.plot.kmeans.shapes(
x = x,
nclusters = num_clusters,
filtrat = filter_frac,
paintCentroids = draw_centroids,
main = main, sub = sub, xlab = xlab, ylab = ylab),
num_clusters = slider(1, 10, initial = 2, label = "nclusters"),
filter_frac = slider(fmin, fmax, label = "Filtre", step = fstep),
draw_centroids = checkbox(TRUE, "Pintar Centroides")
)
}
#' @title r.iplot.smoothkmeans
#' @description Interactive smoothed k-means shape plot; a single slider
#'   selects the number of clusters.
#' @export
r.iplot.smoothkmeans <- function (
x,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL)
{
require(manipulate)
manipulate(
r.plot.kmeans.smoothshapes(
x = x,
nclusters = num_clusters,
main = main, sub = sub, xlab = xlab, ylab = ylab),
num_clusters = slider(1, 10, initial = 2, label = "nclusters")
)
}
#' @title r.iplot2D.data
#' @description Interactive 2D scatter of two selectable columns of `x`;
#'   two sliders pick which pair of coordinates is plotted.
#' @param x matrix-like data (observations in rows, coordinates in columns).
#' @param clustReal,clustModel optional cluster assignments forwarded to
#'   r.plot2D.data for colouring.
#' @param main,sub,xlab,ylab usual plot annotations.
#' @param xaxis,yaxis,box logical display toggles.
#' @param ... further arguments passed on to r.plot2D.data.
#' @export
r.iplot2D.data <- function (
x,
clustReal = NULL, clustModel = NULL,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL,
xaxis = T, yaxis = T, box = T, ...)
{
# Number of available coordinates (columns); bounds both sliders below.
m <- length(x[1,])
require(manipulate)
manipulate(r.plot2D.data(x=x,
comp1 = c1,
comp2 = c2,
clustReal = clustReal,
clustModel = clustModel,
main = main,
sub = sub,
xlab = xlab,
ylab = ylab,
xaxis = xaxis,
yaxis = yaxis,
box = box, ...),
c1 = slider(1, m, initial = 1, label="coord 1", step=1),
c2 = slider(1, m, initial = 2, label="coord 2", step=1)
)
}
#' @title r.iplot2D.pca
#' @export
r.iplot2D.pca <- function (
x = NULL, pca = NULL,
clustReal = NULL, clustModel = NULL,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL,
xaxis = T, yaxis = T, box = T, ...)
{
if(missing(pca)) {
if (!missing(x) && !is.null(x)) {
x = rmodel::r.toColumns(x)
m = length(x[1,])
pca = prcomp(x)
} else {
print("Error: no poden ser x i pca parametres absents al mateix temps.")
}
} else {
m = length(pca$x[1,])
}
require(manipulate)
manipulate(r.plot2D.pca(pca=pca,
comp1 = c1,
comp2 = c2,
clustReal = clustReal,
clustModel = clustModel,
main = main,
sub = sub,
xlab = xlab,
ylab = ylab,
xaxis = xaxis,
yaxis = yaxis,
box = box, ...),
c1 = slider(1, m, initial = 1, label="coord 1", step=1),
c2 = slider(1, m, initial = 2, label="coord 2", step=1)
)
} | /R/rplot_interactive.R | no_license | rocalabern/rplot | R | false | false | 3,664 | r | #' @title r.iplot
#' @export
r.iplot <- function (
y,
x = NULL,
xlim = c(0,1), ylim = c(0,1),
...)
{
require(manipulate)
y = rmodel::r.toColumns(y)
n <- length(y[,1])
m <- length(y[1,])
if(missing(xlim) && !missing(x)) {
xlim = c(min(x), max(x))
} else if(missing(xlim)) {
xlim = c(1,n)
}
if(missing(ylim) && !missing(y)) {
ylim = c(min(y), max(y))
}
manipulate(r.plot(x=x, y=y, xlim=c(xa,xb), ylim=c(ya,yb), ...),
xa=slider(xlim[1],xlim[2],initial=xlim[1]),
xb=slider(xlim[1],xlim[2],initial=xlim[2]),
ya=slider(ylim[1],ylim[2],initial=ylim[1]),
yb=slider(ylim[1],ylim[2],initial=ylim[2])
)
}
#' @title r.iplot.kmeans.shapes
#' @export
r.iplot.kmeans.shapes <- function (
x,
fmin = 0.05, fmax=1.0, fstep = 0.05,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL)
{
require(manipulate)
manipulate(r.plot.kmeans.shapes(x=x, nclusters=k, filtrat=f, paintCentroids=c, main=main, sub=sub, xlab=xlab, ylab=ylab),
k = slider(1, 10, initial = 2, label="nclusters"),
f = slider(fmin, fmax, label="Filtre", step=fstep),
c = checkbox(TRUE, "Pintar Centroides")
)
}
#' @title r.iplot.smoothkmeans
#' @export
r.iplot.smoothkmeans <- function (
x,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL)
{
require(manipulate)
manipulate(r.plot.kmeans.smoothshapes(x=x, nclusters=k, main=main, sub=sub, xlab=xlab, ylab=ylab),
k = slider(1, 10, initial=2, label="nclusters")
)
}
#' @title r.iplot2D.data
#' @export
r.iplot2D.data <- function (
x,
clustReal = NULL, clustModel = NULL,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL,
xaxis = T, yaxis = T, box = T, ...)
{
m <- length(x[1,])
require(manipulate)
manipulate(r.plot2D.data(x=x,
comp1 = c1,
comp2 = c2,
clustReal = clustReal,
clustModel = clustModel,
main = main,
sub = sub,
xlab = xlab,
ylab = ylab,
xaxis = xaxis,
yaxis = yaxis,
box = box, ...),
c1 = slider(1, m, initial = 1, label="coord 1", step=1),
c2 = slider(1, m, initial = 2, label="coord 2", step=1)
)
}
#' @title r.iplot2D.pca
#' @description Interactive 2D plot of PCA scores; either a precomputed
#'   `pca` or raw data `x` (from which a PCA is computed) must be supplied.
#' @param x raw data; used to compute a PCA when `pca` is not given.
#' @param pca optional result of prcomp(); takes precedence over `x`.
#' @param clustReal,clustModel optional cluster assignments forwarded to
#'   r.plot2D.pca for colouring.
#' @param main,sub,xlab,ylab usual plot annotations.
#' @param xaxis,yaxis,box logical display toggles.
#' @param ... further arguments passed on to r.plot2D.pca.
#' @export
r.iplot2D.pca <- function (
x = NULL, pca = NULL,
clustReal = NULL, clustModel = NULL,
main = NULL, sub = NULL, xlab = NULL, ylab = NULL,
xaxis = T, yaxis = T, box = T, ...)
{
if(missing(pca)) {
if (!missing(x) && !is.null(x)) {
x = rmodel::r.toColumns(x)
m = length(x[1,])
pca = prcomp(x)
} else {
# Bug fix: the original only print()ed this message and then continued,
# failing later with "object 'm' not found". Fail fast instead.
stop("Error: no poden ser x i pca parametres absents al mateix temps.")
}
} else {
m = length(pca$x[1,])
}
require(manipulate)
manipulate(r.plot2D.pca(pca=pca,
comp1 = c1,
comp2 = c2,
clustReal = clustReal,
clustModel = clustModel,
main = main,
sub = sub,
xlab = xlab,
ylab = ylab,
xaxis = xaxis,
yaxis = yaxis,
box = box, ...),
c1 = slider(1, m, initial = 1, label="coord 1", step=1),
c2 = slider(1, m, initial = 2, label="coord 2", step=1)
)
}
# Clear workspace
# NOTE(review): rm(list = ls()) wipes whatever session sources this script;
# consider dropping it in favour of running in a fresh R session.
rm(list = ls())
# Setup
################################################################################
# Packages
library(lubridate)
library(tidyverse)
library(tabulizer)
# Directories
indir <- "data/entanglements/data/entanglement_reports"
outdir <- "data/entanglements/data"
# Extract the appendix tables from the PDF (one list element per page)
data_orig <- tabulizer::extract_tables(file=file.path(indir, "Saez_etal_2020_Appendix2.pdf"))
# Read code keys (lookup tables mapping report codes to readable labels)
fishery_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Fishery codes")
location_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Location codes") %>% select(location_code, location_short, location_state)
response_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Response codes")
# Format data
################################################################################
# Column names (order matches the appendix table layout in the PDF)
cols <- c("num", "case_id", "comm_name", "date", "confirmed_yn", "report_source",
"county_obs", "state_obs", "region_obs_code", "alive_yn", "entanglement_type", "gear_code",
"fishery_code", "location_ent_code", "response_yn", "response_status_code")
# Merge data: stack the per-page tables, dropping each page's header row
data1 <- purrr::map_df(1:length(data_orig), function(x) {
# Format data
df <- data_orig[[x]] %>%
as.data.frame(stringsAsFactors=F) %>%
slice(2:nrow(.)) %>%
setNames(cols)
})
# Format data: clean values, expand coded columns via the key tables, and
# keep a tidy set of renamed columns.
data2 <- data1 %>%
# Remove header rows that weren't caught in merge
filter(!num %in% c("", "#")) %>%
# Fix dates (one known typo is patched before parsing with ymd())
mutate(date=date %>% recode("2017/06017"="2017/06/17") %>% ymd(),
year=year(date),
month=month(date)) %>%
# Format columns: normalise case, expand abbreviations / code values
mutate(num=as.numeric(num),
comm_name=stringr::str_to_sentence(comm_name),
confirmed_yn=recode(confirmed_yn, "C"="Confirmed", "U"="Unconfirmed"),
report_source=recode(report_source, "Fishing Vessel"="Fishing vessel", "OBSERVER"="Observer", "San Juan Excursi"="San Juan Excursion"),
state_obs=toupper(state_obs) %>% recode("MX"="Mexico", "CANADA"="British Columbia", "MEXICO"="Mexico", "CA"="California", "OR"="Oregon", "WA"="Washington", "BC"="British Columbia"),
county_obs=recode(county_obs, "Canada"="British Columbia"),
alive_yn=stringr::str_to_sentence(alive_yn),
response_yn=recode(response_yn, "N"="No", "Y"="Yes"),
gear_type=recode(gear_code, "HK/LN"="Hook/line", "NET"="Net", "OTH"="Other", "POT"="Pot", "UNK"="Unknown")) %>%
# Add country (derived from the cleaned state of observation)
mutate(country_obs=ifelse(state_obs=="Mexico", "Mexico",
ifelse(state_obs=="British Columbia", "Canada", "United States"))) %>%
# Add fishery info
left_join(fishery_key, by="fishery_code") %>%
rename(fishery_type=fishery) %>%
# Add response info
left_join(response_key, by="response_status_code") %>%
# Add location info (location_key is joined twice: once for the region of
# observation, once for the entanglement location)
left_join(location_key %>% select(-location_state), by=c("region_obs_code"="location_code")) %>%
rename(region_obs=location_short) %>%
left_join(location_key, by=c("location_ent_code"="location_code")) %>%
rename(state_ent=location_state,
location_ent=location_short) %>%
# Arrange columns
select(num, case_id, year, month, date, comm_name, confirmed_yn, report_source,
country_obs, state_obs, region_obs, county_obs, state_ent, location_ent,
entanglement_type, gear_type, fishery_type, alive_yn, response_yn, response_status)
# Inspect data
str(data2)
freeR::complete(data2)
# table(data2$num)
# table(data2$case_id)
table(data2$comm_name)
# Bug fix: the inspection calls below must use the column names actually
# selected into `data2` above; the original referenced pre-rename names
# (date_obs, state, county, region_code, type, gear_code, fishery_code,
# location_code, response, response_status_code) that no longer exist and
# would error (range(NULL) / table(NULL)).
range(data2$date)
table(data2$confirmed_yn)
table(data2$report_source)
table(data2$state_obs)
table(data2$county_obs)
table(data2$region_obs)
table(data2$alive_yn)
table(data2$entanglement_type)
table(data2$gear_type)
table(data2$fishery_type)
table(data2$location_ent)
table(data2$response_yn)
table(data2$response_status)
# Export data
saveRDS(data2, file=file.path(outdir, "WC_1982_2017_entanglement_observations.Rds"))
| /data/entanglements/Step2_format_entanglement_data.R | no_license | cfree14/dungeness | R | false | false | 3,947 | r |
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(lubridate)
library(tidyverse)
library(tabulizer)
# Directories
indir <- "data/entanglements/data/entanglement_reports"
outdir <- "data/entanglements/data"
# Extract data
data_orig <- tabulizer::extract_tables(file=file.path(indir, "Saez_etal_2020_Appendix2.pdf"))
# Read code keys
fishery_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Fishery codes")
location_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Location codes") %>% select(location_code, location_short, location_state)
response_key <- readxl::read_excel(file.path(indir, "code_key.xlsx"), sheet="Response codes")
# Format data
################################################################################
# Column names
cols <- c("num", "case_id", "comm_name", "date", "confirmed_yn", "report_source",
"county_obs", "state_obs", "region_obs_code", "alive_yn", "entanglement_type", "gear_code",
"fishery_code", "location_ent_code", "response_yn", "response_status_code")
# Merge data
data1 <- purrr::map_df(1:length(data_orig), function(x) {
# Format data
df <- data_orig[[x]] %>%
as.data.frame(stringsAsFactors=F) %>%
slice(2:nrow(.)) %>%
setNames(cols)
})
# Format data
data2 <- data1 %>%
# Remove header rows that weren't caught in merge
filter(!num %in% c("", "#")) %>%
# Fix dates
mutate(date=date %>% recode("2017/06017"="2017/06/17") %>% ymd(),
year=year(date),
month=month(date)) %>%
# Format columns
mutate(num=as.numeric(num),
comm_name=stringr::str_to_sentence(comm_name),
confirmed_yn=recode(confirmed_yn, "C"="Confirmed", "U"="Unconfirmed"),
report_source=recode(report_source, "Fishing Vessel"="Fishing vessel", "OBSERVER"="Observer", "San Juan Excursi"="San Juan Excursion"),
state_obs=toupper(state_obs) %>% recode("MX"="Mexico", "CANADA"="British Columbia", "MEXICO"="Mexico", "CA"="California", "OR"="Oregon", "WA"="Washington", "BC"="British Columbia"),
county_obs=recode(county_obs, "Canada"="British Columbia"),
alive_yn=stringr::str_to_sentence(alive_yn),
response_yn=recode(response_yn, "N"="No", "Y"="Yes"),
gear_type=recode(gear_code, "HK/LN"="Hook/line", "NET"="Net", "OTH"="Other", "POT"="Pot", "UNK"="Unknown")) %>%
# Add country
mutate(country_obs=ifelse(state_obs=="Mexico", "Mexico",
ifelse(state_obs=="British Columbia", "Canada", "United States"))) %>%
# Add fishery info
left_join(fishery_key, by="fishery_code") %>%
rename(fishery_type=fishery) %>%
# Add response info
left_join(response_key, by="response_status_code") %>%
# Add location info
left_join(location_key %>% select(-location_state), by=c("region_obs_code"="location_code")) %>%
rename(region_obs=location_short) %>%
left_join(location_key, by=c("location_ent_code"="location_code")) %>%
rename(state_ent=location_state,
location_ent=location_short) %>%
# Arrange columns
select(num, case_id, year, month, date, comm_name, confirmed_yn, report_source,
country_obs, state_obs, region_obs, county_obs, state_ent, location_ent,
entanglement_type, gear_type, fishery_type, alive_yn, response_yn, response_status)
# Inspect data
str(data2)
freeR::complete(data2)
# table(data2$num)
# table(data2$case_id)
table(data2$comm_name)
range(data2$date_obs)
table(data2$confirmed_yn)
table(data2$report_source)
table(data2$state)
table(data2$county)
table(data2$region_code)
table(data2$alive_yn)
table(data2$type)
table(data2$gear_code)
table(data2$fishery_code)
table(data2$location_code)
table(data2$response)
table(data2$response_status_code)
# Export data
saveRDS(data2, file=file.path(outdir, "WC_1982_2017_entanglement_observations.Rds"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scMeth_functions.R
\name{merge_cpgs}
\alias{merge_cpgs}
\title{Create in silico merged bulk profiles from single-cell files}
\usage{
merge_cpgs(cpg_all, cluster_assignments, desired_cluster)
}
\arguments{
\item{cpg_all}{The list containing all CpG calls in data.frame format. Required.}
\item{cluster_assignments}{The cluster assignments output by cluster_dissimilarity(). Required.}
\item{desired_cluster}{The number desired from the input cluster assignments. Required}
}
\value{
a BSmooth object ready for smoothing
}
\description{
Create in silico merged bulk profiles from single-cell files
}
\details{
Uses the bsseq package to perform in silico merging of single-cell CpG calls. Requires the bsseq R package to be installed
}
| /man/merge_cpgs.Rd | no_license | hui-tony-zk/PDclust | R | false | true | 817 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scMeth_functions.R
\name{merge_cpgs}
\alias{merge_cpgs}
\title{Create in silico merged bulk profiles from single-cell files}
\usage{
merge_cpgs(cpg_all, cluster_assignments, desired_cluster)
}
\arguments{
\item{cpg_all}{The list containing all CpG calls in data.frame format. Required.}
\item{cluster_assignments}{The cluster_assignments outputed from cluster_dissimilarity(). Required}
\item{desired_cluster}{The number desired from the input cluster assignments. Required}
}
\value{
a BSmooth object ready for smoothing
}
\description{
Create in silico merged bulk profiles from single-cell files
}
\details{
Uses the bsseq package to perform in silico merging of single-cell CpG calls. Requires the bsseq R package to be installed
}
|
# tidy() should collapse a fitted object to one row per patient containing
# the nls coefficients (m, k, beta) and the derived t50 value.
test_that("broom/tidy returns a simplified data set with only maes_ghoos t50", {
options(warn = 0)
# 10 simulated patients (seed fixed for reproducibility); presumably the
# default simulation size is 10 -- confirmed by the nrow assertion below.
data = cleanup_data(simulate_breathtest_data(seed = 10)$data)
fit = nls_fit(data)
td = tidy(fit)
expect_is(td, "tbl")
expect_equal(names(td), c("patient_id", "group", "m", "k", "beta", "t50"))
expect_equal(nrow(td), 10)
})
# augment() should return fitted values: at the observed minutes (with the
# observed pdr) by default, on a regular grid with `by`, or at an explicit
# vector of minutes.
test_that("broom/augment returns predictions", {
data = cleanup_data(simulate_breathtest_data()$data)
fit = nls_fit(data)
td = augment(fit)
expect_equal(names(td), c("patient_id", "group", "minute","pdr", "fitted"))
expect_equal(nrow(td), 110)
# Use spacing
td = augment(fit, by = 5)
expect_equal(names(td), c("patient_id", "group", "minute", "fitted"))
expect_equal(nrow(td), 320)
# Use vector of time values
td = augment(fit, minute = c(0:9, seq(10, 150, by = 5)))
expect_equal(names(td), c("patient_id", "group", "minute", "fitted"))
expect_equal(nrow(td), 390)
})
| /tests/testthat/test_broom.R | no_license | histopathology/breathtestcore | R | false | false | 939 | r | test_that("broom/tidy returns a simplified data set with only maes_ghoos t50", {
options(warn = 0)
data = cleanup_data(simulate_breathtest_data(seed = 10)$data)
fit = nls_fit(data)
td = tidy(fit)
expect_is(td, "tbl")
expect_equal(names(td), c("patient_id", "group", "m", "k", "beta", "t50"))
expect_equal(nrow(td), 10)
})
test_that("broom/augment returns predictions", {
data = cleanup_data(simulate_breathtest_data()$data)
fit = nls_fit(data)
td = augment(fit)
expect_equal(names(td), c("patient_id", "group", "minute","pdr", "fitted"))
expect_equal(nrow(td), 110)
# Use spacing
td = augment(fit, by = 5)
expect_equal(names(td), c("patient_id", "group", "minute", "fitted"))
expect_equal(nrow(td), 320)
# Use vector of time values
td = augment(fit, minute = c(0:9, seq(10, 150, by = 5)))
expect_equal(names(td), c("patient_id", "group", "minute", "fitted"))
expect_equal(nrow(td), 390)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector_parse_date.R
\name{vector_parse_date_first_clean}
\alias{vector_parse_date_first_clean}
\title{cleans incoming dates a little bit so they are a little similar and we don't need to try as many formats}
\usage{
vector_parse_date_first_clean(
dts,
TIME_SPLIT = "T",
seps = "[-.:/\\\\s+]",
replace_sep = "-"
)
}
\arguments{
\item{dts}{vector of date strings}
\item{TIME_SPLIT}{where to split to get rid of the time}
\item{seps}{what the date separators might be}
\item{replace_sep}{what to replace the separators with}
}
\description{
cleans incoming dates a little bit so they are a little similar and we don't need to try as many formats
}
\examples{
vector_parse_date_first_clean( c("03/03/92", "03/21/94", "03/02/99", "03/07/02"))
vector_parse_date_first_clean(dts = c("2018-11-01 08:30:00", "2017-09-19 08:30:00", "2017-02-28 08:30:00"), TIME_SPLIT = " ")
vector_parse_date_first_clean(dts = c("2018-11-01T08:30:00", "2017-09-19T08:30:00", "2017-02-28T08:30:00"), TIME_SPLIT = "T")
}
| /man/vector_parse_date_first_clean.Rd | no_license | hswerdfe/VectorParseDate | R | false | true | 1,085 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector_parse_date.R
\name{vector_parse_date_first_clean}
\alias{vector_parse_date_first_clean}
\title{cleans incoming dates a little bit so they are a little similar and we don't need to try as many formats}
\usage{
vector_parse_date_first_clean(
dts,
TIME_SPLIT = "T",
seps = "[-.:/\\\\s+]",
replace_sep = "-"
)
}
\arguments{
\item{dts}{vector of date strings}
\item{TIME_SPLIT}{where to split to get rid of the time}
\item{seps}{what the date separators might be}
\item{replace_sep}{what to replace the separators with}
}
\description{
cleans incoming dates a little bit so they are a little similar and we don't need to try as many formats
}
\examples{
vector_parse_date_first_clean( c("03/03/92", "03/21/94", "03/02/99", "03/07/02"))
vector_parse_date_first_clean(dts = c("2018-11-01 08:30:00", "2017-09-19 08:30:00", "2017-02-28 08:30:00"), TIME_SPLIT = " ")
vector_parse_date_first_clean(dts = c("2018-11-01T08:30:00", "2017-09-19T08:30:00", "2017-02-28T08:30:00"), TIME_SPLIT = "T")
}
|
\alias{gtkFileChooserListFilters}
\name{gtkFileChooserListFilters}
\title{gtkFileChooserListFilters}
\description{Lists the current set of user-selectable filters; see
\code{\link{gtkFileChooserAddFilter}}, \code{\link{gtkFileChooserRemoveFilter}}.}
\usage{gtkFileChooserListFilters(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkFileChooser}}] a \code{\link{GtkFileChooser}}}}
\details{ Since 2.4}
\value{[list] a \code{list} containing the current set of
user selectable filters.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkFileChooserListFilters.Rd | no_license | cran/RGtk2.10 | R | false | false | 569 | rd | \alias{gtkFileChooserListFilters}
\name{gtkFileChooserListFilters}
\title{gtkFileChooserListFilters}
\description{Lists the current set of user-selectable filters; see
\code{\link{gtkFileChooserAddFilter}}, \code{\link{gtkFileChooserRemoveFilter}}.}
\usage{gtkFileChooserListFilters(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkFileChooser}}] a \code{\link{GtkFileChooser}}}}
\details{ Since 2.4}
\value{[list] a \code{list} containing the current set of
user selectable filters.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681479918174e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615862582-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 473 | r | testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681479918174e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly.
# The two functions below provide a means to cache the inverse of a matrix, thus
# potentially saving significant processing time.
# Constructs a cache-aware matrix wrapper: a list of accessor closures that
# share the matrix `x` and a memoised inverse in the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
# This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix above. If the inverse has already been calculated (and the matrix
# has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
} | /cachematrix.R | no_license | edickinson75/ProgrammingAssignment2 | R | false | false | 1,229 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly.
# The two functions below provide a means to cache the inverse of a matrix, thus
# potentially saving significant processing time.
# This function creates a special "matrix" object that can cache its inverse.
# Creates a special "matrix" wrapper: a list of four accessor closures that
# share the matrix `x` and its cached inverse via this function's
# environment.
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL means "not computed yet".
inv <- NULL
set <- function(y) {
x <<- y
# A new matrix invalidates the previously cached inverse.
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
# Expose the four accessors as a named list.
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# Returns the inverse of the special "matrix" created by makeCacheMatrix.
# The inverse is computed at most once: if a cached value exists it is
# returned directly (with a message), otherwise solve() is called and the
# result is stored back into the cache object.
#
# x   : object returned by makeCacheMatrix()
# ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # Bug fix: forward `...` to solve(); the original signature accepted
  # extra arguments but silently ignored them.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
tcga_build_cellimage_nodes_files <- function() {
iatlas.data::synapse_store_feather_file(
dplyr::tibble(name = character()),
"deprecated_tcga_cellimage_network_nodes.feather",
"syn22126180"
)
immune_subtype_nodes <- "syn23538719" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
immune_subtype_nodes,
"tcga_cellimage_immune_subtype_nodes.feather",
"syn22126180"
)
tcga_study_nodes <- "syn23538721" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
tcga_study_nodes,
"tcga_cellimage_tcga_study_nodes.feather",
"syn22126180"
)
tcga_subtype_nodes <- "syn23538726" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
tcga_subtype_nodes,
"tcga_cellimage_tcga_subtype_nodes.feather",
"syn22126180"
)
}
| /R/feather_file_creator-tcga_build_cellimage_nodes_files.R | no_license | CRI-iAtlas/iatlas-feather-files | R | false | false | 1,295 | r | tcga_build_cellimage_nodes_files <- function() {
iatlas.data::synapse_store_feather_file(
dplyr::tibble(name = character()),
"deprecated_tcga_cellimage_network_nodes.feather",
"syn22126180"
)
immune_subtype_nodes <- "syn23538719" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
immune_subtype_nodes,
"tcga_cellimage_immune_subtype_nodes.feather",
"syn22126180"
)
tcga_study_nodes <- "syn23538721" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
tcga_study_nodes,
"tcga_cellimage_tcga_study_nodes.feather",
"syn22126180"
)
tcga_subtype_nodes <- "syn23538726" %>%
synapse_feather_id_to_tbl() %>%
dplyr::mutate("network" = "Cellimage Network") %>%
dplyr::select("name", "network", "feature", "entrez", "score", "dataset", "x", "y")
iatlas.data::synapse_store_feather_file(
tcga_subtype_nodes,
"tcga_cellimage_tcga_subtype_nodes.feather",
"syn22126180"
)
}
|
## version: 1.30
## method: post
## path: /networks/create
## code: 201
## response: {"Id":"22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30","Warning":""}
list(
id = "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
warning = "")
| /tests/testthat/sample_responses/v1.30/network_create.R | no_license | cran/stevedore | R | false | false | 269 | r | ## version: 1.30
## method: post
## path: /networks/create
## code: 201
## response: {"Id":"22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30","Warning":""}
list(
id = "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
warning = "")
|
context("hyperparameterValidation")
test_that("generate data", {
# generate data with nested no trafo
ps = makeParamSet(makeNumericParam("C", lower = -5, upper = 5,
trafo = function(x) 2^x)
)
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
lrn = makeTuneWrapper("classif.ksvm", control = ctrl,
resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
orig = getNestedTuneResultsOptPathDf(res)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# generate data, no include diag, trafo
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(trafoOptPath(res$opt.path))
orig = dropNamed(orig, c("eol", "error.message"))
orig = plyr::rename(orig, c(dob = "iteration"))
new = generateHyperParsEffectData(res, trafo = TRUE)
expect_equivalent(new$data, orig)
})
test_that("1 numeric hyperparam", {
# generate data
ps = makeParamSet(makeDiscreteParam("C", values = 2^ (-2:2)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
orig$C = as.numeric(as.character(orig$C))
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "iteration", y = "acc.test.mean",
plot.type = "line")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomLine"))
expect_equal(plt$labels$x, "iteration")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 discrete hyperparam", {
# generate data
ps = makeParamSet(makeDiscreteParam("kernel", values = c("vanilladot",
"polydot", "rbfdot"))
)
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "kernel", y = "acc.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "kernel")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 numeric hyperparam with optimizer failure", {
# generate data
ps = makeParamSet(makeDiscreteParam("C", values = c(-1, 0.5, 1.5))
)
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
orig$C = as.numeric(as.character(orig$C))
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "C", y = "acc.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 numeric hyperparam with nested cv", {
# generate data
ps = makeParamSet(makeNumericParam("C", lower = 0.01, upper = 2)
)
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
lrn = makeTuneWrapper("classif.ksvm", control = ctrl,
resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
orig = getNestedTuneResultsOptPathDf(res)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "C", y = "mmce.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "mmce.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("2 hyperparams", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res)
# test line creation
plt = plotHyperParsEffect(data, x = "iteration", y = "acc.test.mean",
plot.type = "line")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "iteration")
expect_equal(plt$labels$y, "acc.test.mean")
# test heatcontour creation with interpolation
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth",
show.experiments = TRUE)
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# learner crash
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res)
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# FIXME: make sure plots looks as expected
})
test_that("2 hyperparams nested", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc,
par.set = ps, show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
data = generateHyperParsEffectData(res)
# contour plot
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "contour", interpolate = "regr.earth",
show.interpolated = TRUE)
expect_warning(print(plt))
dir = tempdir()
path = file.path(dir, "test.svg")
expect_warning(ggsave(path))
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster", "GeomContour"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# learner crashes
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc,
par.set = ps, show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
data = generateHyperParsEffectData(res)
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth",
show.experiments = TRUE)
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
})
test_that("2+ hyperparams", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x),
makeDiscreteParam("degree", values = 2:5))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout", predict = "both")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
res = tuneParams(learn, task = pid.task, control = ctrl,
measures = list(acc, setAggregation(acc, train.mean)), resampling = rdesc,
par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
# test single hyperparam creation
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# test bivariate
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_equal(class(plt$layers[[1]]$geom)[1], "GeomTile")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
# simple example with nested cv
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x),
makeDiscreteParam("degree", values = 2:5))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout", predict = "both")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2, extract = getTuneResult)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# learner crash with imputation works
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plots looks as expected
})
| /tests/testthat/test_base_generateHyperParsEffect.R | no_license | cauldnz/mlr | R | false | false | 13,312 | r | context("hyperparameterValidation")
test_that("generate data", {
# generate data with nested no trafo
ps = makeParamSet(makeNumericParam("C", lower = -5, upper = 5,
trafo = function(x) 2^x)
)
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
lrn = makeTuneWrapper("classif.ksvm", control = ctrl,
resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
orig = getNestedTuneResultsOptPathDf(res)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# generate data, no include diag, trafo
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(trafoOptPath(res$opt.path))
orig = dropNamed(orig, c("eol", "error.message"))
orig = plyr::rename(orig, c(dob = "iteration"))
new = generateHyperParsEffectData(res, trafo = TRUE)
expect_equivalent(new$data, orig)
})
test_that("1 numeric hyperparam", {
# generate data
ps = makeParamSet(makeDiscreteParam("C", values = 2^ (-2:2)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
orig$C = as.numeric(as.character(orig$C))
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "iteration", y = "acc.test.mean",
plot.type = "line")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomLine"))
expect_equal(plt$labels$x, "iteration")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 discrete hyperparam", {
# generate data
ps = makeParamSet(makeDiscreteParam("kernel", values = c("vanilladot",
"polydot", "rbfdot"))
)
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "kernel", y = "acc.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "kernel")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 numeric hyperparam with optimizer failure", {
# generate data
ps = makeParamSet(makeDiscreteParam("C", values = c(-1, 0.5, 1.5))
)
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
res = tuneParams("classif.ksvm", task = pid.task, resampling = rdesc,
par.set = ps, control = ctrl, measures = acc)
orig = as.data.frame(res$opt.path)
orig$C = as.numeric(as.character(orig$C))
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "C", y = "acc.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("1 numeric hyperparam with nested cv", {
# generate data
ps = makeParamSet(makeNumericParam("C", lower = 0.01, upper = 2)
)
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
lrn = makeTuneWrapper("classif.ksvm", control = ctrl,
resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
orig = getNestedTuneResultsOptPathDf(res)
new = generateHyperParsEffectData(res, include.diagnostics = TRUE)
expect_equivalent(new$data, orig)
# make sure plot is created and can be saved
plt = plotHyperParsEffect(new, x = "C", y = "mmce.test.mean")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
# make sure plot has expected attributes
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
"GeomPoint")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "mmce.test.mean")
# FIXME: make sure plot looks as expected
})
test_that("2 hyperparams", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res)
# test line creation
plt = plotHyperParsEffect(data, x = "iteration", y = "acc.test.mean",
plot.type = "line")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "iteration")
expect_equal(plt$labels$y, "acc.test.mean")
# test heatcontour creation with interpolation
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth",
show.experiments = TRUE)
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# learner crash
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res)
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# FIXME: make sure plots looks as expected
})
test_that("2 hyperparams nested", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "rbfdot"))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc,
par.set = ps, show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
data = generateHyperParsEffectData(res)
# contour plot
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "contour", interpolate = "regr.earth",
show.interpolated = TRUE)
expect_warning(print(plt))
dir = tempdir()
path = file.path(dir, "test.svg")
expect_warning(ggsave(path))
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster", "GeomContour"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
# learner crashes
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc,
par.set = ps, show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2,
extract = getTuneResult)
data = generateHyperParsEffectData(res)
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", interpolate = "regr.earth",
show.experiments = TRUE)
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomPoint", "GeomRaster"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
expect_equal(plt$labels$shape, "learner_status")
})
test_that("2+ hyperparams", {
# generate data
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x),
makeDiscreteParam("degree", values = 2:5))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout", predict = "both")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
res = tuneParams(learn, task = pid.task, control = ctrl,
measures = list(acc, setAggregation(acc, train.mean)), resampling = rdesc,
par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
# test single hyperparam creation
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# test bivariate
plt = plotHyperParsEffect(data, x = "C", y = "sigma", z = "acc.test.mean",
plot.type = "heatmap", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_equal(class(plt$layers[[1]]$geom)[1], "GeomTile")
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "sigma")
expect_equal(plt$labels$fill, "acc.test.mean")
# simple example with nested cv
ps = makeParamSet(
makeNumericParam("C", lower = -5, upper = 5, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -5, upper = 5, trafo = function(x) 2^x),
makeDiscreteParam("degree", values = 2:5))
ctrl = makeTuneControlRandom(maxit = 5L)
rdesc = makeResampleDesc("Holdout", predict = "both")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
lrn = makeTuneWrapper(learn, control = ctrl,
measures = list(acc, mmce), resampling = rdesc, par.set = ps,
show.info = FALSE)
res = resample(lrn, task = pid.task, resampling = cv2, extract = getTuneResult)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# learner crash with imputation works
ps = makeParamSet(
makeDiscreteParam("C", values = c(-1, 0.5, 1.5)),
makeDiscreteParam("sigma", values = c(-1, 0.5, 1.5)))
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("Holdout")
learn = makeLearner("classif.ksvm", par.vals = list(kernel = "besseldot"))
res = tuneParams(learn, task = pid.task, control = ctrl, measures = acc,
resampling = rdesc, par.set = ps, show.info = FALSE)
data = generateHyperParsEffectData(res, partial.dep = TRUE)
plt = plotHyperParsEffect(data, x = "C", y = "acc.test.mean",
plot.type = "line", partial.dep.learn = "regr.rpart")
print(plt)
dir = tempdir()
path = file.path(dir, "test.svg")
ggsave(path)
expect_set_equal(sapply(plt$layers, function(x) class(x$geom)[1]),
c("GeomLine", "GeomPoint"))
expect_equal(plt$labels$x, "C")
expect_equal(plt$labels$y, "acc.test.mean")
# FIXME: make sure plots looks as expected
})
|
# the purpose of this script is to demonstrate ability to collect, work with, and clean a data set
# please read README and CodeBook for additional information
# loading libraries
lybrary(tidyverse)
# downloading and storing
if (!(file.exists("data"))) {
dir.create("data")
}
if (!(file.exists("data/dataset.zip"))) {
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",destfile ="data/dataset.zip", method="curl")
}
if (!(file.exists("data/UCI HAR Dataset"))) {
unzip("data/dataset.zip",exdir="data")
}
# reading test data
xtest <- read_table("./data/UCI HAR Dataset/test/X_test.txt", col_names = FALSE)
ytest <- read_table("./data/UCI HAR Dataset/test/y_test.txt", col_names = FALSE)
# reading train data
xtrain <- read_table("./data/UCI HAR Dataset/train/X_train.txt", col_names = FALSE)
ytrain <- read_table("./data/UCI HAR Dataset/train/y_train.txt", col_names = FALSE)
# reading subject data
subjects_test <- read_table("./data/UCI HAR Dataset/test/subject_test.txt", col_names = "subject")
subjects_train <- read_table("./data/UCI HAR Dataset/train/subject_train.txt", col_names = "subject")
# reading labels
activities <- read_table2("./data/UCI HAR Dataset/activity_labels.txt", col_names = FALSE)
features <- read_table2("./data/UCI HAR Dataset/features.txt", col_names = FALSE)
# starting requested tasks
# 1. Merges the training and the test sets to create one data set.
# preparing x,y data
xdata <- bind_rows(xtest, xtrain); names(xdata) <- features[[2]]
ydata <- bind_rows(ytest, ytrain); colnames(ydata) <- "activity"
# preparing subjects
subjects <- bind_rows(subjects_test, subjects_train)
# merging sets to create one data set
data <- bind_cols(subjects, ydata, xdata)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
data <- data %>% select(1:3, matches("-mean()|-std()"))
# 3. Uses descriptive activity names to name the activities in the data set
data[[2]] <- factor(data[[2]], labels = activities[[2]]) %>% tolower
# 4. Appropriately labels the data set with descriptive variable names.
# since we already labeled the columns, we're doing some clean up
names(data) <- gsub("BodyBody","Body",names(data))
names(data) <- gsub("\\()","",names(data)) ; names(data) <- tolower(names(data))
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data <- aggregate(. ~subject + activity, data, mean)
# Saving final dataset
write.table(tidy_data, file="./tidy_dataset.txt", row.names = FALSE)
| /run_analysis.R | no_license | RodMorais/Getting-and-Cleaning-Data-Course-Project | R | false | false | 2,619 | r | # the purpose of this script is to demonstrate ability to collect, work with, and clean a data set
# please read README and CodeBook for additional information
# loading libraries
lybrary(tidyverse)
# downloading and storing
if (!(file.exists("data"))) {
dir.create("data")
}
if (!(file.exists("data/dataset.zip"))) {
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",destfile ="data/dataset.zip", method="curl")
}
if (!(file.exists("data/UCI HAR Dataset"))) {
unzip("data/dataset.zip",exdir="data")
}
# reading test data
xtest <- read_table("./data/UCI HAR Dataset/test/X_test.txt", col_names = FALSE)
ytest <- read_table("./data/UCI HAR Dataset/test/y_test.txt", col_names = FALSE)
# reading train data
xtrain <- read_table("./data/UCI HAR Dataset/train/X_train.txt", col_names = FALSE)
ytrain <- read_table("./data/UCI HAR Dataset/train/y_train.txt", col_names = FALSE)
# reading subject data
subjects_test <- read_table("./data/UCI HAR Dataset/test/subject_test.txt", col_names = "subject")
subjects_train <- read_table("./data/UCI HAR Dataset/train/subject_train.txt", col_names = "subject")
# reading labels
activities <- read_table2("./data/UCI HAR Dataset/activity_labels.txt", col_names = FALSE)
features <- read_table2("./data/UCI HAR Dataset/features.txt", col_names = FALSE)
# starting requested tasks
# 1. Merges the training and the test sets to create one data set.
# preparing x,y data
xdata <- bind_rows(xtest, xtrain); names(xdata) <- features[[2]]
ydata <- bind_rows(ytest, ytrain); colnames(ydata) <- "activity"
# preparing subjects
subjects <- bind_rows(subjects_test, subjects_train)
# merging sets to create one data set
data <- bind_cols(subjects, ydata, xdata)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
data <- data %>% select(1:3, matches("-mean()|-std()"))
# 3. Uses descriptive activity names to name the activities in the data set
data[[2]] <- factor(data[[2]], labels = activities[[2]]) %>% tolower
# 4. Appropriately labels the data set with descriptive variable names.
# since we already labeled the columns, we're doing some clean up
names(data) <- gsub("BodyBody","Body",names(data))
names(data) <- gsub("\\()","",names(data)) ; names(data) <- tolower(names(data))
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data <- aggregate(. ~subject + activity, data, mean)
# Saving final dataset
write.table(tidy_data, file="./tidy_dataset.txt", row.names = FALSE)
|
testlist <- list(doy = c(-1.34765550943381e+28, -6.41943608631928e+167, 5.06147895673148e-241, -3.80269803056297e+245, 7.31782995655776e-304, -2.37636445786895e-212, 6.65004044562904e-304, 1.26937468623092e-153, 1.75512488333167e+50, 2.64939586740134e-158, -15728640.1250391), latitude = c(-1.38615190940838e+304, 2.61207262352039e+248, 8465182275673408, 6.93341970218013e-05, 2.21250605903587e+76, 4.71383448583357e+139, -3.0587554226507e-205, -1.72386133337353e+306, -9.07410391080407e+305, 5.68302866920153e+225, 1.28925672398936e+99, -1.38209255127074e+56, 5.6464282514068e-141, 2.97080506454165e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = numeric(0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831608-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 919 | r | testlist <- list(doy = c(-1.34765550943381e+28, -6.41943608631928e+167, 5.06147895673148e-241, -3.80269803056297e+245, 7.31782995655776e-304, -2.37636445786895e-212, 6.65004044562904e-304, 1.26937468623092e-153, 1.75512488333167e+50, 2.64939586740134e-158, -15728640.1250391), latitude = c(-1.38615190940838e+304, 2.61207262352039e+248, 8465182275673408, 6.93341970218013e-05, 2.21250605903587e+76, 4.71383448583357e+139, -3.0587554226507e-205, -1.72386133337353e+306, -9.07410391080407e+305, 5.68302866920153e+225, 1.28925672398936e+99, -1.38209255127074e+56, 5.6464282514068e-141, 2.97080506454165e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = numeric(0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
# Auto-extracted example from the 'dse22g' help page of the aprean3 package.
library(aprean3)
### Name: dse22g
### Title: Dataset for Exercise G, Chapter 22
### Aliases: dse22g
### Keywords: datasets
### ** Examples
# Print the dataset (top-level symbol triggers auto-printing).
dse22g
| /data/genthat_extracted_code/aprean3/examples/dse22g.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 153 | r | library(aprean3)
### Name: dse22g
### Title: Dataset for Exercise G, Chapter 22
### Aliases: dse22g
### Keywords: datasets
### ** Examples
dse22g
|
#' Import your ERP data files.
#'
#' \code{load.data} imports your individual ERP data files. File extensions must be .txt and
#' file names must be in the format: YourFile_Condition.txt (e.g., SS34_Positive.txt). Raw data files to be
#' imported should be organized as follows:
#' \itemize{
#' \item each electrode must be a separate column
#' \item voltages at each time point should be listed under the appropriate electrode
#' column as rows
#' \item no other data should be present in the raw data file (e.g., subject, condition,
#' time, etc.)
#' }
#'
#' See also \code{\link{easy.load}} for a more user-friendly way to
#' generate the appropriate code.
#'
#' @param path The folder path containing your ERP files
#' @param condition In quotes, a string indicating which trial
#' type to be imported (i.e., the condition indicated in the file name)
#' @param num.subs The number of files (subjects) to import for a given condition
#' @param epoch.st The earliest time point sampled in the ERP files, including
#' the basline (e.g., -200)
#' @param epoch.end The final time point sampled in the ERP files (typically \eqn{
#' final time point - 1})
#' @param bsln.cor If "y", applies baseline correction to the imported data. Baseline
#' correction is achieved by subtracting the mean voltage prior to 0 ms on a channel-
#' by-channel basis.
#' @param header Only accepts values of TRUE or FALSE. Used to specify whether or not there
#' is an existing header row in the ERP files. If there is no header, \code{load.data}
#' will supply one (see details below).
#'
#' @details \itemize{
#' \item Name each individual file following the format mentioned above (e.g., SS34_Positive.txt).
#' \code{load.data} will ignore all text preceding the "_", and treat all text following the "_"
#' as the \code{condition}, (e.g., Positive). Use only one "_" in the file name (i.e., to separate
#' your own naming convention from the \code{condition}); using multiple "_" characters will lead to
#' faulty importing of data. The erp.easy convention for subjects is a capital "S" followed by the
#' number corresponding to the order in which the file was loaded (e.g., S1, S2, etc.). Subjects will
#' be loaded into the "Subject" column of the returned data frame.
#'
#' \item If no header is present in the ERP files, one will be supplied, using the standard R
#' convention of a capital "V" followed by increasing integers (e.g., V1, V2, V3). Use these
#' automatically assigned column name values to refer to the electrodes (unless a header is provided
#' in the raw data file).
#'
#' \item Enter the starting time of the baseline, if present in your individual files, in
#' \code{epoch.st} (e.g., -200).
#'
#' \item Once the desired data frames have been loaded, they can be
#' \href{http://www.statmethods.net/input/exportingdata.html}{exported} as a number of
#' different file types.
#'
#' \item The sample rate will be calculated for you, based on the starting (\code{epoch.st})
#' and ending (\code{epoch.end}) time points of the recording epoch and the number of time
#' points in a given condition (the number of rows in your file for each condition).
#'}
#'
#' @note While importing data must be done using a separate function call for each condition,
#' it can be convenient to use R's native \code{rbind.data.frame()} command to bind
#' several loaded conditions (variables) into a single data frame consisting of multiple
#' conditions. All erp.easy functions will act on all conditions included in the data frame
#' passed to the function. For example, if you'd like to see all conditions plotted, simply
#' use \code{rbind.data.frame()} to make a single data frame to pass to an erp.easy plotting
#' function, and you will see all added conditions plotted simultaneously in the same figure
#' (as opposed to making separate data frames for each condition, then passing each data
#' frame separately to a function).
#'
#' @return A single, concatenated data frame of all electrode data for all
#' subjects organized into columns, with three added columns:
#'
#' \enumerate{
#' \item "Subject" containing repeating subject names
#' \item "Stimulus" containing repeating condition names (e.g., Neutral)
#' \item "Time" containing a repeating list of timepoints sampled
#' }
#'
#' @author Travis Moore
#'
#' @examples
#' \dontrun{
#' # Importing data for a condition named "Neutral" (file names: "Sub1_Neutral.txt",
#' "Sub2_Neutral.txt", etc.)
#' neutral <- load.data(path = "/Users/Username/Folder/", condition = "Neutral",
#' num.subs = 20, epoch.st = -200, epoch.end = 899, header = FALSE)
#'
#' # Adding imported data named "positive" to the imported "neutral" data
#' combo <- rbind.data.frame(neutral, positive)
#' }
load.data <- function(path, condition, num.subs, epoch.st, epoch.end, bsln.cor = "n", header = FALSE) {
  # Import all "<subject>_<condition>.txt" ERP files found in `path` and
  # return one stacked data frame with Subject, Stimulus and Time columns
  # prepended to the electrode columns.  If bsln.cor == "y", each channel is
  # baseline-corrected per subject by subtracting its mean pre-stimulus
  # (Time < 0) voltage.
  # Restore the caller's working directory no matter how we exit.
  oldwd <- getwd()
  on.exit(setwd(oldwd))
  setwd(path)
  # All .txt files in the directory, then only those matching
  # "<text without '_'>_<condition>".
  files <- list.files(path, pattern = "*.txt")
  cond.files <- grepl(paste("^[^_]+_", condition, sep = ""), files)
  # Number of files found for this condition.
  trues <- sum(cond.files, na.rm = TRUE)
  if (trues < 1) {
    stop(paste("NO FILES FOUND FOR CONDITION ", condition, "!", sep = ""))
  }
  sorted.files <- files[cond.files]
  # The caller's subject count must match the number of files found.
  if (trues != num.subs) {
    stop(paste("NUMBER OF SUBJECTS ", "(", num.subs, ")", " AND ACTUAL FILES ",
               "(", trues, ")", "DIFFERS!"))
  }
  # Read each subject's file and stack them row-wise (one file per subject).
  tables <- lapply(paste(path, sorted.files, sep = ""), read.table,
                   header = header)
  data.df <- plyr::ldply(tables)
  # Subject labels: the text before the first "_" of each file name,
  # repeated once for every row (time point) that subject contributes.
  sublist <- vector("list")
  for (i in seq_len(num.subs)) {
    sub.names <- unlist(lapply(strsplit(sorted.files[i], '_', fixed = TRUE), '[', 1))
    sublist[[i]] <- c(rep(sub.names, (nrow(data.df) / num.subs)))
  }
  sublist <- data.frame(matrix(unlist(sublist), ncol = 1))
  # Derive the sampling step from the epoch span and rows per subject, then
  # build the repeating Time and Stimulus columns.
  all.times <- seq(epoch.st, epoch.end, 1)
  number <- round(length(all.times) / (nrow(data.df) / num.subs), digits = 0)
  sampled.times <- seq(epoch.st, epoch.end, number)
  stimlist <- c(rep(condition, nrow(data.df)))
  data.df1 <- cbind.data.frame(sublist, stimlist, sampled.times, data.df)
  colnames(data.df1)[1:3] <- c("Subject", "Stimulus", "Time")
  # Optional baseline correction: per subject, subtract each channel's mean
  # voltage over the pre-stimulus samples from every sample.
  if (bsln.cor == "y") {
    baseline <- sum(sampled.times < 0)  # number of pre-stimulus samples
    correction.factors <- by(data.df1[ , 4:ncol(data.df1)], data.df1$Subject,
                             function(x) apply(x, 2, function(y) mean(y[1:baseline])))
    data.df2 <- vector("list")
    for (k in seq_len(num.subs)) {
      # BUGFIX: select subjects by names(correction.factors) rather than
      # levels(data.df1$Subject).  Since R 4.0 the Subject column is
      # character (stringsAsFactors = FALSE by default), so levels() returns
      # NULL and the original subset silently matched zero rows.  Using the
      # names of the by() result also guarantees each subject's rows are
      # paired with that same subject's correction factors.
      h <- subset(data.df1, data.df1$Subject == names(correction.factors)[k])
      h[ , 4:ncol(h)] <- sweep(h[ , 4:ncol(h)], 2, correction.factors[[k]], "-")
      data.df2[[k]] <- h
    }
    data.df1 <- plyr::ldply(data.df2)
  } # close if baseline
  data.df1
} # close main function
| /R/Load.R | no_license | cran/erp.easy | R | false | false | 7,628 | r | #' Import your ERP data files.
#'
#' \code{load.data} imports your individual ERP data files. File extensions must be .txt and
#' file names must be in the format: YourFile_Condition.txt (e.g., SS34_Positive.txt). Raw data files to be
#' imported should be organized as follows:
#' \itemize{
#' \item each electrode must be a separate column
#' \item voltages at each time point should be listed under the appropriate electrode
#' column as rows
#' \item no other data should be present in the raw data file (e.g., subject, condition,
#' time, etc.)
#' }
#'
#' See also \code{\link{easy.load}} for a more user-friendly way to
#' generate the appropriate code.
#'
#' @param path The folder path containing your ERP files
#' @param condition In quotes, a string indicating which trial
#' type to be imported (i.e., the condition indicated in the file name)
#' @param num.subs The number of files (subjects) to import for a given condition
#' @param epoch.st The earliest time point sampled in the ERP files, including
#' the basline (e.g., -200)
#' @param epoch.end The final time point sampled in the ERP files (typically \eqn{
#' final time point - 1})
#' @param bsln.cor If "y", applies baseline correction to the imported data. Baseline
#' correction is achieved by subtracting the mean voltage prior to 0 ms on a channel-
#' by-channel basis.
#' @param header Only accepts values of TRUE or FALSE. Used to specify whether or not there
#' is an existing header row in the ERP files. If there is no header, \code{load.data}
#' will supply one (see details below).
#'
#' @details \itemize{
#' \item Name each individual file following the format mentioned above (e.g., SS34_Positive.txt).
#' \code{load.data} will ignore all text preceding the "_", and treat all text following the "_"
#' as the \code{condition}, (e.g., Positive). Use only one "_" in the file name (i.e., to separate
#' your own naming convention from the \code{condition}); using multiple "_" characters will lead to
#' faulty importing of data. The erp.easy convention for subjects is a capital "S" followed by the
#' number corresponding to the order in which the file was loaded (e.g., S1, S2, etc.). Subjects will
#' be loaded into the "Subject" column of the returned data frame.
#'
#' \item If no header is present in the ERP files, one will be supplied, using the standard R
#' convention of a capital "V" followed by increasing integers (e.g., V1, V2, V3). Use these
#' automatically assigned column name values to refer to the electrodes (unless a header is provided
#' in the raw data file).
#'
#' \item Enter the starting time of the baseline, if present in your individual files, in
#' \code{epoch.st} (e.g., -200).
#'
#' \item Once the desired data frames have been loaded, they can be
#' \href{http://www.statmethods.net/input/exportingdata.html}{exported} as a number of
#' different file types.
#'
#' \item The sample rate will be calculated for you, based on the starting (\code{epoch.st})
#' and ending (\code{epoch.end}) time points of the recording epoch and the number of time
#' points in a given condition (the number of rows in your file for each condition).
#'}
#'
#' @note While importing data must be done using a separate function call for each condition,
#' it can be convenient to use R's native \code{rbind.data.frame()} command to bind
#' several loaded conditions (variables) into a single data frame consisting of multiple
#' conditions. All erp.easy functions will act on all conditions included in the data frame
#' passed to the function. For example, if you'd like to see all conditions plotted, simply
#' use \code{rbind.data.frame()} to make a single data frame to pass to an erp.easy plotting
#' function, and you will see all added conditions plotted simultaneously in the same figure
#' (as opposed to making separate data frames for each condition, then passing each data
#' frame separately to a function).
#'
#' @return A single, concatenated data frame of all electrode data for all
#' subjects organized into columns, with three added columns:
#'
#' \enumerate{
#' \item "Subject" containing repeating subject names
#' \item "Stimulus" containing repeating condition names (e.g., Neutral)
#' \item "Time" containing a repeating list of timepoints sampled
#' }
#'
#' @author Travis Moore
#'
#' @examples
#' \dontrun{
#' # Importing data for a condition named "Neutral" (file names: "Sub1_Neutral.txt",
#' "Sub2_Neutral.txt", etc.)
#' neutral <- load.data(path = "/Users/Username/Folder/", condition = "Neutral",
#' num.subs = 20, epoch.st = -200, epoch.end = 899, header = FALSE)
#'
#' # Adding imported data named "positive" to the imported "neutral" data
#' combo <- rbind.data.frame(neutral, positive)
#' }
load.data <- function(path, condition, num.subs, epoch.st, epoch.end, bsln.cor = "n", header = FALSE) {
  # Import all "<subject>_<condition>.txt" ERP files found in `path` and
  # return one stacked data frame with Subject, Stimulus and Time columns
  # prepended to the electrode columns.  If bsln.cor == "y", each channel is
  # baseline-corrected per subject by subtracting its mean pre-stimulus
  # (Time < 0) voltage.
  # Restore the caller's working directory no matter how we exit.
  oldwd <- getwd()
  on.exit(setwd(oldwd))
  setwd(path)
  # All .txt files in the directory, then only those matching
  # "<text without '_'>_<condition>".
  files <- list.files(path, pattern = "*.txt")
  cond.files <- grepl(paste("^[^_]+_", condition, sep = ""), files)
  # Number of files found for this condition.
  trues <- sum(cond.files, na.rm = TRUE)
  if (trues < 1) {
    stop(paste("NO FILES FOUND FOR CONDITION ", condition, "!", sep = ""))
  }
  sorted.files <- files[cond.files]
  # The caller's subject count must match the number of files found.
  if (trues != num.subs) {
    stop(paste("NUMBER OF SUBJECTS ", "(", num.subs, ")", " AND ACTUAL FILES ",
               "(", trues, ")", "DIFFERS!"))
  }
  # Read each subject's file and stack them row-wise (one file per subject).
  tables <- lapply(paste(path, sorted.files, sep = ""), read.table,
                   header = header)
  data.df <- plyr::ldply(tables)
  # Subject labels: the text before the first "_" of each file name,
  # repeated once for every row (time point) that subject contributes.
  sublist <- vector("list")
  for (i in seq_len(num.subs)) {
    sub.names <- unlist(lapply(strsplit(sorted.files[i], '_', fixed = TRUE), '[', 1))
    sublist[[i]] <- c(rep(sub.names, (nrow(data.df) / num.subs)))
  }
  sublist <- data.frame(matrix(unlist(sublist), ncol = 1))
  # Derive the sampling step from the epoch span and rows per subject, then
  # build the repeating Time and Stimulus columns.
  all.times <- seq(epoch.st, epoch.end, 1)
  number <- round(length(all.times) / (nrow(data.df) / num.subs), digits = 0)
  sampled.times <- seq(epoch.st, epoch.end, number)
  stimlist <- c(rep(condition, nrow(data.df)))
  data.df1 <- cbind.data.frame(sublist, stimlist, sampled.times, data.df)
  colnames(data.df1)[1:3] <- c("Subject", "Stimulus", "Time")
  # Optional baseline correction: per subject, subtract each channel's mean
  # voltage over the pre-stimulus samples from every sample.
  if (bsln.cor == "y") {
    baseline <- sum(sampled.times < 0)  # number of pre-stimulus samples
    correction.factors <- by(data.df1[ , 4:ncol(data.df1)], data.df1$Subject,
                             function(x) apply(x, 2, function(y) mean(y[1:baseline])))
    data.df2 <- vector("list")
    for (k in seq_len(num.subs)) {
      # BUGFIX: select subjects by names(correction.factors) rather than
      # levels(data.df1$Subject).  Since R 4.0 the Subject column is
      # character (stringsAsFactors = FALSE by default), so levels() returns
      # NULL and the original subset silently matched zero rows.  Using the
      # names of the by() result also guarantees each subject's rows are
      # paired with that same subject's correction factors.
      h <- subset(data.df1, data.df1$Subject == names(correction.factors)[k])
      h[ , 4:ncol(h)] <- sweep(h[ , 4:ncol(h)], 2, correction.factors[[k]], "-")
      data.df2[[k]] <- h
    }
    data.df1 <- plyr::ldply(data.df2)
  } # close if baseline
  data.df1
} # close main function
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse do not recompute it.
## The first function,makeCacheMatrix creates a "matrix",which is really a list
## containing a function to
## 1.set the value of the matrix
## 2.get the value of the matrix
## 3.set the value of inverse matrix
## 4.get the value of inverse matrix
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    # BUGFIX: store the new matrix `y`.  The original did `x <<- inv`,
    # assigning the stale cached inverse, so set() corrupted the matrix.
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The following function does the inverse of the"matrix"
##created with the above function. However, it first checks to see if
##the inverse of the matrix is already there.
##If so, it gets the inverse from the cache and skips the computation.
##Otherwise, it calculates the inverse of the matrix
##and sets the value of the matrix in the cache via the setinverse function
## Return the inverse of the special "matrix" `x` created by
## makeCacheMatrix().  If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.  `...` is passed through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # BUGFIX: call the accessor.  The original `data <- x$get` grabbed the
  # function object itself, so solve() was handed a closure, not the matrix.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | BhaviChikhal/ProgrammingAssignment2 | R | false | false | 1,348 | r | ## Put comments here that give an overall description of what your
## functions do
## The first function,makeCacheMatrix creates a "matrix",which is really a list
## containing a function to
## 1.set the value of the matrix
## 2.get the value of the matrix
## 3.set the value of inverse matrix
## 4.get the value of inverse matrix
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    # BUGFIX: store the new matrix `y`.  The original did `x <<- inv`,
    # assigning the stale cached inverse, so set() corrupted the matrix.
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The following function does the inverse of the"matrix"
##created with the above function. However, it first checks to see if
##the inverse of the matrix is already there.
##If so, it gets the inverse from the cache and skips the computation.
##Otherwise, it calculates the inverse of the matrix
##and sets the value of the matrix in the cache via the setinverse function
## Return the inverse of the special "matrix" `x` created by
## makeCacheMatrix().  If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.  `...` is passed through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # BUGFIX: call the accessor.  The original `data <- x$get` grabbed the
  # function object itself, so solve() was handed a closure, not the matrix.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
#######################################################
## Preparing the RAM data for B/Bmsy values
#######################################################
source('../ohiprep/src/R/common.R')
# STEP 1:
##### Prepare data to link RAM stocks to SAUP regions and Taxon IDs
# The initial MatchedPairs.csv file was from Lydia Teh (SAUP).
# I modified this file by adding in another list from Lydia for saup regions with >1 stocks sharing
# the same Taxaid and saup regions (indicated by "multiple stocks in same region").
# These b/bmsy scores will be averaged.
# We also noticed that some saup regions have >1 FAO and >1 stock with the same Taxa ID
# In these cases, the stock was linked with the saup and FAO region.
# She put these in a file
# called TaxonEEZ_FAO.csv. The following merges and checks these two files.
### Create a template file for each unique Taxon/saupregion/faoregion
### This will be used to generate the FAO data for the catch
catch <- read.csv(file.path(dir_neptune_data, 'git-annex/globalprep/SAUP_FIS_data/v2015/tmp/Catch_v16072015_summary.csv'))
eez_fao <- catch %>%
filter(EEZID != 0) %>%
dplyr::select(EEZID, FAOAreaID, TaxonKey) %>%
unique()
write.csv(eez_fao, "globalprep/SAUP_FIS/v2015/tmp/eez_fao_taxa_combos.csv", row.names=FALSE)
## my template data with eez/fao/species combinations
EEZandFAO <- read.csv('globalprep/SAUP_FIS/v2015/tmp/eez_fao_taxa_combos.csv')
## translates RAM data to SAUP regions/species (dataset 1)
RAM2saup <- read.csv('globalprep/SAUP_FIS/v2015/raw/MatchedPairs.csv') %>%
unique() %>%
filter(!(Taxonid == 607039 & EEZID == 910)) # removing here and adding to FAO/SAUP data because we do not have this region
# these are SAUP/FAO regions with multiple stocks of the same species (the b/bmsy scores will be averaged):
tmp <- RAM2saup[RAM2saup$notes=='multiple stocks in same region', ]
## joining with template data to get FAO regions
RAM2saup_join <- RAM2saup %>%
left_join(EEZandFAO, by=c('EEZID', 'Taxonid'='TaxonKey')) %>%
mutate(id=paste(EEZID, Taxonid, sep="_"))
## Checking the data
## NOTE: some EEZ/Taxonid's are duplicated upon merge due to EEZ's with >1 FAO region
## The increase in sample size after merge is good!
tmp <- RAM2saup_join$id[duplicated(RAM2saup_join$id)]
tmp2 <- RAM2saup_join[RAM2saup_join$id %in% tmp, ]
tmp2 <- arrange(tmp2, Taxonid, stocklong, EEZID)
write.csv(tmp2, "globalprep/SAUP_FIS/v2015/tmp/checkingDups.csv", row.names=FALSE) # everything looked ok to me.
## translates RAM data to SAUP regions/species (dataset 2): these ones had FAO regions included
## (in this case the saup had multiple FAO regions and multiple stocks - so Lydia determined which saup/fao
## the stock belonged to)
RAM2saup2 <- read.csv('globalprep/SAUP_FIS/v2015/raw/TaxonEEZ_FAO.csv') %>%
dplyr::select(Taxonid, EEZID, FAOAreaID = FAO, stocklong, notes)
## add in some data to replace EEZID=910
new910 <- data.frame(Taxonid = 607039,
EEZID = 0,
FAOAreaID = c(41, 48),
stocklong = "Antarctic toothfish Ross Sea",
notes=NA)
## Bind the two saup/fao/taxon datasets together:
RAM2saup2 <- rbind(RAM2saup2, new910)
RAMstocks <- bind_rows(RAM2saup_join, RAM2saup2) %>% #data.frame(RAMstocks[is.na(RAMstocks$FAOAreaID), ])
filter(!is.na(FAOAreaID))
# some NA values, but I checked catch and these stocks really aren't in these regions
# Taxonid/EEZID/stocklong combo in MatchedPairs data from Lydia, but not actually any catch in these regions
# STEP 2:
#### Preparing the RAM b/bmsy data (v3, downloaded from here: http://ramlegacy.org/database/)
## subsets the data to the most current 6 years of data and assigns a relative year to correspond to the catch data
## different stocks will have different ranges of years that are used for the analysis
## NOTE: data prior to 2003 is not used
# Lookup table mapping distance-from-most-recent-assessment-year -> catch year.
catchYear <- data.frame(catchYear_rel = c(2010, 2009, 2008, 2007, 2006, 2005), distMax = c(0, 1, 2, 3, 4, 5))
ram <- read.csv('globalprep/SAUP_FIS/v2015/raw/RLSADBv3_timeseries_values_views.csv') %>%
  select(stocklong, year, bbmsy=B.Bmsytouse) %>%
  filter(!is.na(bbmsy)) %>%
  filter(year >= 2002) %>%
  group_by(stocklong) %>%
  mutate(maxYear = max(year)) %>%
  mutate(cutYear = maxYear - 5) %>%
  filter(year >= cutYear) %>%
  mutate(distMax = maxYear - year) %>%
  left_join(catchYear) %>%
  mutate(sampleYears = length(distMax)) %>%
  filter(sampleYears >= 6) %>%
  ungroup()
# Interactive sanity checks: stock count, distribution of assessment end-years.
sum(table(ram$stocklong)>0)
table(ram$maxYear)
ram[ram$stocklong=="Albacore tuna Indian Ocean", ]
ram <- ram %>%
  select(stocklong, catchYear_rel, bbmsy)
#### STEP3:
# Adding in some data because the RAM data didn't have the most up-to-date data for this important species
# (based on ICCAT documents - which is the datasource used by RAM for these stocks)
ram[ram$stocklong == "Skipjack tuna Western Atlantic",] #these were cut because data was too old, so no values should show up
ram[ram$stocklong == "Skipjack tuna Eastern Atlantic",]
skipjacks <- read.csv('globalprep/SAUP_FIS/v2015/tmp/Skipjack_Bmsy.csv') %>%
  mutate(catchYear_rel = catchYear_rel - 2) # making the most recent year of data correspond to the 2010 catch
ram <- ram %>%
  bind_rows(skipjacks)
ram[ram$stocklong == "Skipjack tuna Western Atlantic",]
ram[ram$stocklong == "Skipjack tuna Eastern Atlantic",]
head(ram)
### STEP 4
## Merging the SAUP/taxon key with the RAM data
setdiff(ram$stocklong, RAMstocks$stocklong) #35 stocks with no SAUP catch data
tmp <- setdiff(RAMstocks$stocklong, ram$stocklong) #30 stocks with no data in RAM (this is due to the data being too old in RAM and being cut)
dim(RAMstocks[RAMstocks$stocklong %in% tmp, ])
RAM_b_bmsy <- RAMstocks %>%
  filter(!(stocklong %in% tmp)) %>% #cut stock that aren't in the RAM database
  left_join(ram) %>% # expands data by adding a year for each stocklong, 1809*6=10854 years
  group_by(Taxonid, EEZID, FAOAreaID, catchYear_rel) %>%
  summarize(bbmsy = mean(bbmsy, na.rm=TRUE)) %>% #averaging the stocks of the same Taxa within an EEZID/FAO N=6668
  ungroup()
data.frame(RAM_b_bmsy[RAM_b_bmsy$Taxonid == 600142 & RAM_b_bmsy$FAOAreaID == 57, ])
data.frame(filter(RAM_b_bmsy, Taxonid == 600107 & FAOAreaID == 71))
### STEP 5
## Convert to OHI region ID...make sure that this looks reasonable....
saup2ohi <- read.csv('src/LookupTables/new_saup_to_ohi_rgn.csv')
setdiff(saup2ohi$saup_id, RAM_b_bmsy$EEZID) #some of the countries do not have corresponding RAM catch data
saup2ohi[saup2ohi$notes == "split", ]
setdiff(RAM_b_bmsy$EEZID, saup2ohi$saup_id) # all of the RAM countries are represented in the master conversion table
# These SAUP regions are comprised of multiple OHI regions (the scores will be duplicated for these regions)
# this will expand the sample size when merged.
tmp <- RAM_b_bmsy %>%
  filter(EEZID %in% c(251, 962, 947, 918, 830, 626, 908))
table(tmp$EEZID)
#trying to determine merged sample size:
2*30 + 1*60 + 1*24 + 1*54 + 1*24 + 1*36 + 1*150 #these are the number of extra duplicates, N=408 + N=10,674 (RAM_b_bmsy) = 11,082...which is correct!
RAM_b_bmsy_ohi_rgn <- RAM_b_bmsy %>%
  left_join(saup2ohi, by=c('EEZID' = 'saup_id')) %>%
  mutate(id = paste(Taxonid, ohi_id_2013, FAOAreaID, catchYear_rel, sep="_")) %>%
  mutate(id2 = paste(ohi_id_2013, Taxonid, sep="_"))
RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$Taxonid == 600142 & RAM_b_bmsy_ohi_rgn$FAOAreaID == 57, ]
## check that merge went well (appears to have gone well based on sample size!)
tmp <- RAM_b_bmsy_ohi_rgn %>%
  filter(EEZID %in% c(251, 962, 947, 910, 918, 830, 626, 908))
table(tmp$EEZID)
data.frame(RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$EEZID == 251, ])
## check on duplicates due to some OHI regions having multiple SAUP regions
dups <- RAM_b_bmsy_ohi_rgn$id[duplicated(RAM_b_bmsy_ohi_rgn$id)]
tmp <- RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$id %in% dups, ]
data.frame(tmp[tmp$ohi_id_2013==16, ])
library(ggplot2)
# Visual check: b/bmsy trajectories per SAUP region within each OHI rgn/taxon.
ggplot(tmp, aes(x=catchYear_rel, y=bbmsy, col=as.factor(EEZID), group=as.factor(EEZID))) +
  geom_point() +
  geom_line() +
  facet_wrap(~id2, scales='free')
ggsave('globalprep/SAUP_FIS/v2015/tmp/combining_bbmsy_rgns_multiple_saup.png')
# a couple worrisome ones...impetus for weighting by catch when multiple SAUP regions within an OHI region
data.frame(tmp[tmp$id2 == "163_600143", ]) # difference captured by different FAO regions
data.frame(tmp[tmp$id2 == "163_600223", ])
data.frame(tmp[tmp$id2 == "163_600361", ])
data.frame(tmp[tmp$id2 == "163_600504", ])
data.frame(tmp[tmp$id2 == "62_600226", ])
data.frame(tmp[tmp$id2 == "182_600142", ])
### STEP 6 ----
## Weight b/bmsy values by mean catch within regions
catch <- read.csv('globalprep/SAUP_FIS/v2015/tmp/mean_catch_saup_fao.csv') %>%
  select(EEZID, FAOAreaID, Taxonid=TaxonKey, Year, Catch=mean_catch) %>%
  filter(EEZID != 0)
data.frame(filter(catch, EEZID==8 & FAOAreaID==37 & Taxonid==100039)) #should all be the same
data.frame(filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142))
data.frame(filter(catch, EEZID==36 & FAOAreaID==57 & Taxonid==600142))
data.frame(filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107))
# Attach the mean-catch weights to each taxon/EEZ/FAO/year b/bmsy record.
RAM_b_bmsy_ohi_rgn_catch <- RAM_b_bmsy_ohi_rgn %>%
  left_join(catch, by=c('FAOAreaID', 'EEZID', 'Taxonid', 'catchYear_rel'='Year'))
data.frame(RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600504 &
                                      RAM_b_bmsy_ohi_rgn_catch$ohi_id_2013==163 &
                                      RAM_b_bmsy_ohi_rgn_catch$FAOAreaID == 67, ])
data.frame(filter(RAM_b_bmsy_ohi_rgn_catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142))
filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142)
data.frame(filter(RAM_b_bmsy_ohi_rgn_catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107))
filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107)
data.frame(RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600142 & RAM_b_bmsy_ohi_rgn_catch$FAOAreaID == 57, ])
# Final product: catch-weighted mean b/bmsy per taxon/FAO/OHI-region/year.
RAM_b_bmsy_ohi_rgn_catch <- RAM_b_bmsy_ohi_rgn_catch %>%
  filter(!is.na(Catch)) %>%
  group_by(Taxonid, FAOAreaID, ohi_id_2013, catchYear_rel) %>%
  summarize(bbmsy = weighted.mean(bbmsy, Catch, na.rm=TRUE)) %>%
  select(Taxonid, FAO_rgn=FAOAreaID, ohi_rgn=ohi_id_2013, year=catchYear_rel, bbmsy); head(RAM_b_bmsy_ohi_rgn_catch)
## check: 2010 value should be just under 1.490 (yes):
RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600504 &
                           RAM_b_bmsy_ohi_rgn_catch$ohi_rgn==163 &
                           RAM_b_bmsy_ohi_rgn_catch$FAO_rgn == 67, ]
filter(RAM_b_bmsy_ohi_rgn_catch, Taxonid==600142 & FAO_rgn==57 & ohi_rgn==204)
filter(RAM_b_bmsy_ohi_rgn_catch, Taxonid==600107 & FAO_rgn==57 & ohi_rgn==204)
write.csv(RAM_b_bmsy_ohi_rgn_catch, 'globalprep/SAUP_FIS/v2015/tmp/RAM_fao_ohi.csv', row.names=FALSE)
| /globalprep/fis/v2015/RAMdataPrep.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 10,812 | r | #######################################################
## Preparing the RAM data for B/Bmsy values
#######################################################
source('../ohiprep/src/R/common.R')
# STEP 1:
##### Prepare data to link RAM stocks to SAUP regions and Taxon IDs
# The initial MatchedPairs.csv file was from Lydia Teh (SAUP).
# I modified this file by adding in another list from Lydia for saup regions with >1 stocks sharing
# the same Taxaid and saup regions (indicated by "multiple stocks in same region").
# These b/bmsy scores will be averaged.
# We also noticed that some saup regions have >1 FAO and >1 stock with the same Taxa ID
# In these cases, the stock was linked with the saup and FAO region.
# She put these in a file
# called TaxonEEZ_FAO.csv. The following merges and checks these two files.
### Create a template file for each unique Taxon/saupregion/faoregion
### This will be used to generate the FAO data for the catch
catch <- read.csv(file.path(dir_neptune_data, 'git-annex/globalprep/SAUP_FIS_data/v2015/tmp/Catch_v16072015_summary.csv'))
eez_fao <- catch %>%
filter(EEZID != 0) %>%
dplyr::select(EEZID, FAOAreaID, TaxonKey) %>%
unique()
write.csv(eez_fao, "globalprep/SAUP_FIS/v2015/tmp/eez_fao_taxa_combos.csv", row.names=FALSE)
## my template data with eez/fao/species combinations
EEZandFAO <- read.csv('globalprep/SAUP_FIS/v2015/tmp/eez_fao_taxa_combos.csv')
## translates RAM data to SAUP regions/species (dataset 1)
RAM2saup <- read.csv('globalprep/SAUP_FIS/v2015/raw/MatchedPairs.csv') %>%
unique() %>%
filter(!(Taxonid == 607039 & EEZID == 910)) # removing here and adding to FAO/SAUP data because we do not have this region
# these are SAUP/FAO regions with multiple stocks of the same species (the b/bmsy scores will be averaged):
tmp <- RAM2saup[RAM2saup$notes=='multiple stocks in same region', ]
## joining with template data to get FAO regions
RAM2saup_join <- RAM2saup %>%
left_join(EEZandFAO, by=c('EEZID', 'Taxonid'='TaxonKey')) %>%
mutate(id=paste(EEZID, Taxonid, sep="_"))
## Checking the data
## NOTE: some EEZ/Taxonid's are duplicated upon merge due to EEZ's with >1 FAO region
## The increase in sample size after merge is good!
tmp <- RAM2saup_join$id[duplicated(RAM2saup_join$id)]
tmp2 <- RAM2saup_join[RAM2saup_join$id %in% tmp, ]
tmp2 <- arrange(tmp2, Taxonid, stocklong, EEZID)
write.csv(tmp2, "globalprep/SAUP_FIS/v2015/tmp/checkingDups.csv", row.names=FALSE) # everything looked ok to me.
## translates RAM data to SAUP regions/species (dataset 2): these ones had FAO regions included
## (in this case the saup had multiple FAO regions and multiple stocks - so Lydia determined which saup/fao
## the stock belonged to)
RAM2saup2 <- read.csv('globalprep/SAUP_FIS/v2015/raw/TaxonEEZ_FAO.csv') %>%
dplyr::select(Taxonid, EEZID, FAOAreaID = FAO, stocklong, notes)
## add in some data to replace EEZID=910
new910 <- data.frame(Taxonid = 607039,
EEZID = 0,
FAOAreaID = c(41, 48),
stocklong = "Antarctic toothfish Ross Sea",
notes=NA)
## Bind the two saup/fao/taxon datasets together:
RAM2saup2 <- rbind(RAM2saup2, new910)
RAMstocks <- bind_rows(RAM2saup_join, RAM2saup2) %>% #data.frame(RAMstocks[is.na(RAMstocks$FAOAreaID), ])
filter(!is.na(FAOAreaID))
# some NA values, but I checked catch and these stocks really aren't in these regions
# Taxonid/EEZID/stocklong combo in MatchedPairs data from Lydia, but not actually any catch in these regions
# STEP 2:
#### Preparing the RAM b/bmsy data (v3, downloaded from here: http://ramlegacy.org/database/)
## subsets the data to the most current 6 years of data and assigns a relative year to correspond to the catch data
## different stocks will have different ranges of years that are used for the analysis
## NOTE: data prior to 2003 is not used
catchYear <- data.frame(catchYear_rel = c(2010, 2009, 2008, 2007, 2006, 2005), distMax = c(0, 1, 2, 3, 4, 5))
ram <- read.csv('globalprep/SAUP_FIS/v2015/raw/RLSADBv3_timeseries_values_views.csv') %>%
select(stocklong, year, bbmsy=B.Bmsytouse) %>%
filter(!is.na(bbmsy)) %>%
filter(year >= 2002) %>%
group_by(stocklong) %>%
mutate(maxYear = max(year)) %>%
mutate(cutYear = maxYear - 5) %>%
filter(year >= cutYear) %>%
mutate(distMax = maxYear - year) %>%
left_join(catchYear) %>%
mutate(sampleYears = length(distMax)) %>%
filter(sampleYears >= 6) %>%
ungroup()
sum(table(ram$stocklong)>0)
table(ram$maxYear)
ram[ram$stocklong=="Albacore tuna Indian Ocean", ]
ram <- ram %>%
select(stocklong, catchYear_rel, bbmsy)
#### STEP3:
# Adding in some data because the RAM data didn't have the most up-to-date data for this important species
# (based on ICCAT documents - which is the datasource used by RAM for these stocks)
ram[ram$stocklong == "Skipjack tuna Western Atlantic",] #these were cut because data was too old, so no values should show up
ram[ram$stocklong == "Skipjack tuna Eastern Atlantic",]
skipjacks <- read.csv('globalprep/SAUP_FIS/v2015/tmp/Skipjack_Bmsy.csv') %>%
mutate(catchYear_rel = catchYear_rel - 2) # making the most recent year of data correspond to the 2010 catch
ram <- ram %>%
bind_rows(skipjacks)
ram[ram$stocklong == "Skipjack tuna Western Atlantic",]
ram[ram$stocklong == "Skipjack tuna Eastern Atlantic",]
head(ram)
### STEP 4
## Merging the SAUP/taxon key with the RAM data
setdiff(ram$stocklong, RAMstocks$stocklong) #35 stocks with no SAUP catch data
tmp <- setdiff(RAMstocks$stocklong, ram$stocklong) #30 stocks with no data in RAM (this is due to the data being too old in RAM and being cut)
dim(RAMstocks[RAMstocks$stocklong %in% tmp, ])
RAM_b_bmsy <- RAMstocks %>%
filter(!(stocklong %in% tmp)) %>% #cut stock that aren't in the RAM database
left_join(ram) %>% # expands data by adding a year for each stocklong, 1809*6=10854 years
group_by(Taxonid, EEZID, FAOAreaID, catchYear_rel) %>%
summarize(bbmsy = mean(bbmsy, na.rm=TRUE)) %>% #averaging the stocks of the same Taxa within an EEZID/FAO N=6668
ungroup()
data.frame(RAM_b_bmsy[RAM_b_bmsy$Taxonid == 600142 & RAM_b_bmsy$FAOAreaID == 57, ])
data.frame(filter(RAM_b_bmsy, Taxonid == 600107 & FAOAreaID == 71))
### STEP 5
## Convert to OHI region ID...make sure that this looks reasonable....
saup2ohi <- read.csv('src/LookupTables/new_saup_to_ohi_rgn.csv')
setdiff(saup2ohi$saup_id, RAM_b_bmsy$EEZID) #some of the countries do not have corresponding RAM catch data
saup2ohi[saup2ohi$notes == "split", ]
setdiff(RAM_b_bmsy$EEZID, saup2ohi$saup_id) # all of the RAM countries are represented in the master conversion table
# These SAUP regions are comprised of multiple OHI regions (the scores will be duplicated for these regions)
# this will expand the sample size when merged.
tmp <- RAM_b_bmsy %>%
filter(EEZID %in% c(251, 962, 947, 918, 830, 626, 908))
table(tmp$EEZID)
#trying to determine merged sample size:
2*30 + 1*60 + 1*24 + 1*54 + 1*24 + 1*36 + 1*150 #these are the number of extra duplicates, N=408 + N=10,674 (RAM_b_bmsy) = 11,082...which is correct!
RAM_b_bmsy_ohi_rgn <- RAM_b_bmsy %>%
left_join(saup2ohi, by=c('EEZID' = 'saup_id')) %>%
mutate(id = paste(Taxonid, ohi_id_2013, FAOAreaID, catchYear_rel, sep="_")) %>%
mutate(id2 = paste(ohi_id_2013, Taxonid, sep="_"))
RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$Taxonid == 600142 & RAM_b_bmsy_ohi_rgn$FAOAreaID == 57, ]
## check that merge went well (appears to have gone well based on sample size!)
tmp <- RAM_b_bmsy_ohi_rgn %>%
filter(EEZID %in% c(251, 962, 947, 910, 918, 830, 626, 908))
table(tmp$EEZID)
data.frame(RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$EEZID == 251, ])
## check on duplicates due to some OHI regions having multiple SAUP regions
dups <- RAM_b_bmsy_ohi_rgn$id[duplicated(RAM_b_bmsy_ohi_rgn$id)]
tmp <- RAM_b_bmsy_ohi_rgn[RAM_b_bmsy_ohi_rgn$id %in% dups, ]
data.frame(tmp[tmp$ohi_id_2013==16, ])
library(ggplot2)
ggplot(tmp, aes(x=catchYear_rel, y=bbmsy, col=as.factor(EEZID), group=as.factor(EEZID))) +
geom_point() +
geom_line() +
facet_wrap(~id2, scales='free')
ggsave('globalprep/SAUP_FIS/v2015/tmp/combining_bbmsy_rgns_multiple_saup.png')
# a couple worrisome ones...impetus for weighting by catch when multiple SAUP regions within an OHI region
data.frame(tmp[tmp$id2 == "163_600143", ]) # difference captured by different FAO regions
data.frame(tmp[tmp$id2 == "163_600223", ])
data.frame(tmp[tmp$id2 == "163_600361", ])
data.frame(tmp[tmp$id2 == "163_600504", ])
data.frame(tmp[tmp$id2 == "62_600226", ])
data.frame(tmp[tmp$id2 == "182_600142", ])
### STEP 6 ----
## Weight b/bmsy values by mean catch within regions
catch <- read.csv('globalprep/SAUP_FIS/v2015/tmp/mean_catch_saup_fao.csv') %>%
select(EEZID, FAOAreaID, Taxonid=TaxonKey, Year, Catch=mean_catch) %>%
filter(EEZID != 0)
data.frame(filter(catch, EEZID==8 & FAOAreaID==37 & Taxonid==100039)) #should all be the same
data.frame(filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142))
data.frame(filter(catch, EEZID==36 & FAOAreaID==57 & Taxonid==600142))
data.frame(filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107))
RAM_b_bmsy_ohi_rgn_catch <- RAM_b_bmsy_ohi_rgn %>%
left_join(catch, by=c('FAOAreaID', 'EEZID', 'Taxonid', 'catchYear_rel'='Year'))
data.frame(RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600504 &
RAM_b_bmsy_ohi_rgn_catch$ohi_id_2013==163 &
RAM_b_bmsy_ohi_rgn_catch$FAOAreaID == 67, ])
data.frame(filter(RAM_b_bmsy_ohi_rgn_catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142))
filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600142)
data.frame(filter(RAM_b_bmsy_ohi_rgn_catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107))
filter(catch, EEZID==50 & FAOAreaID==57 & Taxonid==600107)
data.frame(RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600142 & RAM_b_bmsy_ohi_rgn_catch$FAOAreaID == 57, ])
RAM_b_bmsy_ohi_rgn_catch <- RAM_b_bmsy_ohi_rgn_catch %>%
filter(!is.na(Catch)) %>%
group_by(Taxonid, FAOAreaID, ohi_id_2013, catchYear_rel) %>%
summarize(bbmsy = weighted.mean(bbmsy, Catch, na.rm=TRUE)) %>%
select(Taxonid, FAO_rgn=FAOAreaID, ohi_rgn=ohi_id_2013, year=catchYear_rel, bbmsy); head(RAM_b_bmsy_ohi_rgn_catch)
## check: 2010 value should be just under 1.490 (yes):
RAM_b_bmsy_ohi_rgn_catch[RAM_b_bmsy_ohi_rgn_catch$Taxonid == 600504 &
RAM_b_bmsy_ohi_rgn_catch$ohi_rgn==163 &
RAM_b_bmsy_ohi_rgn_catch$FAO_rgn == 67, ]
filter(RAM_b_bmsy_ohi_rgn_catch, Taxonid==600142 & FAO_rgn==57 & ohi_rgn==204)
filter(RAM_b_bmsy_ohi_rgn_catch, Taxonid==600107 & FAO_rgn==57 & ohi_rgn==204)
write.csv(RAM_b_bmsy_ohi_rgn_catch, 'globalprep/SAUP_FIS/v2015/tmp/RAM_fao_ohi.csv', row.names=FALSE)
|
context("parse columns")

## parse_columns() should apply each supplied function to the column of the
## same name, leaving untargeted columns (here `z`) alone.
test_that("apply funcs to columns", {
  ## given: a small frame and one transformation per numeric column
  input <- data.frame(x = 1:5, y = 11:15, z = "OK")
  double_it <- function(v) v * 2
  shift_down <- function(v) v - 1

  ## when: the transformations are applied by column name
  out <- parse_columns(input, list(x = double_it, y = shift_down))

  ## then: each targeted column reflects its transformation
  expect_equal(out$x, seq(2, 10, 2))
  expect_equal(out$y, 10:14)
})
| /tests/testthat/test_parse_columns.R | no_license | aedobbyn/owmr | R | false | false | 372 | r | context("parse columns")
test_that("apply funcs to columns", {
# when
df <- data.frame(x = 1:5, y = 11:15, z = "OK")
fx <- function(x) x * 2
fy <- function(x) x - 1
# then
result <- parse_columns(df, list(x = fx, y = fy))
expected_x <- seq(2, 10, 2)
expected_y <- 10:14
# ---
expect_equal(result$x, expected_x)
expect_equal(result$y, expected_y)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{shenwei356.theme}
\alias{shenwei356.theme}
\title{shenwei356.theme}
\usage{
shenwei356.theme()
}
\description{
shenwei356's ggplot2 theme
}
| /man/shenwei356.theme.Rd | permissive | shenwei356/swr | R | false | true | 233 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{shenwei356.theme}
\alias{shenwei356.theme}
\title{shenwei356.theme}
\usage{
shenwei356.theme()
}
\description{
shenwei356's ggplot2 theme
}
|
## plot21x: draw one panel of survival step-curves per level of the grouping
## factor `x`, arranged in a two-column grid.
##
## bwsurvival : list of fitted objects, one per level of `x`; each element
##              carries `times2` (event times), `s2c1` (matrix of survival
##              curves, one column per condition) and `wmet` (0 = non-weighted
##              methodology, otherwise weighted).
## x          : grouping variable; its factor levels supply the panel titles.
## ...        : further graphical parameters passed through to plot().
## Called for its plotting side effects; returns nothing useful.
plot21x <-
function(bwsurvival=NA,x=NA,...){
library(Hmisc)
x<-factor(x)
## Grid layout: two columns, enough rows for all panels (one extra row when
## the panel count is odd); outer top margin reserved for the overall title.
if (length(bwsurvival)%%2==0) par(mfrow=c(floor(length(bwsurvival)%/%2),2),font=1,font.axis=3,font.lab=4,las=1,oma=c(0,0,1,0)) else
par(mfrow=c(floor(length(bwsurvival)%/%2+1),2),font=1,font.axis=3,font.lab=4,las=1,oma=c(0,0,1,0))
## First panel: step curve for the first column, then overlay the rest below.
plot(bwsurvival[[1]]$times2, bwsurvival[[1]]$s2c1[,1], col=1,lty=1, lwd=2, type="s",xlim=c(0,max(bwsurvival[[1]]$times2)), ylim=c(0,1),main=paste("",levels(x)[[1]],sep=""),...)
axis(2,at=seq(0,1,.1))
## Overall title records which methodology produced the curves.
if (bwsurvival[[1]]$wmet==0) title(main=" \n \n (Non-weighted methodology)", outer=T) else title(main=" \n \n (Weighted methodology)", outer=T)
for (i in 1:ncol(bwsurvival[[1]]$s2c1)){
lines(bwsurvival[[1]]$times2, bwsurvival[[1]]$s2c1[,i], col=i, lty=i, lwd=2,type="s")
legend('topright',colnames(bwsurvival[[1]]$s2c1),col=1:100,lty=1:100,lwd=2,box.lwd=1)
}
## Remaining panels: same overlay loop for each subsequent level of x.
for (j in 2:length(bwsurvival)){
plot(bwsurvival[[j]]$times2, bwsurvival[[j]]$s2c1[,1], col=j,lty=j, lwd=2,type="s", xlim=c(0,max(bwsurvival[[j]]$times2)),ylim=c(0,1),main=paste("",levels(x)[[j]],sep=""),...)
for (i in 1:ncol(bwsurvival[[1]]$s2c1)){
lines(bwsurvival[[j]]$times2, bwsurvival[[j]]$s2c1[,i], col=i, lty=i, lwd=2,type="s")
}
legend('topright',colnames(bwsurvival[[1]]$s2c1),col=1:i,lty=1:100,lwd=2,box.lwd=1)
}
axis(2,at=seq(0,1,.1))
}
| /R/plot21x.R | no_license | cran/bwsurvival | R | false | false | 1,340 | r | plot21x <-
function(bwsurvival=NA,x=NA,...){
library(Hmisc)
x<-factor(x)
if (length(bwsurvival)%%2==0) par(mfrow=c(floor(length(bwsurvival)%/%2),2),font=1,font.axis=3,font.lab=4,las=1,oma=c(0,0,1,0)) else
par(mfrow=c(floor(length(bwsurvival)%/%2+1),2),font=1,font.axis=3,font.lab=4,las=1,oma=c(0,0,1,0))
plot(bwsurvival[[1]]$times2, bwsurvival[[1]]$s2c1[,1], col=1,lty=1, lwd=2, type="s",xlim=c(0,max(bwsurvival[[1]]$times2)), ylim=c(0,1),main=paste("",levels(x)[[1]],sep=""),...)
axis(2,at=seq(0,1,.1))
if (bwsurvival[[1]]$wmet==0) title(main=" \n \n (Non-weighted methodology)", outer=T) else title(main=" \n \n (Weighted methodology)", outer=T)
for (i in 1:ncol(bwsurvival[[1]]$s2c1)){
lines(bwsurvival[[1]]$times2, bwsurvival[[1]]$s2c1[,i], col=i, lty=i, lwd=2,type="s")
legend('topright',colnames(bwsurvival[[1]]$s2c1),col=1:100,lty=1:100,lwd=2,box.lwd=1)
}
for (j in 2:length(bwsurvival)){
plot(bwsurvival[[j]]$times2, bwsurvival[[j]]$s2c1[,1], col=j,lty=j, lwd=2,type="s", xlim=c(0,max(bwsurvival[[j]]$times2)),ylim=c(0,1),main=paste("",levels(x)[[j]],sep=""),...)
for (i in 1:ncol(bwsurvival[[1]]$s2c1)){
lines(bwsurvival[[j]]$times2, bwsurvival[[j]]$s2c1[,i], col=i, lty=i, lwd=2,type="s")
}
legend('topright',colnames(bwsurvival[[1]]$s2c1),col=1:i,lty=1:100,lwd=2,box.lwd=1)
}
axis(2,at=seq(0,1,.1))
}
|
## Functions described here caches the value of the inverse matrix of a given matrix
## Create a matrix wrapper that can cache its inverse.
## The matrix must be invertible.
##
## Returns a list of four accessor closures (set/get/setInverse/getInverse)
## that share the matrix `x` and its cached inverse via the enclosing
## environment; set() also invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  ## Replace the stored matrix and drop any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  ## Return the stored matrix.
  get <- function() x

  ## Store a freshly computed inverse.
  setInverse <- function(inverse) cached_inverse <<- inverse

  ## Return the cached inverse (NULL when not yet computed).
  getInverse <- function() cached_inverse

  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special matrix created by makeCacheMatrix(),
## computing it with solve() on the first call and serving the cached value
## on subsequent calls.
##
## x   : object returned by makeCacheMatrix() (list of accessor closures)
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  ## BUG FIX: the original tested and returned an undefined variable `m`
  ## here, so every call failed with "object 'm' not found" (or picked up
  ## an unrelated global). Use the value actually fetched from the cache.
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  ## Cache miss: fetch the matrix, invert it, and store the result.
  data <- x$get()
  inverse <- solve(data, ...)
  x$setInverse(inverse)
  inverse
}
| /cachematrix.R | no_license | amandaorbite/ProgrammingAssignment2 | R | false | false | 1,513 | r | ## Functions described here caches the value of the inverse matrix of a given matrix
## This function makes cache of a matrix and the Inverse of it
## Matrix must be invertible
makeCacheMatrix <- function(x = matrix()) {
inverseValue <- NULL
## sets the matrix value. In this time, the inverse
set <- function(y)
{
x <<- y
inverseValue <<- NULL
}
## gets the matrix value
get <- function()
{
x
}
## sets the inverse value
setInverse <- function(inverse)
{
inverseValue <<- inverse
}
## gets the inverse value
getInverse <- function()
{
inverseValue
}
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Return the inverse of the special matrix created by makeCacheMatrix(),
## computing it with solve() on the first call and serving the cached value
## on subsequent calls.
##
## x   : object returned by makeCacheMatrix() (list of accessor closures)
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  ## BUG FIX: the original tested and returned an undefined variable `m`
  ## here, so every call failed with "object 'm' not found" (or picked up
  ## an unrelated global). Use the value actually fetched from the cache.
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  ## Cache miss: fetch the matrix, invert it, and store the result.
  data <- x$get()
  inverse <- solve(data, ...)
  x$setInverse(inverse)
  inverse
}
|
## Function: entropy
## Shannon entropy (base 2) of one column of `data`, identified by name.
##
## data     : data.frame containing the column
## variable : character name of the column to summarise
## Returns the entropy in bits of the column's empirical distribution.
## Equivalent to the original loop over levels, but vectorized: the column
## is looked up once and counts come from table() instead of repeated
## which(names(data) == variable) scans.
entropy <- function(data, variable) {
  values <- data[[variable]]
  ## Empirical probability of each level; the denominator is the full column
  ## length (NAs included), matching the original's length(...) denominator.
  ## A factor column with unused levels yields p = 0 and hence NaN, exactly
  ## as the original 0 * log2(0) did.
  p <- table(values) / length(values)
  -sum(p * log2(p))
}
| /ch1_entropy.R | permissive | bbrewington/data-science-for-business | R | false | false | 578 | r | ## Function: entropy
## Shannon entropy (base 2) of one column of `data`, identified by name.
##
## data     : data.frame containing the column
## variable : character name of the column to summarise
## Returns the entropy in bits of the column's empirical distribution.
## Equivalent to the original loop over levels, but vectorized: the column
## is looked up once and counts come from table() instead of repeated
## which(names(data) == variable) scans.
entropy <- function(data, variable) {
  values <- data[[variable]]
  ## Empirical probability of each level; the denominator is the full column
  ## length (NAs included), matching the original's length(...) denominator.
  ## A factor column with unused levels yields p = 0 and hence NaN, exactly
  ## as the original 0 * log2(0) did.
  p <- table(values) / length(values)
  -sum(p * log2(p))
}
|
#Power calculation
rld.design <- function(nsim, nv, np, ndlevel, nexposure, rho, p0, RR, method = c("LRT", "log-rank"), Siglevel){
##data generation
Gendata <- function(nv, np, ndlevel, nexposure, rho, p0, RR){
if (rho>0){
Nu <- rho/(1-rho)*(pi^2/6)
AlphaBetap <- log((1/Nu)*(exp(-Nu*log(1-p0))-1))
AlphaBetav <- log((1/Nu)*(exp(-Nu*log(1-RR*p0))-1))
n <- nv+np
W <- rgamma(n, shape = 1/Nu, rate = 1/Nu)
R <- log(W)
Xv <- rep(1, nv)
Xp <- rep(0, np)
tempX <- c(Xv, Xp)
X <- as.matrix(sample(tempX), n, 1)
lambda <- matrix(NA, nrow = n, ncol = ndlevel)
for (i in 1:n){
if (X[i]==1) {
lambda[i,] <- 1-exp(-exp(AlphaBetav+R[i]))
} else{
lambda[i,] <- 1-exp(-exp(AlphaBetap+R[i]))
}
}
} else if (rho==0){
AlphaBetap <- log(-log(1-p0))
AlphaBetav <- log(-log(1-RR*p0))
n <- nv+np
Xv <- rep(1, nv)
Xp <- rep(0, np)
tempX <- c(Xv, Xp)
X <- as.matrix(sample(tempX), n, 1)
lambda <- matrix(NA, nrow = n, ncol = ndlevel)
for (i in 1:n){
if (X[i]==1) {
lambda[i,] <- 1-exp(-exp(AlphaBetav))
} else{
lambda[i,] <- 1-exp(-exp(AlphaBetap))
}
}
}
Y <- matrix(NA, nrow = n, ncol = sum(nexposure))
for (i in 1:n){
lambdarep <- rep(lambda[i,], nexposure)
Y[i,] <- rbinom(n = sum(nexposure), size = 1, prob = lambdarep)
}
Ti <- c()
for (i in 1:n){
poi <- which(Y[i,]==1)
if (length(poi)>0){
Ti[i] <- min(poi)
} else {
Ti[i] <- 99
}
}
delta <- c()
delta[which(Ti<=sum(nexposure))] <- 1
delta[which(Ti>sum(nexposure))] <- 0
Ti[which(Ti==99)] <- sum(nexposure)
origdata <- cbind(Ti, delta, X)
colnames(origdata) <- c("time", "delta", "Trt")
origdata <- data.frame(origdata)
return(origdata)
}
##do regression
if (method=='LRT'){
if (rho>0){
initialval0 <- c(seq(-1, 1, length.out = ndlevel), 0.1)
lwrb0 <- c(rep(-Inf, ndlevel), 0.01)
uprb0 <- c(rep(Inf, ndlevel), Inf)
initialval1 <- c(seq(-1, 1, length.out = ndlevel), 1, rep(0, ndlevel-1), 0.1)
lwrb1 <- c(rep(-Inf, ndlevel), -Inf, rep(-Inf, ndlevel-1), 0.01)
uprb1 <- c(rep(Inf, ndlevel), Inf, rep(Inf, ndlevel-1), Inf)
ind <- c()
for (i in 1:nsim){
repeat{
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
newdata <- transdata(data = origdata, ndlevel = ndlevel, nexposure = nexposure)
result0 <- try(rld(formula = Surv(time, delta)~factor(dose), data = newdata, initial = initialval0,
lower = lwrb0, upper = uprb0, frailty = TRUE))
rldcorr1 <- function(initial){
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = initialval1, lower = lwrb1, upper = uprb1, frailty = TRUE))
if (class(tempresult1)=='try-error'){
cat('initial value issue, change another one.\n')
repeat{
newinitialval1 <- c(seq(-1, 1, length.out = ndlevel), 0.5, runif(ndlevel-1, -1, 1), 0.1)
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = newinitialval1, lower = lwrb1, upper = uprb1, frailty = TRUE))
if (class(tempresult1)!='try-error') {break}
}
}
return(tempresult1)
}
result1 <- rldcorr1(initialval1)
LRTresult <- lrtest(model1 = result0, model2 = result1, TestNu = FALSE, Siglevel = Siglevel)
if (LRTresult$pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
} else if (rho==0){
initialval0 <- seq(-1, 1, length.out = ndlevel)
lwrb0 <- rep(-Inf, ndlevel)
uprb0 <- rep(Inf, ndlevel)
initialval1 <- c(seq(-1, 1, length.out = ndlevel), 1, rep(0, ndlevel-1))
lwrb1 <- c(rep(-Inf, ndlevel), -Inf, rep(-Inf, ndlevel-1))
uprb1 <- c(rep(Inf, ndlevel), Inf, rep(Inf, ndlevel-1))
ind <- c()
for (i in 1:nsim){
repeat {
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
newdata <- transdata(data = origdata, ndlevel = ndlevel, nexposure = nexposure)
result0 <- try(rld(formula = Surv(time, delta)~factor(dose), data = newdata, initial = initialval0,
lower = lwrb0, upper = uprb0, frailty = FALSE))
rldcorr1 <- function(initial){
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = initialval1, lower = lwrb1, upper = uprb1, frailty = FALSE))
if (class(tempresult1)=='try-error'){
cat('initial value issue, change another one.\n')
repeat{
newinitialval1 <- c(seq(-1, 1, length.out = ndlevel), 0.5, runif(ndlevel-1, -1, 1))
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = newinitialval1, lower = lwrb1, upper = uprb1, frailty = FALSE))
if (class(tempresult1)!='try-error') {break}
}
}
return(tempresult1)
}
result1 <- rldcorr1(initialval1)
LRTresult <- lrtest(model1 = result0, model2 = result1, TestNu = FALSE, Siglevel = Siglevel)
if (LRTresult$pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
}
} else if (method=='log-rank'){
ind <- c()
for (i in 1:nsim){
repeat {
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
result <- survdiff(formula = Surv(time, delta)~Trt, data = origdata, rho = 0)
pvalue <- pchisq(q = result$chisq, df = 1, ncp = 0, lower.tail = FALSE, log.p = FALSE)
if (pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
}
power <- sum(ind)/nsim
output <- list(method, power)
names(output) <- c("method", "power")
return(output)
}
## S3 print method for objects returned by rld.design(): shows the test
## method used and the simulated power estimate.
##
## x      : list with components `method` and `power` (see rld.design()).
## digits : significant digits used when printing the power estimate.
## Returns `x` invisibly, as print methods conventionally do.
print.rld.design <- function(x, digits = max(3, getOption("digits") - 3), ...){
cat("Statistical test:\n")
print(x$method)
cat("Power calculation:\n")
print(x$power, digits = digits)
## NOTE(review): `x` should already carry class "rld.design" when this method
## is dispatched; this reassignment looks redundant — confirm before removing.
class(x) <- "rld.design"
invisible(x)
}
| /R/rld.design.R | no_license | cran/rld | R | false | false | 7,583 | r |
#Power calculation
rld.design <- function(nsim, nv, np, ndlevel, nexposure, rho, p0, RR, method = c("LRT", "log-rank"), Siglevel){
##data generation
Gendata <- function(nv, np, ndlevel, nexposure, rho, p0, RR){
if (rho>0){
Nu <- rho/(1-rho)*(pi^2/6)
AlphaBetap <- log((1/Nu)*(exp(-Nu*log(1-p0))-1))
AlphaBetav <- log((1/Nu)*(exp(-Nu*log(1-RR*p0))-1))
n <- nv+np
W <- rgamma(n, shape = 1/Nu, rate = 1/Nu)
R <- log(W)
Xv <- rep(1, nv)
Xp <- rep(0, np)
tempX <- c(Xv, Xp)
X <- as.matrix(sample(tempX), n, 1)
lambda <- matrix(NA, nrow = n, ncol = ndlevel)
for (i in 1:n){
if (X[i]==1) {
lambda[i,] <- 1-exp(-exp(AlphaBetav+R[i]))
} else{
lambda[i,] <- 1-exp(-exp(AlphaBetap+R[i]))
}
}
} else if (rho==0){
AlphaBetap <- log(-log(1-p0))
AlphaBetav <- log(-log(1-RR*p0))
n <- nv+np
Xv <- rep(1, nv)
Xp <- rep(0, np)
tempX <- c(Xv, Xp)
X <- as.matrix(sample(tempX), n, 1)
lambda <- matrix(NA, nrow = n, ncol = ndlevel)
for (i in 1:n){
if (X[i]==1) {
lambda[i,] <- 1-exp(-exp(AlphaBetav))
} else{
lambda[i,] <- 1-exp(-exp(AlphaBetap))
}
}
}
Y <- matrix(NA, nrow = n, ncol = sum(nexposure))
for (i in 1:n){
lambdarep <- rep(lambda[i,], nexposure)
Y[i,] <- rbinom(n = sum(nexposure), size = 1, prob = lambdarep)
}
Ti <- c()
for (i in 1:n){
poi <- which(Y[i,]==1)
if (length(poi)>0){
Ti[i] <- min(poi)
} else {
Ti[i] <- 99
}
}
delta <- c()
delta[which(Ti<=sum(nexposure))] <- 1
delta[which(Ti>sum(nexposure))] <- 0
Ti[which(Ti==99)] <- sum(nexposure)
origdata <- cbind(Ti, delta, X)
colnames(origdata) <- c("time", "delta", "Trt")
origdata <- data.frame(origdata)
return(origdata)
}
##do regression
if (method=='LRT'){
if (rho>0){
initialval0 <- c(seq(-1, 1, length.out = ndlevel), 0.1)
lwrb0 <- c(rep(-Inf, ndlevel), 0.01)
uprb0 <- c(rep(Inf, ndlevel), Inf)
initialval1 <- c(seq(-1, 1, length.out = ndlevel), 1, rep(0, ndlevel-1), 0.1)
lwrb1 <- c(rep(-Inf, ndlevel), -Inf, rep(-Inf, ndlevel-1), 0.01)
uprb1 <- c(rep(Inf, ndlevel), Inf, rep(Inf, ndlevel-1), Inf)
ind <- c()
for (i in 1:nsim){
repeat{
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
newdata <- transdata(data = origdata, ndlevel = ndlevel, nexposure = nexposure)
result0 <- try(rld(formula = Surv(time, delta)~factor(dose), data = newdata, initial = initialval0,
lower = lwrb0, upper = uprb0, frailty = TRUE))
rldcorr1 <- function(initial){
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = initialval1, lower = lwrb1, upper = uprb1, frailty = TRUE))
if (class(tempresult1)=='try-error'){
cat('initial value issue, change another one.\n')
repeat{
newinitialval1 <- c(seq(-1, 1, length.out = ndlevel), 0.5, runif(ndlevel-1, -1, 1), 0.1)
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = newinitialval1, lower = lwrb1, upper = uprb1, frailty = TRUE))
if (class(tempresult1)!='try-error') {break}
}
}
return(tempresult1)
}
result1 <- rldcorr1(initialval1)
LRTresult <- lrtest(model1 = result0, model2 = result1, TestNu = FALSE, Siglevel = Siglevel)
if (LRTresult$pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
} else if (rho==0){
initialval0 <- seq(-1, 1, length.out = ndlevel)
lwrb0 <- rep(-Inf, ndlevel)
uprb0 <- rep(Inf, ndlevel)
initialval1 <- c(seq(-1, 1, length.out = ndlevel), 1, rep(0, ndlevel-1))
lwrb1 <- c(rep(-Inf, ndlevel), -Inf, rep(-Inf, ndlevel-1))
uprb1 <- c(rep(Inf, ndlevel), Inf, rep(Inf, ndlevel-1))
ind <- c()
for (i in 1:nsim){
repeat {
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
newdata <- transdata(data = origdata, ndlevel = ndlevel, nexposure = nexposure)
result0 <- try(rld(formula = Surv(time, delta)~factor(dose), data = newdata, initial = initialval0,
lower = lwrb0, upper = uprb0, frailty = FALSE))
rldcorr1 <- function(initial){
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = initialval1, lower = lwrb1, upper = uprb1, frailty = FALSE))
if (class(tempresult1)=='try-error'){
cat('initial value issue, change another one.\n')
repeat{
newinitialval1 <- c(seq(-1, 1, length.out = ndlevel), 0.5, runif(ndlevel-1, -1, 1))
tempresult1 <- try(rld(formula = Surv(time, delta)~factor(dose)*factor(Trt), data = newdata,
initial = newinitialval1, lower = lwrb1, upper = uprb1, frailty = FALSE))
if (class(tempresult1)!='try-error') {break}
}
}
return(tempresult1)
}
result1 <- rldcorr1(initialval1)
LRTresult <- lrtest(model1 = result0, model2 = result1, TestNu = FALSE, Siglevel = Siglevel)
if (LRTresult$pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
}
} else if (method=='log-rank'){
ind <- c()
for (i in 1:nsim){
repeat {
origdata <- Gendata(nv = nv, np = np, ndlevel = ndlevel, nexposure = nexposure, rho = rho,
p0 = p0, RR = RR)
if (max(origdata$time)>sum(nexposure[-ndlevel])) {break}
}
result <- survdiff(formula = Surv(time, delta)~Trt, data = origdata, rho = 0)
pvalue <- pchisq(q = result$chisq, df = 1, ncp = 0, lower.tail = FALSE, log.p = FALSE)
if (pvalue<=Siglevel){
ind[i] <- 1
} else {
ind[i] <- 0
}
print(paste("Simulation", i))
}
}
power <- sum(ind)/nsim
output <- list(method, power)
names(output) <- c("method", "power")
return(output)
}
print.rld.design <- function(x, digits = max(3, getOption("digits") - 3), ...){
cat("Statistical test:\n")
print(x$method)
cat("Power calculation:\n")
print(x$power, digits = digits)
class(x) <- "rld.design"
invisible(x)
}
|
##############################################################################################################################
# Title: Profile-Line.R
# Type: Module for DCR Shiny App
# Description: Line Plot for
# Written by: Nick Zinck, Spring 2017
##############################################################################################################################
# Notes:
# 1. req() will delay the rendering of a widget or other reactive object until a certain logical expression is TRUE or not NULL
# 2. Tried Progress Bar for Plot and did not work well. Used Custom Message instead
# To-Do List:
# 1. Make Loading Bar for Plot
# 2. Make option for COloring Scale (whether based on Site, Year; Site; or None)
# 3. Change Decimal Date to DOY
##############################################################################################################################
# User Interface
##
PROF_LINE_UI <- function(id, df) {
ns <- NS(id)
tagList(
wellPanel(
fluidRow(
column(2,
# SITE
wellPanel(
checkboxGroupInput(ns("site"), "Site: (Select First)",
choices = levels(factor(df$Site)))
)
),
column(2,
# SITE
wellPanel(
h3(textOutput(ns("text_site_null")), align = "center"),
h3(textOutput(ns("text_param_null")), align = "center"),
h4(textOutput(ns("text_num_text")), align = "center"),
h3(textOutput(ns("text_num")), align = "center")
)
),
column(3,
# PARAMETER
wellPanel(
uiOutput(ns("param_ui"))
)
),
column(5,
# DATE
wellPanel(
fluidRow(
column(6,
radioButtons(ns("date_option"), "Choose Date Method:",
choices=c("Calendar Range",
"Select Year",
"Select Month",
"Select Day"),
selected = "Calendar Range")
),
column(6,
uiOutput(ns("date_ui"))
)
) # end Fluid Row
) # end Well Panel
) # end Column
) # end Fluid Row
), # well panel
tabsetPanel(
# the "Plot" tab panel where everything realted to the plot goes
tabPanel("Custom Plot",
PLOT_PROFLINE_CUSTOM_UI(ns("plot"))
),
tabPanel("Standard Template Line Plot",
fluidRow(
h2("Soon to Come", align = "center")
)
),
tabPanel("Table",
fluidRow(
dataTableOutput(ns("table_dynamic"))
)
)
) # end tabsetpanel
) # end taglist
}
##############################################################################################################################
# Server Function
##############################################################################################################################
PROF_LINE <- function(input, output, session, df) {
ns <- session$ns
# filter DF for blank data
df <- df %>% filter(!is.na(Date),
!is.na(Depth_m),
!is.na(Result))
# Non Historical Parameters (when a Parameter has not been used in over 5 years). See General Note 6
parameters_non_historical <- df %>%
filter(Date > Sys.Date()-years(5), Date < Sys.Date()) %>%
.$Parameter %>%
factor() %>%
levels()
# Parameter Selection UI
output$param_ui <- renderUI({
req(input$site) # See General Note 5
ns <- session$ns # see General Note 1
# Parameters which have data at any Site (in the mofule's df) within 5 years.
param_choices_new <- df %>%
filter(Site %in% input$site,
Parameter %in% parameters_non_historical) %>%
.$Parameter %>%
factor() %>%
levels()
# Parameters which do NOT have data at any Site (in the mofule's df) within 5 years.
param_choices_old <- df %>%
filter(Site %in% input$site,
!(Parameter %in% parameters_non_historical)) %>%
.$Parameter %>%
factor() %>%
levels()
# Recent Parameters first and then old parameters
param_choices <- c(param_choices_new, param_choices_old)
# Parameter Input
checkboxGroupInput(ns("param"), "Parameter:",
choices=levels(factor(df$Parameter)))
})
# Depending on input$date.option, we'll generate a different UI date component
output$date_ui <- renderUI({
req(input$site) # See General Note 5
dates <- df %>%
filter(Site %in% input$site) %>%
.$Date
date_min <- dates %>% min(na.rm=TRUE)
date_max <- dates %>% max(na.rm=TRUE)
# Date Input
months_unique <- levels(factor(month(dates)))
days_unique <- levels(factor(dates))
switch(input$date_option,
"Calendar Range" = dateRangeInput(ns("date"), "Date Range:",
start = date_max - years(1),
end = date_max,
min = date_min,
max = date_max,
startview = "year"),
"Select Year" = selectInput(ns("date"), "Year:",
choices = year(seq(date_min, date_max, "years")),
selected = year(date_max)),
"Select Month" = selectInput(ns("date"), "Month:",
choices = c(months_unique),
selected = month(Sys.Date())),
"Select Day" = selectInput(ns("date"), "Day:",
choices = days_unique)
)
})
# Reactive Data Frames for different date selection methods:
Df2 <- reactive({
req(input$site, input$param, input$date, input$date_option) # See General Note 5
if(input$date_option == "Select Year"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
year(Date) == input$date)
} else if (input$date_option == "Select Month"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
month(Date) == input$date)
} else if (input$date_option == "Calendar Range"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
Date > input$date[1], Date < input$date[2])
} else if (input$date_option == "Select Day"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
Date == input$date)
}
})
# Text - Select Site
output$text_site_null <- renderText({
req(is.null(input$site)) # See General Note 1
"Select Site(s)"
})
# Text - Select Parameter
output$text_param_null <- renderText({
req(!is.null(input$site), is.null(input$param)) # See General Note 1
"Select Parameter(s)"
})
# Text - Number of Samples
output$text_num_text <- renderText({
req(input$site, input$param) # See General Note 1
"Number of Samples in Selected Data"
})
# Text - Number of Samples
output$text_num <- renderText({
req(Df2()) # See General Note 1
Df2() %>% summarise(n()) %>% paste()
})
# Plot
callModule(PLOT_PROFLINE_CUSTOM, "plot", Df = Df2)
# Table
output$table_dynamic <- renderDataTable(Df2())
}
| /modules/plots/profile_line.R | permissive | dancrocker/DCR-WAVE | R | false | false | 7,722 | r | ##############################################################################################################################
# Title: Profile-Line.R
# Type: Module for DCR Shiny App
# Description: Line Plot for
# Written by: Nick Zinck, Spring 2017
##############################################################################################################################
# Notes:
# 1. req() will delay the rendering of a widget or other reactive object until a certain logical expression is TRUE or not NULL
# 2. Tried Progress Bar for Plot and did not work well. Used Custom Message instead
# To-Do List:
# 1. Make Loading Bar for Plot
# 2. Make option for COloring Scale (whether based on Site, Year; Site; or None)
# 3. Change Decimal Date to DOY
##############################################################################################################################
# User Interface
##
PROF_LINE_UI <- function(id, df) {
ns <- NS(id)
tagList(
wellPanel(
fluidRow(
column(2,
# SITE
wellPanel(
checkboxGroupInput(ns("site"), "Site: (Select First)",
choices = levels(factor(df$Site)))
)
),
column(2,
# SITE
wellPanel(
h3(textOutput(ns("text_site_null")), align = "center"),
h3(textOutput(ns("text_param_null")), align = "center"),
h4(textOutput(ns("text_num_text")), align = "center"),
h3(textOutput(ns("text_num")), align = "center")
)
),
column(3,
# PARAMETER
wellPanel(
uiOutput(ns("param_ui"))
)
),
column(5,
# DATE
wellPanel(
fluidRow(
column(6,
radioButtons(ns("date_option"), "Choose Date Method:",
choices=c("Calendar Range",
"Select Year",
"Select Month",
"Select Day"),
selected = "Calendar Range")
),
column(6,
uiOutput(ns("date_ui"))
)
) # end Fluid Row
) # end Well Panel
) # end Column
) # end Fluid Row
), # well panel
tabsetPanel(
# the "Plot" tab panel where everything realted to the plot goes
tabPanel("Custom Plot",
PLOT_PROFLINE_CUSTOM_UI(ns("plot"))
),
tabPanel("Standard Template Line Plot",
fluidRow(
h2("Soon to Come", align = "center")
)
),
tabPanel("Table",
fluidRow(
dataTableOutput(ns("table_dynamic"))
)
)
) # end tabsetpanel
) # end taglist
}
##############################################################################################################################
# Server Function
##############################################################################################################################
PROF_LINE <- function(input, output, session, df) {
ns <- session$ns
# filter DF for blank data
df <- df %>% filter(!is.na(Date),
!is.na(Depth_m),
!is.na(Result))
# Non Historical Parameters (when a Parameter has not been used in over 5 years). See General Note 6
parameters_non_historical <- df %>%
filter(Date > Sys.Date()-years(5), Date < Sys.Date()) %>%
.$Parameter %>%
factor() %>%
levels()
# Parameter Selection UI
output$param_ui <- renderUI({
req(input$site) # See General Note 5
ns <- session$ns # see General Note 1
# Parameters which have data at any Site (in the mofule's df) within 5 years.
param_choices_new <- df %>%
filter(Site %in% input$site,
Parameter %in% parameters_non_historical) %>%
.$Parameter %>%
factor() %>%
levels()
# Parameters which do NOT have data at any Site (in the mofule's df) within 5 years.
param_choices_old <- df %>%
filter(Site %in% input$site,
!(Parameter %in% parameters_non_historical)) %>%
.$Parameter %>%
factor() %>%
levels()
# Recent Parameters first and then old parameters
param_choices <- c(param_choices_new, param_choices_old)
# Parameter Input
checkboxGroupInput(ns("param"), "Parameter:",
choices=levels(factor(df$Parameter)))
})
# Depending on input$date.option, we'll generate a different UI date component
output$date_ui <- renderUI({
req(input$site) # See General Note 5
dates <- df %>%
filter(Site %in% input$site) %>%
.$Date
date_min <- dates %>% min(na.rm=TRUE)
date_max <- dates %>% max(na.rm=TRUE)
# Date Input
months_unique <- levels(factor(month(dates)))
days_unique <- levels(factor(dates))
switch(input$date_option,
"Calendar Range" = dateRangeInput(ns("date"), "Date Range:",
start = date_max - years(1),
end = date_max,
min = date_min,
max = date_max,
startview = "year"),
"Select Year" = selectInput(ns("date"), "Year:",
choices = year(seq(date_min, date_max, "years")),
selected = year(date_max)),
"Select Month" = selectInput(ns("date"), "Month:",
choices = c(months_unique),
selected = month(Sys.Date())),
"Select Day" = selectInput(ns("date"), "Day:",
choices = days_unique)
)
})
# Reactive Data Frames for different date selection methods:
Df2 <- reactive({
req(input$site, input$param, input$date, input$date_option) # See General Note 5
if(input$date_option == "Select Year"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
year(Date) == input$date)
} else if (input$date_option == "Select Month"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
month(Date) == input$date)
} else if (input$date_option == "Calendar Range"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
Date > input$date[1], Date < input$date[2])
} else if (input$date_option == "Select Day"){
df %>%
filter(Parameter %in% input$param,
Site %in% c(input$site),
Date == input$date)
}
})
# Text - Select Site
output$text_site_null <- renderText({
req(is.null(input$site)) # See General Note 1
"Select Site(s)"
})
# Text - Select Parameter
output$text_param_null <- renderText({
req(!is.null(input$site), is.null(input$param)) # See General Note 1
"Select Parameter(s)"
})
# Text - Number of Samples
output$text_num_text <- renderText({
req(input$site, input$param) # See General Note 1
"Number of Samples in Selected Data"
})
# Text - Number of Samples
output$text_num <- renderText({
req(Df2()) # See General Note 1
Df2() %>% summarise(n()) %>% paste()
})
# Plot
callModule(PLOT_PROFLINE_CUSTOM, "plot", Df = Df2)
# Table
output$table_dynamic <- renderDataTable(Df2())
}
|
\name{autoFill}
\alias{autoFill}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Automatically fill out blanks of a vector with the preceding label }
\description{
\code{autoFill} is commonly used to generate labels from columns of a
spreadsheet when many cells are left blank in order to save a lot of
typing. For instance, c("a","","","b","") becomes
c("a","a","a","b","b")
}
\usage{
autoFill(x, squash = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a vector of character strings}
\item{squash}{ If set to TRUE then
leading and trailing spaces are removed which is useful if spaces are
inadvertently typed because these may be hard to track down. Default: FALSE}
}
\value{
\item{x }{a vector of character strings with blank strings replaced by
preceding non--blank strings}
}
\author{Peter Baker \email{p.baker1@uq.edu.au}}
\note{ While this function may be called directly, it is more often
called by \code{makeLabel}
}
\seealso{ \code{\link{makeLabel}} uses \code{autoFill} to create labels
from two columns of marker names }
\examples{
## description: fill out blanks of a vector with preceding label
label.1 <- c("a","","","b","")
print(autoFill(label.1))
label.2 <- c("agc","","","","gct5","","ccc","","")
print(autoFill(label.2))
}
\keyword{manip}
\keyword{category}
| /man/autoFill.Rd | no_license | petebaker/polysegratio | R | false | false | 1,378 | rd | \name{autoFill}
\alias{autoFill}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Automatically fill out blanks of a vector with the preceding label }
\description{
\code{autoFill} is commonly used to generate labels from columns of a
spreadsheet when many cells are left blank in order to save a lot of
typing. For instance, c("a","","","b","") becomes
c("a","a","a","b","b")
}
\usage{
autoFill(x, squash = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a vector of character strings}
\item{squash}{ If set to TRUE then
leading and trailing spaces are removed which is useful if spaces are
inadvertently typed because these may be hard to track down. Default: FALSE}
}
\value{
\item{x }{a vector of character strings with blank strings replaced by
preceding non--blank strings}
}
\author{Peter Baker \email{p.baker1@uq.edu.au}}
\note{ While this function may be called directly, it is more often
called by \code{makeLabel}
}
\seealso{ \code{\link{makeLabel}} uses \code{autoFill} to create labels
from two columns of marker names }
\examples{
## description: fill out blanks of a vector with preceding label
label.1 <- c("a","","","b","")
print(autoFill(label.1))
label.2 <- c("agc","","","","gct5","","ccc","","")
print(autoFill(label.2))
}
\keyword{manip}
\keyword{category}
|
# Standard testthat entry point (tests/testthat.R): attaches the test
# framework and the package under test, then discovers and runs every
# test file under tests/testthat/ for the 'tsic' package.
library(testthat)
library(tsic)
test_check("tsic")
| /tests/testthat.R | no_license | philliplab/tsic | R | false | false | 52 | r | library(testthat)
library(tsic)
test_check("tsic")
|
## PCA visualisation of the iris data set (first two principal components).
## Accuracy : 0.9556
#install.packages("caret")
library("caret")
#removing categorical column
# FIX: iris.data holds 3 classes x 50 rows each, so the per-class count
# is 50, not 100 (with k = 100L the pch vector had 300 entries for 150
# points and the symbols no longer lined up with the classes).
k = 50L
####update the file path####
iris <- read.csv("C:\\Users\\Rachit Agrawal\\Downloads\\iris.data", header=F, sep=",")
data <- iris[,c(1:4)]
############
visualize <- prcomp(data, scale = TRUE)
visualize <- visualize$x[,1:2]
# FIX: the species label in iris.data is column V5 (V1 is sepal length),
# and the old cbind() of a character column onto the numeric score matrix
# coerced the whole matrix to character, so plot() received non-numeric
# coordinates.
species <- as.integer(factor(iris$V5))
# FIX: 'transpose' was not defined anywhere (base R has no such function
# for a vector); index the colour vector directly with the class id.
plot(visualize[, 1L], visualize[, 2L],
     bg = c("#E41A1C", "#377EB8", "#4DAF4A")[species],
     pch = c(rep(22, k), rep(21, k), rep(25, k))
)
##############
| /iris/pca_only.R | no_license | rachitagrawal20/Dimentionality-Reduction-and-classification | R | false | false | 630 | r | ## PCA+KNN implemented
## Accuracy : 0.9556
#install.packages("caret")
library("caret")
#removing categorical column
k = 100L
####update the file path####
iris <- read.csv("C:\\Users\\Rachit Agrawal\\Downloads\\iris.data", header=F, sep=",")
data <- iris[,c(1:4)]
############
visualize <- prcomp(data, scale = TRUE)
visualize <- visualize$x[,1:2]
visualize <- cbind(V1 = as.character(iris$V1), visualize)
plot(visualize[, 2L], visualize[, 3L],
bg = c("#E41A1C", "#377EB8", "#4DAF4A")[transpose(c(rep(1,50),rep(2,50),rep(3,50)))],
pch = c(rep(22, k), rep(21, k), rep(25, k))
)
##############
|
# Integration test for getUpdates(): it needs network access and a valid
# session, so it is skipped when offline or when login() fails.
context("test-getupdates")
test_that("can get updates", {
  skip_if_offline()
  skip_if(!login())
  updates <- getUpdates()
  # At least one update should come back, and the payload should expose
  # a 'matches' element (presumably the Tinder matches — confirm against
  # the API wrapper).
  expect_true(length(updates) > 0)
  expect_true("matches" %in% names(updates))
})
| /tests/testthat/test-getupdates.R | no_license | jcrodriguez1989/Rtinder | R | false | false | 219 | r | context("test-getupdates")
test_that("can get updates", {
skip_if_offline()
skip_if(!login())
updates <- getUpdates()
expect_true(length(updates) > 0)
expect_true("matches" %in% names(updates))
})
|
rThomas.sp2 <- function(alpha, scale, mu, win=sphwin(type="sphere"), parents=FALSE) {
  ## Simulate a Thomas-style cluster process on (a window of) the sphere:
  ## Poisson parents with intensity 'alpha', each producing Poisson(mu)
  ## offspring displaced by a Fisher density with concentration 'scale';
  ## offspring falling outside 'win' are discarded.  If parents=TRUE the
  ## parent points are returned together with (before) the offspring.
  stopifnot(inherits(win, "sphwin") && alpha > 0 && scale > 0 && mu > 0)
  rp <- rpoispp.sp2(lambda=alpha, win=win, as.sp=FALSE)
  rpl <- nrow(rp)
  ## Accumulate each parent's surviving offspring in a list and bind once
  ## at the end, instead of growing a matrix with rbind() in the loop.
  kids <- vector("list", rpl)
  ## FIX: seq_len() is safe when rpl == 0; the original 1:rpl would have
  ## iterated over c(1, 0) and indexed a non-existent parent.
  for(i in seq_len(rpl)) {
    np <- rpois(1, mu)
    if(np > 0) {
      rThom1 <- rFisher(n=np, mode=rp[i,], kappa=scale, win=sphwin(type="sphere", rad=win$rad))$X
      inrt <- in.W(points=rThom1, win=win)
      ## drop=FALSE keeps a one-row result as a matrix.
      kids[[i]] <- rThom1[inrt, , drop=FALSE]
    }
  }
  offspring <- do.call(rbind, kids)
  if(is.null(offspring)) {
    ## FIX: with zero surviving offspring the original sliced
    ## (rpl+1):nrow(rThom2), which runs backwards; keep an empty matrix
    ## with the correct number of columns instead.
    offspring <- rp[0, , drop=FALSE]
  }
  output <- if(parents) rbind(rp, offspring) else offspring
  sp2(X=output, win=win)
}
| /R/rThomas.sp2.R | no_license | baddstats/spherstat | R | false | false | 641 | r | rThomas.sp2 <- function(alpha, scale, mu, win=sphwin(type="sphere"), parents=FALSE) {
stopifnot(inherits(win, "sphwin") && alpha > 0 && scale > 0 && mu > 0)
rp <- rpoispp.sp2(lambda=alpha, win=win, as.sp=FALSE)
rpl <- nrow(rp)
rThom2 <- rp
for(i in 1:rpl) {
np <- rpois(1, mu)
if(np > 0) {
rThom1 <- rFisher(n=np, mode=rp[i,], kappa=scale, win=sphwin(type="sphere", rad=win$rad))$X
inrt <- in.W(points=rThom1, win=win)
rThom2 <- rbind(rThom2, rThom1[inrt,])
}
}
if(!parents) {
output <- rThom2[(rpl+1):nrow(rThom2),]
}
else {
output <- rThom2
}
output <- sp2(X=output, win=win)
output
}
|
# Load the shared helper code from the directory above this chapter.
source("../COMMON.R")
| /kubobook_2012/glmm/COMMON.R | no_license | yoshiki146/Stat_Modelling_for_Data_Analysis | R | false | false | 22 | r | source("../COMMON.R")
|
\name{print.lm.madlib}
\alias{print.lm.madlib}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
function to do ... ~~
}
\description{
A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{print}{lm.madlib}(x, digits = max(3L, getOption("digits") - 3L), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Describe \code{x} here~~
}
\item{digits}{
Describe \code{digits} here~~
}
\item{\dots}{
Describe \code{\dots} here~~
}
}
\details{
If necessary, more details than the description above ~~
}
\value{
Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
put references to the literature/web site here ~
}
\author{
who you are~~
}
\note{
further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## Not run:
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
"print.lm.madlib"
## End (Not run)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /PivotalR/man/print.lm.madlib.Rd | no_license | nborwankar/rmadlib | R | false | false | 1,382 | rd | \name{print.lm.madlib}
\alias{print.lm.madlib}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
function to do ... ~~
}
\description{
A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{print}{lm.madlib}(x, digits = max(3L, getOption("digits") - 3L), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Describe \code{x} here~~
}
\item{digits}{
Describe \code{digits} here~~
}
\item{\dots}{
Describe \code{\dots} here~~
}
}
\details{
If necessary, more details than the description above ~~
}
\value{
Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
put references to the literature/web site here ~
}
\author{
who you are~~
}
\note{
further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## Not run:
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
"print.lm.madlib"
## End (Not run)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## Question 3.
## Reads in the relevant dataset.
q3_data <- read.csv("question3_data.csv")
## Re-directs output to a file (relies on top-level auto-printing, i.e.
## running the script via Rscript / R CMD BATCH).
sink(file="question3_results_UPDRS_maleandfemale.txt")
## Performs linear regression.
## Output = UPDRS.
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin <- lm(UPDRS~college+agri+statin+gender+ethnic+income+
                         smoke+symptom_years, data=q3_data)
summary(UPDRS_all_statin)
confint(UPDRS_all_statin)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin <- lm(UPDRS~statin+symptom_years, data=q3_data)
summary(UPDRS_statin)
confint(UPDRS_statin)
cat("\nUPDRS vs. college\n")
UPDRS_college <- lm(UPDRS~college+symptom_years, data=q3_data)
summary(UPDRS_college)
confint(UPDRS_college)
cat("\nUPDRS vs. agri\n")
UPDRS_agri <- lm(UPDRS~agri+symptom_years, data=q3_data)
## FIX: this section previously reported summary/confint of UPDRS_statin
## (copy-paste error), so the agri model was never reported.
summary(UPDRS_agri)
confint(UPDRS_agri)
cat("\nUPDRS vs. gender\n")
UPDRS_gender <- lm(UPDRS~gender+symptom_years, data=q3_data)
summary(UPDRS_gender)
confint(UPDRS_gender)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic <- lm(UPDRS~ethnic+symptom_years, data=q3_data)
summary(UPDRS_ethnic)
confint(UPDRS_ethnic)
cat("\nUPDRS vs. income\n")
UPDRS_income <- lm(UPDRS~income+symptom_years, data=q3_data)
summary(UPDRS_income)
confint(UPDRS_income)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke <- lm(UPDRS~smoke+symptom_years, data=q3_data)
summary(UPDRS_smoke)
confint(UPDRS_smoke)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_maleandfemale.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin <- lm(Golbe~college+agri+statin+gender+ethnic+income+
                         smoke+symptom_years, data=q3_data)
summary(Golbe_all_statin)
confint(Golbe_all_statin)
cat("\nGolbe vs. Statin\n")
Golbe_statin <- lm(Golbe~statin+symptom_years, data=q3_data)
summary(Golbe_statin)
confint(Golbe_statin)
cat("\nGolbe vs. college\n")
Golbe_college <- lm(Golbe~college+symptom_years, data=q3_data)
summary(Golbe_college)
confint(Golbe_college)
cat("\nGolbe vs. agri\n")
Golbe_agri <- lm(Golbe~agri+symptom_years, data=q3_data)
## FIX: previously summarised Golbe_statin here (copy-paste error).
summary(Golbe_agri)
confint(Golbe_agri)
cat("\nGolbe vs. gender\n")
Golbe_gender <- lm(Golbe~gender+symptom_years, data=q3_data)
summary(Golbe_gender)
confint(Golbe_gender)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic <- lm(Golbe~ethnic+symptom_years, data=q3_data)
summary(Golbe_ethnic)
confint(Golbe_ethnic)
cat("\nGolbe vs. income\n")
Golbe_income <- lm(Golbe~income+symptom_years, data=q3_data)
summary(Golbe_income)
confint(Golbe_income)
cat("\nGolbe vs. smoke\n")
Golbe_smoke <- lm(Golbe~smoke+symptom_years, data=q3_data)
summary(Golbe_smoke)
confint(Golbe_smoke)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_maleandfemale.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin <- lm(MDRS~college+agri+statin+gender+ethnic+income+
                        smoke+symptom_years, data=q3_data)
summary(MDRS_all_statin)
confint(MDRS_all_statin)
cat("\nMDRS vs. Statin\n")
MDRS_statin <- lm(MDRS~statin+symptom_years, data=q3_data)
summary(MDRS_statin)
confint(MDRS_statin)
cat("\nMDRS vs. college\n")
MDRS_college <- lm(MDRS~college+symptom_years, data=q3_data)
summary(MDRS_college)
confint(MDRS_college)
cat("\nMDRS vs. agri\n")
MDRS_agri <- lm(MDRS~agri+symptom_years, data=q3_data)
## FIX: previously summarised MDRS_statin here (copy-paste error).
summary(MDRS_agri)
confint(MDRS_agri)
cat("\nMDRS vs. gender\n")
MDRS_gender <- lm(MDRS~gender+symptom_years, data=q3_data)
summary(MDRS_gender)
confint(MDRS_gender)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic <- lm(MDRS~ethnic+symptom_years, data=q3_data)
summary(MDRS_ethnic)
confint(MDRS_ethnic)
cat("\nMDRS vs. income\n")
MDRS_income <- lm(MDRS~income+symptom_years, data=q3_data)
summary(MDRS_income)
confint(MDRS_income)
cat("\nMDRS vs. smoke\n")
MDRS_smoke <- lm(MDRS~smoke+symptom_years, data=q3_data)
summary(MDRS_smoke)
confint(MDRS_smoke)
sink()
## Separates the males and females into two data frames for the
## gender-stratified models below.
is_male <- q3_data$gender == "Male"
q3_data_male <- q3_data[is_male, ]
q3_data_female <- q3_data[!is_male, ]
## Repeats above for Males.
## Output = UPDRS.
sink(file="question3_results_UPDRS_male.txt")
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin_male <- lm(UPDRS~college+agri+statin+ethnic+income+
                              smoke+symptom_years, data=q3_data_male)
summary(UPDRS_all_statin_male)
confint(UPDRS_all_statin_male)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin_male <- lm(UPDRS~statin+symptom_years, data=q3_data_male)
summary(UPDRS_statin_male)
confint(UPDRS_statin_male)
cat("\nUPDRS vs. college\n")
UPDRS_college_male <- lm(UPDRS~college+symptom_years, data=q3_data_male)
summary(UPDRS_college_male)
confint(UPDRS_college_male)
cat("\nUPDRS vs. agri\n")
UPDRS_agri_male <- lm(UPDRS~agri+symptom_years, data=q3_data_male)
## FIX: previously summarised UPDRS_statin_male here (copy-paste error).
summary(UPDRS_agri_male)
confint(UPDRS_agri_male)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic_male <- lm(UPDRS~ethnic+symptom_years, data=q3_data_male)
summary(UPDRS_ethnic_male)
confint(UPDRS_ethnic_male)
cat("\nUPDRS vs. income\n")
UPDRS_income_male <- lm(UPDRS~income+symptom_years, data=q3_data_male)
summary(UPDRS_income_male)
confint(UPDRS_income_male)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke_male <- lm(UPDRS~smoke+symptom_years, data=q3_data_male)
summary(UPDRS_smoke_male)
confint(UPDRS_smoke_male)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_male.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin_male <- lm(Golbe~college+agri+statin+ethnic+income+
                              smoke+symptom_years, data=q3_data_male)
summary(Golbe_all_statin_male)
confint(Golbe_all_statin_male)
cat("\nGolbe vs. Statin\n")
Golbe_statin_male <- lm(Golbe~statin+symptom_years, data=q3_data_male)
summary(Golbe_statin_male)
confint(Golbe_statin_male)
cat("\nGolbe vs. college\n")
Golbe_college_male <- lm(Golbe~college+symptom_years, data=q3_data_male)
summary(Golbe_college_male)
confint(Golbe_college_male)
cat("\nGolbe vs. agri\n")
Golbe_agri_male <- lm(Golbe~agri+symptom_years, data=q3_data_male)
## FIX: previously summarised Golbe_statin_male here (copy-paste error).
summary(Golbe_agri_male)
confint(Golbe_agri_male)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic_male <- lm(Golbe~ethnic+symptom_years, data=q3_data_male)
summary(Golbe_ethnic_male)
confint(Golbe_ethnic_male)
cat("\nGolbe vs. income\n")
Golbe_income_male <- lm(Golbe~income+symptom_years, data=q3_data_male)
summary(Golbe_income_male)
confint(Golbe_income_male)
cat("\nGolbe vs. smoke\n")
Golbe_smoke_male <- lm(Golbe~smoke+symptom_years, data=q3_data_male)
summary(Golbe_smoke_male)
confint(Golbe_smoke_male)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_male.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin_male <- lm(MDRS~college+agri+statin+ethnic+income+
                             smoke+symptom_years, data=q3_data_male)
summary(MDRS_all_statin_male)
confint(MDRS_all_statin_male)
cat("\nMDRS vs. Statin\n")
MDRS_statin_male <- lm(MDRS~statin+symptom_years, data=q3_data_male)
summary(MDRS_statin_male)
confint(MDRS_statin_male)
cat("\nMDRS vs. college\n")
MDRS_college_male <- lm(MDRS~college+symptom_years, data=q3_data_male)
summary(MDRS_college_male)
confint(MDRS_college_male)
cat("\nMDRS vs. agri\n")
MDRS_agri_male <- lm(MDRS~agri+symptom_years, data=q3_data_male)
## FIX: previously summarised MDRS_statin_male here (copy-paste error).
summary(MDRS_agri_male)
confint(MDRS_agri_male)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic_male <- lm(MDRS~ethnic+symptom_years, data=q3_data_male)
summary(MDRS_ethnic_male)
confint(MDRS_ethnic_male)
cat("\nMDRS vs. income\n")
MDRS_income_male <- lm(MDRS~income+symptom_years, data=q3_data_male)
summary(MDRS_income_male)
confint(MDRS_income_male)
cat("\nMDRS vs. smoke\n")
MDRS_smoke_male <- lm(MDRS~smoke+symptom_years, data=q3_data_male)
summary(MDRS_smoke_male)
confint(MDRS_smoke_male)
sink()
## Repeats above for Females.
## Output = UPDRS.
sink(file="question3_results_UPDRS_female.txt")
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin_female <- lm(UPDRS~college+agri+statin+ethnic+income+
                                smoke+symptom_years, data=q3_data_female)
summary(UPDRS_all_statin_female)
confint(UPDRS_all_statin_female)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin_female <- lm(UPDRS~statin+symptom_years, data=q3_data_female)
summary(UPDRS_statin_female)
confint(UPDRS_statin_female)
cat("\nUPDRS vs. college\n")
UPDRS_college_female <- lm(UPDRS~college+symptom_years, data=q3_data_female)
summary(UPDRS_college_female)
confint(UPDRS_college_female)
cat("\nUPDRS vs. agri\n")
UPDRS_agri_female <- lm(UPDRS~agri+symptom_years, data=q3_data_female)
## FIX: previously summarised UPDRS_statin_female here (copy-paste error).
summary(UPDRS_agri_female)
confint(UPDRS_agri_female)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic_female <- lm(UPDRS~ethnic+symptom_years, data=q3_data_female)
summary(UPDRS_ethnic_female)
confint(UPDRS_ethnic_female)
cat("\nUPDRS vs. income\n")
UPDRS_income_female <- lm(UPDRS~income+symptom_years, data=q3_data_female)
summary(UPDRS_income_female)
confint(UPDRS_income_female)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke_female <- lm(UPDRS~smoke+symptom_years, data=q3_data_female)
summary(UPDRS_smoke_female)
confint(UPDRS_smoke_female)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_female.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin_female <- lm(Golbe~college+agri+statin+ethnic+income+
                                smoke+symptom_years, data=q3_data_female)
summary(Golbe_all_statin_female)
confint(Golbe_all_statin_female)
cat("\nGolbe vs. Statin\n")
Golbe_statin_female <- lm(Golbe~statin+symptom_years, data=q3_data_female)
summary(Golbe_statin_female)
confint(Golbe_statin_female)
cat("\nGolbe vs. college\n")
Golbe_college_female <- lm(Golbe~college+symptom_years, data=q3_data_female)
summary(Golbe_college_female)
confint(Golbe_college_female)
cat("\nGolbe vs. agri\n")
Golbe_agri_female <- lm(Golbe~agri+symptom_years, data=q3_data_female)
## FIX: previously summarised Golbe_statin_female here (copy-paste error).
summary(Golbe_agri_female)
confint(Golbe_agri_female)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic_female <- lm(Golbe~ethnic+symptom_years, data=q3_data_female)
summary(Golbe_ethnic_female)
confint(Golbe_ethnic_female)
cat("\nGolbe vs. income\n")
Golbe_income_female <- lm(Golbe~income+symptom_years, data=q3_data_female)
summary(Golbe_income_female)
confint(Golbe_income_female)
cat("\nGolbe vs. smoke\n")
Golbe_smoke_female <- lm(Golbe~smoke+symptom_years, data=q3_data_female)
summary(Golbe_smoke_female)
confint(Golbe_smoke_female)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_female.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin_female <- lm(MDRS~college+agri+statin+ethnic+income+
                               smoke+symptom_years, data=q3_data_female)
summary(MDRS_all_statin_female)
confint(MDRS_all_statin_female)
cat("\nMDRS vs. Statin\n")
MDRS_statin_female <- lm(MDRS~statin+symptom_years, data=q3_data_female)
summary(MDRS_statin_female)
confint(MDRS_statin_female)
cat("\nMDRS vs. college\n")
MDRS_college_female <- lm(MDRS~college+symptom_years, data=q3_data_female)
summary(MDRS_college_female)
confint(MDRS_college_female)
cat("\nMDRS vs. agri\n")
MDRS_agri_female <- lm(MDRS~agri+symptom_years, data=q3_data_female)
## FIX: previously summarised MDRS_statin_female here (copy-paste error).
summary(MDRS_agri_female)
confint(MDRS_agri_female)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic_female <- lm(MDRS~ethnic+symptom_years, data=q3_data_female)
summary(MDRS_ethnic_female)
confint(MDRS_ethnic_female)
cat("\nMDRS vs. income\n")
MDRS_income_female <- lm(MDRS~income+symptom_years, data=q3_data_female)
summary(MDRS_income_female)
confint(MDRS_income_female)
cat("\nMDRS vs. smoke\n")
MDRS_smoke_female <- lm(MDRS~smoke+symptom_years, data=q3_data_female)
summary(MDRS_smoke_female)
confint(MDRS_smoke_female)
sink()
| /question3.R | no_license | cashuseyepatch/medicalStats | R | false | false | 11,654 | r | ## Question 3.
## Reads in the relevant dataset.
q3_data <- read.csv("question3_data.csv")
## Re-directs output to a file.
sink(file="question3_results_UPDRS_maleandfemale.txt")
## Performs linear regression.
## Output = UPDRS.
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin <- lm(UPDRS~college+agri+statin+gender+ethnic+income+
smoke+symptom_years, data=q3_data)
summary(UPDRS_all_statin)
confint(UPDRS_all_statin)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin <- lm(UPDRS~statin+symptom_years, data=q3_data)
summary(UPDRS_statin)
confint(UPDRS_statin)
cat("\nUPDRS vs. college\n")
UPDRS_college <- lm(UPDRS~college+symptom_years, data=q3_data)
summary(UPDRS_college)
confint(UPDRS_college)
cat("\nUPDRS vs. agri\n")
UPDRS_agri <- lm(UPDRS~agri+symptom_years, data=q3_data)
summary(UPDRS_statin)
confint(UPDRS_statin)
cat("\nUPDRS vs. gender\n")
UPDRS_gender <- lm(UPDRS~gender+symptom_years, data=q3_data)
summary(UPDRS_gender)
confint(UPDRS_gender)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic <- lm(UPDRS~ethnic+symptom_years, data=q3_data)
summary(UPDRS_ethnic)
confint(UPDRS_ethnic)
cat("\nUPDRS vs. income\n")
UPDRS_income <- lm(UPDRS~income+symptom_years, data=q3_data)
summary(UPDRS_income)
confint(UPDRS_income)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke <- lm(UPDRS~smoke+symptom_years, data=q3_data)
summary(UPDRS_smoke)
confint(UPDRS_smoke)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_maleandfemale.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin <- lm(Golbe~college+agri+statin+gender+ethnic+income+
smoke+symptom_years, data=q3_data)
summary(Golbe_all_statin)
confint(Golbe_all_statin)
cat("\nGolbe vs. Statin\n")
Golbe_statin <- lm(Golbe~statin+symptom_years, data=q3_data)
summary(Golbe_statin)
confint(Golbe_statin)
cat("\nGolbe vs. college\n")
Golbe_college <- lm(Golbe~college+symptom_years, data=q3_data)
summary(Golbe_college)
confint(Golbe_college)
cat("\nGolbe vs. agri\n")
Golbe_agri <- lm(Golbe~agri+symptom_years, data=q3_data)
summary(Golbe_statin)
confint(Golbe_statin)
cat("\nGolbe vs. gender\n")
Golbe_gender <- lm(Golbe~gender+symptom_years, data=q3_data)
summary(Golbe_gender)
confint(Golbe_gender)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic <- lm(Golbe~ethnic+symptom_years, data=q3_data)
summary(Golbe_ethnic)
confint(Golbe_ethnic)
cat("\nGolbe vs. income\n")
Golbe_income <- lm(Golbe~income+symptom_years, data=q3_data)
summary(Golbe_income)
confint(Golbe_income)
cat("\nGolbe vs. smoke\n")
Golbe_smoke <- lm(Golbe~smoke+symptom_years, data=q3_data)
summary(Golbe_smoke)
confint(Golbe_smoke)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_maleandfemale.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin <- lm(MDRS~college+agri+statin+gender+ethnic+income+
smoke+symptom_years, data=q3_data)
summary(MDRS_all_statin)
confint(MDRS_all_statin)
cat("\nMDRS vs. Statin\n")
MDRS_statin <- lm(MDRS~statin+symptom_years, data=q3_data)
summary(MDRS_statin)
confint(MDRS_statin)
cat("\nMDRS vs. college\n")
MDRS_college <- lm(MDRS~college+symptom_years, data=q3_data)
summary(MDRS_college)
confint(MDRS_college)
cat("\nMDRS vs. agri\n")
MDRS_agri <- lm(MDRS~agri+symptom_years, data=q3_data)
summary(MDRS_statin)
confint(MDRS_statin)
cat("\nMDRS vs. gender\n")
MDRS_gender <- lm(MDRS~gender+symptom_years, data=q3_data)
summary(MDRS_gender)
confint(MDRS_gender)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic <- lm(MDRS~ethnic+symptom_years, data=q3_data)
summary(MDRS_ethnic)
confint(MDRS_ethnic)
cat("\nMDRS vs. income\n")
MDRS_income <- lm(MDRS~income+symptom_years, data=q3_data)
summary(MDRS_income)
confint(MDRS_income)
cat("\nMDRS vs. smoke\n")
MDRS_smoke <- lm(MDRS~smoke+symptom_years, data=q3_data)
summary(MDRS_smoke)
confint(MDRS_smoke)
sink()
## Separates the males and females.
male <- q3_data$gender=="Male"
q3_data_male <- q3_data[male,]
q3_data_female <- q3_data[!male,]
## Repeats above for Males.
## Output = UPDRS.
sink(file="question3_results_UPDRS_male.txt")
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin_male <- lm(UPDRS~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_male)
summary(UPDRS_all_statin_male)
confint(UPDRS_all_statin_male)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin_male <- lm(UPDRS~statin+symptom_years, data=q3_data_male)
summary(UPDRS_statin_male)
confint(UPDRS_statin_male)
cat("\nUPDRS vs. college\n")
UPDRS_college_male <- lm(UPDRS~college+symptom_years, data=q3_data_male)
summary(UPDRS_college_male)
confint(UPDRS_college_male)
cat("\nUPDRS vs. agri\n")
UPDRS_agri_male <- lm(UPDRS~agri+symptom_years, data=q3_data_male)
summary(UPDRS_statin_male)
confint(UPDRS_statin_male)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic_male <- lm(UPDRS~ethnic+symptom_years, data=q3_data_male)
summary(UPDRS_ethnic_male)
confint(UPDRS_ethnic_male)
cat("\nUPDRS vs. income\n")
UPDRS_income_male <- lm(UPDRS~income+symptom_years, data=q3_data_male)
summary(UPDRS_income_male)
confint(UPDRS_income_male)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke_male <- lm(UPDRS~smoke+symptom_years, data=q3_data_male)
summary(UPDRS_smoke_male)
confint(UPDRS_smoke_male)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_male.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin_male <- lm(Golbe~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_male)
summary(Golbe_all_statin_male)
confint(Golbe_all_statin_male)
cat("\nGolbe vs. Statin\n")
Golbe_statin_male <- lm(Golbe~statin+symptom_years, data=q3_data_male)
summary(Golbe_statin_male)
confint(Golbe_statin_male)
cat("\nGolbe vs. college\n")
Golbe_college_male <- lm(Golbe~college+symptom_years, data=q3_data_male)
summary(Golbe_college_male)
confint(Golbe_college_male)
cat("\nGolbe vs. agri\n")
Golbe_agri_male <- lm(Golbe~agri+symptom_years, data=q3_data_male)
summary(Golbe_statin_male)
confint(Golbe_statin_male)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic_male <- lm(Golbe~ethnic+symptom_years, data=q3_data_male)
summary(Golbe_ethnic_male)
confint(Golbe_ethnic_male)
cat("\nGolbe vs. income\n")
Golbe_income_male <- lm(Golbe~income+symptom_years, data=q3_data_male)
summary(Golbe_income_male)
confint(Golbe_income_male)
cat("\nGolbe vs. smoke\n")
Golbe_smoke_male <- lm(Golbe~smoke+symptom_years, data=q3_data_male)
summary(Golbe_smoke_male)
confint(Golbe_smoke_male)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_male.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin_male <- lm(MDRS~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_male)
summary(MDRS_all_statin_male)
confint(MDRS_all_statin_male)
cat("\nMDRS vs. Statin\n")
MDRS_statin_male <- lm(MDRS~statin+symptom_years, data=q3_data_male)
summary(MDRS_statin_male)
confint(MDRS_statin_male)
cat("\nMDRS vs. college\n")
MDRS_college_male <- lm(MDRS~college+symptom_years, data=q3_data_male)
summary(MDRS_college_male)
confint(MDRS_college_male)
cat("\nMDRS vs. agri\n")
MDRS_agri_male <- lm(MDRS~agri+symptom_years, data=q3_data_male)
summary(MDRS_statin_male)
confint(MDRS_statin_male)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic_male <- lm(MDRS~ethnic+symptom_years, data=q3_data_male)
summary(MDRS_ethnic_male)
confint(MDRS_ethnic_male)
cat("\nMDRS vs. income\n")
MDRS_income_male <- lm(MDRS~income+symptom_years, data=q3_data_male)
summary(MDRS_income_male)
confint(MDRS_income_male)
cat("\nMDRS vs. smoke\n")
MDRS_smoke_male <- lm(MDRS~smoke+symptom_years, data=q3_data_male)
summary(MDRS_smoke_male)
confint(MDRS_smoke_male)
sink()
## Repeats above for Females.
## Output = UPDRS.
sink(file="question3_results_UPDRS_female.txt")
cat("\nUPDRS vs. Statin and All Variables\n")
UPDRS_all_statin_female <- lm(UPDRS~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_female)
summary(UPDRS_all_statin_female)
confint(UPDRS_all_statin_female)
cat("\nUPDRS vs. Statin\n")
UPDRS_statin_female <- lm(UPDRS~statin+symptom_years, data=q3_data_female)
summary(UPDRS_statin_female)
confint(UPDRS_statin_female)
cat("\nUPDRS vs. college\n")
UPDRS_college_female <- lm(UPDRS~college+symptom_years, data=q3_data_female)
summary(UPDRS_college_female)
confint(UPDRS_college_female)
cat("\nUPDRS vs. agri\n")
UPDRS_agri_female <- lm(UPDRS~agri+symptom_years, data=q3_data_female)
summary(UPDRS_statin_female)
confint(UPDRS_statin_female)
cat("\nUPDRS vs. ethnic\n")
UPDRS_ethnic_female <- lm(UPDRS~ethnic+symptom_years, data=q3_data_female)
summary(UPDRS_ethnic_female)
confint(UPDRS_ethnic_female)
cat("\nUPDRS vs. income\n")
UPDRS_income_female <- lm(UPDRS~income+symptom_years, data=q3_data_female)
summary(UPDRS_income_female)
confint(UPDRS_income_female)
cat("\nUPDRS vs. smoke\n")
UPDRS_smoke_female <- lm(UPDRS~smoke+symptom_years, data=q3_data_female)
summary(UPDRS_smoke_female)
confint(UPDRS_smoke_female)
sink()
## Output = Golbe.
sink(file="question3_results_Golbe_female.txt")
cat("\nGolbe vs. Statin and All Variables\n")
Golbe_all_statin_female <- lm(Golbe~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_female)
summary(Golbe_all_statin_female)
confint(Golbe_all_statin_female)
cat("\nGolbe vs. Statin\n")
Golbe_statin_female <- lm(Golbe~statin+symptom_years, data=q3_data_female)
summary(Golbe_statin_female)
confint(Golbe_statin_female)
cat("\nGolbe vs. college\n")
Golbe_college_female <- lm(Golbe~college+symptom_years, data=q3_data_female)
summary(Golbe_college_female)
confint(Golbe_college_female)
cat("\nGolbe vs. agri\n")
Golbe_agri_female <- lm(Golbe~agri+symptom_years, data=q3_data_female)
summary(Golbe_statin_female)
confint(Golbe_statin_female)
cat("\nGolbe vs. ethnic\n")
Golbe_ethnic_female <- lm(Golbe~ethnic+symptom_years, data=q3_data_female)
summary(Golbe_ethnic_female)
confint(Golbe_ethnic_female)
cat("\nGolbe vs. income\n")
Golbe_income_female <- lm(Golbe~income+symptom_years, data=q3_data_female)
summary(Golbe_income_female)
confint(Golbe_income_female)
cat("\nGolbe vs. smoke\n")
Golbe_smoke_female <- lm(Golbe~smoke+symptom_years, data=q3_data_female)
summary(Golbe_smoke_female)
confint(Golbe_smoke_female)
sink()
## Output = MDRS.
sink(file="question3_results_MDRS_female.txt")
cat("\nMDRS vs. Statin and All Variables\n")
MDRS_all_statin_female <- lm(MDRS~college+agri+statin+ethnic+income+
smoke+symptom_years, data=q3_data_female)
summary(MDRS_all_statin_female)
confint(MDRS_all_statin_female)
cat("\nMDRS vs. Statin\n")
MDRS_statin_female <- lm(MDRS~statin+symptom_years, data=q3_data_female)
summary(MDRS_statin_female)
confint(MDRS_statin_female)
cat("\nMDRS vs. college\n")
MDRS_college_female <- lm(MDRS~college+symptom_years, data=q3_data_female)
summary(MDRS_college_female)
confint(MDRS_college_female)
cat("\nMDRS vs. agri\n")
MDRS_agri_female <- lm(MDRS~agri+symptom_years, data=q3_data_female)
summary(MDRS_statin_female)
confint(MDRS_statin_female)
cat("\nMDRS vs. ethnic\n")
MDRS_ethnic_female <- lm(MDRS~ethnic+symptom_years, data=q3_data_female)
summary(MDRS_ethnic_female)
confint(MDRS_ethnic_female)
cat("\nMDRS vs. income\n")
MDRS_income_female <- lm(MDRS~income+symptom_years, data=q3_data_female)
summary(MDRS_income_female)
confint(MDRS_income_female)
cat("\nMDRS vs. smoke\n")
MDRS_smoke_female <- lm(MDRS~smoke+symptom_years, data=q3_data_female)
summary(MDRS_smoke_female)
confint(MDRS_smoke_female)
sink()
|
# Auto-extracted example script for psychometric::CAFAA (compound
# attenuation factor for meta-analytic artifact corrections); each call
# is paired with rhoCA() on the same bundled data set.
library(psychometric)
### Name: CAFAA
### Title: Compound Attenuation Factor for Meta-Analytic Artifact
###   Corrections
### Aliases: CAFAA
### Keywords: univar models
### ** Examples
# From Arthur et al. (bundled data set ABHt32)
data(ABHt32)
CAFAA(ABHt32)
rhoCA(ABHt32)
# From Hunter et al. (bundled data set HSJt35)
data(HSJt35)
CAFAA(HSJt35)
rhoCA(HSJt35)
| /data/genthat_extracted_code/psychometric/examples/CAFAA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 316 | r | library(psychometric)
### Name: CAFAA
### Title: Compound Attenuation Factor for Meta-Analytic Artifact
###   Corrections
### Aliases: CAFAA
### Keywords: univar models
### ** Examples
# Compound attenuation factor (and, presumably, the artifact-corrected
# correlation via rhoCA -- confirm against the psychometric docs) for
# the Arthur et al. example data set.
data(ABHt32)
CAFAA(ABHt32)
rhoCA(ABHt32)
# Same computations for the Hunter et al. example data set.
data(HSJt35)
CAFAA(HSJt35)
rhoCA(HSJt35)
|
# USAGE
# source("storm_data.R")
# read_raw()
# data = read_tmp("data.RData")
require(data.table)
# Read the raw storm data set as a data.frame.
#
# Generalized: the previously hard-coded path is now a parameter whose
# default preserves the old behavior, so existing `read_raw()` calls
# keep working while other files become readable.
#
# @param file_path Path to the CSV file to read.
# @return A data.frame with the raw records.
read_raw <- function(file_path = "data/repdata-data-StormData.csv") {
  read.csv(file_path, sep = ",")
}
# Load a cached object previously stored with saveRDS() under data/tmp/<var>.
# Prints a short status line, then returns the deserialized object.
read_tmp <- function(var) {
  cat("Read from tmp:", var, "\n")
  readRDS(file.path("data", "tmp", var))
}
# Normalise the storm data: tidy the EVTYPE labels and convert the
# PROPDMG/CROPDMG columns into absolute amounts using their exponent codes.
cleanup <- function(data) {
  # Event type: lowercase, strip non-word characters, collapse repeated
  # whitespace, trim both ends, and drop a trailing plural "s".
  ev <- tolower(data$EVTYPE)
  ev <- gsub("\\W", " ", ev, perl = TRUE)
  ev <- gsub("\\s+", " ", ev, perl = TRUE)
  ev <- sub("^\\s+", "", ev, perl = TRUE)
  ev <- sub("\\s+$", "", ev, perl = TRUE)
  ev <- sub("s$", "", ev, perl = TRUE)
  data$EVTYPE <- ev
  # Decode the K/M/B exponent codes into numeric powers of ten.
  data$PROPDMGEXP <- cleanup_exp(data$PROPDMGEXP)
  data$CROPDMGEXP <- cleanup_exp(data$CROPDMGEXP)
  # Damage value = mantissa * 10 ^ exponent.
  data$PROPDMG <- data$PROPDMG * 10^(data$PROPDMGEXP)
  data$CROPDMG <- data$CROPDMG * 10^(data$CROPDMGEXP)
  data
}
# Map storm-data damage exponent codes to numeric powers of ten:
# K/k -> 3, M/m -> 6, B/b -> 9, any remaining non-digit character -> 0.
# Anything that still fails numeric conversion (e.g. empty strings)
# becomes 0 as well.
cleanup_exp <- function(ex) {
  ex <- gsub("K|k", 3, ex, perl = TRUE)
  ex <- gsub("M|m", 6, ex, perl = TRUE)
  ex <- gsub("B|b", 9, ex, perl = TRUE)
  ex <- gsub("\\D", 0, ex, perl = TRUE)
  out <- as.numeric(ex)
  out[is.na(out)] <- 0  # NA from coercion -> exponent 0
  out
}
# Keep rows whose second column (the aggregated damage) is positive,
# sort them by that column in decreasing order, attach a `damage`
# column holding each row's percentage share of the total, and return
# at most `limit` rows.
filter_result <- function(agg, limit = 10) {
  positive <- agg[2] > 0
  agg <- agg[positive, ]
  agg <- agg[order(-agg[[2]]), ]
  total <- sum(agg[[2]])
  agg["damage"] <- agg[2] / total * 100
  head(agg, limit)
}
| /storm_data.R | no_license | natapone/RepData_PeerAssessment2 | R | false | false | 1,721 | r | # USAGE
# source("storm_data.R")
# read_raw()
# data = read_tmp("data.RData")
require(data.table)
# Read the raw storm data set as a data.frame.
#
# Generalized: the previously hard-coded path is now a parameter whose
# default preserves the old behavior, so existing `read_raw()` calls
# keep working while other files become readable.
#
# @param file_path Path to the CSV file to read.
# @return A data.frame with the raw records.
read_raw <- function(file_path = "data/repdata-data-StormData.csv") {
  read.csv(file_path, sep = ",")
}
# Load a cached object previously stored with saveRDS() under data/tmp/<var>.
# Prints a short status line, then returns the deserialized object.
read_tmp <- function(var) {
  cat("Read from tmp:", var, "\n")
  readRDS(file.path("data", "tmp", var))
}
# Normalise the storm data: tidy the EVTYPE labels and convert the
# PROPDMG/CROPDMG columns into absolute amounts using their exponent codes.
cleanup <- function(data) {
  # Event type: lowercase, strip non-word characters, collapse repeated
  # whitespace, trim both ends, and drop a trailing plural "s".
  ev <- tolower(data$EVTYPE)
  ev <- gsub("\\W", " ", ev, perl = TRUE)
  ev <- gsub("\\s+", " ", ev, perl = TRUE)
  ev <- sub("^\\s+", "", ev, perl = TRUE)
  ev <- sub("\\s+$", "", ev, perl = TRUE)
  ev <- sub("s$", "", ev, perl = TRUE)
  data$EVTYPE <- ev
  # Decode the K/M/B exponent codes into numeric powers of ten.
  data$PROPDMGEXP <- cleanup_exp(data$PROPDMGEXP)
  data$CROPDMGEXP <- cleanup_exp(data$CROPDMGEXP)
  # Damage value = mantissa * 10 ^ exponent.
  data$PROPDMG <- data$PROPDMG * 10^(data$PROPDMGEXP)
  data$CROPDMG <- data$CROPDMG * 10^(data$CROPDMGEXP)
  data
}
# Map storm-data damage exponent codes to numeric powers of ten:
# K/k -> 3, M/m -> 6, B/b -> 9, any remaining non-digit character -> 0.
# Anything that still fails numeric conversion (e.g. empty strings)
# becomes 0 as well.
cleanup_exp <- function(ex) {
  ex <- gsub("K|k", 3, ex, perl = TRUE)
  ex <- gsub("M|m", 6, ex, perl = TRUE)
  ex <- gsub("B|b", 9, ex, perl = TRUE)
  ex <- gsub("\\D", 0, ex, perl = TRUE)
  out <- as.numeric(ex)
  out[is.na(out)] <- 0  # NA from coercion -> exponent 0
  out
}
# Keep rows whose second column (the aggregated damage) is positive,
# sort them by that column in decreasing order, attach a `damage`
# column holding each row's percentage share of the total, and return
# at most `limit` rows.
filter_result <- function(agg, limit = 10) {
  positive <- agg[2] > 0
  agg <- agg[positive, ]
  agg <- agg[order(-agg[[2]]), ]
  total <- sum(agg[[2]])
  agg["damage"] <- agg[2] / total * 100
  head(agg, limit)
}
|
## Authors: SO (with edits from TH)
## Last Updated: 2017 July (adapted for mounting on my local machine - sorry!)
## Notes: Run this *after* doing motion correction A03 scripts (bc need mcplots.1D files)
##        but *before* running nuisance regression (A06_RunNuisanceRegression.bash)
##
## Refactor: the original script repeated the same read/trim/square/write
## loop eight times (once per motion parameter mcplots1..6 and once each
## for the CSF and WM traces).  The loops are collapsed into two helpers;
## the files written (names and contents) are unchanged.

subjects_root <- "/DATA/TIGER_RSFC_Analysis/Subjects"

print("*************************************************")
print("************** assuming subject list is in a text file *********************")
print("*************************************************")
subDirs <- read.table("/DATA/TIGER_RSFC_Analysis/MotionOutliers/Mar_26_2020.txt",
                      stringsAsFactors = FALSE, header = FALSE)
numsub <- dim(subDirs)[1]

# Rows kept for the regressors: volumes 2:234 for the "current" series and
# 1:233 for the one-volume-lagged series (233 values each).
keep_rows <- 2:234
lag_rows  <- 1:233

# Write a 1D regressor file: plain values, no row/column names, overwrite.
write_1d <- function(values, path) {
  write.table(values, file = path, append = FALSE,
              row.names = FALSE, col.names = FALSE)
}

# For one subject and motion parameter k (1..6): read Analysis/mcplots<k>.1D
# and write the four derived regressors (trimmed, squared, lag-1, and
# lag-1 squared), exactly as the original per-section loops did.
process_mcplots <- function(sub, k) {
  fileName <- paste0(subjects_root, "/", sub, "/Analysis/mcplots", k, ".1D")
  cat(fileName, "\n")
  print(file.exists(fileName))
  if (file.exists(fileName)) {
    print(fileName)
    x <- read.delim(fileName, header = FALSE)
    print(paste("nrow(x) =", nrow(x)))
    y <- x * x  # element-wise square of the motion trace
    print(paste("nrow(y)= ", nrow(y)))
    trimmed   <- x[keep_rows, ]
    squared   <- y[keep_rows, ]
    lagged    <- x[lag_rows, ]
    lagged_sq <- y[lag_rows, ]
    print(paste("lengths:", length(trimmed), length(squared),
                length(lagged), length(lagged_sq)))
    out_stem <- paste0(subjects_root, "/", sub, "/Analysis/mcplots", k)
    write_1d(trimmed,   paste0(out_stem, "_233.1D"))
    write_1d(squared,   paste0(out_stem, "_233sq.1D"))
    write_1d(lagged,    paste0(out_stem, "_233t-1.1D"))
    write_1d(lagged_sq, paste0(out_stem, "_233t-1_sq.1D"))
  }
}

# For one subject: trim the tissue trace (<sub>_csf.1D or <sub>_wm.1D) to
# rows 2:234 and write it as <sub>_<tissue>_233.1D.
process_tissue <- function(sub, tissue) {
  fileName <- paste0(subjects_root, "/", sub, "/Analysis/", sub, "_", tissue, ".1D")
  cat(fileName, "\n")
  print(file.exists(fileName))
  if (file.exists(fileName)) {
    print(fileName)
    x <- read.delim(fileName, header = FALSE)
    print(paste("nrow(x) =", nrow(x)))
    trimmed <- x[keep_rows, ]
    print(paste("length =", length(trimmed)))
    write_1d(trimmed, paste0(subjects_root, "/", sub, "/Analysis/",
                             sub, "_", tissue, "_233.1D"))
  }
}

# Sections 1-6: the six motion-correction parameter files.
for (k in 1:6) {
  print("*************************************************")
  print(paste0("**************", k, ": mcplots", k, ".1D*********************"))
  print("*************************************************")
  for (s in seq_len(numsub)) {
    print("****************************")
    print(subDirs$V1[s])
    process_mcplots(subDirs$V1[s], k)
  }
}

# Section 7: CSF trace.
print("*************************************************")
print("**************7: XXX_csf.1D*********************")
print("*************************************************")
for (s in seq_len(numsub)) {
  print("****************************")
  process_tissue(subDirs$V1[s], "csf")
}

# Section 8: WM trace.
print("*************************************************")
print("**************8: XXX_wm.1D***********************")
print("*************************************************")
for (s in seq_len(numsub)) {
  print("****************************")
  process_tissue(subDirs$V1[s], "wm")
}
| /TIGER_A05b_CreateRegressors.R | no_license | tiffanycheingho/TIGER | R | false | false | 12,786 | r |
## Authors: SO (with edits from TH)
## Last Updated: 2017 July (adapted for mounting on my local machine - sorry!)
## Notes: Run this *after* doing motion correction A03 scripts (bc need mcplots.1D files)
##        but *before* running nuisance regression (A06_RunNuisanceRegression.bash)
##
## Refactor: the original script repeated the same read/trim/square/write
## loop eight times (once per motion parameter mcplots1..6 and once each
## for the CSF and WM traces).  The loops are collapsed into two helpers;
## the files written (names and contents) are unchanged.

subjects_root <- "/DATA/TIGER_RSFC_Analysis/Subjects"

print("*************************************************")
print("************** assuming subject list is in a text file *********************")
print("*************************************************")
subDirs <- read.table("/DATA/TIGER_RSFC_Analysis/MotionOutliers/Mar_26_2020.txt",
                      stringsAsFactors = FALSE, header = FALSE)
numsub <- dim(subDirs)[1]

# Rows kept for the regressors: volumes 2:234 for the "current" series and
# 1:233 for the one-volume-lagged series (233 values each).
keep_rows <- 2:234
lag_rows  <- 1:233

# Write a 1D regressor file: plain values, no row/column names, overwrite.
write_1d <- function(values, path) {
  write.table(values, file = path, append = FALSE,
              row.names = FALSE, col.names = FALSE)
}

# For one subject and motion parameter k (1..6): read Analysis/mcplots<k>.1D
# and write the four derived regressors (trimmed, squared, lag-1, and
# lag-1 squared), exactly as the original per-section loops did.
process_mcplots <- function(sub, k) {
  fileName <- paste0(subjects_root, "/", sub, "/Analysis/mcplots", k, ".1D")
  cat(fileName, "\n")
  print(file.exists(fileName))
  if (file.exists(fileName)) {
    print(fileName)
    x <- read.delim(fileName, header = FALSE)
    print(paste("nrow(x) =", nrow(x)))
    y <- x * x  # element-wise square of the motion trace
    print(paste("nrow(y)= ", nrow(y)))
    trimmed   <- x[keep_rows, ]
    squared   <- y[keep_rows, ]
    lagged    <- x[lag_rows, ]
    lagged_sq <- y[lag_rows, ]
    print(paste("lengths:", length(trimmed), length(squared),
                length(lagged), length(lagged_sq)))
    out_stem <- paste0(subjects_root, "/", sub, "/Analysis/mcplots", k)
    write_1d(trimmed,   paste0(out_stem, "_233.1D"))
    write_1d(squared,   paste0(out_stem, "_233sq.1D"))
    write_1d(lagged,    paste0(out_stem, "_233t-1.1D"))
    write_1d(lagged_sq, paste0(out_stem, "_233t-1_sq.1D"))
  }
}

# For one subject: trim the tissue trace (<sub>_csf.1D or <sub>_wm.1D) to
# rows 2:234 and write it as <sub>_<tissue>_233.1D.
process_tissue <- function(sub, tissue) {
  fileName <- paste0(subjects_root, "/", sub, "/Analysis/", sub, "_", tissue, ".1D")
  cat(fileName, "\n")
  print(file.exists(fileName))
  if (file.exists(fileName)) {
    print(fileName)
    x <- read.delim(fileName, header = FALSE)
    print(paste("nrow(x) =", nrow(x)))
    trimmed <- x[keep_rows, ]
    print(paste("length =", length(trimmed)))
    write_1d(trimmed, paste0(subjects_root, "/", sub, "/Analysis/",
                             sub, "_", tissue, "_233.1D"))
  }
}

# Sections 1-6: the six motion-correction parameter files.
for (k in 1:6) {
  print("*************************************************")
  print(paste0("**************", k, ": mcplots", k, ".1D*********************"))
  print("*************************************************")
  for (s in seq_len(numsub)) {
    print("****************************")
    print(subDirs$V1[s])
    process_mcplots(subDirs$V1[s], k)
  }
}

# Section 7: CSF trace.
print("*************************************************")
print("**************7: XXX_csf.1D*********************")
print("*************************************************")
for (s in seq_len(numsub)) {
  print("****************************")
  process_tissue(subDirs$V1[s], "csf")
}

# Section 8: WM trace.
print("*************************************************")
print("**************8: XXX_wm.1D***********************")
print("*************************************************")
for (s in seq_len(numsub)) {
  print("****************************")
  process_tissue(subDirs$V1[s], "wm")
}
|
##################################################################################################
##################################################################################################
library(ape)
library(Rcpp)
sourceCpp("beta_binom_cov.cpp")
source('beta_binom_cov.R')
source('legend.R')
##################################################################################################
##################################################################################################
####----Core function----####
BGCR = function(PrJAP = 0.5,
sum_PrMAP = "default",
threshold = 0.005,
gamma = 0,
tree,
otu_group_1,
otu_group_2,
X_group_1 = "default",
X_group_2 = "default",
nu = 10 ^ (seq(-1, 4)),
sigma = 4,
verbose = FALSE){
################################################################################################
####----Functions deal with the index----####
####
get_child_index = function(x, map_for_child, which = 0){
num_leaf = dim(map_for_child)[1]/2 + 1
if(x <= num_leaf){
print("Warning: node has no child.")
return(NA)
}else if(x > 2 * num_leaf - 1){
print("Error: node index out of bound.")
return(NA)
}else{
if(which == 0){
return(map_for_child[(x - num_leaf) * 2, 2])
}else if(which == 1){
return(map_for_child[(x - num_leaf) * 2 - 1, 2])
}else{
print("Error: 'which' should be 0/1.")
return(NULL)
}
}
}
####
get_parent_index = function(x, map_for_parent){
num_leaf = dim(map_for_parent)[1]/2 + 1
if(0 <= x && x <= num_leaf){
return(map_for_parent[x, 1])
}else if(num_leaf + 1 < x && x< 2 * num_leaf){
return(map_for_parent[x - 1, 1])
}else if(x == num_leaf + 1){
print("Warning: root node has no parent.")
return(NA)
}else{
print("Error: index out of bound.")
return(NA)
}
}
####
get_which_child = function(x, map_for_parent, map_for_child, root = 0){
num_leaf = dim(map_for_parent)[1]/2 + 1
if(x == num_leaf + 1){
return(root)
}else{
parent = get_parent_index(x, map_for_parent)
if(x == get_child_index(parent, map_for_child, which = 0)) return(0)
else return(1)
}
}
################################################################################################
####----Functions deal with the data/BF----####
####
get_data = function(tree, otu){
data = matrix(0, nrow = num_leaf + num_node, ncol = dim(otu)[2])
label = tree$tip.label
for(i in 1:num_leaf){
data[i, ] = otu[label[i], ]
}
for(i in end : start){
child_left = get_child_index(i, map_for_child, 0)
child_right = get_child_index(i, map_for_child, 1)
data[i, ] = data[child_left, ] + data[child_right, ]
}
return(data)
}
####
get_cov = function(X_group_1 = "default", X_group_2 = "default", otu_group_1, otu_group_2){
if(X_group_1 == "default" && X_group_2 == "default"){
X_null = matrix(1, nrow = dim(otu_group_1)[2] + dim(otu_group_2)[2], ncol = 1)
X_alt = cbind(X_null, c(rep(0, dim(otu_group_1)[2]), rep(1, dim(otu_group_2)[2])))
return(list(X_null = X_null, X_alt = X_alt))
}else if(X_group_1 == "default" && X_group_2 != "default"){
print("Error: covariates for the two group should have same columns.")
return(0)
}else if(X_group_1 != "default" && X_group_2 == "default"){
print("Error: covariates for the two group should have same columns.")
return(0)
}else if(dim(X_group_1)[2] != dim(X_group_2)[2]){
print("Error: covariates for the two group should have same columns.")
return(0)
}else{
X = rbind(X_group_1, X_group_2)
X_null = cbind(rep(1, dim(X)[1]), X)
X_alt = cbind(X_null, c(rep(0, dim(X_group_1)[1]), rep(1, dim(X_group_2)[1])))
return(list(X_null = X_null, X_alt = X_alt))
}
}
####
get_BF = function(otu_group_1, otu_group_2, X_group_1, X_group_2, tree, nu = 10 ^ (seq(-1, 4)), sigma = 4, verbose){
data_group_1 = get_data(tree, otu_group_1)
data_group_2 = get_data(tree, otu_group_2)
BF = rep(0, num_leaf + num_node)
cov = get_cov(X_group_1, X_group_2, otu_group_1, otu_group_2)
X_null = cov$X_null
X_alt = cov$X_alt
for(i in start : end){
left = get_child_index(i, map_for_child, 0)
right = get_child_index(i, map_for_child, 1)
n1 = cbind(data_group_1[left, ], data_group_1[right, ])
n2 = cbind(data_group_2[left, ], data_group_2[right, ])
BF[i] = beta_binom_cov(n1, n2, X_null, X_alt, nu, sigma)
if(verbose == TRUE){
print(i)
}
}
return(BF)
}
################################################################################################
####----Functions deal with the prior----####
####
get_prior = function(alpha, beta, gamma){
prior = matrix(0, ncol = 2, nrow = 4)
prior[1, 2] = exp(alpha)/(1 + exp(alpha))
prior[2, 2] = exp(alpha + beta)/(1 + exp(alpha + beta))
prior[3, 2] = prior[2, 2]
prior[4, 2] = exp(alpha + beta + gamma)/(1 + exp(alpha + beta + gamma))
prior[ , 1] = 1 - prior[ , 2]
return(prior)
}
####
  # Upward pass over the tree for the hidden-state prior.  For each
  # internal node i, prior_updown[i, s, ] holds the prior of the four
  # child-state configurations (1,1),(1,2),(2,1),(2,2) conditional on
  # node i being in state s; slot end+1 stores the root distribution.
  # clique_margin accumulates the joint (node state x configuration) and
  # node_margin the marginal state distribution of each node.
  # Relies on num_leaf/num_node/start/end/map_for_child from the
  # enclosing BGCR environment.  The loop runs from `end` down to
  # `start`, which appears to visit children before parents -- confirm
  # against where start/end are defined.
  #
  # @param alpha,beta,gamma logistic prior parameters (see get_prior)
  # @param only_prior if TRUE return just prior_updown, else also the
  #   clique and node marginals
  # @param root 0/1 flag selecting the ordering convention of the root
  #   entries in prior_updown[end + 1, , ]
  get_prior_updown = function(alpha, beta, gamma, only_prior = TRUE, root = 0){
    prior_updown = array(0, dim = c(num_node + num_leaf + 1, 2, 4))
    prior = get_prior(alpha, beta, gamma)
    clique_margin = matrix(0, nrow = num_node + num_leaf, ncol = 8)
    node_margin = matrix(0, nrow = num_node + num_leaf, ncol = 2)
    # Leaves: seed the clique marginal from the prior's first row, then
    # clamp the leaf's own state distribution to (1, 0).
    for(i in 1:num_leaf){
      node_margin[i, ] = prior[1, ]
      clique_margin[i, ] = c(rep(node_margin[i, 1], 4), rep(node_margin[i, 2], 4))/4
      node_margin[i, ] = c(1, 0)
    }
    for(i in end : start){
      left = get_child_index(i, map_for_child, 0)
      right = get_child_index(i, map_for_child, 1)
      left_margin = node_margin[left, ]
      right_margin = node_margin[right, ]
      # Weight each child-state configuration row of the prior by the
      # children's current state marginals.
      margin = prior
      margin[1, ] = margin[1, ] * left_margin[1] * right_margin[1]
      margin[2, ] = margin[2, ] * left_margin[1] * right_margin[2]
      margin[3, ] = margin[3, ] * left_margin[2] * right_margin[1]
      margin[4, ] = margin[4, ] * left_margin[2] * right_margin[2]
      clique_margin[i, 1:4] = margin[, 1]
      clique_margin[i, 5:8] = margin[, 2]
      node_margin[i, ] = c(sum( clique_margin[i, 1:4]), sum(clique_margin[i, 5:8]))
      # Normalise to configuration probabilities conditional on state.
      prior_updown[i, 1, ] = clique_margin[i, 1:4]/node_margin[i, 1]
      prior_updown[i, 2, ] = clique_margin[i, 5:8]/node_margin[i, 2]
    }
    # Root slot: the ordering of the two state marginals depends on the
    # caller's root convention flag; both state slices are identical.
    r = num_leaf + 1
    if(root == 0){
      prior_updown[end + 1, 1, ] = c(node_margin[r, 1], node_margin[r, 1], node_margin[r, 2], node_margin[r, 2])/2
    }else{
      prior_updown[end + 1, 1, ] = c(node_margin[r, 1], node_margin[r, 2], node_margin[r, 1], node_margin[r, 2])/2
    }
    prior_updown[end + 1, 2, ] = prior_updown[end + 1, 1, ]
    if(only_prior){
      return(prior_updown)
    }else{
      return(list(prior_updown = prior_updown, clique_margin = clique_margin, node_margin = node_margin) )
    }
  }
####
####
# Prior summaries implied by (alpha, beta, gamma):
#   pr_jap      - prior probability that at least one node is active
#                 (joint alternative probability), i.e. 1 - P(all null);
#   pr_sum_pmap - prior expected number of active nodes.
# Relies on num_leaf and num_node from the enclosing environment.
# FIX: the original loop also looked up each node's children via
# get_child_index() but never used them; that dead work is removed.
compute_PrJAP = function(alpha, beta, gamma = 0, root = 0){
  res = get_prior_updown(alpha, beta, gamma, FALSE, root)
  prior_updown = res$prior_updown
  node_margin = res$node_margin
  # P(all null) = root null marginal times every internal clique's
  # all-null conditional probability
  internal = (num_leaf + 1):(num_leaf + num_node)
  pr_jap = node_margin[num_leaf + 1, 1] * prod(prior_updown[internal, 1, 1])
  pr_sum_pmap = sum(node_margin[ , 2])
  return(list(pr_jap = 1 - pr_jap, pr_sum_pmap = pr_sum_pmap))
}
####
####
# Calibrate the prior hyper-parameters by bisection:
#   1. with beta = 0, find alpha matching the target prior joint
#      alternative probability PrJAP;
#   2. holding alpha fixed, find beta matching the target prior expected
#      number of active nodes sum_PrMAP.
# The bracket updates assume the calibrated quantity is nondecreasing in
# the parameter being searched.  Returns list(alpha, beta).
# BUG FIX: the iteration counter is reset between the two searches;
# previously `iter` carried over from stage 1, so stage 2 could run few
# (or zero) refinement steps when stage 1 used up the budget.
get_alpha_beta = function(PrJAP = 0.5, sum_PrMAP = 2, threshold = 0.005, max_iter = 50){
  # --- stage 1: bisect alpha on [-20, 20] against pr_jap ---
  alpha_b = -20
  alpha_t = 20
  alpha_mid = (alpha_b + alpha_t)/2
  beta = 0
  prjap_mid = compute_PrJAP(alpha_mid, beta, gamma = 0)$pr_jap
  diff = abs(prjap_mid - PrJAP)
  iter = 0
  while(iter <= max_iter && diff >= threshold){
    if(prjap_mid <= PrJAP){
      alpha_b = alpha_mid
    }else{
      alpha_t = alpha_mid
    }
    alpha_mid = (alpha_b + alpha_t)/2
    prjap_mid = compute_PrJAP(alpha_mid, beta, gamma = 0)$pr_jap
    diff = abs(prjap_mid - PrJAP)
    iter = iter + 1
  }
  alpha = alpha_mid
  # --- stage 2: bisect beta on [-20, 20] against pr_sum_pmap ---
  iter = 0
  beta_b = -20
  beta_t = 20
  beta_mid = (beta_b + beta_t)/2
  pr_sum_pmap_mid = compute_PrJAP(alpha, beta_mid, gamma = 0)$pr_sum_pmap
  diff = abs(pr_sum_pmap_mid - sum_PrMAP)
  while(iter <= max_iter && diff >= threshold){
    if(pr_sum_pmap_mid <= sum_PrMAP){
      beta_b = beta_mid
    }else{
      beta_t = beta_mid
    }
    beta_mid = (beta_b + beta_t)/2
    pr_sum_pmap_mid = compute_PrJAP(alpha, beta_mid, gamma = 0)$pr_sum_pmap
    diff = abs(pr_sum_pmap_mid - sum_PrMAP)
    iter = iter + 1
  }
  return(list(alpha = alpha, beta = beta_mid))
}
##################################################################################################
####----Functions for the posterior----####
####
####
# Expand each node's 2x4 up-down prior table into an 8x8 clique transition
# matrix.  Clique states 1:4 correspond to the null parent state and 5:8
# to the alternative (compute_phi weights states 5:8 by the node's Bayes
# factor).  The row pattern depends on whether node i is the which-0 or
# which-1 child of its parent; slot end+1 holds the root clique.
# Relies on num_node, num_leaf, start, end, map_for_parent and
# map_for_child from the enclosing environment.
get_xi = function(prior_updown, root = 0){
xi = array(0, dim = c(num_node + num_leaf + 1, 8, 8))
for(i in start : end){
updown_node = prior_updown[i, , ]
updown_clique = matrix(0, nrow = 8, ncol = 8)
# the row selection aligns this node's state with the matching half of
# the parent clique, differing by child position
if(get_which_child(i, map_for_parent, map_for_child, root) == 0){
updown_clique[c(1, 2, 5, 6), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(3, 4, 7, 8), 5:8] = rep(updown_node[2, ], each = 4)
}else{
updown_clique[c(1, 3, 5, 7), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(2, 4, 6, 8), 5:8] = rep(updown_node[2, ], each = 4)
}
xi[i, , ] = updown_clique
}
# root clique: same expansion, always laid out in the which-0 pattern
i = end + 1
updown_node = prior_updown[i, , ]
updown_clique = matrix(0, nrow = 8, ncol = 8)
updown_clique[c(1, 2, 5, 6), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(3, 4, 7, 8), 5:8] = rep(updown_node[2, ], each = 4)
xi[i, , ] = updown_clique
return(xi)
}
####
####
# Upward message passing: phi[i, ] holds, for each of node i's 8 clique
# states, the accumulated likelihood of the data below node i.  States 5:8
# carry the node's Bayes factor (alternative model); states 1:4 the null.
# Leaves keep phi = 1; slot end+1 is the root message.
# Relies on num_leaf, num_node, start, end and map_for_child from the
# enclosing environment.
compute_phi = function(xi, BF){
phi = matrix(1, ncol = 8, nrow = num_leaf + num_node + 1)
# end : start is a post-order sweep, so children messages exist before
# the parent combines them
for(i in end : start){
mar_likelihood = c(rep(1, 4), rep(BF[i], 4))
left = get_child_index(i, map_for_child, which = 0)
right = get_child_index(i, map_for_child, which = 1)
xi_A = xi[i, , ]
phi[i, ] = xi_A %*% diag(phi[left, ] * phi[right, ]) %*% mar_likelihood
}
# root: its only "child" is the top node (num_leaf + 1) and it carries no
# Bayes factor of its own
i = end + 1
xi_A = xi[i, , ]
mar_likelihood = rep(1, 8)
left = num_leaf + 1
phi[i, ] = xi_A %*% diag(phi[left, ] * rep(1, 8)) %*% mar_likelihood
return(phi)
}
####
####
# Message-passing posterior: combines the prior up-down tables with the
# per-node Bayes factors, normalises each clique, and collapses the 8x8
# clique tables back to the 2x4 up-down layout.
# Relies on num_node, num_leaf, start, end and map_for_child from the
# enclosing environment.
# BUG FIX: `root` is now forwarded to get_xi(); it was hard-coded to 0,
# silently ignoring the caller's argument.
compute_posterior = function(prior_updown, BF, root = 0){
  xi = get_xi(prior_updown, root = root)
  phi = compute_phi(xi, BF)
  xi_tilde = array(0, dim = c(num_node + num_leaf + 1, 8, 8))
  for(i in start : end){
    left = get_child_index(i, map_for_child, which = 0)
    right = get_child_index(i, map_for_child, which = 1)
    # states 5:8 correspond to the alternative, weighted by the node's BF
    mar_likelihood = c(rep(1, 4), rep(BF[i], 4))
    xi_tilde[i, , ] = diag(1/phi[i, ]) %*% xi[i, , ] %*% diag(mar_likelihood) %*% diag(phi[left, ] * phi[right, ])
  }
  i = end + 1
  xi_tilde[i, , ] = diag(1/phi[i, ]) %*% xi[i, , ] %*% diag(phi[num_leaf + 1, ])
  post_updown = array(0, dim = c(num_node + num_leaf + 1, 2, 4))
  for(i in start : (end + 1)){
    post_updown[i, 1, ] = xi_tilde[i, 1, 1:4]
    post_updown[i, 2, ] = xi_tilde[i, 8, 5:8]
  }
  return(post_updown)
}
####
####
# Posterior joint alternative probability: one minus the posterior
# probability that every node is in the null state.
# Relies on start and end from the enclosing environment.
compute_PJAP = function(post_updown){
  all_null = prod(post_updown[start : end, 1, 1]) * post_updown[end + 1, 1, 1] * 2
  1 - all_null
}
####
####
# Posterior marginal alternative probability (PMAP) for every node,
# computed top-down from the root using the posterior up-down tables.
# `already` marks nodes whose PMAP was filled in as the sibling of an
# earlier node, so each clique is only expanded once.  Leaves keep 0.
# Relies on num_leaf, num_node, start, end, map_for_parent and
# map_for_child from the enclosing environment.
compute_PMAP = function(post_updown){
PMAP = rep(0, num_leaf + num_node)
already = rep(0, num_leaf + num_node)
# root marginal comes from the root clique; the factor 2 undoes the 1/2
# split used when the root clique was built in get_prior_updown
PMAP[num_leaf + 1] = post_updown[end + 1, 2, 4] * 2
already[num_leaf + 1] = 1
for(i in (start + 1):end){
if(already[i] == 0){
clique_margin = matrix(0, ncol = 4, nrow = 2)
parent = get_parent_index(i, map_for_parent)
which = get_which_child(i, map_for_parent, map_for_child, root = 0)
# joint of (parent state, child-state configuration), with the parent's
# state weighted by its already-computed PMAP
clique_margin[1, ] = post_updown[parent, 1, ] * (1 - PMAP[parent])
clique_margin[2, ] = post_updown[parent, 2, ] * PMAP[parent]
# j is i's sibling; both marginals fall out of the same clique table
j = get_child_index(parent, map_for_child, 1 - which)
if(which == 0){
PMAP[i] = sum(clique_margin[ , 3:4])
if(j > num_leaf) PMAP[j] = sum(clique_margin[ , c(2, 4)])
already[i] = already[j] = 1
}else{
PMAP[i] = sum(clique_margin[ , c(2, 4)])
if(j > num_leaf) PMAP[j] = sum(clique_margin[ , 3:4])
already[i] = already[j] = 1
}
}
}
return(PMAP)
}
####
####
# Marginal likelihood of the data as a function of beta alone, used by
# optimize() in the main body for an empirical-Bayes choice of beta.
# alpha, gamma and BF are read from the enclosing environment; they are
# assigned in the main BGCR body before this is called.
compute_ml = function(beta){
prior_updown = get_prior_updown(alpha, beta, gamma, only_prior = TRUE, root = 0)
xi = get_xi(prior_updown)
phi = compute_phi(xi, BF)
# root message, first clique state = overall marginal likelihood
ml = phi[end + 1, 1]
return(ml)
}
##################################################################################################
####----necessary variables----####
tree = reorder(tree, order = "cladewise")
num_node = tree$Nnode
num_leaf = tree$Nnode + 1
start = num_leaf + 1
end = num_leaf + num_node
tree_postorder = reorder(tree, order = "postorder")
map_for_child = tree_postorder$edge[dim(tree_postorder$edge)[1]:1, ] ## map to find child index
map_for_parent = map_for_child[order(map_for_child[,2]), ] ## map to find parent index
####----get BF----####
BF = get_BF(otu_group_1, otu_group_2, X_group_1, X_group_2, tree, nu, sigma, verbose)
# BUG FIX: was `BF[BF == "NaN"] = 1`, which relied on implicit numeric ->
# character coercion; test for NaN directly.
BF[is.nan(BF)] = 1
####----get parameters----####
# identical() avoids coercing a numeric sum_PrMAP to character for the comparison
if(identical(sum_PrMAP, "default")){
# empirical-Bayes beta: fix alpha from the target PrJAP, then maximise the
# marginal likelihood over a data-driven bracket [0, beta_max]
parameter = get_alpha_beta(PrJAP, 0, threshold)
alpha = parameter$alpha
PrJAP_temp = compute_PrJAP(alpha, -20)
beta_min = 0
beta_max = get_alpha_beta(PrJAP_temp$pr_jap, 2)$beta
beta = optimize(compute_ml, lower = beta_min, upper = beta_max, maximum = TRUE)$maximum
}else{
parameter = get_alpha_beta(PrJAP, sum_PrMAP, threshold)
alpha = parameter$alpha
beta = parameter$beta
}
# posterior under the calibrated prior (previously duplicated in both branches)
prior_updown = get_prior_updown(alpha, beta, gamma, only_prior = TRUE, root = 0)
post_updown = compute_posterior(prior_updown, BF, root = 0)
PJAP = compute_PJAP(post_updown)
PMAP = compute_PMAP(post_updown)
####----get posterior----####
# reference posterior with beta = 0 (no coupling between parent and child nodes)
beta_ind = 0
prior_updown_ind = get_prior_updown(alpha, beta_ind, gamma, only_prior = TRUE, root = 0)
post_updown_ind = compute_posterior(prior_updown_ind, BF, root = 0)
PJAP_ind = compute_PJAP(post_updown_ind)
PMAP_ind = compute_PMAP(post_updown_ind)
res = list(tree = tree, PJAP = PJAP, PMAP = PMAP, PJAP_ind = PJAP_ind, PMAP_ind = PMAP_ind,
           BF = BF, alpha = alpha, beta = beta)
class(res) = "BGCR"
return(res)
}
##################################################################################################
##################################################################################################
####---plot the PMAP----####
# Plot the tree with per-node posterior marginal alternative probabilities
# (or BF-derived probabilities) as colour-coded node labels.
#   res     - a BGCR object as returned by BGCR()
#   BF      - if TRUE, show probabilities derived from the raw Bayes factors
#   ind     - if TRUE, show the independent (beta = 0) posterior PMAP_ind
#   subtree - "whole", or an index into ape::subtrees() to plot one clade
plot.BGCR = function(res, BF = FALSE, ind = FALSE, subtree = "whole", cex = 0.3, main = "PMAP"){
  # BUG FIX: validate the input *before* touching res$tree (the check used
  # to run after res$tree was already accessed), and use inherits()
  # instead of comparing class(res) with ==.
  if(!inherits(res, "BGCR")){
    print("ERROR: this function takes a BGCR object.")
    return(0)
  }
  tree = res$tree
  num_node = res$tree$Nnode
  num_leaf = res$tree$Nnode + 1
  start = num_leaf + 1
  end = num_leaf + num_node
  # label internal nodes by their index into the PMAP/BF vectors
  for(i in 1:num_node){
    tree$node.label[i] = i + num_leaf
  }
  if(subtree != "whole"){
    tree = subtrees(tree)[[subtree]]
    start = min(as.numeric(tree$node.label))
    end = max(as.numeric(tree$node.label))
  }
  par(mai = c(0.5, 0.4, 0.6, 1.1))
  col_Pal = colorRampPalette(c('white', 'red'))
  if(BF == FALSE){
    # the two PMAP variants only differ in which vector is displayed
    vals = if(ind) res$PMAP_ind[start : end] else res$PMAP[start : end]
    # append 0 and 1 so the colour scale is anchored to the full [0, 1] range
    node_col = col_Pal(500)[as.numeric(cut(c(vals, 0, 1), breaks = 500))]
    plot(tree, show.tip.label = FALSE, use.edge.length = FALSE, show.node.label = FALSE,
         direction = "downwards", cex = 0.6, main = main)
    nodelabels(text = format(round(vals, digits = 2), nsmall = 2), cex = cex, bg = node_col, frame = "circle")
  }else{
    # convert Bayes factors to probabilities under a flat per-node prior
    # chosen so that P(no node active) = 0.5
    pi0 = exp(log(0.5) / num_node)
    pi1 = 1 - pi0
    priorp = pi1 * res$BF[start : end] / (pi1 * res$BF[start : end] + pi0)
    node_col = col_Pal(500)[as.numeric(cut(priorp, breaks = 500))]
    plot(tree, show.tip.label = FALSE, use.edge.length = FALSE, show.node.label = FALSE,
         direction = "downwards", cex = 0.3, main = "BF")
    nodelabels(text = format(round(priorp, digits = 2), nsmall = 2), cex = 0.3, bg = node_col, frame = "circle")
  }
  legend_col(col_Pal(500), seq(0, 1))
}
##################################################################################################
##################################################################################################
| /BGCR.R | no_license | JialiangMao/BGCR | R | false | false | 19,246 | r |
##################################################################################################
##################################################################################################
library(ape)
library(Rcpp)
sourceCpp("beta_binom_cov.cpp")
source('beta_binom_cov.R')
source('legend.R')
##################################################################################################
##################################################################################################
####----Core function----####
BGCR = function(PrJAP = 0.5,
sum_PrMAP = "default",
threshold = 0.005,
gamma = 0,
tree,
otu_group_1,
otu_group_2,
X_group_1 = "default",
X_group_2 = "default",
nu = 10 ^ (seq(-1, 4)),
sigma = 4,
verbose = FALSE){
################################################################################################
####----Functions deal with the index----####
####
####
# Look up one child of internal node `x` in the reversed post-order edge
# matrix `map_for_child` (column 1 = parent, column 2 = child); internal
# node i occupies rows (i - num_leaf)*2 - 1 and (i - num_leaf)*2.
#   which = 0 -> one child, which = 1 -> the other.
# Returns NA (after printing a message) for leaves, out-of-range nodes,
# or an invalid `which`.
get_child_index = function(x, map_for_child, which = 0){
  num_leaf = dim(map_for_child)[1]/2 + 1
  if(x <= num_leaf){
    print("Warning: node has no child.")
    return(NA)
  }
  if(x > 2 * num_leaf - 1){
    print("Error: node index out of bound.")
    return(NA)
  }
  if(which == 0){
    return(map_for_child[(x - num_leaf) * 2, 2])
  }
  if(which == 1){
    return(map_for_child[(x - num_leaf) * 2 - 1, 2])
  }
  # BUG FIX: this branch used to return NULL while every other failure
  # mode returned NA; NA keeps the error contract consistent.
  print("Error: 'which' should be 0/1.")
  return(NA)
}
####
####
# Look up the parent of node `x` in `map_for_parent` (edges sorted by
# child index; column 1 = parent, column 2 = child).  The root
# (num_leaf + 1) has no row, so nodes above it are shifted down by one.
# Returns NA (after printing a message) for the root or an invalid index.
get_parent_index = function(x, map_for_parent){
  num_leaf = dim(map_for_parent)[1]/2 + 1
  # BUG FIX: the lower bound was `0 <= x`, which let x = 0 index row 0 of
  # the matrix and silently return a zero-length result.
  if(1 <= x && x <= num_leaf){
    return(map_for_parent[x, 1])
  }
  if(num_leaf + 1 < x && x < 2 * num_leaf){
    return(map_for_parent[x - 1, 1])
  }
  if(x == num_leaf + 1){
    print("Warning: root node has no parent.")
    return(NA)
  }
  print("Error: index out of bound.")
  return(NA)
}
####
####
# Return 0 if node `x` is the which-0 child of its parent and 1 otherwise.
# The top node (num_leaf + 1), which has no parent, returns the supplied
# `root` value.
get_which_child = function(x, map_for_parent, map_for_child, root = 0){
  num_leaf = dim(map_for_parent)[1]/2 + 1
  if(x == num_leaf + 1) return(root)
  parent = get_parent_index(x, map_for_parent)
  child_0 = get_child_index(parent, map_for_child, which = 0)
  if(x == child_0) 0 else 1
}
################################################################################################
####----Functions deal with the data/BF----####
####
####
# Map OTU counts onto the tree: rows 1..num_leaf hold each tip's counts
# (matched to `otu` rows by tip label), and each internal node's row is
# filled bottom-up as the sum of its two children's rows.
# Relies on num_leaf, num_node, start, end and map_for_child from the
# enclosing environment.
get_data = function(tree, otu){
data = matrix(0, nrow = num_leaf + num_node, ncol = dim(otu)[2])
label = tree$tip.label
for(i in 1:num_leaf){
data[i, ] = otu[label[i], ]
}
# end : start visits children before their parent (post-order)
for(i in end : start){
child_left = get_child_index(i, map_for_child, 0)
child_right = get_child_index(i, map_for_child, 1)
data[i, ] = data[child_left, ] + data[child_right, ]
}
return(data)
}
####
####
# Build the null and alternative design matrices for beta_binom_cov().
# With no user covariates the null design is an intercept column and the
# alternative adds a group indicator (0 = group 1, 1 = group 2); with
# covariates supplied, they are appended after the intercept.
# Returns list(X_null, X_alt), or prints an error and returns 0 when the
# two covariate matrices are incompatible.
get_cov = function(X_group_1 = "default", X_group_2 = "default", otu_group_1, otu_group_2){
  # BUG FIX: defaults were detected with `X_group_1 == "default"`, which on
  # a matrix argument yields a multi-element logical and makes `&&` fail
  # (an error in R >= 4.3); identical() compares the whole object.
  default_1 = identical(X_group_1, "default")
  default_2 = identical(X_group_2, "default")
  if(default_1 && default_2){
    X_null = matrix(1, nrow = dim(otu_group_1)[2] + dim(otu_group_2)[2], ncol = 1)
    X_alt = cbind(X_null, c(rep(0, dim(otu_group_1)[2]), rep(1, dim(otu_group_2)[2])))
    return(list(X_null = X_null, X_alt = X_alt))
  }
  # one-sided default, or differing column counts, is an error
  if(default_1 != default_2 || dim(X_group_1)[2] != dim(X_group_2)[2]){
    print("Error: covariates for the two group should have same columns.")
    return(0)
  }
  X = rbind(X_group_1, X_group_2)
  X_null = cbind(rep(1, dim(X)[1]), X)
  X_alt = cbind(X_null, c(rep(0, dim(X_group_1)[1]), rep(1, dim(X_group_2)[1])))
  return(list(X_null = X_null, X_alt = X_alt))
}
####
####
# Compute, for every internal node of the tree, the Bayes factor comparing
# the alternative model (group covariate included) against the null model.
# Counts are first accumulated on the tree, then each node's
# (left child, right child) count pair is scored with beta_binom_cov().
# Relies on num_leaf, num_node, start, end and map_for_child from the
# enclosing environment.
get_BF = function(otu_group_1, otu_group_2, X_group_1, X_group_2, tree, nu = 10 ^ (seq(-1, 4)), sigma = 4, verbose){
  counts_1 = get_data(tree, otu_group_1)
  counts_2 = get_data(tree, otu_group_2)
  design = get_cov(X_group_1, X_group_2, otu_group_1, otu_group_2)
  bf = rep(0, num_leaf + num_node)
  for(node in start : end){
    child_l = get_child_index(node, map_for_child, 0)
    child_r = get_child_index(node, map_for_child, 1)
    bf[node] = beta_binom_cov(cbind(counts_1[child_l, ], counts_1[child_r, ]),
                              cbind(counts_2[child_l, ], counts_2[child_r, ]),
                              design$X_null, design$X_alt, nu, sigma)
    if(verbose == TRUE) print(node)
  }
  bf
}
################################################################################################
####----Functions deal with the prior----####
####
####
# Transition prior for the node-state chain, parameterised on the logit
# scale.  Row r gives the child-state probabilities for parent
# configuration r:
#   row 1: both parents null            -> logit^-1(alpha)
#   rows 2-3: exactly one parent active -> logit^-1(alpha + beta)
#   row 4: both parents active          -> logit^-1(alpha + beta + gamma)
# Column 1 = null probability, column 2 = alternative probability.
#
# FIX: uses stats::plogis instead of exp(x)/(1 + exp(x)); identical values,
# but the explicit ratio overflows to Inf/Inf = NaN once x is large.
get_prior = function(alpha, beta, gamma){
  prior = matrix(0, ncol = 2, nrow = 4)
  prior[1, 2] = plogis(alpha)
  prior[2, 2] = plogis(alpha + beta)
  prior[3, 2] = prior[2, 2]
  prior[4, 2] = plogis(alpha + beta + gamma)
  prior[ , 1] = 1 - prior[ , 2]
  prior
}
####
####
# Build the per-node "up-down" prior tables for the tree-coupled state model.
#   prior_updown[i, s, ] = P(child-state configuration | node i in state s),
# with s = 1 the null state and s = 2 the alternative; slot end+1 holds the
# root clique.  If only_prior is FALSE the intermediate clique and node
# marginals are returned as well.  Relies on num_node, num_leaf, start, end
# and map_for_child from the enclosing environment.
get_prior_updown = function(alpha, beta, gamma, only_prior = TRUE, root = 0){
prior_updown = array(0, dim = c(num_node + num_leaf + 1, 2, 4))
prior = get_prior(alpha, beta, gamma)
clique_margin = matrix(0, nrow = num_node + num_leaf, ncol = 8)
node_margin = matrix(0, nrow = num_node + num_leaf, ncol = 2)
# Leaves: seed the clique marginals from the base prior row, then clamp each
# leaf's node marginal to the null state c(1, 0) for the upward pass.
for(i in 1:num_leaf){
node_margin[i, ] = prior[1, ]
clique_margin[i, ] = c(rep(node_margin[i, 1], 4), rep(node_margin[i, 2], 4))/4
node_margin[i, ] = c(1, 0)
}
# Upward pass over internal nodes (end : start is children-before-parents):
# combine the two children's marginals with the transition prior.
for(i in end : start){
left = get_child_index(i, map_for_child, 0)
right = get_child_index(i, map_for_child, 1)
left_margin = node_margin[left, ]
right_margin = node_margin[right, ]
margin = prior
# rows 1..4 enumerate (left, right) child-state combinations
margin[1, ] = margin[1, ] * left_margin[1] * right_margin[1]
margin[2, ] = margin[2, ] * left_margin[1] * right_margin[2]
margin[3, ] = margin[3, ] * left_margin[2] * right_margin[1]
margin[4, ] = margin[4, ] * left_margin[2] * right_margin[2]
clique_margin[i, 1:4] = margin[, 1]
clique_margin[i, 5:8] = margin[, 2]
node_margin[i, ] = c(sum( clique_margin[i, 1:4]), sum(clique_margin[i, 5:8]))
# normalise to conditional tables given the node's own state
prior_updown[i, 1, ] = clique_margin[i, 1:4]/node_margin[i, 1]
prior_updown[i, 2, ] = clique_margin[i, 5:8]/node_margin[i, 2]
}
# Root clique: layout of the top node's marginal depends on `root` (0/1);
# the 1/2 split is undone later (e.g. compute_PMAP multiplies by 2).
r = num_leaf + 1
if(root == 0){
prior_updown[end + 1, 1, ] = c(node_margin[r, 1], node_margin[r, 1], node_margin[r, 2], node_margin[r, 2])/2
}else{
prior_updown[end + 1, 1, ] = c(node_margin[r, 1], node_margin[r, 2], node_margin[r, 1], node_margin[r, 2])/2
}
prior_updown[end + 1, 2, ] = prior_updown[end + 1, 1, ]
if(only_prior){
return(prior_updown)
}else{
return(list(prior_updown = prior_updown, clique_margin = clique_margin, node_margin = node_margin) )
}
}
####
####
# Prior summaries implied by (alpha, beta, gamma):
#   pr_jap      - prior probability that at least one node is active
#                 (joint alternative probability), i.e. 1 - P(all null);
#   pr_sum_pmap - prior expected number of active nodes.
# Relies on num_leaf and num_node from the enclosing environment.
# FIX: the original loop also looked up each node's children via
# get_child_index() but never used them; that dead work is removed.
compute_PrJAP = function(alpha, beta, gamma = 0, root = 0){
  res = get_prior_updown(alpha, beta, gamma, FALSE, root)
  prior_updown = res$prior_updown
  node_margin = res$node_margin
  # P(all null) = root null marginal times every internal clique's
  # all-null conditional probability
  internal = (num_leaf + 1):(num_leaf + num_node)
  pr_jap = node_margin[num_leaf + 1, 1] * prod(prior_updown[internal, 1, 1])
  pr_sum_pmap = sum(node_margin[ , 2])
  return(list(pr_jap = 1 - pr_jap, pr_sum_pmap = pr_sum_pmap))
}
####
####
# Calibrate the prior hyper-parameters by bisection:
#   1. with beta = 0, find alpha matching the target prior joint
#      alternative probability PrJAP;
#   2. holding alpha fixed, find beta matching the target prior expected
#      number of active nodes sum_PrMAP.
# The bracket updates assume the calibrated quantity is nondecreasing in
# the parameter being searched.  Returns list(alpha, beta).
# BUG FIX: the iteration counter is reset between the two searches;
# previously `iter` carried over from stage 1, so stage 2 could run few
# (or zero) refinement steps when stage 1 used up the budget.
get_alpha_beta = function(PrJAP = 0.5, sum_PrMAP = 2, threshold = 0.005, max_iter = 50){
  # --- stage 1: bisect alpha on [-20, 20] against pr_jap ---
  alpha_b = -20
  alpha_t = 20
  alpha_mid = (alpha_b + alpha_t)/2
  beta = 0
  prjap_mid = compute_PrJAP(alpha_mid, beta, gamma = 0)$pr_jap
  diff = abs(prjap_mid - PrJAP)
  iter = 0
  while(iter <= max_iter && diff >= threshold){
    if(prjap_mid <= PrJAP){
      alpha_b = alpha_mid
    }else{
      alpha_t = alpha_mid
    }
    alpha_mid = (alpha_b + alpha_t)/2
    prjap_mid = compute_PrJAP(alpha_mid, beta, gamma = 0)$pr_jap
    diff = abs(prjap_mid - PrJAP)
    iter = iter + 1
  }
  alpha = alpha_mid
  # --- stage 2: bisect beta on [-20, 20] against pr_sum_pmap ---
  iter = 0
  beta_b = -20
  beta_t = 20
  beta_mid = (beta_b + beta_t)/2
  pr_sum_pmap_mid = compute_PrJAP(alpha, beta_mid, gamma = 0)$pr_sum_pmap
  diff = abs(pr_sum_pmap_mid - sum_PrMAP)
  while(iter <= max_iter && diff >= threshold){
    if(pr_sum_pmap_mid <= sum_PrMAP){
      beta_b = beta_mid
    }else{
      beta_t = beta_mid
    }
    beta_mid = (beta_b + beta_t)/2
    pr_sum_pmap_mid = compute_PrJAP(alpha, beta_mid, gamma = 0)$pr_sum_pmap
    diff = abs(pr_sum_pmap_mid - sum_PrMAP)
    iter = iter + 1
  }
  return(list(alpha = alpha, beta = beta_mid))
}
##################################################################################################
####----Functions for the posterior----####
####
####
# Expand each node's 2x4 up-down prior table into an 8x8 clique transition
# matrix.  Clique states 1:4 correspond to the null parent state and 5:8
# to the alternative (compute_phi weights states 5:8 by the node's Bayes
# factor).  The row pattern depends on whether node i is the which-0 or
# which-1 child of its parent; slot end+1 holds the root clique.
# Relies on num_node, num_leaf, start, end, map_for_parent and
# map_for_child from the enclosing environment.
get_xi = function(prior_updown, root = 0){
xi = array(0, dim = c(num_node + num_leaf + 1, 8, 8))
for(i in start : end){
updown_node = prior_updown[i, , ]
updown_clique = matrix(0, nrow = 8, ncol = 8)
# the row selection aligns this node's state with the matching half of
# the parent clique, differing by child position
if(get_which_child(i, map_for_parent, map_for_child, root) == 0){
updown_clique[c(1, 2, 5, 6), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(3, 4, 7, 8), 5:8] = rep(updown_node[2, ], each = 4)
}else{
updown_clique[c(1, 3, 5, 7), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(2, 4, 6, 8), 5:8] = rep(updown_node[2, ], each = 4)
}
xi[i, , ] = updown_clique
}
# root clique: same expansion, always laid out in the which-0 pattern
i = end + 1
updown_node = prior_updown[i, , ]
updown_clique = matrix(0, nrow = 8, ncol = 8)
updown_clique[c(1, 2, 5, 6), 1:4] = rep(updown_node[1, ], each = 4)
updown_clique[c(3, 4, 7, 8), 5:8] = rep(updown_node[2, ], each = 4)
xi[i, , ] = updown_clique
return(xi)
}
####
####
# Upward message passing: phi[i, ] holds, for each of node i's 8 clique
# states, the accumulated likelihood of the data below node i.  States 5:8
# carry the node's Bayes factor (alternative model); states 1:4 the null.
# Leaves keep phi = 1; slot end+1 is the root message.
# Relies on num_leaf, num_node, start, end and map_for_child from the
# enclosing environment.
compute_phi = function(xi, BF){
phi = matrix(1, ncol = 8, nrow = num_leaf + num_node + 1)
# end : start is a post-order sweep, so children messages exist before
# the parent combines them
for(i in end : start){
mar_likelihood = c(rep(1, 4), rep(BF[i], 4))
left = get_child_index(i, map_for_child, which = 0)
right = get_child_index(i, map_for_child, which = 1)
xi_A = xi[i, , ]
phi[i, ] = xi_A %*% diag(phi[left, ] * phi[right, ]) %*% mar_likelihood
}
# root: its only "child" is the top node (num_leaf + 1) and it carries no
# Bayes factor of its own
i = end + 1
xi_A = xi[i, , ]
mar_likelihood = rep(1, 8)
left = num_leaf + 1
phi[i, ] = xi_A %*% diag(phi[left, ] * rep(1, 8)) %*% mar_likelihood
return(phi)
}
####
####
# Message-passing posterior: combines the prior up-down tables with the
# per-node Bayes factors, normalises each clique, and collapses the 8x8
# clique tables back to the 2x4 up-down layout.
# Relies on num_node, num_leaf, start, end and map_for_child from the
# enclosing environment.
# BUG FIX: `root` is now forwarded to get_xi(); it was hard-coded to 0,
# silently ignoring the caller's argument.
compute_posterior = function(prior_updown, BF, root = 0){
  xi = get_xi(prior_updown, root = root)
  phi = compute_phi(xi, BF)
  xi_tilde = array(0, dim = c(num_node + num_leaf + 1, 8, 8))
  for(i in start : end){
    left = get_child_index(i, map_for_child, which = 0)
    right = get_child_index(i, map_for_child, which = 1)
    # states 5:8 correspond to the alternative, weighted by the node's BF
    mar_likelihood = c(rep(1, 4), rep(BF[i], 4))
    xi_tilde[i, , ] = diag(1/phi[i, ]) %*% xi[i, , ] %*% diag(mar_likelihood) %*% diag(phi[left, ] * phi[right, ])
  }
  i = end + 1
  xi_tilde[i, , ] = diag(1/phi[i, ]) %*% xi[i, , ] %*% diag(phi[num_leaf + 1, ])
  post_updown = array(0, dim = c(num_node + num_leaf + 1, 2, 4))
  for(i in start : (end + 1)){
    post_updown[i, 1, ] = xi_tilde[i, 1, 1:4]
    post_updown[i, 2, ] = xi_tilde[i, 8, 5:8]
  }
  return(post_updown)
}
####
####
# Posterior joint alternative probability: one minus the posterior
# probability that every node is in the null state.
# Relies on start and end from the enclosing environment.
compute_PJAP = function(post_updown){
  all_null = prod(post_updown[start : end, 1, 1]) * post_updown[end + 1, 1, 1] * 2
  1 - all_null
}
####
####
# Posterior marginal alternative probability (PMAP) for every node,
# computed top-down from the root using the posterior up-down tables.
# `already` marks nodes whose PMAP was filled in as the sibling of an
# earlier node, so each clique is only expanded once.  Leaves keep 0.
# Relies on num_leaf, num_node, start, end, map_for_parent and
# map_for_child from the enclosing environment.
compute_PMAP = function(post_updown){
PMAP = rep(0, num_leaf + num_node)
already = rep(0, num_leaf + num_node)
# root marginal comes from the root clique; the factor 2 undoes the 1/2
# split used when the root clique was built in get_prior_updown
PMAP[num_leaf + 1] = post_updown[end + 1, 2, 4] * 2
already[num_leaf + 1] = 1
for(i in (start + 1):end){
if(already[i] == 0){
clique_margin = matrix(0, ncol = 4, nrow = 2)
parent = get_parent_index(i, map_for_parent)
which = get_which_child(i, map_for_parent, map_for_child, root = 0)
# joint of (parent state, child-state configuration), with the parent's
# state weighted by its already-computed PMAP
clique_margin[1, ] = post_updown[parent, 1, ] * (1 - PMAP[parent])
clique_margin[2, ] = post_updown[parent, 2, ] * PMAP[parent]
# j is i's sibling; both marginals fall out of the same clique table
j = get_child_index(parent, map_for_child, 1 - which)
if(which == 0){
PMAP[i] = sum(clique_margin[ , 3:4])
if(j > num_leaf) PMAP[j] = sum(clique_margin[ , c(2, 4)])
already[i] = already[j] = 1
}else{
PMAP[i] = sum(clique_margin[ , c(2, 4)])
if(j > num_leaf) PMAP[j] = sum(clique_margin[ , 3:4])
already[i] = already[j] = 1
}
}
}
return(PMAP)
}
####
####
# Marginal likelihood of the data as a function of beta alone, used by
# optimize() in the main body for an empirical-Bayes choice of beta.
# alpha, gamma and BF are read from the enclosing environment; they are
# assigned in the main BGCR body before this is called.
compute_ml = function(beta){
prior_updown = get_prior_updown(alpha, beta, gamma, only_prior = TRUE, root = 0)
xi = get_xi(prior_updown)
phi = compute_phi(xi, BF)
# root message, first clique state = overall marginal likelihood
ml = phi[end + 1, 1]
return(ml)
}
##################################################################################################
####----necessary variables----####
tree = reorder(tree, order = "cladewise")
num_node = tree$Nnode
num_leaf = tree$Nnode + 1
start = num_leaf + 1
end = num_leaf + num_node
tree_postorder = reorder(tree, order = "postorder")
map_for_child = tree_postorder$edge[dim(tree_postorder$edge)[1]:1, ] ## map to find child index
map_for_parent = map_for_child[order(map_for_child[,2]), ] ## map to find parent index
####----get BF----####
BF = get_BF(otu_group_1, otu_group_2, X_group_1, X_group_2, tree, nu, sigma, verbose)
# BUG FIX: was `BF[BF == "NaN"] = 1`, which relied on implicit numeric ->
# character coercion; test for NaN directly.
BF[is.nan(BF)] = 1
####----get parameters----####
# identical() avoids coercing a numeric sum_PrMAP to character for the comparison
if(identical(sum_PrMAP, "default")){
# empirical-Bayes beta: fix alpha from the target PrJAP, then maximise the
# marginal likelihood over a data-driven bracket [0, beta_max]
parameter = get_alpha_beta(PrJAP, 0, threshold)
alpha = parameter$alpha
PrJAP_temp = compute_PrJAP(alpha, -20)
beta_min = 0
beta_max = get_alpha_beta(PrJAP_temp$pr_jap, 2)$beta
beta = optimize(compute_ml, lower = beta_min, upper = beta_max, maximum = TRUE)$maximum
}else{
parameter = get_alpha_beta(PrJAP, sum_PrMAP, threshold)
alpha = parameter$alpha
beta = parameter$beta
}
# posterior under the calibrated prior (previously duplicated in both branches)
prior_updown = get_prior_updown(alpha, beta, gamma, only_prior = TRUE, root = 0)
post_updown = compute_posterior(prior_updown, BF, root = 0)
PJAP = compute_PJAP(post_updown)
PMAP = compute_PMAP(post_updown)
####----get posterior----####
# reference posterior with beta = 0 (no coupling between parent and child nodes)
beta_ind = 0
prior_updown_ind = get_prior_updown(alpha, beta_ind, gamma, only_prior = TRUE, root = 0)
post_updown_ind = compute_posterior(prior_updown_ind, BF, root = 0)
PJAP_ind = compute_PJAP(post_updown_ind)
PMAP_ind = compute_PMAP(post_updown_ind)
res = list(tree = tree, PJAP = PJAP, PMAP = PMAP, PJAP_ind = PJAP_ind, PMAP_ind = PMAP_ind,
           BF = BF, alpha = alpha, beta = beta)
class(res) = "BGCR"
return(res)
}
##################################################################################################
##################################################################################################
####---plot the PMAP----####
# Plot the tree with per-node posterior marginal alternative probabilities
# (or BF-derived probabilities) as colour-coded node labels.
#   res     - a BGCR object as returned by BGCR()
#   BF      - if TRUE, show probabilities derived from the raw Bayes factors
#   ind     - if TRUE, show the independent (beta = 0) posterior PMAP_ind
#   subtree - "whole", or an index into ape::subtrees() to plot one clade
plot.BGCR = function(res, BF = FALSE, ind = FALSE, subtree = "whole", cex = 0.3, main = "PMAP"){
  # BUG FIX: validate the input *before* touching res$tree (the check used
  # to run after res$tree was already accessed), and use inherits()
  # instead of comparing class(res) with ==.
  if(!inherits(res, "BGCR")){
    print("ERROR: this function takes a BGCR object.")
    return(0)
  }
  tree = res$tree
  num_node = res$tree$Nnode
  num_leaf = res$tree$Nnode + 1
  start = num_leaf + 1
  end = num_leaf + num_node
  # label internal nodes by their index into the PMAP/BF vectors
  for(i in 1:num_node){
    tree$node.label[i] = i + num_leaf
  }
  if(subtree != "whole"){
    tree = subtrees(tree)[[subtree]]
    start = min(as.numeric(tree$node.label))
    end = max(as.numeric(tree$node.label))
  }
  par(mai = c(0.5, 0.4, 0.6, 1.1))
  col_Pal = colorRampPalette(c('white', 'red'))
  if(BF == FALSE){
    # the two PMAP variants only differ in which vector is displayed
    vals = if(ind) res$PMAP_ind[start : end] else res$PMAP[start : end]
    # append 0 and 1 so the colour scale is anchored to the full [0, 1] range
    node_col = col_Pal(500)[as.numeric(cut(c(vals, 0, 1), breaks = 500))]
    plot(tree, show.tip.label = FALSE, use.edge.length = FALSE, show.node.label = FALSE,
         direction = "downwards", cex = 0.6, main = main)
    nodelabels(text = format(round(vals, digits = 2), nsmall = 2), cex = cex, bg = node_col, frame = "circle")
  }else{
    # convert Bayes factors to probabilities under a flat per-node prior
    # chosen so that P(no node active) = 0.5
    pi0 = exp(log(0.5) / num_node)
    pi1 = 1 - pi0
    priorp = pi1 * res$BF[start : end] / (pi1 * res$BF[start : end] + pi0)
    node_col = col_Pal(500)[as.numeric(cut(priorp, breaks = 500))]
    plot(tree, show.tip.label = FALSE, use.edge.length = FALSE, show.node.label = FALSE,
         direction = "downwards", cex = 0.3, main = "BF")
    nodelabels(text = format(round(priorp, digits = 2), nsmall = 2), cex = 0.3, bg = node_col, frame = "circle")
  }
  legend_col(col_Pal(500), seq(0, 1))
}
##################################################################################################
##################################################################################################
|
### Convert an "H:M:S"-style timing string (optionally suffixed with "sec")
### into readable English, e.g. "1 Hour, 2 Minutes, 3 Seconds".
### Missing leading fields default to zero; zero-valued parts are dropped
### and units are pluralised unless the value is exactly 1.
`convertTime` <-
function(tmp.time) {
  parts <- as.numeric(unlist(strsplit(sub("sec", "", tmp.time), ":")))
  # left-pad with zeros so we always have (hours, minutes, seconds)
  hms <- tail(c(0, 0, parts), 3)
  units <- c("Hour", "Minute", "Second")
  plural <- hms != 1
  units[plural] <- paste0(units[plural], "s")
  keep <- hms != 0
  paste(paste(hms[keep], units[keep]), collapse = ", ")
} ### END convertTime
| /SGP/R/convertTime.R | no_license | ingted/R-Examples | R | false | false | 343 | r | `convertTime` <-
function(tmp.time) {
tmp <- tail(c(0, 0, rev(as.numeric(rev(unlist(strsplit(sub("sec", "", tmp.time), ":")))))), 3)
tmp.label <- c("Hour", "Minute", "Second")
tmp.label[which(tmp!=1)] <- paste(tmp.label, "s", sep="")[which(tmp!=1)]
return(paste(paste(tmp[tmp!=0], tmp.label[tmp!=0]), collapse=", "))
} ### END convertTime
|
# This script runs the regression model on CD4 T cells 7 days after malaria infection.
# Data was taken from Loennberg et al.
library(BASiCS)
#setwd("/nfs/research2/marioni/Nils/BASiCS/")
setwd("~/Documents/OneDrive/Projects/SingleCell/Datasets/Regression")
chains.path <- "~/Documents/OneDrive/Projects/SingleCell/BASiCS/Chains/Regression"
#### Read in data
input <- read.table("Data/Test_Data/CD4_diff.txt", sep = "\t")
#### Read in Spike-ins
ERCC.conc <- read.table("Data/Test_Data/ERCC_malaria.txt", header=TRUE, sep = "\t")
ERCC.num <- matrix(data=NA, nrow=nrow(ERCC.conc), ncol=1)
# molecules per well: concentration * 1e-18 * Avogadro's number (6.022e23)
# * 9e-3 (presumably a dilution/volume factor -- confirm against protocol)
ERCC.num[,1] <- (ERCC.conc[,2]*(10^(-18)))*(6.0221417*(10^23))*9e-3
ERCC.num.final <- ERCC.num
rownames(ERCC.num) <- rownames(ERCC.num.final) <- ERCC.conc[,1]
# keep only the spike-ins present in the count table (rows named ERCC*)
SpikeInput <- ERCC.num.final[rownames(input)[grepl("ERCC", rownames(input))],1]
SpikeInput.1 <- data.frame("Name" = names(SpikeInput),
"Molecules" = SpikeInput,
stringsAsFactors = FALSE)
# subset to day-7 infected cells; the batch is the chip prefix of each cell name
input <- input[,grepl("7_infect", colnames(input))]
chips <- sapply(colnames(input), function(n){unlist(strsplit(n, "\\."))[1]})
# Generate data object
Data.7day <- newBASiCS_Data(Counts = input,
Tech = grepl("ERCC", rownames(input)), SpikeInfo = SpikeInput.1,
BatchInfo=chips)
# Run the regression model: 80000 iterations, thin 40, burn-in 40000
MCMC.7day <- BASiCS_MCMC(Data.7day, 80000, 40, 40000,
Regression = TRUE, PrintProgress=TRUE,
StoreChains = TRUE, StoreDir = chains.path,
RunName = "CD4diff_7day_long")
#saveRDS(MCMC.7day, "Tdist/Results/Differential_testing/MCMC_CD4diff_7day.rds")
| /ChainGeneration/DifferentialTesting/CD4diff_7day.R | no_license | catavallejos/RegressionBASiCS2017 | R | false | false | 1,645 | r | # This script runs the regression model on CD4 T cells 7 days after malaria infection.
# Data was taken from Loennberg et al.
library(BASiCS)
#setwd("/nfs/research2/marioni/Nils/BASiCS/")
setwd("~/Documents/OneDrive/Projects/SingleCell/Datasets/Regression")
chains.path <- "~/Documents/OneDrive/Projects/SingleCell/BASiCS/Chains/Regression"
#### Read in data
input <- read.table("Data/Test_Data/CD4_diff.txt", sep = "\t")
#### Read in Spike-ins
ERCC.conc <- read.table("Data/Test_Data/ERCC_malaria.txt", header=TRUE, sep = "\t")
ERCC.num <- matrix(data=NA, nrow=nrow(ERCC.conc), ncol=1)
# molecules per well: concentration * 1e-18 * Avogadro's number (6.022e23)
# * 9e-3 (presumably a dilution/volume factor -- confirm against protocol)
ERCC.num[,1] <- (ERCC.conc[,2]*(10^(-18)))*(6.0221417*(10^23))*9e-3
ERCC.num.final <- ERCC.num
rownames(ERCC.num) <- rownames(ERCC.num.final) <- ERCC.conc[,1]
# keep only the spike-ins present in the count table (rows named ERCC*)
SpikeInput <- ERCC.num.final[rownames(input)[grepl("ERCC", rownames(input))],1]
SpikeInput.1 <- data.frame("Name" = names(SpikeInput),
"Molecules" = SpikeInput,
stringsAsFactors = FALSE)
# subset to day-7 infected cells; the batch is the chip prefix of each cell name
input <- input[,grepl("7_infect", colnames(input))]
chips <- sapply(colnames(input), function(n){unlist(strsplit(n, "\\."))[1]})
# Generate data object
Data.7day <- newBASiCS_Data(Counts = input,
Tech = grepl("ERCC", rownames(input)), SpikeInfo = SpikeInput.1,
BatchInfo=chips)
# Run the regression model: 80000 iterations, thin 40, burn-in 40000
MCMC.7day <- BASiCS_MCMC(Data.7day, 80000, 40, 40000,
Regression = TRUE, PrintProgress=TRUE,
StoreChains = TRUE, StoreDir = chains.path,
RunName = "CD4diff_7day_long")
#saveRDS(MCMC.7day, "Tdist/Results/Differential_testing/MCMC_CD4diff_7day.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{DeleteProtectedRangeRequest}
\alias{DeleteProtectedRangeRequest}
\title{DeleteProtectedRangeRequest Object}
\usage{
DeleteProtectedRangeRequest(protectedRangeId = NULL)
}
\arguments{
\item{protectedRangeId}{The ID of the protected range to delete}
}
\value{
DeleteProtectedRangeRequest object
}
\description{
DeleteProtectedRangeRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Deletes the protected range with the given ID.
}
\concept{DeleteProtectedRangeRequest functions}
| /man/DeleteProtectedRangeRequest.Rd | no_license | bradgwest/googleSheetsR | R | false | true | 622 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{DeleteProtectedRangeRequest}
\alias{DeleteProtectedRangeRequest}
\title{DeleteProtectedRangeRequest Object}
\usage{
DeleteProtectedRangeRequest(protectedRangeId = NULL)
}
\arguments{
\item{protectedRangeId}{The ID of the protected range to delete}
}
\value{
DeleteProtectedRangeRequest object
}
\description{
DeleteProtectedRangeRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Deletes the protected range with the given ID.
}
\concept{DeleteProtectedRangeRequest functions}
|
\docType{data}
\name{demo-SensorAffinity}
\alias{demo-SensorAffinity}
\title{Demo SensorAffinity.}
\description{
Demo SensorAffinity.
}
\examples{
# define a representative set of gases A, C and AC
# - appropriate to test sensor affinities across two species A and C
set <- c("A 0.01", "A 0.05", "C 0.1", "C 1", "A 0.01, C 0.1", "A 0.05, C 1")
# 0) check UNIMAN sensors and their sorption affinities
data(UNIMANsorption)
df <- as.data.frame(UNIMANsorption$qkc[, , "K"])
head(df)
df <- mutate(df,
sensor = 1:nrow(df),
sensor.group = ifelse(A > C, "More affinity to A", "More affinity to C"))
p <- ggplot(df, aes(reorder(x = factor(sensor), A - C), y = A - C, fill = sensor.group)) +
geom_bar(position = "identity") + coord_flip() +
xlab("sensor") + ylab("Difference in K between A and C")
p
# in result:
# - sensors with affinities A > C: 17, 13, 14, ...
# - sensors with affinities C > A: 2, 1, 3, ...
# 1) sensors with affinities A > C
# - set drift noise level 'dsd' to zero,
# in order to see more a class-relevant information, than drift
sa1 <- SensorArray(num = c(13, 14, 17), dsd = 0)
# look at the level of signal in response to pure analytes and to a mixture
# - it is important, as
# 1) PCA mostly captures a variation in the absolute level of signals
# 2) according to the models for data generation, the mixture response is
# a sum of responses to the pure analytes the mixture is composed of;
# thus, absolute values of signals matter.
p0 <- plotSignal(sa1, set = set)
p0
p1 <- plotPCA(sa1, set = rep(set, 3), air = FALSE, main = "sensors of affinities A > C")
p1
# 2) sensors with affinities A < C
sa2 <- SensorArray(num = 1:3, dsd = 0)
p2 <- plotPCA(sa2, set = rep(set, 3), air = FALSE, main = "sensors of affinities A < C")
p2
# 3) all available 17 types of sensors
sa3 <- SensorArray(num = 1:17, dsd = 0)
p3 <- plotPCA(sa3, set = rep(set, 3), air = FALSE, main = "all types of affinities")
p3
}
\keyword{datasets}
\keyword{demo}
| /man/demo-SensorAffinity.Rd | no_license | cran/chemosensors | R | false | false | 1,976 | rd | \docType{data}
\name{demo-SensorAffinity}
\alias{demo-SensorAffinity}
\title{Demo SensorAffinity.}
\description{
Demo SensorAffinity.
}
\examples{
# define a representative set of gases A, C and AC
# - appropriate to test sensor affinities across two species A and C
set <- c("A 0.01", "A 0.05", "C 0.1", "C 1", "A 0.01, C 0.1", "A 0.05, C 1")
# 0) check UNIMAN sensors and their sorption affinities
data(UNIMANsorption)
df <- as.data.frame(UNIMANsorption$qkc[, , "K"])
head(df)
df <- mutate(df,
sensor = 1:nrow(df),
sensor.group = ifelse(A > C, "More affinity to A", "More affinity to C"))
p <- ggplot(df, aes(reorder(x = factor(sensor), A - C), y = A - C, fill = sensor.group)) +
geom_bar(position = "identity") + coord_flip() +
xlab("sensor") + ylab("Difference in K between A and C")
p
# in result:
# - sensors with affinities A > C: 17, 13, 14, ...
# - sensors with affinities C > A: 2, 1, 3, ...
# 1) sensors with affinities A > C
# - set drift noise level 'dsd' to zero,
# in order to see more a class-relevant information, than drift
sa1 <- SensorArray(num = c(13, 14, 17), dsd = 0)
# look at the level of signal in response to pure analytes and to a mixture
# - it is important, as
# 1) PCA mostly captures a variation in the absolute level of signals
# 2) according to the models for data generation, the mixture response is
# a sum of responses to the pure analytes the mixture is composed of;
# thus, absolute values of signals matter.
p0 <- plotSignal(sa1, set = set)
p0
p1 <- plotPCA(sa1, set = rep(set, 3), air = FALSE, main = "sensors of affinities A > C")
p1
# 2) sensors with affinities A < C
sa2 <- SensorArray(num = 1:3, dsd = 0)
p2 <- plotPCA(sa2, set = rep(set, 3), air = FALSE, main = "sensors of affinities A < C")
p2
# 3) all available 17 types of sensors
sa3 <- SensorArray(num = 1:17, dsd = 0)
p3 <- plotPCA(sa3, set = rep(set, 3), air = FALSE, main = "all types of affinities")
p3
}
\keyword{datasets}
\keyword{demo}
|
#
# ggbiplot2-package.R
#
# Copyright 2014 Vincent Q. Vu. All rights reserved
#
#' ggbiplot2
#'
#' @import ggplot2 plyr grid scales
#' @name ggbiplot2
#' @docType package
NULL
#' @title Wine dataset
#' @name wine
#' @aliases wine.class
#' @description
#' Results of a chemical analysis of wines grown in the same region in Italy
#' but derived from three different cultivars. The analysis determined the
#' quantities of 13 constituents found in each of the three types of wines.
#'
#' @docType data
#' @usage data(wine)
#' @format
#' A \code{wine} data frame consisting of 178 observations (rows) and
#' 13 columns and vector \code{wine.class} of factors indicating the cultivars.
#' @source UCI Machine Learning Repository (\url{http://archive.ics.uci.edu/ml/datasets/Wine})
#'
NULL
| /R/ggbiplot2-package.R | no_license | deprekate/ggbiplot2 | R | false | false | 795 | r | #
# ggbiplot2-package.R
#
# Copyright 2014 Vincent Q. Vu. All rights reserved
#
#' ggbiplot2
#'
#' @import ggplot2 plyr grid scales
#' @name ggbiplot2
#' @docType package
NULL
#' @title Wine dataset
#' @name wine
#' @aliases wine.class
#' @description
#' Results of a chemical analysis of wines grown in the same region in Italy
#' but derived from three different cultivars. The analysis determined the
#' quantities of 13 constituents found in each of the three types of wines.
#'
#' @docType data
#' @usage data(wine)
#' @format
#' A \code{wine} data frame consisting of 178 observations (rows) and
#' 13 columns and vector \code{wine.class} of factors indicating the cultivars.
#' @source UCI Machine Learning Repository (\url{http://archive.ics.uci.edu/ml/datasets/Wine})
#'
NULL
|
# More tests are in test-write_fs_weight.
test_that("Invalid arguments to read.fs.weight lead to errors", {
  tmp_file <- tempfile(fileext = ".w")
  # Requesting an unsupported 'format' value must be rejected.
  expect_error(read.fs.weight(tmp_file, format = "no such format"))
})
| /tests/testthat/test-read_fs_weight.R | permissive | dfsp-spirit/freesurferformats | R | false | false | 236 | r | # More tests are in test-write_fs_weight
test_that("Invalid arguments to read.fs.weight lead to errors", {
tmp_file = tempfile(fileext=".w");
expect_error(read.fs.weight(tmp_file, format = "no such format")); # invalid format
})
|
## ----my-first-shiny-app, eval = FALSE------------------------------------
## library(shiny)
##
## ui <- fluidPage()
## server <- function(input, output) {}
##
## shinyApp(ui = ui, server = server)
## ----shinydashboard------------------------------------------------------
library(shiny)
library(shinydashboard)
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody()
)
server <- function(input, output) { }
shinyApp(ui, server)
| /060-ggplot/output/purl/04-01-my-first-shiny.R | no_license | quantide/qtraining | R | false | false | 476 | r | ## ----my-first-shiny-app, eval = FALSE------------------------------------
## library(shiny)
##
## ui <- fluidPage()
## server <- function(input, output) {}
##
## shinyApp(ui = ui, server = server)
## ----shinydashboard------------------------------------------------------
library(shiny)
library(shinydashboard)
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody()
)
server <- function(input, output) { }
shinyApp(ui, server)
|
#' @rdname fixationIndels
#' @title Fixation indels prediction
#' @description The fixation of insertions or deletions.
#' @param x The return from \code{\link{sitesMinEntropy}} function.
#' @param ... Other arguments.
#' @return A \code{fixationIndels} object.
#' @export
#' @examples
#' data(zikv_tree_reduced)
#' data(zikv_align_reduced)
#' tree <- addMSA(zikv_tree_reduced, alignment = zikv_align_reduced)
#' fixationIndels(sitesMinEntropy(tree))
fixationIndels <- function(x, ...) {
    # S3 generic: dispatch on the class of 'x'; the main method is
    # fixationIndels.sitesMinEntropy defined in this file.
    UseMethod("fixationIndels")
}
#' @rdname fixationIndels
#' @export
fixationIndels.sitesMinEntropy <- function(x, ...) {
paths <- attr(x, "paths")
tree <- attr(paths, "tree")
seqType <- attr(paths, "seqType")
gapChar <- attr(paths, "gapChar")
minSize <- attr(paths, "minSize")
# 'res' is going to be the return of this function. Each entry in the list
# is the 'indelPath' for a fragment of sequence.
res <- list()
for (segs in x) {
pathNodeTips <- lapply(attr(segs, "pathNodeTips"), as.integer)
prevSite <- -1
currIndels <- list()
for (site in names(segs)) {
seg <- segs[[site]]
# Find the tips having 'gapChar' at the site
siteChars <- vapply(
X = seg,
FUN = attr,
FUN.VALUE = character(1),
which = "AA"
)
tipsWithDeletion <- seg[which(siteChars == gapChar)]
if (length(tipsWithDeletion)) {
currSite <- as.integer(site)
# Test the continuity of the deletion
if (currSite - prevSite == 1) {
# Find the overlapping tips to further ensure the continuity
for (iter in seq_along(currIndels)) {
# Existing tips with continuing deletion
refTips <- currIndels[[iter]]
indelSites <- c(attr(refTips, "indelSites"),
currSite)
for (tips in tipsWithDeletion) {
continued <- intersect(refTips, tips)
# The deletion of the tips is ended if the current
# site is not gap
ended <- setdiff(refTips, continued)
# A new deletion is started if a new group of tips
# are gap at the current site
started <- setdiff(tips, continued)
if (length(continued)) {
continued <- .findAncestralNode(continued,
pathNodeTips,
indelSites)
currIndels[iter] <- continued
if (length(ended)) {
ended <- .findAncestralNode(ended,
pathNodeTips,
indelSites)
currIndels <-
c(currIndels, ended)
}
} else {
if (length(ended)) {
ended <- .findAncestralNode(ended,
pathNodeTips,
indelSites)
currIndels[iter] <- ended
}
}
if (length(started)) {
started <- .findAncestralNode(started,
pathNodeTips,
currSite)
currIndels <- c(currIndels, started)
}
}
}
} else {
# Initiate the first deletion fragment or re-initiate new
# deletion fragment if the gap can't be extended due to
# discontunity of the site
currIndels <-
lapply(tipsWithDeletion, function(tips) {
attr(tips, "indelSites") <- currSite
return(tips)
})
}
# Update the 'prevSite' only when the site is a gap
prevSite <- currSite
}
}
# All indel for the current path
for (tips in currIndels) {
if (length(tips) >= minSize) {
indelSites <- attr(tips, "indelSites")
if (length(indelSites) > 1) {
indelSites <- range(indelSites)
indelSites <- paste0(indelSites, collapse = "-")
}
node <- attr(tips, "node")
res[[indelSites]][[node]] <- tips
attr(res[[indelSites]], "indelSites") <- indelSites
attr(res[[indelSites]], "tree") <- tree
attr(res[[indelSites]], "seqType") <- seqType
class(res[[indelSites]]) <- "indelPath"
}
}
}
# Set 'paths' and 'clustersByPath' attributes
attr(res, "paths") <- paths
class(res) <- "fixationIndels"
return(res)
}
#' Group gap-carrying tips into contiguous runs along a path
#'
#' Walks the ordered tip clusters of a lineage path and collects the tips
#' in 'tipsWithGap' into maximal runs of consecutive clusters. Each run is
#' returned as an integer vector of tip indices, named after the ancestral
#' node at which the run starts.
#'
#' @param tipsWithGap Integer vector of tip indices carrying the gap.
#' @param pathNodeTips Named list mapping each node on the path to the
#'   integer tip indices descending from it, in path order.
#' @param indelSites Integer vector of alignment site(s) spanned by the indel.
#' @return A named list of integer tip vectors; each element carries the
#'   attributes "node" (starting ancestral node) and "indelSites".
#' @noRd
.findAncestralNode <- function(tipsWithGap,
                               pathNodeTips,
                               indelSites) {
    res <- list()
    # The tips accumulated for the run currently being built
    currTips <- integer()
    # Default to the first node in case no run is ever started
    ancestralNode <- names(pathNodeTips[1])
    # Iterate the tip clusters along the path
    for (node in names(pathNodeTips)) {
        tips <- pathNodeTips[[node]]
        # Does this cluster belong to the gap-carrying tips? (It is assumed
        # all 'tips' are in 'tipsWithGap' if any of them are.)
        if (any(tips %in% tipsWithGap)) {
            # A new run takes its ancestral node from its first cluster
            if (length(currTips) == 0) {
                ancestralNode <- node
            }
            # Accumulate the tips of the run
            currTips <- c(currTips, tips)
            tipsWithGap <- setdiff(tipsWithGap, tips)
        } else {
            if (length(currTips)) {
                # The continuity stopped: finalize the run. Bug fix: the
                # "indelSites" attribute is now also set here; previously
                # only the trailing run received it, so runs ending before
                # the last node lost their site information (the caller
                # reads attr(tips, "indelSites")).
                attr(currTips, "node") <- ancestralNode
                attr(currTips, "indelSites") <- indelSites
                res[[ancestralNode]] <- currTips
            }
            # Reset the accumulator for the next run
            currTips <- integer()
        }
    }
    if (length(currTips)) {
        # Finalize the trailing run, if any
        attr(currTips, "node") <- ancestralNode
        attr(currTips, "indelSites") <- indelSites
        res[[ancestralNode]] <- currTips
    }
    return(res)
}
| /R/fixationIndels.R | permissive | wuaipinglab/sitePath | R | false | false | 7,046 | r | #' @rdname fixationIndels
#' @title Fixation indels prediction
#' @description The fixation of insertions of deletions.
#' @param x The return from \code{\link{sitesMinEntropy}} function.
#' @param ... Other arguments.
#' @return A \code{fixationIndels} object.
#' @export
#' @examples
#' data(zikv_tree_reduced)
#' data(zikv_align_reduced)
#' tree <- addMSA(zikv_tree_reduced, alignment = zikv_align_reduced)
#' fixationIndels(sitesMinEntropy(tree))
fixationIndels <- function(x, ...) {
UseMethod("fixationIndels")
}
#' @rdname fixationIndels
#' @export
fixationIndels.sitesMinEntropy <- function(x, ...) {
paths <- attr(x, "paths")
tree <- attr(paths, "tree")
seqType <- attr(paths, "seqType")
gapChar <- attr(paths, "gapChar")
minSize <- attr(paths, "minSize")
# 'res' is going to be the return of this function. Each entry in the list
# is the 'indelPath' for a fragment of sequence.
res <- list()
for (segs in x) {
pathNodeTips <- lapply(attr(segs, "pathNodeTips"), as.integer)
prevSite <- -1
currIndels <- list()
for (site in names(segs)) {
seg <- segs[[site]]
# Find the tips having 'gapChar' at the site
siteChars <- vapply(
X = seg,
FUN = attr,
FUN.VALUE = character(1),
which = "AA"
)
tipsWithDeletion <- seg[which(siteChars == gapChar)]
if (length(tipsWithDeletion)) {
currSite <- as.integer(site)
# Test the continuity of the deletion
if (currSite - prevSite == 1) {
# Find the overlapping tips to further ensure the continuity
for (iter in seq_along(currIndels)) {
# Existing tips with continuing deletion
refTips <- currIndels[[iter]]
indelSites <- c(attr(refTips, "indelSites"),
currSite)
for (tips in tipsWithDeletion) {
continued <- intersect(refTips, tips)
# The deletion of the tips is ended if the current
# site is not gap
ended <- setdiff(refTips, continued)
# A new deletion is started if a new group of tips
# are gap at the current site
started <- setdiff(tips, continued)
if (length(continued)) {
continued <- .findAncestralNode(continued,
pathNodeTips,
indelSites)
currIndels[iter] <- continued
if (length(ended)) {
ended <- .findAncestralNode(ended,
pathNodeTips,
indelSites)
currIndels <-
c(currIndels, ended)
}
} else {
if (length(ended)) {
ended <- .findAncestralNode(ended,
pathNodeTips,
indelSites)
currIndels[iter] <- ended
}
}
if (length(started)) {
started <- .findAncestralNode(started,
pathNodeTips,
currSite)
currIndels <- c(currIndels, started)
}
}
}
} else {
# Initiate the first deletion fragment or re-initiate new
# deletion fragment if the gap can't be extended due to
# discontunity of the site
currIndels <-
lapply(tipsWithDeletion, function(tips) {
attr(tips, "indelSites") <- currSite
return(tips)
})
}
# Update the 'prevSite' only when the site is a gap
prevSite <- currSite
}
}
# All indel for the current path
for (tips in currIndels) {
if (length(tips) >= minSize) {
indelSites <- attr(tips, "indelSites")
if (length(indelSites) > 1) {
indelSites <- range(indelSites)
indelSites <- paste0(indelSites, collapse = "-")
}
node <- attr(tips, "node")
res[[indelSites]][[node]] <- tips
attr(res[[indelSites]], "indelSites") <- indelSites
attr(res[[indelSites]], "tree") <- tree
attr(res[[indelSites]], "seqType") <- seqType
class(res[[indelSites]]) <- "indelPath"
}
}
}
# Set 'paths' and 'clustersByPath' attributes
attr(res, "paths") <- paths
class(res) <- "fixationIndels"
return(res)
}
.findAncestralNode <- function(tipsWithGap,
pathNodeTips,
indelSites) {
res <- list()
# The tips to be grouped
currTips <- integer()
ancestralNode <- names(pathNodeTips[1])
# Iterate the tips along the path
for (node in names(pathNodeTips)) {
tips <- pathNodeTips[[node]]
# To find the tips that are grouped in the 'tipsWithGap'
if (any(tips %in% tipsWithGap)) {
# The ancestral node is from its starting tips in 'pathNodeTips'
if (length(currTips) == 0) {
ancestralNode <- node
}
# Accumulating the tips (it's assumed the all 'tips' are in
# 'tipsWithGap' if any)
currTips <- c(currTips, tips)
tipsWithGap <- setdiff(tipsWithGap, tips)
} else {
if (length(currTips)) {
# The continuity stopped and new tip group formed
attr(currTips, "node") <- ancestralNode
res[[ancestralNode]] <- currTips
}
# Reset the tips to be grouped
currTips <- integer()
}
}
if (length(currTips)) {
# The continuity stopped and new tip group formed
attr(currTips, "node") <- ancestralNode
attr(currTips, "indelSites") <- indelSites
res[[ancestralNode]] <- currTips
}
return(res)
}
|
# Waterfall visualization of per-feature contributions for a linear
# regression on the Boston housing data: fit lm(medv ~ .), take one test
# case, decompose its prediction into intercept + beta_j * x_j terms, and
# plot them as a waterfall chart plus a sorted contribution bar chart.
library(MASS)
library(caret)
library(magrittr)
library(ggplot2)
data(Boston)
set.seed(123)
# mean centering
# NOTE(review): the centered data 'b2' is only used to draw the partition
# indices; 'train'/'test' are taken from the raw 'Boston' -- confirm whether
# the centered data was meant to be used for fitting as well.
b2 <- preProcess(Boston, method = "center") %>% predict(., Boston)
idx <- createDataPartition(b2$medv, p = 0.8, list = FALSE)
train <- Boston[idx,]
test <- Boston[-idx,]
mod0 <- lm(data = train, medv ~.)
sm <- summary(mod0)
# First column of the coefficient table = the estimated betas.
betas <- sm$coefficients[,1]
testcase <- test[1,]
pred <- predict(mod0, testcase)
# dot product between feature vector and beta
featvec <- testcase[-which(testcase %>% names == "medv")] %>% as.matrix
betas2 <- betas[-1]
nm <- names(betas)
#betas2 %*% t(featvec)
# feature contributions
# Elementwise beta_j * x_j, then prepend the intercept and append the
# overall prediction so the waterfall starts and ends at the totals.
featcont <- betas2*featvec
featcont <- c(betas[1], featcont, pred)
names(featcont) <- c(nm, "Prediction")
# waterfall chart on feature contribution
plotdata <- data.frame(coef = names(featcont), featcont = featcont, row.names = NULL)
# Freeze the display order of the bars.
plotdata$coef <- factor(plotdata$coef, levels = plotdata$coef)
plotdata$id <- seq_along(plotdata$coef)
plotdata$Impact <- ifelse(plotdata$featcont > 0, "+ve", "-ve")
plotdata[plotdata$coef %in% c("(Intercept)", "Prediction"), "Impact"] <- "Initial/Net"
# Running totals: each bar spans [start, end]; the final bar drops to 0.
plotdata$end <- cumsum(plotdata$featcont)
plotdata$end <- c(head(plotdata$end, -1), 0)
plotdata$start <- c(0, head(plotdata$end, -1))
plotdata <- plotdata[, c(3, 1, 4, 6, 5, 2)]
gg <- ggplot(plotdata, aes(fill = Impact)) +
  geom_rect(aes(coef,
                xmin = id - 0.45,
                xmax = id + 0.45,
                ymin = end,
                ymax = start)) +
  theme_minimal() +
  #scale_fill_manual(values=c("#999999", "#E69F00", "#56B4E9"))
  scale_fill_manual(values=c("darkred", "darkgreen", "darkblue")) +
  theme(axis.text.x=element_text(angle=90, hjust=1))
#coord_flip()
# Draw a zero line only when the chart actually crosses zero.
if(sign(plotdata$end[1]) != sign(plotdata$start[nrow(plotdata)]))
  gg <- gg + geom_hline(yintercept = 0)
gg
# Contributions as a proportion of the prediction, sorted ascending,
# dropping the final "Prediction" row.
cont_prop <- featcont/pred
plot_data <- data.frame(coef = names(cont_prop),
                        cont_prop = cont_prop,
                        row.names = NULL)
plot_data <- plot_data[-nrow(plot_data),]
plot_data <- plot_data[order(plot_data$cont_prop, decreasing = FALSE),]
plot_data$coef <- factor(plot_data$coef, levels = plot_data$coef)
p<-ggplot(data=plot_data, aes(x=coef, y = cont_prop)) +
  geom_bar(stat="identity", fill = "darkblue") +
  coord_flip() +
  theme_minimal() +
  xlab("Features") +
  ggtitle("Feature Contributions")
p | /waterfall/regression.R | no_license | tohweizhong/auxml-lab | R | false | false | 2,377 | r |
library(MASS)
library(caret)
library(magrittr)
library(ggplot2)
data(Boston)
set.seed(123)
# mean centering
b2 <- preProcess(Boston, method = "center") %>% predict(., Boston)
idx <- createDataPartition(b2$medv, p = 0.8, list = FALSE)
train <- Boston[idx,]
test <- Boston[-idx,]
mod0 <- lm(data = train, medv ~.)
sm <- summary(mod0)
betas <- sm$coefficients[,1]
testcase <- test[1,]
pred <- predict(mod0, testcase)
# dot product between feature vector and beta
featvec <- testcase[-which(testcase %>% names == "medv")] %>% as.matrix
betas2 <- betas[-1]
nm <- names(betas)
#betas2 %*% t(featvec)
# feature contributions
featcont <- betas2*featvec
featcont <- c(betas[1], featcont, pred)
names(featcont) <- c(nm, "Prediction")
# waterfall chart on feature contribution
plotdata <- data.frame(coef = names(featcont), featcont = featcont, row.names = NULL)
plotdata$coef <- factor(plotdata$coef, levels = plotdata$coef)
plotdata$id <- seq_along(plotdata$coef)
plotdata$Impact <- ifelse(plotdata$featcont > 0, "+ve", "-ve")
plotdata[plotdata$coef %in% c("(Intercept)", "Prediction"), "Impact"] <- "Initial/Net"
plotdata$end <- cumsum(plotdata$featcont)
plotdata$end <- c(head(plotdata$end, -1), 0)
plotdata$start <- c(0, head(plotdata$end, -1))
plotdata <- plotdata[, c(3, 1, 4, 6, 5, 2)]
gg <- ggplot(plotdata, aes(fill = Impact)) +
geom_rect(aes(coef,
xmin = id - 0.45,
xmax = id + 0.45,
ymin = end,
ymax = start)) +
theme_minimal() +
#scale_fill_manual(values=c("#999999", "#E69F00", "#56B4E9"))
scale_fill_manual(values=c("darkred", "darkgreen", "darkblue")) +
theme(axis.text.x=element_text(angle=90, hjust=1))
#coord_flip()
if(sign(plotdata$end[1]) != sign(plotdata$start[nrow(plotdata)]))
gg <- gg + geom_hline(yintercept = 0)
gg
cont_prop <- featcont/pred
plot_data <- data.frame(coef = names(cont_prop),
cont_prop = cont_prop,
row.names = NULL)
plot_data <- plot_data[-nrow(plot_data),]
plot_data <- plot_data[order(plot_data$cont_prop, decreasing = FALSE),]
plot_data$coef <- factor(plot_data$coef, levels = plot_data$coef)
p<-ggplot(data=plot_data, aes(x=coef, y = cont_prop)) +
geom_bar(stat="identity", fill = "darkblue") +
coord_flip() +
theme_minimal() +
xlab("Features") +
ggtitle("Feature Contributions")
p |
# Translate a user-supplied settings list 's1' into the internal form.
#
# Returns an unnamed list of three elements:
#   [[1]] a list holding any of 'recordFile', 'loadDesign', 'seed' that were
#         supplied (as 'record.file', 'load.design', 'seed' in 's1'),
#   [[2]] the 'data.in' flag (default FALSE),
#   [[3]] the 'id.out' flag (default FALSE).
processing_setting <- function(s1) {
  out <- list()
  flag_data_in <- FALSE
  flag_id_out <- FALSE
  if (!is.null(s1)) {
    # Copy over only the options that were actually supplied.
    if (!is.null(s1$record.file)) out$recordFile <- s1$record.file
    if (!is.null(s1$load.design)) out$loadDesign <- s1$load.design
    if (!is.null(s1$seed))        out$seed       <- s1$seed
    if (!is.null(s1$data.in))     flag_data_in   <- s1$data.in
    if (!is.null(s1$id.out))      flag_id_out    <- s1$id.out
  }
  list(out, flag_data_in, flag_id_out)
}
| /R/processing_setting.R | no_license | MarcLavielle/mlxR | R | false | false | 468 | r | processing_setting <- function(s1)
{
s2=list()
data.in <- FALSE
id.out <- FALSE
if (!is.null(s1)){
if (!is.null(s1$record.file)) {s2$recordFile=s1$record.file}
if (!is.null(s1$load.design)) {s2$loadDesign=s1$load.design}
if (!is.null(s1$seed)) {s2$seed=s1$seed}
if (!is.null(s1$data.in)) {data.in=s1$data.in}
if (!is.null(s1$id.out)) {id.out=s1$id.out}
}
return(list(s2, data.in, id.out))
}
|
#S05_C26_Trees_Motivation
# Motivation for Decision Trees (lecture notes translated from Spanish).
# We describe why methods such as LDA and QDA should not be used with data sets that have many predictors.
# This is because the number of parameters we need to estimate becomes too large.
# For example, in the digits example, where we have 784 predictors, LDA would have to estimate more than 600,000 parameters.
# With QDA, you would have to multiply that by the number of classes, which is 10 here.
# Kernel methods such as k-NN or local regression have no model parameters to estimate.
# But they also face a challenge when multiple predictors are used, due to what is known as the curse of dimensionality.
# "Dimension" here refers to the fact that with p predictors, the distance between two observations is computed in p-dimensional space.
# A useful way to understand the curse of dimensionality is to consider how large we have to make a neighborhood -- the neighborhood used to form the estimates -- in order to include a given percentage of the data.
# Remember that with large neighborhoods our methods lose flexibility.
# For example, suppose we have one continuous predictor with equally spaced points on the interval [0,1], and we want to create windows that include 1/10 of the data.
# Then it is easy to see that our windows must have size 0.1.
# You can see this in the figure.
# Now, with two predictors, if we keep the neighborhood just as small -- 10% of each dimension -- we only include one point.
# If we want to include 10% of the data, then we need to increase the size of each side of the square to the square root of 0.10 so that the area is 10 out of 100.
# That is now 0.316.
# In general, to include 10% of the data in a case with p dimensions, we need an interval whose sides each have size 0.10^(1/p).
# This proportion approaches 1 as p grows, meaning we include practically all the data and are no longer smoothing locally.
# You can see this in the graph plotting p versus 0.10^(1/p).
# So by the time we reach 100 predictors, the neighborhood is no longer local at all, since each side covers almost the entire data set.
# In this topic, we introduce a set of elegant and versatile methods that adapt to higher dimensions and also allow these regions to take more complex shapes, while still producing interpretable models.
# These are very well-known and well-studied methods.
# We will focus on regression and decision trees and their extension, random forests.
#Nos centraremos en los árboles de regresión y decisión y su extensión, los bosques aleatorios. | /HC_SeccionN05/S05_C26_Trees_Motivation.R | no_license | wparedesgt/MachineLearning | R | false | false | 2,701 | r | #S05_C26_Trees_Motivation
#Motivacion para Arboles de Decisión
#Describimos cómo los métodos como lda y qda no deben usarse con conjuntos de datos que tienen muchos predictores.
#Esto se debe a que el número de parámetros que necesitamos estimar se vuelve demasiado grande.
#Por ejemplo, con el ejemplo de dígitos donde tenemos 784 predictores, lda tendría que estimar más de 600,000 parámetros.
#Con qda, tendrías que multiplicar eso por el número de clases, que es 10 aquí.
#Los métodos de kernel tales como k-nn o regresión local no tienen parámetros de modelo para estimar.
#Pero también enfrentan un desafío cuando se usan predictores múltiples debido a lo que se conoce como la maldición de la dimensionalidad.
#La dimensión aquí se refiere al hecho de que cuando tenemos predictores p, la distancia entre dos observaciones se calcula en p espacio dimensional.
#Una forma útil de comprender la maldición de la dimensionalidad es considerar qué tan grande tenemos que hacer un vecindario, el vecindario que usamos para hacer las estimaciones, para incluir un porcentaje dado de los datos.
#Recuerde que con vecindarios grandes, nuestros métodos pierden flexibilidad.
#Por ejemplo, supongamos que tenemos un predictor continuo con puntos igualmente espaciados en el intervalo [0,1], y desea crear ventanas que incluyan 1/10 de los datos.
#Entonces es fácil ver que nuestras ventanas tienen que ser del tamaño 0.1.
#Puedes verlo en esta figura.
#Ahora, para dos predictores, si decidimos mantener el vecindario solo un pequeño, el 10% de cada dimensión solo incluimos un punto.
#Si queremos incluir el 10% de los datos, entonces necesitamos aumentar el tamaño de cada lado del cuadrado a la raíz cuadrada de 10 para que el área sea 10 de 100.
#Esto es ahora 0.316.
#En general, para incluir el 10% de los datos en un caso con p dimensiones, necesitamos un intervalo con cada lado que tenga un tamaño de 0,10 a 1 / p.
#Esta proporción se acerca a 1, lo que significa que estamos incluyendo prácticamente todos los datos, y ya no se suaviza muy rápidamente.
#Puedes verlo en este gráfico, trazando p versus 0.1 a 1 / p.
#Entonces, cuando llegamos a 100 predictores, el vecindario ya no es muy local, ya que cada lado cubre casi todo el conjunto de datos.
#En este tema, presentamos un conjunto de métodos elegantes y versátiles que se adaptan a dimensiones más altas y también permiten que estas regiones tomen formas más complejas, al tiempo que producen modelos que son interpretables.
#Estos son métodos muy conocidos, y estudiados.
#Nos centraremos en los árboles de regresión y decisión y su extensión, los bosques aleatorios. |
# Update the PHESANT outcome-info file against the latest UKB Data Dictionary
# Showcase, carrying over the PHESANT-specific curation columns.
library(data.table)
# Label recorded in the EXCLUDED column for phenotypes excluded in this round.
exclusion_name <- "YES-NEALELAB-MARCH-19"
# The old version of the outcome info file
old_variable_info <- "variable-info/outcome_info_final_round2.tsv"
# The latest version of the Data Dictionary Showcase - download from UKB if updating.
# wget http://biobank.ctsu.ox.ac.uk/%7Ebbdatan/Data_Dictionary_Showcase.csv
new_variable_info <- "Data_Dictionary_Showcase.csv"
PHEASANT_variable_info <- "variable-info/outcome_info_PHESANT_main.tsv"
old_df <- fread(old_variable_info, header=TRUE, data.table=FALSE)
new_df <- fread(new_variable_info, header=TRUE, data.table=FALSE)
# We only read the following in to make sure that the overall format is right, and we've convinced ourselves that
# we've included/removed the right things.
PH_df <- fread(PHEASANT_variable_info, header=TRUE, data.table=FALSE)
# Drop the UKB showcase columns so only the PHESANT-specific columns remain.
PH_df <- PH_df[,!(names(PH_df) %in%
                  c("Path", "Category", "Participants", "Items", "Stability",
                    "ValueType", "Units", "ItemType", "Strata", "Sexed", "Instances",
                    "Array", "Coding", "Notes", "Link"))]
# Merge the two data frames.
# Full outer join on FieldID: shared column names get suffixed .x (old) / .y (new),
# and rows present in only one file get NA in the other side's columns.
df <- merge(x=old_df, y=new_df, by="FieldID", all=TRUE)
# Compare the pre-merge (.x) and post-merge (.y) versions of a column in a
# merged data.frame.
#
# Args:
#   field_to_check: column name without the .x/.y suffix, e.g. "EXCLUDED".
#   dataframe:      merged data.frame containing <field>.x and <field>.y
#                   columns (defaults to the global `df`).
#
# Returns: a list with `where` (row indices at which the two versions differ;
#   rows where either side is NA are not flagged) and `fields` (those rows,
#   restricted to the identifying columns and the two versions being compared).
checking_differences <- function(field_to_check, dataframe=df) {
  x <- paste0(field_to_check, '.x')
  y <- paste0(field_to_check, '.y')
  # BUG FIX: the original ignored the `dataframe` argument and always read the
  # global `df`; use the argument so callers can pass in other merged frames.
  where <- which(dataframe[x] != dataframe[y])
  return(list(where=where, fields=dataframe[where, c('FieldID', 'Field.x', 'Field.y', x, y)]))
}
# Manually checked through, and things have nicely matched up, with the .y variables being updated versions of the .x variables.
# To merge, we can simply remove all instances of the .x variables, replacing them with the .y variables.
# Before we do that, find out which phenotypes have been added, and write this subset to disk to determine which of these to include.
# Rows only in the new showcase (added) have NA in Field.x; rows only in the
# old file (removed) have NA in Field.y.
df_added <- df[is.na(df$Field.x),]
df_removed <- df[is.na(df$Field.y),]
# Now write the FieldID and the Field to disk.
names(df_added)[names(df_added) == 'Field.y'] <- 'Field'
names(df_removed)[names(df_removed) == 'Field.x'] <- 'Field'
df_to_check <- rbind(df_added[,c('FieldID', 'Field')], df_removed[,c('FieldID', 'Field')])
fwrite(df_to_check, sep='\t', file='variable-info/new_phenotypes.tsv')
# We then add an 'EXCLUDED' column and fill it in manually.
# Drop every .x column and strip the .y suffix, keeping the newer showcase values.
df <- df[,-grep('\\.x', names(df))]
names(df) <- gsub('\\.y', '', names(df))
# Now, remove all the column names that are no longer present in the variable info file.
# The columns that remain are the new columns (or columns that didn't change), and the extra columns that PHESANT requires.
# Now, we're done...let's just double check that this matches the type of file expected by PHESANT.
df <- merge(x=PH_df, y=df, by="FieldID", all=TRUE)
# Interactive sanity checks: inspect any disagreement between the PHESANT
# curation columns (.x) and the freshly merged values (.y).
checking_differences('TRAIT_OF_INTEREST')
checking_differences('CAT_MULT_INDICATOR_FIELDS')
checking_differences('CAT_SINGLE_TO_CAT_MULT')
checking_differences('DATA_CODING')
# Do we believe that our changes are the right ones - if so, we're all good.
# This portion is to check if we've made any mistakes - just sanity checking what we removed.
# There have been some changes - so change to the latest PHESANT version.
exclude <- checking_differences('EXCLUDED')$fields
# There are two differences that aren't NEALELAB exclusions, so change these back to being included?
exclude[which(exclude$EXCLUDED.y != "YES-NEALELAB"),]
# No, they're the ICD9 codes.
names(df)[which(names(df) == "EXCLUDED.y")] <- "EXCLUDED"
# Now I can do the same as before to remove unwanted columns.
df <- df[,-grep('\\.x', names(df))]
names(df) <- gsub('\\.y', '', names(df))
# Now read in and merge in the manually curated list of new variables to be excluded.
# Need to manually create this file using the output 'variable-info/new_phenotypes.tsv' file: add in a
# new 'EXCLUDED' column and manually curate.
manual_df <- fread("variable-info/new_phenotypes_march_2019_excluded.tsv", sep='\t', header=TRUE, data.table=FALSE)
df <- merge(df, manual_df, by="FieldID", all=TRUE)
# Double check that there's no overlap in the EXCLUDED - they should be completely disjoint.
# NOTE(review): this condition flags rows where BOTH columns are NA (covered by
# neither file), not rows covered by both - confirm that is the intended check.
if (any(is.na(df$EXCLUDED.y) & is.na(df$EXCLUDED.x)))
  print('ERROR')
# Mark everything the manual curation excluded with this round's label.
# (NA != "" is NA, and which() drops NAs, so untouched rows are left alone.)
df$EXCLUDED.x[which(df$EXCLUDED.y != "")] <- exclusion_name
names(df)[which(names(df) == "EXCLUDED.x")] <- "EXCLUDED"
df <- df[,-grep('\\.y', names(df))]
names(df) <- gsub('\\.x', '', names(df))
# Finally, set 'CODING' equal to the newer 'coding'
df$DATA_CODING <- df$Coding
# Also, need to fill in information for the categorical multiple variables that have been added.
# Write out, and make sure it's tab separated.
fwrite(df, sep='\t', file = "variable-info/outcome_info_final_round3.tsv")
# NOTE: Have to read in with Excel and write to .tsv to get the correct behaviour with PHESANT...
| /WAS/merging_variable_info.r | permissive | astheeggeggs/PHESANT | R | false | false | 4,670 | r | library(data.table)
exclusion_name <- "YES-NEALELAB-MARCH-19"
# The old version of the outcome info file
old_variable_info <- "variable-info/outcome_info_final_round2.tsv"
# The latest version of the Data Dictionary Showcase - download from UKB if updating.
# wget http://biobank.ctsu.ox.ac.uk/%7Ebbdatan/Data_Dictionary_Showcase.csv
new_variable_info <- "Data_Dictionary_Showcase.csv"
PHEASANT_variable_info <- "variable-info/outcome_info_PHESANT_main.tsv"
old_df <- fread(old_variable_info, header=TRUE, data.table=FALSE)
new_df <- fread(new_variable_info, header=TRUE, data.table=FALSE)
# We only read the following in to makes sure that the overall format is right, and we've convinced ourselves that
# we've included/removed the right things.
PH_df <- fread(PHEASANT_variable_info, header=TRUE, data.table=FALSE)
PH_df <- PH_df[,!(names(PH_df) %in%
c("Path", "Category", "Participants", "Items", "Stability",
"ValueType", "Units", "ItemType", "Strata", "Sexed", "Instances",
"Array", "Coding", "Notes", "Link"))]
# Merge the two data frames.
df <- merge(x=old_df, y=new_df, by="FieldID", all=TRUE)
# Compare the pre-merge (.x) and post-merge (.y) versions of a column in a
# merged data.frame.
#
# Args:
#   field_to_check: column name without the .x/.y suffix, e.g. "EXCLUDED".
#   dataframe:      merged data.frame containing <field>.x and <field>.y
#                   columns (defaults to the global `df`).
#
# Returns: a list with `where` (row indices at which the two versions differ;
#   rows where either side is NA are not flagged) and `fields` (those rows,
#   restricted to the identifying columns and the two versions being compared).
checking_differences <- function(field_to_check, dataframe=df) {
  x <- paste0(field_to_check, '.x')
  y <- paste0(field_to_check, '.y')
  # BUG FIX: the original ignored the `dataframe` argument and always read the
  # global `df`; use the argument so callers can pass in other merged frames.
  where <- which(dataframe[x] != dataframe[y])
  return(list(where=where, fields=dataframe[where, c('FieldID', 'Field.x', 'Field.y', x, y)]))
}
# Manually checked through, and things have nicely matched up, with the .y variables being updated versions of the .x variables.
# To merge, we can simply remove all instances of the .x variables, replacing them with the .y variables.
# Before we do that, find out which phenotypes have been added, and write this subset to disk to determine which of these to include.
df_added <- df[is.na(df$Field.x),]
df_removed <- df[is.na(df$Field.y),]
# Now write the FieldID and the Field to disk.
names(df_added)[names(df_added) == 'Field.y'] <- 'Field'
names(df_removed)[names(df_removed) == 'Field.x'] <- 'Field'
df_to_check <- rbind(df_added[,c('FieldID', 'Field')], df_removed[,c('FieldID', 'Field')])
fwrite(df_to_check, sep='\t', file='variable-info/new_phenotypes.tsv')
# We then add an 'EXCLUDED' column and fill it in manually.
df <- df[,-grep('\\.x', names(df))]
names(df) <- gsub('\\.y', '', names(df))
# Now, remove all the column names that are no longer present in the variable info file.
# The columns that remain are the new columns (or columns that didn't change), and the extra columns that PHESANT requires.
# Now, we're done...let's just double check that this matches the type of file expected by PHESANT.
df <- merge(x=PH_df, y=df, by="FieldID", all=TRUE)
checking_differences('TRAIT_OF_INTEREST')
checking_differences('CAT_MULT_INDICATOR_FIELDS')
checking_differences('CAT_SINGLE_TO_CAT_MULT')
checking_differences('DATA_CODING')
# Do we believe that our changes are the right ones - if so, we're all good.
# This portion is to check if we've made any mistakes - just sanity checking what we removed.
# There have been some changes - so change to the latest PHESANT version.
exclude <- checking_differences('EXCLUDED')$fields
# There are two differences that aren't NEALELAB exclusions, so change these back to being included?
exclude[which(exclude$EXCLUDED.y != "YES-NEALELAB"),]
# No, they're the ICD9 codes.
names(df)[which(names(df) == "EXCLUDED.y")] <- "EXCLUDED"
# Now I can do the same as before to remove unwanted columns.
df <- df[,-grep('\\.x', names(df))]
names(df) <- gsub('\\.y', '', names(df))
# Now read in and merge in the manually curated list of new variables to be excluded.
# Need to manually create this file using the output 'variable-info/new_phenotypes.tsv' file: add in a
# new 'EXCLUDED' column and manually curate.
manual_df <- fread("variable-info/new_phenotypes_march_2019_excluded.tsv", sep='\t', header=TRUE, data.table=FALSE)
df <- merge(df, manual_df, by="FieldID", all=TRUE)
# Double check that there's no overlap in the EXCLUDED - they should be completely disjoint.
if (any(is.na(df$EXCLUDED.y) & is.na(df$EXCLUDED.x)))
print('ERROR')
df$EXCLUDED.x[which(df$EXCLUDED.y != "")] <- exclusion_name
names(df)[which(names(df) == "EXCLUDED.x")] <- "EXCLUDED"
df <- df[,-grep('\\.y', names(df))]
names(df) <- gsub('\\.x', '', names(df))
# Finally, set 'CODING' equal to the newer 'coding'
df$DATA_CODING <- df$Coding
# Also, need to fill in information for the categorical multiple variables that have been added.
# Write out, and make sure it's tab separated.
fwrite(df, sep='\t', file = "variable-info/outcome_info_final_round3.tsv")
# NOTE: Have to read in with Excel and write to .tsv to get the correct behavious with PHESANT...
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.regr}
\alias{sql.regr}
\title{sql.regr}
\usage{
sql.regr(x, y, n)
}
\arguments{
\item{x}{= a string vector (independent variable(s))}
\item{y}{= a string (dependent variable)}
\item{n}{= T/F depending on whether there's an intercept term}
}
\description{
SQL for regression coefficients
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocD}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings.bulk}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.tbl}},
\code{\link{sql.ui}}, \code{\link{sql.unbracket}},
\code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.regr}
| /man/sql.regr.Rd | no_license | vsrimurthy/EPFR | R | false | true | 4,894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.regr}
\alias{sql.regr}
\title{sql.regr}
\usage{
sql.regr(x, y, n)
}
\arguments{
\item{x}{= a string vector (independent variable(s))}
\item{y}{= a string (dependent variable)}
\item{n}{= T/F depending on whether there's an intercept term}
}
\description{
SQL for regression coefficients
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocD}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings.bulk}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.tbl}},
\code{\link{sql.ui}}, \code{\link{sql.unbracket}},
\code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.regr}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{plot_projection_features}
\alias{plot_projection_features}
\alias{plot_pca_features}
\alias{plot_sma_features}
\alias{plot_lda_features}
\alias{plot_pls_features}
\title{Plot PCA/PLS/LDA/SMA features}
\usage{
plot_projection_features(
object,
method,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_pca_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_sma_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_lda_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_pls_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
}
\arguments{
\item{object}{SummarizedExperiment}
\item{method}{'pca', 'lda', 'pls', 'sma'}
\item{implementation}{'character' or NULL}
\item{geom}{value in \code{\link[autonomics.plot]{FEATURE_PLOTS}}}
\item{fvars}{fvars used for plot annotation}
\item{dim}{principal component dimension}
\item{n}{number of top features to plot}
\item{na.impute}{TRUE or FALSE}
\item{title}{title}
\item{file}{file}
\item{...}{passed to \code{\link[autonomics.plot]{plot_features}}}
}
\description{
Plots top PCA/PLS/LDA/SMA features.
Uses factor loadings in object when available.
}
\examples{
require(magrittr)
# STEM CELL COMPARISON
if (require(autonomics.data)){
object <- autonomics.data::stemcomp.proteinratios
object \%>\% plot_pca_features(n=9)
object \%>\% plot_pca_features(geom = 'bar')
}
# GLUTAMINASE
if (require(autonomics.data)){
object <- autonomics.data::glutaminase
object \%>\% plot_pca_features()
}
}
| /autonomics.plot/man/plot_projection_features.Rd | no_license | bhagwataditya/autonomics0 | R | false | true | 2,383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{plot_projection_features}
\alias{plot_projection_features}
\alias{plot_pca_features}
\alias{plot_sma_features}
\alias{plot_lda_features}
\alias{plot_pls_features}
\title{Plot PCA/PLS/LDA/SMA features}
\usage{
plot_projection_features(
object,
method,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_pca_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_sma_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_lda_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
plot_pls_features(
object,
implementation = NULL,
geom = default_feature_plots(object)[1],
fvars = default_fvars(object),
dim = 1,
n = 9,
na.impute = FALSE,
title = sprintf("X\%d", dim),
file = NULL,
...
)
}
\arguments{
\item{object}{SummarizedExperiment}
\item{method}{'pca', 'lda', 'pls', 'sma'}
\item{implementation}{'character' or NULL}
\item{geom}{value in \code{\link[autonomics.plot]{FEATURE_PLOTS}}}
\item{fvars}{fvars used for plot annotation}
\item{dim}{principal component dimension}
\item{n}{number of top features to plot}
\item{na.impute}{TRUE or FALSE}
\item{title}{title}
\item{file}{file}
\item{...}{passed to \code{\link[autonomics.plot]{plot_features}}}
}
\description{
Plots top PCA/PLS/LDA/SMA features.
Uses factor loadings in object when available.
}
\examples{
require(magrittr)
# STEM CELL COMPARISON
if (require(autonomics.data)){
object <- autonomics.data::stemcomp.proteinratios
object \%>\% plot_pca_features(n=9)
object \%>\% plot_pca_features(geom = 'bar')
}
# GLUTAMINASE
if (require(autonomics.data)){
object <- autonomics.data::glutaminase
object \%>\% plot_pca_features()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GREA.functions.R
\name{get.genes.within.loci}
\alias{get.genes.within.loci}
\title{get.genes.within.loci
\code{get.genes.within.loci} Given a set of \code{positions}(genomic coordinates), will return a data.frame with the genes within the input \code{positions}}
\usage{
get.genes.within.loci(positions, position.colnames = c("CHR", "BP",
"SNP"), gene.window = 5e+05, gene.info, pVal.col = NULL)
}
\arguments{
\item{positions}{A data.frame with genomic coordinates.}
\item{position.colnames}{Character vector defining the colnames used for defining the genomic coordinates within the \code{positions} data.frame.}
\item{gene.window}{Numeric, which will define the genomic window that will be used to get neighbouring genes given a certain genomic position defined by \code{positions}.}
\item{gene.info}{A data.frame with genomic coordinates and genes. For reference please look at data(gene.info)}
\item{pVal.col}{Character, if provided, then an extra column to the returned data-frame will be added with the pValue of the SNP associated.}
}
\value{
A dataframe
}
\description{
get.genes.within.loci
\code{get.genes.within.loci} Given a set of \code{positions}(genomic coordinates), will return a data.frame with the genes within the input \code{positions}
}
| /man/get.genes.within.loci.Rd | no_license | raguirreg/GREA | R | false | true | 1,344 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GREA.functions.R
\name{get.genes.within.loci}
\alias{get.genes.within.loci}
\title{get.genes.within.loci
\code{get.genes.within.loci} Given a set of \code{positions}(genomic coordinates), will return a data.frame with the genes within the input \code{positions}}
\usage{
get.genes.within.loci(positions, position.colnames = c("CHR", "BP",
"SNP"), gene.window = 5e+05, gene.info, pVal.col = NULL)
}
\arguments{
\item{positions}{A data.frame with genomic coordinates.}
\item{position.colnames}{Character vector defining the colnames used for defining the genomic coordinates within the \code{positions} data.frame.}
\item{gene.window}{Numeric, which will define the genomic window that will be used to get neighbouring genes given a certain genomic position defined by \code{positions}.}
\item{gene.info}{A data.frame with genomic coordinates and genes. For reference please look at data(gene.info)}
\item{pVal.col}{Character, if provided, then an extra column to the returned data-frame will be added with the pValue of the SNP associated.}
}
\value{
A dataframe
}
\description{
get.genes.within.loci
\code{get.genes.within.loci} Given a set of \code{positions}(genomic coordinates), will return a data.frame with the genes within the input \code{positions}
}
|
# Geocode a single street address against the US Census Bureau geocoding API
# ("geographies/address" endpoint, benchmark=4 / vintage=4).
#
# Args:
#   address:   street address, e.g. "1600 Pennsylvania Ave NW".
#   type:      "z" if `secondary` is a zip code; any other value treats
#              `secondary` as a city name.
#   secondary: zip code or city name (interpreted according to `type`).
#   state:     state name or abbreviation.
#
# Returns: a one-row data.frame with columns address, lat, lon, tract and
#   block for the first match; all columns are "" when no match is found.
census_geocoder <- function(address,type,secondary,state){
  library(jsonlite)
  library(RCurl)
  # Build the query string; the API expects "+" in place of spaces.
  addy <- paste0("street=", gsub(" ", "+", address))
  if (type == "z") {
    wild <- paste0("zip=", gsub(" ", "+", secondary))
  } else {
    wild <- paste0("city=", gsub(" ", "+", secondary))
  }
  state <- paste0("state=", gsub(" ", "+", state))
  string <- paste0("https://geocoding.geo.census.gov/geocoder/geographies/address?",
                   addy, "&", wild, "&", state,
                   "&benchmark=4&vintage=4&format=json")
  json_file <- fromJSON(getURL(string))
  # One-row frame returned whenever the API yields no usable match.
  no_match <- data.frame(
    address = "",
    lat = "",
    lon = "",
    tract = "",
    block = "")
  # BUG FIX: the original had no else on the outer check, so a response with
  # zero address matches silently returned NULL instead of the empty frame.
  # Both no-match paths now return `no_match`.
  if (length(json_file$result$addressMatches$coordinates) > 0 &&
      !is.null(json_file$result$addressMatches$coordinates$x[1])) {
    # Keep the first match only: address, lat, lon, tract, block.
    data.frame(
      address = as.character(data.frame(json_file$result$addressMatches$matchedAddress)[1,]),
      lat = as.character(json_file$result$addressMatches$coordinates$y[1]),
      lon = as.character(json_file$result$addressMatches$coordinates$x[1]),
      tract = data.frame(json_file$result$addressMatches$geographies$`Census Tracts`)$GEOID[1],
      block = data.frame(json_file$result$addressMatches$geographies$`2010 Census Blocks`)[1, c("GEOID")])
  } else {
    print("no result")
    no_match
  }
}
| /wrapper.R | no_license | SigmaMonstR/census-geocoder | R | false | false | 1,601 | r |
# Geocode a single street address against the US Census Bureau geocoding API
# ("geographies/address" endpoint, benchmark=4 / vintage=4).
#
# Args:
#   address:   street address, e.g. "1600 Pennsylvania Ave NW".
#   type:      "z" if `secondary` is a zip code; any other value treats
#              `secondary` as a city name.
#   secondary: zip code or city name (interpreted according to `type`).
#   state:     state name or abbreviation.
#
# Returns: a one-row data.frame with columns address, lat, lon, tract and
#   block for the first match; all columns are "" when no match is found.
census_geocoder <- function(address,type,secondary,state){
  library(jsonlite)
  library(RCurl)
  # Build the query string; the API expects "+" in place of spaces.
  addy <- paste0("street=", gsub(" ", "+", address))
  if (type == "z") {
    wild <- paste0("zip=", gsub(" ", "+", secondary))
  } else {
    wild <- paste0("city=", gsub(" ", "+", secondary))
  }
  state <- paste0("state=", gsub(" ", "+", state))
  string <- paste0("https://geocoding.geo.census.gov/geocoder/geographies/address?",
                   addy, "&", wild, "&", state,
                   "&benchmark=4&vintage=4&format=json")
  json_file <- fromJSON(getURL(string))
  # One-row frame returned whenever the API yields no usable match.
  no_match <- data.frame(
    address = "",
    lat = "",
    lon = "",
    tract = "",
    block = "")
  # BUG FIX: the original had no else on the outer check, so a response with
  # zero address matches silently returned NULL instead of the empty frame.
  # Both no-match paths now return `no_match`.
  if (length(json_file$result$addressMatches$coordinates) > 0 &&
      !is.null(json_file$result$addressMatches$coordinates$x[1])) {
    # Keep the first match only: address, lat, lon, tract, block.
    data.frame(
      address = as.character(data.frame(json_file$result$addressMatches$matchedAddress)[1,]),
      lat = as.character(json_file$result$addressMatches$coordinates$y[1]),
      lon = as.character(json_file$result$addressMatches$coordinates$x[1]),
      tract = data.frame(json_file$result$addressMatches$geographies$`Census Tracts`)$GEOID[1],
      block = data.frame(json_file$result$addressMatches$geographies$`2010 Census Blocks`)[1, c("GEOID")])
  } else {
    print("no result")
    no_match
  }
}
|
\name{model.matrixBayes}
%\docType{genericFunction}
\alias{model.matrixBayes}
\title{Construct Design Matrices}
\description{
\code{model.matrixBayes} creates a design matrix.
}
\usage{
model.matrixBayes(object, data = environment(object),
contrasts.arg = NULL, xlev = NULL, keep.order = FALSE, drop.baseline=FALSE,...)
%model.matrix.bayes.h(object, data = environment(object),
% contrasts.arg = NULL, xlev = NULL, keep.order = FALSE, batch = NULL, ...)
}
\arguments{
\item{object}{an object of an appropriate class. For the default
method, a model formula or terms object.}
\item{data}{a data frame created with \code{\link{model.frame}}. If
another sort of object, \code{model.frame} is called first.}
\item{contrasts.arg}{A list, whose entries are contrasts suitable for
input to the \code{\link{contrasts}} replacement function and
whose names are the names of columns of \code{data} containing
\code{\link{factor}}s.}
\item{xlev}{to be used as argument of \code{\link{model.frame}} if
\code{data} has no \code{"terms"} attribute.}
\item{keep.order}{a logical value indicating whether the terms should
keep their positions. If \code{FALSE} the terms are reordered so
that main effects come first, followed by the interactions,
all second-order, all third-order and so on. Effects of a given
order are kept in the order specified.}
\item{drop.baseline}{Drop the base level of categorical Xs, default is TRUE.}
% \item{batch}{Not implement yet!}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
\code{model.matrixBayes} is adapted from \code{model.matrix} in the \code{stats}
pacakge and is designed for the use of \code{\link{bayesglm}}.% and \code{bayesglm.hierachical} (not yet implemented!).
It is designed to keep baseline levels of all categorical varaibles and keep the
variable names unodered in the output. The design matrices created by
\code{model.matrixBayes} are unidentifiable using classical regression methods,
though; they can be identified using \code{\link{bayesglm}}.% and
%\code{bayesglm.hierachical}.
}
\references{Andrew Gelman, Aleks Jakulin, Maria Grazia Pittau and Yu-Sung Su. (2009).
\dQuote{A Weakly Informative Default Prior Distribution For
Logistic And Other Regression Models.}
\emph{The Annals of Applied Statistics} 2 (4): 1360--1383.
\url{http://www.stat.columbia.edu/~gelman/research/published/priors11.pdf}
}
\seealso{
\code{\link[stats]{model.frame}}, \code{\link[stats]{model.extract}},
\code{\link[stats]{terms}}, \code{\link[stats]{terms.formula}},
\code{\link{bayesglm}}.
}
\author{Yu-Sung Su \email{suyusung@tsinghua.edu.cn}}
\examples{
ff <- log(Volume) ~ log(Height) + log(Girth)
str(m <- model.frame(ff, trees))
(model.matrix(ff, m))
class(ff) <- c("bayesglm", "terms", "formula")
(model.matrixBayes(ff, m))
%class(ff) <- c("bayesglm.h", "terms", "formula")
%(model.matrixBayes(ff, m))
}
\keyword{models}
\keyword{manip}
| /man/model.matrixBayes.Rd | no_license | suyusung/arm | R | false | false | 3,103 | rd | \name{model.matrixBayes}
%\docType{genericFunction}
\alias{model.matrixBayes}
\title{Construct Design Matrices}
\description{
\code{model.matrixBayes} creates a design matrix.
}
\usage{
model.matrixBayes(object, data = environment(object),
contrasts.arg = NULL, xlev = NULL, keep.order = FALSE, drop.baseline=FALSE,...)
%model.matrix.bayes.h(object, data = environment(object),
% contrasts.arg = NULL, xlev = NULL, keep.order = FALSE, batch = NULL, ...)
}
\arguments{
\item{object}{an object of an appropriate class. For the default
method, a model formula or terms object.}
\item{data}{a data frame created with \code{\link{model.frame}}. If
another sort of object, \code{model.frame} is called first.}
\item{contrasts.arg}{A list, whose entries are contrasts suitable for
input to the \code{\link{contrasts}} replacement function and
whose names are the names of columns of \code{data} containing
\code{\link{factor}}s.}
\item{xlev}{to be used as argument of \code{\link{model.frame}} if
\code{data} has no \code{"terms"} attribute.}
\item{keep.order}{a logical value indicating whether the terms should
keep their positions. If \code{FALSE} the terms are reordered so
that main effects come first, followed by the interactions,
all second-order, all third-order and so on. Effects of a given
order are kept in the order specified.}
\item{drop.baseline}{Drop the base level of categorical Xs, default is TRUE.}
% \item{batch}{Not implement yet!}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
\code{model.matrixBayes} is adapted from \code{model.matrix} in the \code{stats}
pacakge and is designed for the use of \code{\link{bayesglm}}.% and \code{bayesglm.hierachical} (not yet implemented!).
It is designed to keep baseline levels of all categorical varaibles and keep the
variable names unodered in the output. The design matrices created by
\code{model.matrixBayes} are unidentifiable using classical regression methods,
though; they can be identified using \code{\link{bayesglm}}.% and
%\code{bayesglm.hierachical}.
}
\references{Andrew Gelman, Aleks Jakulin, Maria Grazia Pittau and Yu-Sung Su. (2009).
\dQuote{A Weakly Informative Default Prior Distribution For
Logistic And Other Regression Models.}
\emph{The Annals of Applied Statistics} 2 (4): 1360--1383.
\url{http://www.stat.columbia.edu/~gelman/research/published/priors11.pdf}
}
\seealso{
\code{\link[stats]{model.frame}}, \code{\link[stats]{model.extract}},
\code{\link[stats]{terms}}, \code{\link[stats]{terms.formula}},
\code{\link{bayesglm}}.
}
\author{Yu-Sung Su \email{suyusung@tsinghua.edu.cn}}
\examples{
ff <- log(Volume) ~ log(Height) + log(Girth)
str(m <- model.frame(ff, trees))
(model.matrix(ff, m))
class(ff) <- c("bayesglm", "terms", "formula")
(model.matrixBayes(ff, m))
%class(ff) <- c("bayesglm.h", "terms", "formula")
%(model.matrixBayes(ff, m))
}
\keyword{models}
\keyword{manip}
|
data <- read.table("household_power_consumption.txt", header=T, sep=";",
na.strings="?", nrows=1000000, stringsAsFactors=F)
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- as.Date(data$DateTime, format = "%d/%m/%Y %H:%M:%S")
library(dplyr)
subsetData <- filter(data, DateTime >= as.Date("2007-02-01 00:00:00"),
DateTime < as.Date("2007-02-03 00:00:00"))
png("plot2.png", width=400, height=500)
plot(subsetData$Global_active_power, type="l", xaxt='n',
ylab='Global Active Power (kilowatts)', xlab='')
axis(1, at=c(1, 1440, 2880), labels=c("Thu", "Fri", "Sat"))
dev.off() | /plot2.R | no_license | kpal/ExData_Plotting1 | R | false | false | 633 | r | data <- read.table("household_power_consumption.txt", header=T, sep=";",
na.strings="?", nrows=1000000, stringsAsFactors=F)
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- as.Date(data$DateTime, format = "%d/%m/%Y %H:%M:%S")
library(dplyr)
subsetData <- filter(data, DateTime >= as.Date("2007-02-01 00:00:00"),
DateTime < as.Date("2007-02-03 00:00:00"))
png("plot2.png", width=400, height=500)
plot(subsetData$Global_active_power, type="l", xaxt='n',
ylab='Global Active Power (kilowatts)', xlab='')
axis(1, at=c(1, 1440, 2880), labels=c("Thu", "Fri", "Sat"))
dev.off() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.